text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
"""
Tests for building models for parameter selection
"""
from collections import OrderedDict
import symengine
from pycalphad import variables as v
from espei.parameter_selection.model_building import build_feature_sets, build_candidate_models
from espei.sublattice_tools import generate_symmetric_group, sorted_interactions
def test_build_feature_sets_generates_desired_binary_features_for_cp_like():
    """Binary feature sets can be correctly generated for heat capacity-like features"""
    YS, Z = symengine.Symbol("YS"), symengine.Symbol("Z")
    temperature_features = [v.T, v.T**2, 1/v.T, v.T**3]
    interaction_features = [YS, YS*Z, YS*Z**2, YS*Z**3]
    candidate_sets = build_feature_sets(temperature_features, interaction_features)
    assert len(candidate_sets) == 340
    # Smallest candidate: a single T-linear excess term.
    assert candidate_sets[0] == [v.T*YS]
    assert candidate_sets[5] == [v.T*YS, v.T*YS*Z, v.T**2*YS*Z]
    # Largest candidate: the full outer product of both feature lists.
    assert candidate_sets[-1] == [
        v.T * YS, v.T**2 * YS, 1/v.T * YS, v.T**3 * YS,
        v.T * YS * Z, v.T**2 * YS * Z, 1/v.T * YS * Z, v.T**3 * YS * Z,
        v.T * YS * Z**2, v.T**2 * YS * Z**2, 1/v.T * YS * Z**2, v.T**3 * YS * Z**2,
        v.T * YS * Z**3, v.T**2 * YS * Z**3, 1/v.T * YS * Z**3, v.T**3 * YS * Z**3,
    ]
def test_binary_candidate_models_are_constructed_correctly():
    """Candidate models should be generated for all valid combinations of possible models in the binary case"""
    # Feature universe per thermodynamic property: heat capacity gets two
    # temperature features; entropy and enthalpy get one each.
    features = OrderedDict([("CPM_FORM",
                             (v.T*symengine.log(v.T), v.T**2)),
                            ("SM_FORM", (v.T,)),
                            ("HM_FORM", (symengine.S.One,))
                            ])
    YS = symengine.Symbol('YS')
    Z = symengine.Symbol('Z')
    candidate_models = build_candidate_models((('A', 'B'), 'A'), features)
    # Candidates are ordered by increasing Redlich-Kister interaction order
    # (powers of Z) and, within each order, by which temperature features of
    # each interaction term are included.
    assert candidate_models == OrderedDict([
        ('CPM_FORM', [
            [v.T*YS*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T**2*YS],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T), v.T*YS*Z**3*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T), v.T*YS*Z**3*symengine.log(v.T), v.T**2*YS*Z**3],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2, v.T*YS*Z**3*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2, v.T*YS*Z**3*symengine.log(v.T), v.T**2*YS*Z**3],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T), v.T*YS*Z**3*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T), v.T*YS*Z**3*symengine.log(v.T), v.T**2*YS*Z**3],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2, v.T*YS*Z**3*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2, v.T*YS*Z**3*symengine.log(v.T), v.T**2*YS*Z**3],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T), v.T*YS*Z**3*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T), v.T*YS*Z**3*symengine.log(v.T), v.T**2*YS*Z**3],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2, v.T*YS*Z**3*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2, v.T*YS*Z**3*symengine.log(v.T), v.T**2*YS*Z**3],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T), v.T*YS*Z**3*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T), v.T*YS*Z**3*symengine.log(v.T), v.T**2*YS*Z**3],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2, v.T*YS*Z**3*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2, v.T*YS*Z**3*symengine.log(v.T), v.T**2*YS*Z**3]
        ]),
        ('SM_FORM', [
            [v.T*YS],
            [v.T*YS, v.T*YS*Z],
            [v.T*YS, v.T*YS*Z, v.T*YS*Z**2],
            [v.T*YS, v.T*YS*Z, v.T*YS*Z**2, v.T*YS*Z**3]
        ]),
        ('HM_FORM', [
            [YS],
            [YS, YS*Z],
            [YS, YS*Z, YS*Z**2],
            [YS, YS*Z, YS*Z**2, YS*Z**3]
        ])
    ])
def test_ternary_candidate_models_are_constructed_correctly():
    """Candidate models should be generated for all valid combinations of possible models in the ternary case"""
    features = OrderedDict([("CPM_FORM",
                             (v.T*symengine.log(v.T), v.T**2)),
                            ("SM_FORM", (v.T,)),
                            ("HM_FORM", (symengine.S.One,))
                            ])
    YS = symengine.Symbol('YS')
    # V_I/V_J/V_K are the ternary (Muggianu-style) composition symbols; the
    # symmetric candidate includes one term per vertex of the ternary.
    V_I, V_J, V_K = symengine.Symbol('V_I'), symengine.Symbol('V_J'), symengine.Symbol('V_K')
    candidate_models = build_candidate_models((('A', 'B', 'C'), 'A'), features)
    # For each property: first the symmetric (YS-only) candidates, then the
    # asymmetric candidates with one term per V_I/V_J/V_K.
    assert candidate_models == OrderedDict([
        ('CPM_FORM', [
            [v.T*YS*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T**2*YS],
            [v.T*V_I*YS*symengine.log(v.T), v.T*V_J*YS*symengine.log(v.T), v.T*V_K*YS*symengine.log(v.T)],
            [v.T*V_I*YS*symengine.log(v.T), v.T**2*V_I*YS, v.T*V_J*YS*symengine.log(v.T), v.T**2*V_J*YS, v.T*V_K*YS*symengine.log(v.T), v.T**2*V_K*YS],
        ]),
        ('SM_FORM', [
            [v.T*YS],
            [v.T*V_I*YS, v.T*V_J*YS, v.T*V_K*YS]
        ]),
        ('HM_FORM', [
            [YS],
            [V_I*YS, V_J*YS, V_K*YS]
        ])
    ])
def test_symmetric_group_can_be_generated_for_2_sl_mixing_with_symmetry():
    """A phase with two sublattices that are mixing should generate a cross interaction"""
    config = (('AL', 'CO'), ('AL', 'CO'))
    # Both sublattices are equivalent under the [[0, 1]] symmetry, so the
    # symmetric group collapses to the configuration itself.
    assert generate_symmetric_group(config, [[0, 1]]) == [(('AL', 'CO'), ('AL', 'CO'))]
def test_symmetric_group_can_be_generated_for_2_sl_endmembers_with_symmetry():
    """A phase with symmetric sublattices should find a symmetric endmember """
    group = generate_symmetric_group(('AL', 'CO'), [[0, 1]])
    # Swapping the two equivalent sublattices yields the mirrored endmember.
    assert group == [('AL', 'CO'), ('CO', 'AL')]
def test_interaction_sorting_is_correct():
    """High order (order >= 3) interactions should sort correctly"""
    # Correct sorting of n-order interactions should sort first by number of
    # interactions of order n, then n-1, then n-2... to 1
    unsorted_interactions = [
        ('AL', ('AL', 'CO', 'CR')),
        (('AL', 'CO'), ('AL', 'CO', 'CR')),
        (('AL', 'CO', 'CR'), ('AL', 'CO', 'CR')),
        (('AL', 'CO', 'CR'), 'AL'),
        (('AL', 'CO', 'CR'), ('AL', 'CO')),
        (('AL', 'CO', 'CR'), ('AL', 'CR')),
        (('AL', 'CO', 'CR'), 'CO'),
        (('AL', 'CO', 'CR'), ('CO', 'CR')),
        (('AL', 'CO', 'CR'), 'CR'),
        (('AL', 'CR'), ('AL', 'CO', 'CR')),
        ('CO', ('AL', 'CO', 'CR')),
        (('CO', 'CR'), ('AL', 'CO', 'CR')),
        ('CR', ('AL', 'CO', 'CR')),
    ]
    interactions = sorted_interactions(unsorted_interactions, max_interaction_order=3, symmetry=None)
    # the numbers are the different sort scores. Two of the same sort scores mean
    # the order doesn't matter
    # NOTE(review): per the expected output, only the 6 configurations with a
    # single ternary-interacting sublattice survive; inputs where a second
    # sublattice also interacts are filtered out by sorted_interactions.
    assert interactions == [
        ('AL', ('AL', 'CO', 'CR')),  # (1, 0, 1)
        (('AL', 'CO', 'CR'), 'AL'),  # (1, 0, 1)
        (('AL', 'CO', 'CR'), 'CO'),  # (1, 0, 1)
        (('AL', 'CO', 'CR'), 'CR'),  # (1, 0, 1)
        ('CO', ('AL', 'CO', 'CR')),  # (1, 0, 1)
        ('CR', ('AL', 'CO', 'CR')),  # (1, 0, 1)
    ]
|
PhasesResearchLab/ESPEI
|
tests/test_model_building.py
|
Python
|
mit
| 9,390
|
[
"pycalphad"
] |
53f02aa9a222e69a21130e2418733c9762a4f79d45208bd657c006a59f81e461
|
import os
import pandas as pd
import pdb
import math
import time
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from models import AD_View
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn import ensemble
from sklearn import metrics
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.ensemble import IsolationForest
from sklearn.covariance import EllipticEnvelope
from sklearn.mixture import GaussianMixture
from sklearn.neighbors import KNeighborsClassifier
from sklearn.externals import joblib
import gaussian
# Shared RNG seeded for reproducible runs (used by IsolationForest below).
rng = np.random.RandomState(42)
# Columns that must be read back from CSV as objects (strings) instead of
# being inferred as numerics: identifiers and raw coordinates.
DB_TYPES = {'canton_id': object,
            'ogroup': object,
            'district_id': object,
            'mountain_region_id': object,
            'language_region_id': object,
            'job_market_region_id': object,
            'agglomeration_id': object,
            'metropole_region_id': object,
            'tourism_region_id': object,
            'lat': object,
            'long': object}
def get_data(from_db=False):
    """Return the ads dataset.

    When ``from_db`` is true the data is pulled fresh from the database;
    otherwise the locally cached ``all.csv`` snapshot is read with the
    identifier columns forced to object dtype.
    """
    if not from_db:
        return pd.read_csv('all.csv', index_col=0, engine='c', dtype=DB_TYPES)
    return get_from_db()
def get_from_db():
    """Query all AD_View rows from the database, expand the tag column into
    indicator columns, cache the result to ``all.csv`` and return it."""
    db_engine = create_engine(os.environ.get('DATABASE_URL'))
    session = sessionmaker(bind=db_engine)()
    frame = pd.read_sql_query(session.query(AD_View).statement, session.bind)
    frame = transform_tags(frame)
    # Snapshot for get_data(from_db=False) on subsequent runs.
    frame.to_csv('all.csv', header=True, encoding='utf-8')
    return frame
def train_and_evaluate(clf, X_train, y_train, name):
    """Fit ``clf`` on the training data, persist it to ``<name>.pkl`` and
    report its coefficient of determination on the training set."""
    print("Start Fit")
    clf.fit(X_train, y_train)
    print("END Fit")
    joblib.dump(clf, '{}.pkl'.format(name))
    training_score = clf.score(X_train, y_train)
    print("Coefficient of determination on training set: {}".format(training_score))
    # k-fold cross-validation was prototyped here but is currently disabled:
    #cv = KFold(X_train.shape[0], 5, shuffle=True, random_state=33)
    #scores = cross_val_score(clf, X_train, y_train, cv=cv)
    #print("Average coefficient of determination using 5-fold crossvalidation: {}".format(np.mean(scores)))
def load_or_train_clf(clf, X, y, name):
    """Return a fitted classifier, training and caching it on first use.

    If ``<name>.pkl`` exists it is loaded and returned; otherwise ``clf`` is
    fitted on ``(X, y)`` and dumped to that file for next time.
    """
    pickle_path = '{}.pkl'.format(name)
    if os.path.exists(pickle_path):
        print("load {}.pkl".format(name))
        clf = joblib.load(pickle_path)
    else:
        clf.fit(X, y)
        joblib.dump(clf, pickle_path)
    return clf
def get_outliers_gauss(clf, X, y, name):
    """Flag the samples whose absolute prediction error falls in the top 5%.

    Returns ``(outlier_mask, errors)`` where ``errors`` holds the absolute
    residuals of ``clf`` on ``(X, y)`` and ``outlier_mask`` is a boolean array
    marking residuals at or above the 95th percentile.
    """
    errors = abs(clf.predict(X) - y)
    threshold = np.percentile(errors, 95)
    mask = errors >= threshold
    outlier_count = mask[np.where(mask == True)].shape[0]
    print("{} found {} outliers".format(name, outlier_count))
    return (mask, errors)
def isolation_forest(clf, X, name):
    """Predict inlier/outlier labels with a fitted IsolationForest-style model.

    Returns the raw label array (-1 marks an outlier, +1 an inlier) and
    prints how many outliers were found.
    """
    labels = clf.predict(X)
    outlier_count = X[np.where(labels == -1)].shape[0]
    print("{} found {} outliers".format(name, outlier_count))
    return labels
def plot_outliers(X, y, keys, outlieridx, name):
    """Scatter price against every feature, inliers in blue and outliers in red.

    ``outlieridx`` may be a boolean mask (gauss detector: True = outlier) or
    a -1/+1 label array (isolation forest: -1 = outlier); both encodings are
    handled by the combined conditions below. Saves the figure to
    ``outliers_<name>.png``.
    """
    fig = plt.figure(figsize=(20, 140))  # width, height
    for i, key in enumerate(keys):
        ax = fig.add_subplot(36, 2, i+1)
        try:
            # Inliers: mask == False (gauss) or label == 1 (isolation forest);
            # capped at the first 5000 points to keep plotting fast.
            ax.scatter(pd.to_numeric(X[np.where((outlieridx == False) |
                                                (outlieridx == 1))][:5000, i]),
                       y[:5000,],
                       c=(0,0,1),
                       s=1)
            # Outliers: mask == True (gauss) or label == -1 (isolation forest).
            ax.scatter(pd.to_numeric(X[np.where((outlieridx == True) | (outlieridx == -1))][:, i]), y[:len(X[np.where((outlieridx == True) | (outlieridx == -1))]),], c=(1,0,0), s=1)
        except Exception as e:
            # NOTE(review): interactive debugger left in the error path —
            # replace with logging/re-raise before running unattended.
            pdb.set_trace()
        ax.set_ylabel('Price')
        ax.set_xlabel(key)
        ax.set_title('Price - {}'.format(key))
    plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
    plt.savefig('outliers_{}.png'.format(name))
def plot_all(X, y, keys):
    """Scatter price against every feature for the first 2000 samples and
    save the grid of subplots to ``plt.png``."""
    fig = plt.figure(figsize=(8, 20))
    for i, key in enumerate(keys):
        ax = fig.add_subplot(11, 2, i+1)
        ax.scatter(pd.to_numeric(X[:2000, i]), y[:2000,], c=(0,0,1))
        ax.set_ylabel('Price')
        ax.set_xlabel(key)
        ax.set_title('Price - {}'.format(key))
    plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
    plt.savefig('plt.png')
def plot(X, y, outlierIdx):
    """3D scatter of living area vs. price vs. number of rooms, with the
    points selected by ``outlierIdx`` drawn in red over the blue bulk.

    NOTE(review): the figure is neither shown nor saved here; presumably the
    caller invokes ``plt.show()``/``savefig`` afterwards — confirm.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(X.living_area[2000:], y[2000:], X.num_rooms[2000:], c=(0,0,1))
    ax.scatter(X.living_area[outlierIdx], y[outlierIdx], X.num_rooms[outlierIdx],c=(1,0,0))
def transform_tags(dataframe):
    """Expand the ``tags`` list column into one 0/1 indicator column per tag.

    The universe of tags is read from the crawler's tag list file; each row's
    tags are one-hot encoded and merged back on the index, replacing the
    original ``tags`` column.
    """
    with open('../crawler/taglist.txt') as f:
        # Each line looks like "<tag>:<something>"; keep only the tag name.
        search_words = set([x.split(':')[0] for x in f.read().splitlines()])
    template_dict = dict.fromkeys(search_words, 0)
    def transformer(row):
        # Start from the all-zeros template and switch on the row's tags.
        the_dict = template_dict.copy()
        for tag in row.tags:
            the_dict[tag] = 1
        return pd.Series(the_dict)
    tag_columns = dataframe.apply(transformer, axis=1)
    return dataframe.drop(['tags'], axis=1).merge(tag_columns, left_index=True, right_index=True)
def main():
    """Entry point: load the ads dataset, fit outlier detectors, and plot
    the detected outliers per feature."""
    ads = get_data(from_db=True)
    ads = ads.drop(['id'], axis=1)
    # NOTE(review): deliberate debugging short-circuit — everything below
    # this exit is currently unreachable. Remove these two lines to
    # re-enable the training/plotting pipeline.
    import sys
    sys.exit(0)
    # Remove some vars
    # If no renovation was found the last renovation was the build year
    ads.last_renovation_year = ads.last_renovation_year.fillna(ads.build_year)
    ads_cleanup = ads.dropna()
    print(ads_cleanup.shape)
    dv = DictVectorizer(sparse=True)
    dv.fit(ads_cleanup.T.to_dict().values())  # Learn a list of feature name Important Price is present here
    print("Len of features: {}".format(len(dv.feature_names_)))
    X = ads_cleanup.drop(['price_brutto', 'long', 'lat'], axis=1)
    y = ads_cleanup['price_brutto']
    train_X = dv.transform(X.T.to_dict().values())  # Transform feature -> value dicts to array or sparse matrix
    train_y = y.values
    plot_data = X.drop(['otype', 'municipality', 'ogroup'], axis=1)
    #plot_all(plot_data.values, y, plot_data.keys())
    classifiers = {
        'Linear regression': LinearRegression(),
        'Isolation forest': IsolationForest(max_samples=100, contamination=0.01, random_state=rng),
        #'KNeighbors': (n_neighbors=20)
    }
    for name, clf in classifiers.items():
        classifier = load_or_train_clf(clf, train_X, train_y, name)
        if name == 'Isolation forest':
            # IsolationForest labels outliers itself (-1/+1 predictions).
            outIdx = isolation_forest(classifier, train_X, name)
        else:
            # Regression model: outliers are the top-5% absolute residuals.
            outIdx, Error = get_outliers_gauss(classifier, train_X, train_y, name)
        plot_outliers(plot_data.values, y, plot_data.keys(), outIdx, name)
if __name__ == "__main__":
    main()
|
bhzunami/Immo
|
immo/scikit/scripts/anomaly_detection.py
|
Python
|
mit
| 6,624
|
[
"Gaussian"
] |
f553130dbe6d7a3d18967695bc1ff06a5b506cb0ddb6743f37228728e4b38277
|
# !usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2016-09-15 14:50:00
# @Last modified by: Brian Cherinka
# @Last Modified time: 2017-02-10 17:54:00
from __future__ import print_function, division, absolute_import
import distutils
import warnings
from astropy.io import fits
from astropy.wcs import WCS
import numpy as np
import marvin
import marvin.core.exceptions
import marvin.tools.spaxel
import marvin.utils.general.general
import marvin.tools.maps
from marvin.core.core import MarvinToolsClass
from marvin.core.exceptions import MarvinError, MarvinUserWarning
class ModelCube(MarvinToolsClass):
    """A class to interface with MaNGA DAP model cubes.

    This class represents a DAP model cube, initialised either from a file,
    a database, or remotely via the Marvin API.

    Parameters:
        data (``HDUList``, SQLAlchemy object, or None):
            An astropy ``HDUList`` or a SQLAlchemy object of a model cube, to
            be used for initialisation. If ``None``, the normal mode will
            be used (see :ref:`mode-decision-tree`).
        cube (:class:`~marvin.tools.cube.Cube` object):
            The DRP cube object associated with this model cube.
        maps (:class:`~marvin.tools.maps.Maps` object):
            The DAP maps object associated with this model cube. Must match
            the ``bintype``, ``template_kin``, and ``template_pop``.
        filename (str):
            The path of the file containing the model cube to load.
        mangaid (str):
            The mangaid of the model cube to load.
        plateifu (str):
            The plate-ifu of the model cube to load (either ``mangaid`` or
            ``plateifu`` can be used, but not both).
        mode ({'local', 'remote', 'auto'}):
            The load mode to use. See :ref:`mode-decision-tree`.
        bintype (str or None):
            The binning type. For MPL-4, one of the following: ``'NONE',
            'RADIAL', 'STON'`` (if ``None`` defaults to ``'NONE'``).
            For MPL-5 and successive, one of, ``'ALL', 'NRE', 'SPX', 'VOR10'``
            (defaults to ``'ALL'``).
        template_kin (str or None):
            The template use for kinematics. For MPL-4, one of
            ``'M11-STELIB-ZSOL', 'MILES-THIN', 'MIUSCAT-THIN'`` (if ``None``,
            defaults to ``'MIUSCAT-THIN'``). For MPL-5 and successive, the only
            option in ``'GAU-MILESHC'`` (``None`` defaults to it).
        template_pop (str or None):
            A placeholder for a future version in which stellar populations
            are fitted using a different template that ``template_kin``. It
            has no effect for now.
        nsa_source ({'auto', 'drpall', 'nsa'}):
            Defines how the NSA data for this object should loaded when
            ``ModelCube.nsa`` is first called. If ``drpall``, the drpall file
            will be used (note that this will only contain a subset of all the
            NSA information); if ``nsa``, the full set of data from the DB will
            be retrieved. If the drpall file or a database are not available, a
            remote API call will be attempted. If ``nsa_source='auto'``, the
            source will depend on how the ``ModelCube`` object has been
            instantiated. If the cube has ``ModelCube.data_origin='file'``,
            the drpall file will be used (as it is more likely that the user
            has that file in their system). Otherwise, ``nsa_source='nsa'``
            will be assumed. This behaviour can be modified during runtime by
            modifying the ``ModelCube.nsa_mode`` with one of the valid values.
        release (str):
            The MPL/DR version of the data to use.

    Return:
        modelcube:
            An object representing the model cube.
    """

    def __init__(self, *args, **kwargs):
        # Reject positional args and unknown keywords up front so typos fail
        # loudly rather than being silently ignored.
        valid_kwargs = [
            'data', 'cube', 'maps', 'filename', 'mangaid', 'plateifu', 'mode',
            'release', 'bintype', 'template_kin', 'template_pop', 'nsa_source']
        # NOTE(review): assertion message says "Maps" but this is ModelCube —
        # looks copied from Maps; message typo only, logic is correct.
        assert len(args) == 0, 'Maps does not accept arguments, only keywords.'
        for kw in kwargs:
            assert kw in valid_kwargs, 'keyword {0} is not valid'.format(kw)
        if kwargs.pop('template_pop', None):
            warnings.warn('template_pop is not yet in use. Ignoring value.', MarvinUserWarning)
        self.bintype = kwargs.pop('bintype', marvin.tools.maps.__BINTYPES_UNBINNED__)
        self.template_kin = kwargs.pop('template_kin', marvin.tools.maps.__TEMPLATES_KIN_DEFAULT__)
        self.template_pop = None
        super(ModelCube, self).__init__(*args, **kwargs)
        # Checks that DAP is at least MPL-5
        # NOTE(review): only ``import distutils`` is visible at module level;
        # attribute access on distutils.version relies on the submodule having
        # been imported by another module — confirm it is always available.
        MPL5 = distutils.version.StrictVersion('2.0.2')
        if self.filename is None and distutils.version.StrictVersion(self._dapver) < MPL5:
            raise MarvinError('ModelCube requires at least dapver=\'2.0.2\'')
        self._cube = kwargs.pop('cube', None)
        self._maps = kwargs.pop('maps', None)
        # NOTE(review): message typo "on of" (should be "one of") in both
        # asserts below; left untouched here.
        assert self.bintype in marvin.tools.maps.__BINTYPES__, \
            'bintype must be on of {0}'.format(marvin.tools.maps.__BINTYPES__)
        assert self.template_kin in marvin.tools.maps.__TEMPLATES_KIN__, \
            'template_kin must be on of {0}'.format(marvin.tools.maps.__TEMPLATES_KIN__)
        # Populated by the _load_modelcube_from_* methods below.
        self.header = None
        self.wcs = None
        self.shape = None
        self.wavelength = None
        if self.data_origin == 'file':
            self._load_modelcube_from_file()
        elif self.data_origin == 'db':
            self._load_modelcube_from_db()
        elif self.data_origin == 'api':
            self._load_modelcube_from_api()
        else:
            raise MarvinError('data_origin={0} is not valid'.format(self.data_origin))
        # Confirm that drpver and dapver match the ones from the header.
        marvin.tools.maps.Maps._check_versions(self)

    def __repr__(self):
        """Representation for ModelCube."""
        return ('<Marvin ModelCube (plateifu={0}, mode={1}, data_origin={2}, bintype={3}, '
                'template_kin={4})>'
                .format(repr(self.plateifu), repr(self.mode),
                        repr(self.data_origin), repr(self.bintype), repr(self.template_kin)))

    def __getitem__(self, xy):
        """Returns the spaxel for ``(x, y)``"""
        # Indexing follows array (row, column) order, so xy[0] is y and
        # xy[1] is x, measured from the lower-left corner.
        return self.getSpaxel(x=xy[1], y=xy[0], xyorig='lower')

    def _getFullPath(self):
        """Returns the full path of the file in the tree."""
        if not self.plateifu:
            return None
        plate, ifu = self.plateifu.split('-')
        daptype = '{0}-{1}'.format(self.bintype, self.template_kin)
        return super(ModelCube, self)._getFullPath('mangadap5', ifu=ifu,
                                                   drpver=self._drpver,
                                                   dapver=self._dapver,
                                                   plate=plate, mode='LOGCUBE',
                                                   daptype=daptype)

    def download(self):
        """Downloads the cube using sdss_access - Rsync"""
        if not self.plateifu:
            return None
        plate, ifu = self.plateifu.split('-')
        daptype = '{0}-{1}'.format(self.bintype, self.template_kin)
        return super(ModelCube, self).download('mangadap5', ifu=ifu,
                                               drpver=self._drpver,
                                               dapver=self._dapver,
                                               plate=plate, mode='LOGCUBE',
                                               daptype=daptype)

    def _load_modelcube_from_file(self):
        """Initialises a model cube from a file."""
        if self.data is not None:
            assert isinstance(self.data, fits.HDUList), 'data is not an HDUList object'
        else:
            try:
                self.data = fits.open(self.filename)
            except IOError as err:
                raise IOError('filename {0} cannot be found: {1}'.format(self.filename, err))
        self.header = self.data[0].header
        # Drop the spectral (first) axis; shape is the spatial footprint only.
        self.shape = self.data['FLUX'].data.shape[1:]
        self.wcs = WCS(self.data['FLUX'].header)
        self.wavelength = self.data['WAVE'].data
        self.redcorr = self.data['REDCORR'].data
        self.plateifu = self.header['PLATEIFU']
        self.mangaid = self.header['MANGAID']
        # Checks and populates release.
        file_drpver = self.header['VERSDRP3']
        # v1_5_0 was re-tagged as v1_5_1; normalise so the release lookup works.
        file_drpver = 'v1_5_1' if file_drpver == 'v1_5_0' else file_drpver
        file_ver = marvin.config.lookUpRelease(file_drpver)
        assert file_ver is not None, 'cannot find file version.'
        if file_ver != self._release:
            # The file on disk wins over the requested release.
            warnings.warn('mismatch between file version={0} and object release={1}. '
                          'Setting object release to {0}'.format(file_ver, self._release),
                          marvin.core.exceptions.MarvinUserWarning)
            self._release = file_ver
        self._drpver, self._dapver = marvin.config.lookUpVersions(release=self._release)

    def _load_modelcube_from_db(self):
        """Initialises a model cube from the DB."""
        mdb = marvin.marvindb
        plate, ifu = self.plateifu.split('-')
        if not mdb.isdbconnected:
            raise MarvinError('No db connected')
        else:
            datadb = mdb.datadb
            dapdb = mdb.dapdb
        if self.data:
            assert isinstance(self.data, dapdb.ModelCube), \
                'data is not an instance of marvindb.dapdb.ModelCube.'
        else:
            # Initial query for version
            version_query = mdb.session.query(dapdb.ModelCube).join(
                dapdb.File,
                datadb.PipelineInfo,
                datadb.PipelineVersion).filter(
                    datadb.PipelineVersion.version == self._dapver).from_self()
            # Query for model cube parameters
            # Narrow by plate/IFU, file type, and finally binning/template.
            db_modelcube = version_query.join(
                dapdb.File,
                datadb.Cube,
                datadb.IFUDesign).filter(
                    datadb.Cube.plate == plate,
                    datadb.IFUDesign.name == str(ifu)).from_self().join(
                        dapdb.File,
                        dapdb.FileType).filter(dapdb.FileType.value == 'LOGCUBE').join(
                            dapdb.Structure, dapdb.BinType).join(
                                dapdb.Template,
                                dapdb.Structure.template_kin_pk == dapdb.Template.pk).filter(
                                    dapdb.BinType.name == self.bintype,
                                    dapdb.Template.name == self.template_kin).all()
            if len(db_modelcube) > 1:
                raise MarvinError('more than one ModelCube found for '
                                  'this combination of parameters.')
            elif len(db_modelcube) == 0:
                raise MarvinError('no ModelCube found for this combination of parameters.')
            self.data = db_modelcube[0]
        self.header = self.data.file.primary_header
        self.shape = self.data.file.cube.shape.shape
        self.wcs = WCS(self.data.file.cube.wcs.makeHeader())
        # NOTE(review): np.float is removed in NumPy >= 1.24; these two casts
        # would need np.float64 (or float) on modern NumPy — confirm the
        # pinned NumPy version.
        self.wavelength = np.array(self.data.file.cube.wavelength.wavelength, dtype=np.float)
        self.redcorr = np.array(self.data.redcorr[0].value, dtype=np.float)
        self.plateifu = str(self.header['PLATEIFU'].strip())
        self.mangaid = str(self.header['MANGAID'].strip())

    def _load_modelcube_from_api(self):
        """Initialises a model cube from the API."""
        url = marvin.config.urlmap['api']['getModelCube']['url']
        url_full = url.format(name=self.plateifu, bintype=self.bintype,
                              template_kin=self.template_kin)
        try:
            response = self._toolInteraction(url_full)
        except Exception as ee:
            raise MarvinError('found a problem when checking if remote model cube '
                              'exists: {0}'.format(str(ee)))
        data = response.getData()
        self.header = fits.Header.fromstring(data['header'])
        self.shape = tuple(data['shape'])
        self.wcs = WCS(fits.Header.fromstring(data['wcs_header']))
        self.wavelength = np.array(data['wavelength'])
        self.redcorr = np.array(data['redcorr'])
        # The server is authoritative for the binning/template actually used.
        self.bintype = data['bintype']
        self.template_kin = data['template_kin']
        self.plateifu = str(self.header['PLATEIFU'].strip())
        self.mangaid = str(self.header['MANGAID'].strip())

    def getSpaxel(self, x=None, y=None, ra=None, dec=None,
                  spectrum=True, properties=True, **kwargs):
        """Returns the |spaxel| matching certain coordinates.

        The coordinates of the spaxel to return can be input as ``x, y`` pixels
        relative to ``xyorig`` in the cube, or as ``ra, dec`` celestial
        coordinates.

        If ``spectrum=True``, the returned |spaxel| will be instantiated with the
        DRP spectrum of the spaxel for the DRP cube associated with this
        ModelCube. The same is true for ``properties=True`` for the DAP
        properties of the spaxel in the Maps associated with these coordinates.

        Parameters:
            x,y (int or array):
                The spaxel coordinates relative to ``xyorig``. If ``x`` is an
                array of coordinates, the size of ``x`` must much that of
                ``y``.
            ra,dec (float or array):
                The coordinates of the spaxel to return. The closest spaxel to
                those coordinates will be returned. If ``ra`` is an array of
                coordinates, the size of ``ra`` must much that of ``dec``.
            xyorig ({'center', 'lower'}):
                The reference point from which ``x`` and ``y`` are measured.
                Valid values are ``'center'`` (default), for the centre of the
                spatial dimensions of the cube, or ``'lower'`` for the
                lower-left corner. This keyword is ignored if ``ra`` and
                ``dec`` are defined.
            spectrum (bool):
                If ``True``, the |spaxel| will be initialised with the
                corresponding DRP spectrum.
            properties (bool):
                If ``True``, the |spaxel| will be initialised with the
                corresponding DAP properties for this spaxel.
            modelcube (bool):
                If ``True``, the |spaxel| will be initialised with the
                corresponding ModelCube data.

        Returns:
            spaxels (list):
                The |spaxel| objects for this cube/maps corresponding to the
                input coordinates. The length of the list is equal to the
                number of input coordinates.

        .. |spaxel| replace:: :class:`~marvin.tools.spaxel.Spaxel`
        """
        # Always resolve to the unbinned maps/modelcube so the spaxel carries
        # per-spaxel (not per-bin) data.
        kwargs['cube'] = self.cube if spectrum else False
        kwargs['maps'] = self.maps.get_unbinned() if properties else False
        kwargs['modelcube'] = self.get_unbinned()
        return marvin.utils.general.general.getSpaxel(x=x, y=y, ra=ra, dec=dec, **kwargs)

    def _return_extension(self, extension):
        # Dispatch on data origin; full-cube access is not available remotely.
        if self.data_origin == 'file':
            return self.data[extension.upper()].data
        elif self.data_origin == 'db':
            return self.data.get3DCube(extension.lower())
        elif self.data_origin == 'api':
            raise MarvinError('cannot return a full cube in remote mode. '
                              'Please use getSpaxel.')

    @property
    def flux(self):
        """Returns the flux extension."""
        return self._return_extension('flux')

    @property
    def ivar(self):
        """Returns the ivar extension."""
        return self._return_extension('ivar')

    @property
    def mask(self):
        """Returns the mask extension."""
        return self._return_extension('mask')

    @property
    def model(self):
        """Returns the model extension."""
        return self._return_extension('model')

    @property
    def emline(self):
        """Returns the emline extension."""
        return self._return_extension('emline')

    @property
    def emline_base(self):
        """Returns the emline_base extension."""
        return self._return_extension('emline_base')

    @property
    def emline_mask(self):
        """Returns the emline_mask extension."""
        return self._return_extension('emline_mask')

    @property
    def stellar_continuum(self):
        """Returns the stellar continuum cube."""
        # Derived: full model minus the emission-line components.
        return (self._return_extension('model') -
                self._return_extension('emline') -
                self._return_extension('emline_base'))

    @property
    def cube(self):
        """Returns the :class:`~marvin.tools.cube.Cube` associated with this ModelCube."""
        # Lazily instantiated and cached on first access.
        if not self._cube:
            if self.data_origin == 'db':
                cube_data = self.data.file.cube
            else:
                cube_data = None
            self._cube = marvin.tools.cube.Cube(data=cube_data,
                                                plateifu=self.plateifu,
                                                release=self._release)
        return self._cube

    @property
    def maps(self):
        """Returns the :class:`~marvin.tools.mas.Maps` associated with this ModelCube."""
        # Lazily instantiated and cached on first access.
        if not self._maps:
            self._maps = marvin.tools.maps.Maps(plateifu=self.plateifu,
                                                bintype=self.bintype,
                                                template_kin=self.template_kin,
                                                release=self._release)
        return self._maps

    def is_binned(self):
        """Returns True if the ModelCube is not unbinned."""
        # The name of the unbinned bintype differs between MPL-4 and MPL-5+.
        if marvin.tools.maps._is_MPL4(self._dapver):
            return self.bintype != marvin.tools.maps.__BINTYPES_MPL4_UNBINNED__
        else:
            return self.bintype != marvin.tools.maps.__BINTYPES_UNBINNED__

    def get_unbinned(self):
        """Returns a version of ``self`` corresponding to the unbinned ModelCube."""
        if marvin.tools.maps._is_MPL4(self._dapver):
            unbinned_name = marvin.tools.maps.__BINTYPES_MPL4_UNBINNED__
        else:
            unbinned_name = marvin.tools.maps.__BINTYPES_UNBINNED__
        if self.bintype == unbinned_name:
            return self
        else:
            return ModelCube(plateifu=self.plateifu, release=self._release, bintype=unbinned_name,
                             template_kin=self.template_kin, template_pop=self.template_pop,
                             mode=self.mode)
|
bretthandrews/marvin
|
python/marvin/tools/modelcube.py
|
Python
|
bsd-3-clause
| 18,903
|
[
"Brian"
] |
a96d0b76eee667bfe319e64fd705c4f4ca22743109f50009e1dcb198ebf13932
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2014 Brian Douglass bhdouglass@gmail.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from remindor import functions
from .remindor_common.scheduler import GenericScheduler
from agui.extras import Sound, Message, Popup, Icon, Timeout
from agui import Signal
import logging
logger = logging.getLogger('remindor')
class Scheduler(GenericScheduler):
    """Desktop scheduler that wires GenericScheduler callbacks to the UI.

    Bridges reminder events to the app indicator, desktop notifications,
    dialogs, and sound playback through the ``agui`` abstraction layer.
    """

    def __init__(self, indicator):
        GenericScheduler.__init__(self)
        self.indicator = indicator  # app indicator whose icon reflects attention state
        self.updated = Signal()     # emitted whenever the schedule changes
        self.player = None          # currently playing Sound, if any
        # Bug fix: change_icon() reads self.dbus_service, but it was never
        # initialised here, raising AttributeError unless a collaborator
        # assigned it externally. Default to None so the guard below works.
        self.dbus_service = None

    def remove_reminder_helper(self, reminder):
        """Stop a reminder's running timer before it is removed."""
        if reminder.started():
            reminder.stop()

    def change_icon(self):
        """Switch the indicator to its attention state and signal over D-Bus."""
        self.indicator.attention = True
        if self.dbus_service is not None:
            self.dbus_service.emitAttention()

    def clear_icon(self):
        """Restore the indicator to its normal (non-attention) state."""
        self.indicator.attention = False

    def play_sound_helper(self, sound_file, sound_loop, sound_loop_times):
        """Start playing the reminder sound.

        NOTE(review): ``sound_loop`` is accepted but unused; ``Sound`` is only
        given the file and the loop count. Kept for interface compatibility.
        """
        self.player = Sound(sound_file, sound_loop_times)
        self.player.play()

    def stop_sound_helper(self):
        """Stop the currently playing sound, if there is one."""
        if self.player is not None:
            self.player.stop()

    def remove_playing_sound(self):
        """Cancel the timeout that would stop the sound."""
        self.playing_sound.stop()

    def add_playing_sound(self, length):
        """Schedule the sound to be stopped after ``length`` units."""
        self.playing_sound = Timeout(length, self.stop_sound)
        self.playing_sound.start()

    def popup_notification(self, label, notes):
        """Show a passive desktop notification for the reminder."""
        Popup().popup('remindor', label, notes, functions.logo_icon())

    def popup_dialog(self, label, notes):
        """Show a blocking message dialog for the reminder."""
        Message().message('Remindor', label, notes, functions.logo_icon())

    def update_schedule(self):
        """Notify listeners that the schedule has changed."""
        self.updated.emit()

    def add_to_schedule(self, delay, id):
        """Arm an in-process timeout for a reminder firing in ``delay`` seconds.

        Reminders more than 2 days (172800 s) out are not armed now; they
        are expected to be picked up by a later scheduling pass.
        """
        if delay <= 172800:
            timeout = Timeout(delay, self.run_alarm, id)
            self.schedule[str(id)] = timeout
            timeout.start()
        else:
            # Fixed typo in log message ("becacuse" -> "because").
            logger.debug('not adding reminder with id=%s because it is more than 2 days out' % str(id))
|
bhdouglass/remindor
|
remindor/scheduler.py
|
Python
|
gpl-3.0
| 2,615
|
[
"Brian"
] |
ebf2966372728d06996314b70f69785cab7fd7fa69666132fcdb96b886602b17
|
##
# Copyright 2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing ESMF, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import os
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import BUILD
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_ESMF(ConfigureMake):
    """Support for building/installing ESMF."""

    def configure_step(self):
        """Custom configuration procedure for ESMF through environment variables."""
        # install layout
        for var, value in [
            ('ESMF_DIR', self.cfg['start_dir']),
            ('ESMF_INSTALL_PREFIX', self.installdir),
            ('ESMF_INSTALL_BINDIR', 'bin'),
            ('ESMF_INSTALL_LIBDIR', 'lib'),
            ('ESMF_INSTALL_MODDIR', 'mod'),
        ]:
            env.setvar(var, value)

        # compiler: GCC toolchains use gfortran, otherwise the lowercased
        # compiler family name is what ESMF expects
        comp_family = self.toolchain.comp_family()
        if comp_family in [toolchain.GCC]:
            env.setvar('ESMF_COMPILER', 'gfortran')
        else:
            env.setvar('ESMF_COMPILER', comp_family.lower())

        # MPI communications library
        mpi_family = self.toolchain.mpi_family()
        if mpi_family in [toolchain.MPICH, toolchain.QLOGICMPI]:
            # MPICH family for MPICH v3.x, which is MPICH2 compatible
            env.setvar('ESMF_COMM', 'mpich2')
        else:
            env.setvar('ESMF_COMM', mpi_family.lower())

        # use the toolchain-provided (multithreaded) LAPACK
        env.setvar('ESMF_LAPACK', 'user')
        env.setvar('ESMF_LAPACK_LIBS', '%s %s' % (os.getenv('LDFLAGS'), os.getenv('LIBLAPACK_MT')))

        # netCDF support, enabled only when a netCDF module is loaded
        netcdf = get_software_root('netCDF')
        if netcdf:
            env.setvar('ESMF_NETCDF', 'user')
            netcdf_libs = ['-L%s/lib' % netcdf, '-lnetcdf']

            # Fortran bindings: separate netCDF-Fortran install, or bundled
            netcdff = get_software_root('netCDF-Fortran')
            if netcdff:
                netcdf_libs = ["-L%s/lib" % netcdff] + netcdf_libs + ["-lnetcdff"]
            else:
                netcdf_libs.append('-lnetcdff')

            # likewise for the C++ bindings
            netcdfcxx = get_software_root('netCDF-C++')
            if netcdfcxx:
                netcdf_libs = ["-L%s/lib" % netcdfcxx] + netcdf_libs + ["-lnetcdf_c++"]
            else:
                netcdf_libs.append('-lnetcdf_c++')

            env.setvar('ESMF_NETCDF_LIBS', ' '.join(netcdf_libs))

        # 'make info' provides useful debug info
        run_cmd("make info", log_all=True, simple=True, log_ok=True)

    def sanity_check_step(self):
        """Custom sanity check for ESMF."""
        shlib_ext = get_shared_lib_ext()
        binaries = ['ESMF_Info', 'ESMF_InfoC', 'ESMF_RegridWeightGen', 'ESMF_WebServController']
        libraries = ['libesmf.a', 'libesmf.%s' % shlib_ext]
        custom_paths = {
            'files': [os.path.join('bin', b) for b in binaries] +
                     [os.path.join('lib', lib) for lib in libraries],
            'dirs': ['include', 'mod'],
        }
        super(EB_ESMF, self).sanity_check_step(custom_paths=custom_paths)
|
valtandor/easybuild-easyblocks
|
easybuild/easyblocks/e/esmf.py
|
Python
|
gpl-2.0
| 4,293
|
[
"NetCDF"
] |
1adbf9001f951741c42df624eeba550a36dcfac0290ce77689bc631a5d27fd29
|
# -*- coding: utf-8
# pylint: disable=line-too-long
"""
Classes to define and work with anvi'o pangenomics workflows.
"""
import os
import anvio
import anvio.terminal as terminal
from anvio.errors import ConfigError
from anvio.workflows import WorkflowSuperClass
from anvio.workflows.contigs import ContigsDBWorkflow
from anvio.workflows.phylogenomics import PhylogenomicsWorkflow
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2018, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "Alon Shaiber"
__email__ = "alon.shaiber@gmail.com"
run = terminal.Run()
progress = terminal.Progress()
class PangenomicsWorkflow(PhylogenomicsWorkflow, ContigsDBWorkflow, WorkflowSuperClass):
    """Snakemake workflow class for anvi'o pangenomics.

    Extends the contigs-db and phylogenomics workflows with the
    pangenomics-specific rules (genomes storage, pan genome, gene-cluster
    sequences, phylogenetic tree import, genome similarity).
    """

    def __init__(self, args=None, run=terminal.Run(), progress=terminal.Progress()):
        # NOTE(review): the Run()/Progress() defaults are evaluated once and
        # shared across instances — presumably fine for these helpers, but
        # worth confirming.
        self.init_workflow_super_class(args, workflow_name='pangenomics')

        self.pan_project_name = None
        # values accepted for the 'sequence_source_for_phylogeny' config param
        self.valid_sequence_sources_for_phylogeny = ['gene_clusters', 'hmm']
        self.sequence_source_for_phylogeny = None
        self.tree_name = None

        # initialize the base class
        PhylogenomicsWorkflow.__init__(self)

        # rules contributed by this workflow
        self.rules.extend(['anvi_gen_genomes_storage',
                           'anvi_pan_genome',
                           'anvi_get_sequences_for_gene_clusters',
                           'import_phylogenetic_tree_to_pangenome',
                           'anvi_compute_genome_similarity'])

        self.general_params.extend(["project_name",
                                    "fasta_txt",
                                    "internal_genomes",
                                    "external_genomes",
                                    "sequence_source_for_phylogeny"])

        self.dirs_dict.update({"FASTA_DIR": "01_FASTA",
                               "CONTIGS_DIR": "02_CONTIGS",
                               "PAN_DIR": "03_PAN"})

        self.default_config.update({"fasta_txt": "fasta.txt",
                                    "anvi_pan_genome": {"threads": 7},
                                    "import_phylogenetic_tree_to_pangenome": {'tree_name': 'phylogeny'},
                                    "anvi_compute_genome_similarity": {"run": False}})

        # command-line flags users may set per rule in the config file
        pan_params = ["--project-name", "--genome-names", "--skip-alignments",\
                      "--align-with", "--exclude-partial-gene-calls", "--use-ncbi-blast",\
                      "--minbit", "--mcl-inflation", "--min-occurrence",\
                      "--min-percent-identity", "--sensitive", "--description",\
                      "--overwrite-output-destinations", "--skip-hierarchical-clustering",\
                      "--enforce-hierarchical-clustering", "--distance", "--linkage"]
        self.rule_acceptable_params_dict['anvi_pan_genome'] = pan_params
        storage_params = ["--gene-caller"]
        self.rule_acceptable_params_dict['anvi_gen_genomes_storage'] = storage_params
        seq_params = ["--gene-cluster-id", "--gene-cluster-ids-file",
                      "--collection-name", "--bin-id",
                      "--min-num-genomes-gene-cluster-occurs", "--max-num-genomes-gene-cluster-occurs",
                      "--min-num-genes-from-each-genome", "--max-num-genes-from-each-genome",
                      "--max-num-gene-clusters-missing-from-genome", "--min-functional-homogeneity-index",
                      "--max-functional-homogeneity-index", "--min-geometric-homogeneity-index",
                      "--max-geometric-homogeneity-index", "--add-into-items-additional-data-table",
                      "--concatenate-gene-clusters", "--separator", "--align-with"]
        self.rule_acceptable_params_dict['anvi_get_sequences_for_gene_clusters'] = seq_params
        import_params = ['--just-do-it', 'tree_name']
        self.rule_acceptable_params_dict['import_phylogenetic_tree_to_pangenome'] = import_params
        self.rule_acceptable_params_dict['anvi_compute_genome_similarity'] = ['run', 'additional_params']

    def init(self):
        '''Backend setup (mostly derived paths and sanity checks) specific to the pangenomics workflow.'''
        super().init()

        self.internal_genomes_file = self.get_param_value_from_config('internal_genomes')
        self.external_genomes_file = self.get_param_value_from_config('external_genomes')
        self.input_for_anvi_gen_genomes_storage = self.get_internal_and_external_genomes_files()
        self.project_name = self.get_param_value_from_config("project_name")

        # the pan project name defaults to the global project name unless the
        # user explicitly sets --project-name for the anvi_pan_genome rule
        self.pan_project_name = self.get_param_value_from_config(["anvi_pan_genome", "--project-name"])
        if self.pan_project_name:
            run.warning("You chose to set the '--project-name' parameter for 'anvi_pan_genome'. That is OK. "
                        "But just so you know, if you haven't supplied this, then we would have taken the value "
                        "from 'project_name' in your config file to also be the project name for 'anvi_pan_genome'.")
        else:
            self.pan_project_name = self.project_name

        self.sequence_source_for_phylogeny = self.get_param_value_from_config('sequence_source_for_phylogeny')
        if self.sequence_source_for_phylogeny == 'gene_clusters':
            # use gene-cluster sequences (not HMMs) as phylogenomics input
            GC_sequences = os.path.join(self.dirs_dict["PHYLO_DIR"], self.project_name + "-GC-sequences.fa")
            self.use_hmms_for_phylogeny = False
            self.phylogenomics_sequence_file = GC_sequences

        self.tree_name = self.get_param_value_from_config(['import_phylogenetic_tree_to_pangenome', 'tree_name'])
        self.pan_db_path = os.path.join(self.dirs_dict["PAN_DIR"], self.pan_project_name + "-PAN.db")

        self.input_for_anvi_compute_genome_similarity = {"pan_db": self.pan_db_path}
        self.input_for_anvi_compute_genome_similarity.update(self.get_internal_and_external_genomes_files())
        # NOTE(review): unlike the other derived paths, there is no '-'
        # separator before 'anvi_compute_genome_similarity.done' — confirm
        # this flag filename is intended.
        self.anvi_compute_genome_similarity_flag = os.path.join(self.dirs_dict["PAN_DIR"], self.project_name + "anvi_compute_genome_similarity.done")
        self.anvi_compute_genome_similarity_output_dir = os.path.join(self.dirs_dict["PAN_DIR"], self.project_name + "-ANI-OUTPUT")

    def get_pangenomics_target_files(self):
        """Return the list of final output files snakemake should produce."""
        target_files = []

        target_files.append(os.path.join(self.dirs_dict["PAN_DIR"], self.pan_project_name + "-PAN.db"))

        if self.sequence_source_for_phylogeny:
            target_files.append(self.get_phylogeny_imported_flag())

        if self.get_param_value_from_config(['anvi_compute_genome_similarity', 'run']):
            target_files.append(self.anvi_compute_genome_similarity_flag)

        return target_files

    def sanity_checks(self):
        """Validate the user-provided configuration; raise ConfigError on problems."""
        if (not self.internal_genomes_file) and (not self.external_genomes_file):
            raise ConfigError("You must provide a path to either internal_genomes_file or external_genomes_file "
                              "or both.")

        if not self.project_name:
            raise ConfigError("You must provide a project name in your config file.")

        if self.sequence_source_for_phylogeny:
            if self.sequence_source_for_phylogeny not in self.valid_sequence_sources_for_phylogeny:
                raise ConfigError('%s is not a valid sequence_source_for_phylogeny. '
                                  'We only know: %s' % (self.sequence_source_for_phylogeny,\
                                  ', '.join(self.valid_sequence_sources_for_phylogeny)))

    def get_phylogeny_imported_flag(self):
        """Path of the flag file marking that the phylogenetic tree was imported."""
        return os.path.join(self.dirs_dict["PAN_DIR"], self.project_name + '-' + self.tree_name + "-phylogeny-imported.done")
|
meren/anvio
|
anvio/workflows/pangenomics/__init__.py
|
Python
|
gpl-3.0
| 7,684
|
[
"BLAST"
] |
3c4748cd5996de78e4ea74a2b27db608480ccb95c17effcf7c29a61951930321
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from typing import List, Optional, Tuple
import numpy as np
from scipy.special import erf
from ...quadrature.interfaces.standard_kernels import IRBF
from ...quadrature.kernels.bounds import BoxBounds
from ...quadrature.kernels.integration_measures import IntegrationMeasure, IsotropicGaussianMeasure, UniformMeasure
from .quadrature_kernels import QuadratureKernel
class QuadratureRBF(QuadratureKernel):
    """
    Augments an RBF kernel with integrability

    Note 1: each standard kernel goes with a corresponding quadrature kernel, in this case the standard rbf kernel.
    Note 2: each child of this class implements a unique measure-integralBounds pair
    """

    def __init__(
        self,
        rbf_kernel: IRBF,
        integral_bounds: Optional[List[Tuple[float, float]]],
        measure: Optional[IntegrationMeasure],
        variable_names: str = "",
    ) -> None:
        """
        :param rbf_kernel: standard emukit rbf-kernel
        :param integral_bounds: defines the domain of the integral. List of D tuples, where D is the dimensionality
        of the integral and the tuples contain the lower and upper bounds of the integral
        i.e., [(lb_1, ub_1), (lb_2, ub_2), ..., (lb_D, ub_D)]. None for infinite bounds
        :param measure: an integration measure. None means the standard Lebesgue measure is used.
        :param variable_names: the (variable) name(s) of the integral
        """
        super().__init__(
            kern=rbf_kernel, integral_bounds=integral_bounds, measure=measure, variable_names=variable_names
        )

    @property
    def lengthscale(self):
        # delegate to the wrapped standard RBF kernel
        return self.kern.lengthscale

    @property
    def variance(self):
        # delegate to the wrapped standard RBF kernel
        return self.kern.variance

    def qK(self, x2: np.ndarray) -> np.ndarray:
        """
        RBF kernel with the first component integrated out aka kernel mean

        :param x2: remaining argument of the once integrated kernel, shape (n_point N, input_dim)
        :returns: kernel mean at location x2, shape (1, N)
        """
        # measure/bounds specific — implemented by subclasses
        raise NotImplementedError

    def Kq(self, x1: np.ndarray) -> np.ndarray:
        """
        RBF kernel with the second component integrated out aka kernel mean

        :param x1: remaining argument of the once integrated kernel, shape (n_point N, input_dim)
        :returns: kernel mean at location x1, shape (N, 1)
        """
        # the RBF kernel is symmetric in its arguments, so Kq is qK transposed
        return self.qK(x1).T

    def qKq(self) -> float:
        """
        RBF kernel integrated over both arguments x1 and x2

        :returns: double integrated kernel
        """
        # measure/bounds specific — implemented by subclasses
        raise NotImplementedError

    def dqK_dx(self, x2: np.ndarray) -> np.ndarray:
        """
        gradient of the kernel mean (integrated in first argument) evaluated at x2

        :param x2: points at which to evaluate, shape (n_point N, input_dim)
        :return: the gradient with shape (input_dim, N)
        """
        # measure/bounds specific — implemented by subclasses
        raise NotImplementedError

    def dKq_dx(self, x1: np.ndarray) -> np.ndarray:
        """
        gradient of the kernel mean (integrated in second argument) evaluated at x1

        :param x1: points at which to evaluate, shape (n_point N, input_dim)
        :return: the gradient with shape (N, input_dim)
        """
        # by symmetry of the kernel, the transpose of dqK_dx
        return self.dqK_dx(x1).T

    # rbf-kernel specific helper
    def _scaled_vector_diff(self, v1: np.ndarray, v2: np.ndarray, scale: Optional[float] = None) -> np.ndarray:
        r"""
        Scaled element-wise vector difference between vectors v1 and v2

        .. math::
            \frac{v_1 - v_2}{\lambda \sqrt{2}}

        name mapping:
            \lambda: self.kern.lengthscale

        :param v1: first vector
        :param v2: second vector, must have same second dimensions as v1
        :param scale: the scale, default is the lengthscale of the kernel
        :return: scaled difference between v1 and v2, np.ndarray with unchanged dimensions
        """
        # raw docstring above avoids invalid \f and \l escape sequences
        if scale is None:
            scale = self.lengthscale
        return (v1 - v2) / (scale * np.sqrt(2))
class QuadratureRBFLebesgueMeasure(QuadratureRBF):
    """
    An RBF kernel integrable against the standard Lebesgue measure. Can only be used with finite integral bounds.

    Note that each standard kernel goes with a corresponding quadrature kernel, in this case standard rbf kernel.
    """

    def __init__(self, rbf_kernel: IRBF, integral_bounds: List[Tuple[float, float]], variable_names: str = "") -> None:
        """
        :param rbf_kernel: standard emukit rbf-kernel
        :param integral_bounds: domain of the integral given as D (lower, upper) tuples,
        i.e., [(lb_1, ub_1), (lb_2, ub_2), ..., (lb_D, ub_D)], where D is the dimensionality of the integral
        :param variable_names: the (variable) name(s) of the integral
        """
        super().__init__(
            rbf_kernel=rbf_kernel, integral_bounds=integral_bounds, measure=None, variable_names=variable_names
        )

    def qK(self, x2: np.ndarray) -> np.ndarray:
        """
        Kernel mean: RBF kernel with its first argument integrated out.

        :param x2: remaining argument of the once integrated kernel, shape (n_point N, input_dim)
        :returns: kernel mean at location x2, shape (1, N)
        """
        lo = self.integral_bounds.lower_bounds
        hi = self.integral_bounds.upper_bounds
        erf_diff = erf(self._scaled_vector_diff(hi, x2)) - erf(self._scaled_vector_diff(lo, x2))
        per_dim = self.lengthscale * np.sqrt(np.pi / 2.0) * erf_diff
        return (self.variance * per_dim.prod(axis=1)).reshape(1, -1)

    def qKq(self) -> float:
        """
        RBF kernel integrated over both of its arguments.

        :returns: double integrated kernel
        """
        lo = self.integral_bounds.lower_bounds
        hi = self.integral_bounds.upper_bounds
        d = self._scaled_vector_diff(hi, lo)
        per_dim = (np.exp(-(d ** 2)) - 1.0) + np.sqrt(np.pi) * d * erf(d)
        prefactor = self.variance * (2.0 * self.lengthscale ** 2) ** self.input_dim
        return float(prefactor * per_dim.prod())

    def dqK_dx(self, x2: np.ndarray) -> np.ndarray:
        """
        Gradient of the kernel mean evaluated at x2.

        :param x2: points at which to evaluate, shape (n_point N, input_dim)
        :return: the gradient with shape (input_dim, N)
        """
        lo = self.integral_bounds.lower_bounds
        hi = self.integral_bounds.upper_bounds
        gauss_diff = np.exp(-self._scaled_vector_diff(x2, lo) ** 2) - np.exp(-self._scaled_vector_diff(x2, hi) ** 2)
        erf_diff = erf(self._scaled_vector_diff(hi, x2)) - erf(self._scaled_vector_diff(lo, x2))
        ratio = (gauss_diff / (self.lengthscale * np.sqrt(np.pi / 2.0) * erf_diff)).T
        return self.qK(x2) * ratio
class QuadratureRBFIsoGaussMeasure(QuadratureRBF):
    """
    An RBF kernel integrable against an isotropic Gaussian measure.

    Note that each standard kernel goes with a corresponding quadrature kernel, in this case standard rbf kernel.
    """

    def __init__(self, rbf_kernel: IRBF, measure: IsotropicGaussianMeasure, variable_names: str = "") -> None:
        """
        :param rbf_kernel: standard emukit rbf-kernel
        :param measure: a Gaussian measure
        :param variable_names: the (variable) name(s) of the integral
        """
        super().__init__(rbf_kernel=rbf_kernel, integral_bounds=None, measure=measure, variable_names=variable_names)

    def qK(self, x2: np.ndarray, scale_factor: float = 1.0) -> np.ndarray:
        """
        Kernel mean: RBF kernel with its first argument integrated out.

        :param x2: remaining argument of the once integrated kernel, shape (n_point N, input_dim)
        :param scale_factor: multiplicative factor applied to the RBF lengthscale.
        :returns: kernel mean at location x2, shape (1, N)
        """
        ell = scale_factor * self.lengthscale
        sigma2 = self.measure.variance
        # closed-form Gaussian integral of the RBF kernel
        det_factor = (sigma2 / ell ** 2 + 1) ** (self.input_dim / 2)
        diff = self._scaled_vector_diff(x2, self.measure.mean, np.sqrt(ell ** 2 + sigma2))
        values = (self.variance / det_factor) * np.exp(-np.sum(diff ** 2, axis=1))
        return values.reshape(1, -1)

    def qKq(self) -> float:
        """
        RBF kernel integrated over both of its arguments.

        :returns: double integrated kernel
        """
        denom = (2 * self.measure.variance / self.lengthscale ** 2 + 1) ** (self.input_dim / 2)
        return float(self.variance / denom)

    def dqK_dx(self, x2: np.ndarray) -> np.ndarray:
        """
        Gradient of the kernel mean evaluated at x2.

        :param x2: points at which to evaluate, shape (n_point N, input_dim)
        :return: the gradient with shape (input_dim, N)
        """
        weight = self.qK(x2) / (self.lengthscale ** 2 + self.measure.variance)
        return -weight * (x2 - self.measure.mean).T
class QuadratureRBFUniformMeasure(QuadratureRBF):
    """
    An RBF kernel integrable against a uniform measure. Can be used with finite as well as infinite integral
    bounds.

    Note that each standard kernel goes with a corresponding quadrature kernel, in this case standard rbf kernel.
    """

    def __init__(
        self,
        rbf_kernel: IRBF,
        integral_bounds: Optional[List[Tuple[float, float]]],
        measure: UniformMeasure,
        variable_names: str = "",
    ):
        """
        :param rbf_kernel: standard emukit rbf-kernel
        :param integral_bounds: domain of the integral given as D (lower, upper) tuples,
        i.e., [(lb_1, ub_1), (lb_2, ub_2), ..., (lb_D, ub_D)]. None means infinite bounds.
        :param measure: A D-dimensional uniform measure
        :param variable_names: the (variable) name(s) of the integral
        """
        super().__init__(
            rbf_kernel=rbf_kernel, integral_bounds=integral_bounds, measure=measure, variable_names=variable_names
        )

        # The kernel integrals run over the intersection (per dimension) of the
        # integral bounds and the support of the uniform measure.
        measure_box = measure.get_box()
        if integral_bounds is None:
            bounds = measure_box
        else:
            bounds = [(max(ib[0], mb[0]), min(ib[1], mb[1])) for ib, mb in zip(integral_bounds, measure_box)]

        # an empty intersection in any dimension makes the integral ill-defined
        for lb_d, ub_d in bounds:
            if lb_d >= ub_d:
                raise ValueError(
                    "Upper bound of relevant integration domain must be larger than lower bound. Found a "
                    "pair containing ({}, {}).".format(lb_d, ub_d)
                )
        self._bounds_list_for_kernel_integrals = bounds
        self.reasonable_box_bounds = BoxBounds(name=variable_names, bounds=bounds)

    def _effective_bounds(self):
        # effective integration domain as (lower, upper) numpy arrays
        lo = np.array([b[0] for b in self._bounds_list_for_kernel_integrals])
        hi = np.array([b[1] for b in self._bounds_list_for_kernel_integrals])
        return lo, hi

    def qK(self, x2: np.ndarray) -> np.ndarray:
        """
        Kernel mean: RBF kernel with its first argument integrated out.

        :param x2: remaining argument of the once integrated kernel, shape (n_point N, input_dim)
        :returns: kernel mean at location x2, shape (1, N)
        """
        lo, hi = self._effective_bounds()
        erf_diff = erf(self._scaled_vector_diff(hi, x2)) - erf(self._scaled_vector_diff(lo, x2))
        per_dim = self.lengthscale * np.sqrt(np.pi / 2.0) * erf_diff
        kernel_mean = self.variance * per_dim.prod(axis=1)
        return kernel_mean.reshape(1, -1) * self.measure.density

    def qKq(self) -> float:
        """
        RBF kernel integrated over both of its arguments.

        :returns: double integrated kernel
        """
        lo, hi = self._effective_bounds()
        d = self._scaled_vector_diff(hi, lo)
        per_dim = (np.exp(-(d ** 2)) - 1.0) + np.sqrt(np.pi) * d * erf(d)
        prefac = self.variance * (2.0 * self.lengthscale ** 2) ** self.input_dim
        return float(prefac * per_dim.prod()) * self.measure.density ** 2

    def dqK_dx(self, x2: np.ndarray) -> np.ndarray:
        """
        Gradient of the kernel mean evaluated at x2.

        :param x2: points at which to evaluate, shape (n_point N, input_dim)
        :return: the gradient with shape (input_dim, N)
        """
        lo, hi = self._effective_bounds()
        gauss_diff = np.exp(-self._scaled_vector_diff(x2, lo) ** 2) - np.exp(-self._scaled_vector_diff(x2, hi) ** 2)
        erf_diff = erf(self._scaled_vector_diff(hi, x2)) - erf(self._scaled_vector_diff(lo, x2))
        ratio = (gauss_diff / (self.lengthscale * np.sqrt(np.pi / 2.0) * erf_diff)).T
        return self.qK(x2) * ratio
|
EmuKit/emukit
|
emukit/quadrature/kernels/quadrature_rbf.py
|
Python
|
apache-2.0
| 14,186
|
[
"Gaussian"
] |
86c9f600e3c855f4a4c738a213b71439703156cb93d1c3ad9da243a419afc7c4
|
from __future__ import print_function, division
from sympy.core import S, C, sympify
from sympy.core.function import Function, ArgumentIndexError
from sympy.core.logic import fuzzy_and
from sympy.ntheory import sieve
from math import sqrt as _sqrt
from sympy.core.compatibility import reduce, as_int, xrange
from sympy.core.cache import cacheit
class CombinatorialFunction(Function):
    """Base class for combinatorial functions. """

    def _eval_simplify(self, ratio, measure):
        # Try combsimp() and keep the result only if it is "cheap enough":
        # the simplified expression must not exceed ratio * measure(original).
        from sympy.simplify.simplify import combsimp
        expr = combsimp(self)
        if measure(expr) <= ratio*measure(self):
            return expr
        return self
###############################################################################
######################## FACTORIAL and MULTI-FACTORIAL ########################
###############################################################################
class factorial(CombinatorialFunction):
    """Implementation of factorial function over nonnegative integers.

    By convention (consistent with the gamma function and the binomial
    coefficients), factorial of a negative integer is complex infinity.

    The factorial is very important in combinatorics where it gives
    the number of ways in which `n` objects can be permuted. It also
    arises in calculus, probability, number theory, etc.

    There is strict relation of factorial with gamma function. In
    fact n! = gamma(n+1) for nonnegative integers. Rewrite of this
    kind is very useful in case of combinatorial simplification.

    Computation of the factorial is done using two algorithms. For
    small arguments naive product is evaluated. However for bigger
    input algorithm Prime-Swing is used. It is the fastest algorithm
    known and computes n! via prime factorization of special class
    of numbers, called here the 'Swing Numbers'.

    Examples
    ========

    >>> from sympy import Symbol, factorial, S
    >>> n = Symbol('n', integer=True)
    >>> factorial(0)
    1
    >>> factorial(7)
    5040
    >>> factorial(-2)
    zoo
    >>> factorial(n)
    factorial(n)
    >>> factorial(2*n)
    factorial(2*n)
    >>> factorial(S(1)/2)
    factorial(1/2)

    See Also
    ========

    factorial2, RisingFactorial, FallingFactorial
    """

    def fdiff(self, argindex=1):
        # d/dn n! = gamma(n + 1) * polygamma(0, n + 1), via n! = gamma(n + 1)
        if argindex == 1:
            return C.gamma(self.args[0] + 1)*C.polygamma(0, self.args[0] + 1)
        else:
            raise ArgumentIndexError(self, argindex)

    # Precomputed swing numbers for n < 33; base cases of _swing below.
    _small_swing = [
        1, 1, 1, 3, 3, 15, 5, 35, 35, 315, 63, 693, 231, 3003, 429, 6435, 6435, 109395,
        12155, 230945, 46189, 969969, 88179, 2028117, 676039, 16900975, 1300075,
        35102025, 5014575, 145422675, 9694845, 300540195, 300540195
    ]

    @classmethod
    def _swing(cls, n):
        # Compute the odd "swing number" of n via its prime factorization.
        if n < 33:
            return cls._small_swing[n]
        else:
            N, primes = int(_sqrt(n)), []

            # primes up to sqrt(n): collect each prime's contribution by
            # examining the base-prime digits of n
            for prime in sieve.primerange(3, N + 1):
                p, q = 1, n

                while True:
                    q //= prime

                    if q > 0:
                        if q & 1 == 1:
                            p *= prime
                    else:
                        break

                if p > 1:
                    primes.append(p)

            # primes in (sqrt(n), n/3]: contribute once iff n//prime is odd
            for prime in sieve.primerange(N + 1, n//3 + 1):
                if (n // prime) & 1 == 1:
                    primes.append(prime)

            # all primes in (n/2, n] divide the swing number exactly once
            L_product = R_product = 1

            for prime in sieve.primerange(n//2 + 1, n + 1):
                L_product *= prime

            for prime in primes:
                R_product *= prime

            return L_product*R_product

    @classmethod
    def _recursive(cls, n):
        # odd part of n!, via the recurrence oddpart(n!) = oddpart((n//2)!)**2 * swing(n)
        if n < 2:
            return 1
        else:
            return (cls._recursive(n//2)**2)*cls._swing(n)

    @classmethod
    def eval(cls, n):
        n = sympify(n)

        if n.is_Number:
            if n is S.Zero:
                return S.One
            elif n is S.Infinity:
                return S.Infinity
            elif n.is_Integer:
                if n.is_negative:
                    return S.ComplexInfinity
                else:
                    n, result = n.p, 1

                    if n < 20:
                        # small n: plain running product
                        for i in range(2, n + 1):
                            result *= i
                    else:
                        # large n: prime-swing. The power of 2 in n! is
                        # n - popcount(n) (Legendre's formula); the odd part
                        # comes from _recursive.
                        N, bits = n, 0

                        while N != 0:
                            if N & 1 == 1:
                                bits += 1

                            N = N >> 1

                        result = cls._recursive(n)*2**(n - bits)

                    return C.Integer(result)

    def _eval_rewrite_as_gamma(self, n):
        # n! = gamma(n + 1)
        return C.gamma(n + 1)

    def _eval_is_integer(self):
        return self.args[0].is_integer

    def _eval_is_positive(self):
        if self.args[0].is_integer and self.args[0].is_positive:
            return True
class MultiFactorial(CombinatorialFunction):
    # Placeholder for a generalized multifactorial; no evaluation implemented.
    pass
class subfactorial(CombinatorialFunction):
    """The subfactorial counts the derangements of n items and is
    defined for non-negative integers as::

              ,
             |  1                             for n = 0
        !n = {  0                             for n = 1
             |  (n - 1)*(!(n - 1) + !(n - 2)) for n > 1
              `

    It can also be written as int(round(n!/exp(1))) but the recursive
    definition with caching is implemented for this function.

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Subfactorial

    Examples
    ========

    >>> from sympy import subfactorial
    >>> from sympy.abc import n
    >>> subfactorial(n + 1)
    subfactorial(n + 1)
    >>> subfactorial(5)
    44

    See Also
    ========

    factorial, sympy.utilities.iterables.generate_derangements
    """

    @classmethod
    @cacheit
    def _eval(self, n):
        # NOTE: this is a classmethod, so 'self' here is actually the class.
        # Implements the derangement recurrence; @cacheit memoizes the
        # intermediate values so the recursion is linear, not exponential.
        if not n:
            return 1
        elif n == 1:
            return 0
        return (n - 1)*(self._eval(n - 1) + self._eval(n - 2))

    @classmethod
    def eval(cls, arg):
        # Evaluate only for nonnegative integers; symbolic arguments are left
        # unevaluated (implicitly returns None). Non-integer numeric
        # arguments raise ValueError.
        try:
            arg = as_int(arg)
            if arg < 0:
                raise ValueError
            return C.Integer(cls._eval(arg))
        except ValueError:
            if sympify(arg).is_Number:
                raise ValueError("argument must be a nonnegative integer")

    def _eval_is_integer(self):
        return fuzzy_and((self.args[0].is_integer,
                          self.args[0].is_nonnegative))
class factorial2(CombinatorialFunction):
    """The double factorial n!!, not to be confused with (n!)!

    The double factorial is defined for integers >= -1 as::

               ,
              |  n*(n - 2)*(n - 4)* ... * 1    for n odd
        n!! = {  n*(n - 2)*(n - 4)* ... * 2    for n even
              |  1                             for n = 0, -1
               `

    Examples
    ========

    >>> from sympy import factorial2, var
    >>> var('n')
    n
    >>> factorial2(n + 1)
    factorial2(n + 1)
    >>> factorial2(5)
    15
    >>> factorial2(-1)
    1

    See Also
    ========

    factorial, RisingFactorial, FallingFactorial
    """

    @classmethod
    def eval(cls, arg):
        # Recurse via n!! = n * (n - 2)!! down to the 0/-1 base cases.
        # NOTE(review): for numeric arguments outside the documented domain
        # (integers < -1, or non-integers) this recursion never reaches a base
        # case and recurses without bound — confirm whether input validation
        # should be added upstream.
        if arg.is_Number:
            if arg == S.Zero or arg == S.NegativeOne:
                return S.One
            return factorial2(arg - 2)*arg
###############################################################################
######################## RISING and FALLING FACTORIALS ########################
###############################################################################
class RisingFactorial(CombinatorialFunction):
    """Rising factorial (also called Pochhammer symbol) is a double valued
    function arising in concrete mathematics, hypergeometric functions
    and series expansions. It is defined by:

        rf(x, k) = x * (x+1) * ... * (x + k-1)

    where 'x' can be arbitrary expression and 'k' is an integer. For
    more information check "Concrete mathematics" by Graham, pp. 66
    or visit http://mathworld.wolfram.com/RisingFactorial.html page.

    Examples
    ========

    >>> from sympy import rf
    >>> from sympy.abc import x

    >>> rf(x, 0)
    1
    >>> rf(1, 5)
    120
    >>> rf(x, 5) == x*(1 + x)*(2 + x)*(3 + x)*(4 + x)
    True

    See Also
    ========

    factorial, factorial2, FallingFactorial
    """

    @classmethod
    def eval(cls, x, k):
        x = sympify(x)
        k = sympify(k)

        if x is S.NaN:
            return S.NaN
        elif x is S.One:
            # rf(1, k) = k!
            return factorial(k)
        elif k.is_Integer:
            if k is S.NaN:
                return S.NaN
            elif k is S.Zero:
                # empty product
                return S.One
            else:
                if k.is_positive:
                    if x is S.Infinity:
                        return S.Infinity
                    elif x is S.NegativeInfinity:
                        # sign of the product of k factors of -oo
                        if k.is_odd:
                            return S.NegativeInfinity
                        else:
                            return S.Infinity
                    else:
                        # rf(x, k) = x*(x + 1)*...*(x + k - 1)
                        return reduce(lambda r, i: r*(x + i), xrange(0, int(k)), 1)
                else:
                    if x is S.Infinity:
                        return S.Infinity
                    elif x is S.NegativeInfinity:
                        return S.Infinity
                    else:
                        # negative k: rf(x, -|k|) = 1/((x - 1)*(x - 2)*...*(x - |k|))
                        return 1/reduce(lambda r, i: r*(x - i), xrange(1, abs(int(k)) + 1), 1)

    def _eval_rewrite_as_gamma(self, x, k):
        # rf(x, k) = gamma(x + k)/gamma(x)
        return C.gamma(x + k) / C.gamma(x)

    def _eval_is_integer(self):
        return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer,
                          self.args[1].is_nonnegative))
class FallingFactorial(CombinatorialFunction):
    """Falling factorial (related to rising factorial) is a double valued
    function arising in concrete mathematics, hypergeometric functions
    and series expansions. It is defined by

        ff(x, k) = x * (x-1) * ... * (x - k+1)

    where 'x' can be arbitrary expression and 'k' is an integer. For
    more information check "Concrete mathematics" by Graham, pp. 66
    or visit http://mathworld.wolfram.com/FallingFactorial.html page.

    >>> from sympy import ff
    >>> from sympy.abc import x

    >>> ff(x, 0)
    1
    >>> ff(5, 5)
    120
    >>> ff(x, 5) == x*(x-1)*(x-2)*(x-3)*(x-4)
    True

    See Also
    ========

    factorial, factorial2, RisingFactorial
    """

    @classmethod
    def eval(cls, x, k):
        x = sympify(x)
        k = sympify(k)

        if x is S.NaN:
            return S.NaN
        elif k.is_Integer:
            if k is S.NaN:
                return S.NaN
            elif k is S.Zero:
                # empty product
                return S.One
            else:
                if k.is_positive:
                    if x is S.Infinity:
                        return S.Infinity
                    elif x is S.NegativeInfinity:
                        # sign of the product of k factors of -oo
                        if k.is_odd:
                            return S.NegativeInfinity
                        else:
                            return S.Infinity
                    else:
                        # ff(x, k) = x*(x - 1)*...*(x - k + 1)
                        return reduce(lambda r, i: r*(x - i), xrange(0, int(k)), 1)
                else:
                    if x is S.Infinity:
                        return S.Infinity
                    elif x is S.NegativeInfinity:
                        return S.Infinity
                    else:
                        # negative k: ff(x, -|k|) = 1/((x + 1)*(x + 2)*...*(x + |k|))
                        return 1/reduce(lambda r, i: r*(x + i), xrange(1, abs(int(k)) + 1), 1)

    def _eval_rewrite_as_gamma(self, x, k):
        # ff(x, k) = (-1)**k * gamma(k - x)/gamma(-x)
        return (-1)**k * C.gamma(-x + k) / C.gamma(-x)

    def _eval_is_integer(self):
        return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer,
                          self.args[1].is_nonnegative))
# Short aliases matching the rf()/ff() names used in the doctests above.
rf = RisingFactorial
ff = FallingFactorial
###############################################################################
########################### BINOMIAL COEFFICIENTS #############################
###############################################################################
class binomial(CombinatorialFunction):
    """Implementation of the binomial coefficient. It can be defined
    in two ways depending on its desired interpretation:

    C(n,k) = n!/(k!(n-k)!) or C(n, k) = ff(n, k)/k!

    First, in a strict combinatorial sense it defines the
    number of ways we can choose 'k' elements from a set of
    'n' elements. In this case both arguments are nonnegative
    integers and binomial is computed using an efficient
    algorithm based on prime factorization.

    The other definition is generalization for arbitrary 'n',
    however 'k' must also be nonnegative. This case is very
    useful when evaluating summations.

    For the sake of convenience for negative 'k' this function
    will return zero no matter what valued is the other argument.

    To expand the binomial when n is a symbol, use either
    expand_func() or expand(func=True). The former will keep the
    polynomial in factored form while the latter will expand the
    polynomial itself. See examples for details.

    Examples
    ========

    >>> from sympy import Symbol, Rational, binomial, expand_func
    >>> n = Symbol('n', integer=True)
    >>> binomial(15, 8)
    6435
    >>> binomial(n, -1)
    0
    >>> [ binomial(0, i) for i in range(1)]
    [1]
    >>> [ binomial(1, i) for i in range(2)]
    [1, 1]
    >>> [ binomial(2, i) for i in range(3)]
    [1, 2, 1]
    >>> [ binomial(3, i) for i in range(4)]
    [1, 3, 3, 1]
    >>> [ binomial(4, i) for i in range(5)]
    [1, 4, 6, 4, 1]
    >>> binomial(Rational(5,4), 3)
    -5/128
    >>> binomial(n, 3)
    binomial(n, 3)
    >>> binomial(n, 3).expand(func=True)
    n**3/6 - n**2/2 + n/3
    >>> expand_func(binomial(n, 3))
    n*(n - 2)*(n - 1)/6
    """

    def fdiff(self, argindex=1):
        """Derivative with respect to n (argindex=1) or k (argindex=2),
        expressed through polygamma functions."""
        if argindex == 1:
            # http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/01/
            n, k = self.args
            return binomial(n, k)*(C.polygamma(0, n + 1) - C.polygamma(0, n - k + 1))
        elif argindex == 2:
            # http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/02/
            n, k = self.args
            return binomial(n, k)*(C.polygamma(0, n - k + 1) - C.polygamma(0, k + 1))
        else:
            raise ArgumentIndexError(self, argindex)

    @classmethod
    def eval(cls, n, k):
        """Evaluate C(n, k) where possible.

        For nonnegative integers the result is assembled prime by prime:
        the exponent of each prime p in C(n, k) is obtained by counting
        carries in the base-p addition of k and n - k (Kummer's theorem),
        with shortcuts for primes larger than sqrt(n).
        """
        n, k = map(sympify, (n, k))
        if k.is_Number:
            if k.is_Integer:
                if k < 0:
                    return S.Zero
                elif k == 0 or n == k:
                    return S.One
                elif n.is_Integer and n >= 0:
                    n, k = int(n), int(k)
                    if k > n:
                        return S.Zero
                    elif k > n // 2:
                        # symmetry: C(n, k) == C(n, n - k); use the smaller k
                        k = n - k
                    M, result = int(_sqrt(n)), 1
                    for prime in sieve.primerange(2, n + 1):
                        if prime > n - k:
                            # primes in (n - k, n] divide C(n, k) exactly once
                            result *= prime
                        elif prime > n // 2:
                            # primes in (n/2, n - k] cannot divide C(n, k)
                            continue
                        elif prime > M:
                            # for p > sqrt(n) the exponent is 0 or 1
                            if n % prime < k % prime:
                                result *= prime
                        else:
                            # general case: count carries in base-p addition
                            N, K = n, k
                            exp = a = 0
                            while N > 0:
                                a = int((N % prime) < (K % prime + a))
                                N, K = N // prime, K // prime
                                exp = a + exp
                            if exp > 0:
                                result *= prime**exp
                    return C.Integer(result)
                elif n.is_Number:
                    # non-integer numeric n: falling-factorial product
                    # NOTE(review): xrange is Python 2 — presumably imported
                    # from sympy's compatibility layer; confirm.
                    result = n - k + 1
                    for i in xrange(2, k + 1):
                        result *= n - k + i
                        result /= i
                    return result
        elif k.is_negative:
            return S.Zero
        elif (n - k).simplify().is_negative:
            return S.Zero
        else:
            # symbolic n with integer difference: fold back onto integer k
            d = n - k
            if d.is_Integer:
                return cls.eval(n, d)

    def _eval_expand_func(self, **hints):
        """
        Function to expand binomial(n, k) when k is a positive integer.
        Also,
        n is self.args[0] and k is self.args[1] while using binomial(n, k)
        """
        n = self.args[0]
        if n.is_Number:
            return binomial(*self.args)
        k = self.args[1]
        if k.is_Add and n in k.args:
            # binomial(n, n + c): use the complementary integer offset as k
            k = n - k
        if k.is_Integer:
            if k == S.Zero:
                return S.One
            elif k < 0:
                return S.Zero
            else:
                # factored product form: n*(n-1)*...*(n-k+1)/k!
                n = self.args[0]
                result = n - k + 1
                for i in xrange(2, k + 1):
                    result *= n - k + i
                    result /= i
                return result
        else:
            return binomial(*self.args)

    def _eval_rewrite_as_factorial(self, n, k):
        """Rewrite as n!/(k!(n-k)!)."""
        return C.factorial(n)/(C.factorial(k)*C.factorial(n - k))

    def _eval_rewrite_as_gamma(self, n, k):
        """Rewrite via the gamma function."""
        return C.gamma(n + 1)/(C.gamma(k + 1)*C.gamma(n - k + 1))

    def _eval_is_integer(self):
        # integer n and k always yield an integer binomial coefficient
        return self.args[0].is_integer and self.args[1].is_integer
|
cccfran/sympy
|
sympy/functions/combinatorial/factorials.py
|
Python
|
bsd-3-clause
| 17,750
|
[
"VisIt"
] |
0617ab74061e60f346628bf4c92a49cdb32064f8bbc51b377e9bb478369a7186
|
"""Support for package tracking sensors from 17track.net."""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LOCATION,
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle, slugify
_LOGGER = logging.getLogger(__name__)

# Extra state-attribute keys exposed by the sensors below.
ATTR_DESTINATION_COUNTRY = "destination_country"
ATTR_FRIENDLY_NAME = "friendly_name"
ATTR_INFO_TEXT = "info_text"
ATTR_ORIGIN_COUNTRY = "origin_country"
ATTR_PACKAGES = "packages"
ATTR_PACKAGE_TYPE = "package_type"
ATTR_STATUS = "status"
ATTR_TRACKING_INFO_LANGUAGE = "tracking_info_language"
ATTR_TRACKING_NUMBER = "tracking_number"

# Platform configuration options.
CONF_SHOW_ARCHIVED = "show_archived"
CONF_SHOW_DELIVERED = "show_delivered"

DATA_PACKAGES = "package_data"
DATA_SUMMARY = "summary_data"

DEFAULT_ATTRIBUTION = "Data provided by 17track.net"
DEFAULT_SCAN_INTERVAL = timedelta(minutes=10)

# Templates for unique/entity ids and the "delivered" persistent notification.
UNIQUE_ID_TEMPLATE = "package_{0}_{1}"
ENTITY_ID_TEMPLATE = "sensor.seventeentrack_package_{0}"
NOTIFICATION_DELIVERED_ID = "package_delivered_{0}"
NOTIFICATION_DELIVERED_TITLE = "Package {0} delivered"
# Placeholders: {0} = display name, {1} = tracking number (used in the URL).
NOTIFICATION_DELIVERED_MESSAGE = (
    "Package Delivered: {0}<br />" + "Visit 17.track for more information: "
    "https://t.17track.net/track#nums={1}"
)
VALUE_DELIVERED = "Delivered"

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_USERNAME): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_SHOW_ARCHIVED, default=False): cv.boolean,
        vol.Optional(CONF_SHOW_DELIVERED, default=False): cv.boolean,
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Configure the platform and add the sensors."""
    from py17track import Client
    from py17track.errors import SeventeenTrackError

    session = aiohttp_client.async_get_clientsession(hass)
    client = Client(session)

    try:
        authenticated = await client.profile.login(
            config[CONF_USERNAME], config[CONF_PASSWORD]
        )
    except SeventeenTrackError as err:
        _LOGGER.error("There was an error while logging in: %s", err)
        return

    if not authenticated:
        _LOGGER.error("Invalid username and password provided")
        return

    scan_interval = config.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)

    # The data handler creates the entities itself on its first refresh.
    data = SeventeenTrackData(
        client,
        async_add_entities,
        scan_interval,
        config[CONF_SHOW_ARCHIVED],
        config[CONF_SHOW_DELIVERED],
    )
    await data.async_update()
class SeventeenTrackSummarySensor(Entity):
    """Sensor counting the packages currently in one 17track status bucket."""

    def __init__(self, data, status, initial_state):
        """Initialize."""
        self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
        self._data = data
        self._state = initial_state
        self._status = status

    @property
    def available(self):
        """Return whether the entity is available."""
        return self._state is not None

    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        return self._attrs

    @property
    def icon(self):
        """Return the icon."""
        return "mdi:package"

    @property
    def name(self):
        """Return the name."""
        return f"Seventeentrack Packages {self._status}"

    @property
    def state(self):
        """Return the state."""
        return self._state

    @property
    def unique_id(self):
        """Return a unique, HASS-friendly identifier for this entity."""
        return f"summary_{self._data.account_id}_{slugify(self._status)}"

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return "packages"

    async def async_update(self):
        """Refresh the count and the list of packages in this status."""
        await self._data.async_update()

        matching = [
            {
                ATTR_FRIENDLY_NAME: package.friendly_name,
                ATTR_INFO_TEXT: package.info_text,
                ATTR_STATUS: package.status,
                ATTR_TRACKING_NUMBER: package.tracking_number,
            }
            for package in self._data.packages.values()
            if package.status == self._status
        ]
        if matching:
            self._attrs[ATTR_PACKAGES] = matching

        self._state = self._data.summary.get(self._status)
class SeventeenTrackPackageSensor(Entity):
    """Define an individual package sensor."""

    def __init__(self, data, package):
        """Initialize from the shared data handler and a py17track package."""
        self._attrs = {
            ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION,
            ATTR_DESTINATION_COUNTRY: package.destination_country,
            ATTR_INFO_TEXT: package.info_text,
            ATTR_LOCATION: package.location,
            ATTR_ORIGIN_COUNTRY: package.origin_country,
            ATTR_PACKAGE_TYPE: package.package_type,
            ATTR_TRACKING_INFO_LANGUAGE: package.tracking_info_language,
            ATTR_TRACKING_NUMBER: package.tracking_number,
        }
        self._data = data
        self._friendly_name = package.friendly_name
        self._state = package.status
        self._tracking_number = package.tracking_number
        self.entity_id = ENTITY_ID_TEMPLATE.format(self._tracking_number)

    @property
    def available(self):
        """Return whether the entity is available."""
        return self._data.packages.get(self._tracking_number) is not None

    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        return self._attrs

    @property
    def icon(self):
        """Return the icon."""
        return "mdi:package"

    @property
    def name(self):
        """Return the name (friendly name, falling back to the tracking number)."""
        name = self._friendly_name
        if not name:
            name = self._tracking_number
        return f"Seventeentrack Package: {name}"

    @property
    def state(self):
        """Return the state."""
        return self._state

    @property
    def unique_id(self):
        """Return a unique, HASS-friendly identifier for this entity."""
        return UNIQUE_ID_TEMPLATE.format(self._data.account_id, self._tracking_number)

    async def async_update(self):
        """Update the sensor; remove it when its package disappears or is delivered."""
        await self._data.async_update()

        if not self.available:
            # the package vanished upstream — remove the entity
            self.hass.async_create_task(self._remove())
            return

        package = self._data.packages.get(self._tracking_number, None)

        # If the user has elected to not see delivered packages and one gets
        # delivered, post a notification:
        if package.status == VALUE_DELIVERED and not self._data.show_delivered:
            self._notify_delivered()
            self.hass.async_create_task(self._remove())
            return

        self._attrs.update(
            {ATTR_INFO_TEXT: package.info_text, ATTR_LOCATION: package.location}
        )
        self._state = package.status
        self._friendly_name = package.friendly_name

    async def _remove(self):
        """Remove entity itself and deregister it from the entity registry."""
        await self.async_remove()

        reg = await self.hass.helpers.entity_registry.async_get_registry()
        entity_id = reg.async_get_entity_id(
            "sensor",
            "seventeentrack",
            UNIQUE_ID_TEMPLATE.format(self._data.account_id, self._tracking_number),
        )
        if entity_id:
            reg.async_remove(entity_id)

    def _notify_delivered(self):
        """Notify when package is delivered."""
        _LOGGER.info("Package delivered: %s", self._tracking_number)

        identification = (
            self._friendly_name if self._friendly_name else self._tracking_number
        )
        # FIX: the message template expects ({0}=display name, {1}=tracking
        # number for the t.17track.net URL); the arguments were swapped,
        # which broke the tracking link whenever a friendly name was set.
        message = NOTIFICATION_DELIVERED_MESSAGE.format(
            identification, self._tracking_number
        )
        title = NOTIFICATION_DELIVERED_TITLE.format(identification)
        # FIX: use the dedicated (previously unused) ID template so the
        # notification id is a stable identifier, not a title-styled string.
        notification_id = NOTIFICATION_DELIVERED_ID.format(self._tracking_number)

        self.hass.components.persistent_notification.create(
            message, title=title, notification_id=notification_id
        )
class SeventeenTrackData:
    """Define a data handler for 17track.net."""

    def __init__(
        self, client, async_add_entities, scan_interval, show_archived, show_delivered
    ):
        """Initialize."""
        self._async_add_entities = async_add_entities
        self._client = client
        self._scan_interval = scan_interval
        self._show_archived = show_archived
        self.account_id = client.profile.account_id
        self.packages = {}          # tracking_number -> py17track package
        self.show_delivered = show_delivered
        self.summary = {}           # status name -> package count
        # Throttle caps the shared refresh at one real request per
        # scan_interval no matter how many sensors call async_update().
        self.async_update = Throttle(self._scan_interval)(self._async_update)
        self.first_update = True    # summary sensors are created on first refresh

    async def _async_update(self):
        """Get updated data from 17track.net."""
        from py17track.errors import SeventeenTrackError

        try:
            packages = await self._client.profile.packages(
                show_archived=self._show_archived
            )
            _LOGGER.debug("New package data received: %s", packages)

            new_packages = {p.tracking_number: p for p in packages}

            # only create sensors for tracking numbers not seen before;
            # existing sensors pick up changes through self.packages
            to_add = set(new_packages) - set(self.packages)

            _LOGGER.debug("Will add new tracking numbers: %s", to_add)
            if to_add:
                self._async_add_entities(
                    [
                        SeventeenTrackPackageSensor(self, new_packages[tracking_number])
                        for tracking_number in to_add
                    ],
                    True,
                )

            self.packages = new_packages
        except SeventeenTrackError as err:
            # keep the previous package map on transient API errors
            _LOGGER.error("There was an error retrieving packages: %s", err)

        try:
            self.summary = await self._client.profile.summary(
                show_archived=self._show_archived
            )
            _LOGGER.debug("New summary data received: %s", self.summary)

            # creating summary sensors on first update
            if self.first_update:
                self.first_update = False

                self._async_add_entities(
                    [
                        SeventeenTrackSummarySensor(self, status, quantity)
                        for status, quantity in self.summary.items()
                    ],
                    True,
                )
        except SeventeenTrackError as err:
            _LOGGER.error("There was an error retrieving the summary: %s", err)
            self.summary = {}
|
Cinntax/home-assistant
|
homeassistant/components/seventeentrack/sensor.py
|
Python
|
apache-2.0
| 10,865
|
[
"VisIt"
] |
9feea18c0e8931859e7230ec6bd222e42df0897bc9395ee606e196f880f05376
|
__author__ = 'Elahe'
import bisect
import ephem
import numpy as np
import json
from operator import attrgetter
from numpy import *
import time
''' Constants '''
inf = 1e10   # sentinel for "effectively infinite" time/cost
eps = 1e-10  # numerical tolerance
# temp variable: basis-function values of every field from the most recent
# cost evaluation, indexed by field id - 1 (4206 fields x 7 basis functions)
AllF = np.zeros((4206,7))
class Data(object):
    """Loads all precomputed per-night inputs for a date/site (field list,
    time slots, altitudes, hour angles, airmass constraints, visit history,
    moon separations, slew matrix) and holds the live-updated sky data."""

    def __init__(self, date, site):
        self.Date = date
        self.Site = site
        # NOTE(review): computed but never used
        LastNight_start = float(date) -1; LastNight_end = float(date)
        ''' Predictable data '''
        # 3 by n_fields matrix of ID, RA, Dec
        self.all_fields = np.loadtxt("NightDataInLIS/Constants/fieldID.lis", dtype = "i4, f8, f8", unpack = True)
        self.time_slots = np.loadtxt("NightDataInLIS/TimeSlots{}.lis".format(int(ephem.julian_date(self.Date))), unpack = True)
        self.altitudes = np.loadtxt("NightDataInLIS/Altitudes{}.lis".format(int(ephem.julian_date(self.Date))), unpack = True)
        self.hour_angs = np.loadtxt("NightDataInLIS/HourAngs{}.lis".format(int(ephem.julian_date(self.Date))), unpack = True)
        #self.Moon_seps = np.loadtxt("MoonSeps{}.lis".format(int(ephem.julian_date(self.Date))), unpack = True)
        # row 0: rise times, row 1: set times (airmass-horizon crossings)
        self.amass_cstr = np.loadtxt("NightDataInLIS/AirmassConstraints{}.lis".format(int(ephem.julian_date(self.Date))), unpack = True)
        self.all_n_tot_visits = np.loadtxt("NightDataInLIS/tot_N_visit{}.lis".format(int(ephem.julian_date(self.Date))), dtype = "i4", unpack = True)
        self.t_last_v_last= np.loadtxt("NightDataInLIS/t_last_visit{}.lis".format(int(ephem.julian_date(self.Date))), unpack = True)
        self.coad_depth = self.all_n_tot_visits / (np.max(self.all_n_tot_visits) +1 ) #!!!!! temporarily!!!!!!!! # TODO Add coadded depth module instead of visit count
        self.vis_of_year = np.zeros(len(self.all_fields[0])) #!!!!! temporarily!!!!!!!! # TODO Visibility of the year is currently all zero
        self.sci_prog = np.zeros(len(self.all_fields[0]), dtype= 'int') #!!!!! temporarily!!!!!!!! # TODO Science program is not considered yet
        self.moon_sep = np.loadtxt("NightDataInLIS/MoonSeps{}.lis".format(int(ephem.julian_date(self.Date))), unpack = True)
        # n_fields by n_fields symmetric matrix, slew time from field i to j
        self.slew_t = np.loadtxt("NightDataInLIS/Constants/slewMatrix.dat", unpack = True) * ephem.second
        self.n_all_fields = len(self.all_fields[0])
        self.n_time_slots = len(self.time_slots)
        self.t_start = self.time_slots[0]
        self.t_end = self.time_slots[self.n_time_slots -1]
        self.n_start = find_n(self.t_start, self.t_start, self.t_end, self.n_time_slots, self.time_slots)
        ''' Unpredictable data '''
        self.sky_brightness = np.zeros(len(self.all_fields[0]), dtype= 'int') #!!!!! temporarily!!!!!!!! # current sky brightness
        self.temp_coverage = np.zeros(len(self.all_fields[0]), dtype= 'int') #!!!!! temporarily!!!!!!!! # temporary 0/1 coverage of the sky including clouds
        # TODO Add update module for live sky brightness and temporary coverage updates
        #print('\nData imported correctly') # data validity check should be added

    def update_sky_brightness(self, sky_brightness): # 1 by n_fields vector, sky brightness at each field
        self.sky_brightness = sky_brightness #must be fed into the algorithm in real time or as prediction in training

    def update_temp_coverage(self, temp_coverage): # 1 by n_fields 0/1 vector, temporary sky coverage (clouds)
        self.temp_coverage = temp_coverage #must be fed into the algorithm in real time or as prediction in training
class Scheduler(Data):
    """Greedy nightly scheduler.

    At each step it evaluates a linear cost (feature weights dotted with the
    7 basis functions) for every feasible field, visits the cheapest one, and
    optionally adjusts the feature weights online ("micro training").
    """

    def __init__(self, date, site, f_weight, preferences, manual_init_state = 0, exposure_t = 30 * ephem.second, visit_window = [15*ephem.minute, 30*ephem.minute], max_n_ton_visits =3, micro_train = False):
        # NOTE(review): visit_window is a mutable default argument — it is
        # shared across instances; safe only while callers never mutate it.
        super(Scheduler, self).__init__(date, site)
        # create telescope
        self.tonight_telescope = TelescopeState()
        self.tonight_telescope.set_param(self.t_start, self.t_end)
        # create fields objects and their parameters
        self.fields = []
        for index, field in enumerate(np.transpose(self.all_fields)):
            id = field[0]
            ra = field[1]
            dec = field[2]
            temp = FiledState(id, ra, dec)
            t_rise = self.amass_cstr[0, index]
            set_t = self.amass_cstr[1, index]
            temp.set_param(t_rise,
                           set_t,
                           self.all_n_tot_visits[index],
                           self.coad_depth[index],
                           self.vis_of_year[index],
                           self.sci_prog[index],
                           self.t_last_v_last[index])
            self.fields.append(temp)
        # scheduler outputs
        self.__NightOutput = None
        self.__NightSummary = None
        # scheduler parameters
        self.exposure_t = exposure_t
        self.manual_init_state = manual_init_state
        self.visit_window = visit_window
        self.max_n_ton_visits = max_n_ton_visits
        self.f_weight = f_weight
        self.preferences = preferences
        # timing
        self.__t = None
        self.__n = None
        self.__step = None
        # other
        self.init_id = None
        # create trainer
        self.trainer = Trainer()
        self.micro_train = micro_train

    def set_f_wight(self, new_f_weight):
        """Set the feature-weight vector (method name 'wight' kept: public API)."""
        self.f_weight = new_f_weight

    def get_f_wight(self):
        """Return the current feature-weight vector."""
        return self.f_weight

    def schedule(self):
        """Run a whole night: repeatedly pick and visit the cheapest feasible
        field until t_end, then persist the night's records."""
        self.init_night() #Initialize observation
        while self.__t < self.t_end:
            feasibility_idx = []
            all_costs = np.ones(self.n_all_fields) * inf
            for field, index in zip(self.fields, range(self.n_all_fields)):
                if self.is_feasible(field): # update features of the feasible fields
                    feasibility_idx.append(index)
                    self.update_field(field)
                    all_costs[index] = calculate_cost(field, self.tonight_telescope, self.f_weight)
            next_field_index, self.minimum_cost = decision_fcn(all_costs, feasibility_idx)
            self.next_field = self.fields[next_field_index]
            # time to the next decision: slew plus one exposure
            dt = self.next_field.slew_t_to + self.exposure_t
            if len(feasibility_idx) <= 10:
                print(len(feasibility_idx))
            self.clock(dt)
            # update next field visit variables
            self.next_field.update_visit_var(self.__t)
            self.tonight_telescope.update(self.__t, self.__n, self.__step, self.next_field, 0) # TODO Filter change decision making procedure (maybe as second stage decision)
            self.tonight_telescope.watch_fcn()
            self.record_visit()
            # update F_weights by feedback
            if self.micro_train:
                self.old_c_r = self.new_c_r
                self.new_c_r = self.cum_reward()
                # clip the mean-adjusted reward delta to {-1, 0, +1}
                reward = self.new_c_r - self.old_c_r - self.avg_rwd
                if reward > 0:
                    reward = 1
                elif reward < 0:
                    reward = -1
                self.avg_rwd = (self.avg_rwd * (self.__step -1) + (reward)) / float(self.__step)
                self.old_cost = self.new_cost
                self.new_cost = self.minimum_cost
                F_state= AllF[self.tonight_telescope.state.id -1]
                f_weight_correction = self.trainer.micro_feedback(R = reward, av_R = self.avg_rwd, n_C = self.new_cost, o_C = self.old_cost, F = F_state)
                self.set_f_wight(self.f_weight - f_weight_correction)
                self.AllF_weight = np.vstack((self.AllF_weight, self.f_weight))
        self.record_night()

    def update_field(self,field):
        """Refresh the 'soft' features of a field already known to be feasible."""
        id = field.id
        slew_t_to = self.calculate_f1(id)
        ha = self.calculate_f4(id)
        t_to_invis = self.calculate_f6(field.set_t)
        normalized_bri = self.calculate_f7(id)
        cov = self.calculate_f10(id)
        field.set_soft_var(slew_t_to, ha, t_to_invis, normalized_bri, cov)

    def clock(self, dt, reset = False):
        """Advance (or, with reset=True, restart) the night clock, the step
        counter and the current time-slot index."""
        if reset:
            self.__t = self.t_start + self.exposure_t
            self.__step = 0
        else:
            self.__t += dt
            self.__step += 1
        self.__n = find_n(self.__t, self.t_start, self.t_end, self.n_time_slots, self.time_slots)

    def init_night(self):
        """Reset outputs, clock, field states, telescope and feedback state."""
        # Reset Nights outputs
        # NOTE(review): np.int / np.float were removed in NumPy >= 1.24; on
        # modern NumPy these need to be the builtins int / float.
        self.__NightOutput = np.zeros((1200,), dtype = [('Field_id', np.int),
                                                        ('ephemDate', np.float),
                                                        ('Filter', np.int),
                                                        ('n_ton', np.int),
                                                        ('n_last', np.int),
                                                        ('Cost', np.float),
                                                        ('Slew_t', np.float),
                                                        ('t_since_v_ton', np.float),
                                                        ('t_since_v_last', np.float),
                                                        ('Alt', np.float),
                                                        ('HA', np.float),
                                                        ('t_to_invis', np.float),
                                                        ('Sky_bri', np.float),
                                                        ('Temp_coverage', np.int)]) # at most 1200 visits per night
        self.__NightSummary = np.zeros(3) # t_start and t_end for now
        # Reset time
        self.clock(0,True)
        # Reset fields' state
        self.reset_fields_state()
        # Reset telescope
        init_state = self.init_state(self.manual_init_state, False)
        init_filter = self.init_filter()
        init_state.update_visit_var(self.__t)
        self.tonight_telescope.update(self.t_start, self.n_start, self.__step, init_state, init_filter)
        self.minimum_cost = 0.
        self.reset_feedback()
        # Record initial condition
        self.op_log = open("Output/log{}.lis".format(int(ephem.julian_date(self.Date))),"w")
        self.record_visit()

    def reset_fields_state(self):
        """Reinitialize every field's per-night variables from the slot-0 data."""
        for index, field in enumerate(self.fields):
            alt = self.altitudes[0, index]
            ha = self.hour_angs[0, index]
            cov = self.temp_coverage[index]
            bri = self.sky_brightness[index]
            t_last_visit = inf          # not visited tonight yet
            t_last_v_last = self.t_last_v_last[index]
            set_t = self.amass_cstr[1, index]
            t_to_invis = self.calculate_f6(set_t)
            t_since_last_v_ton, t_since_last_v_last = self.calculate_f2(t_last_visit, t_last_v_last)
            slew_t_to = 0
            field.set_variables(alt, ha, cov, bri, t_to_invis, t_since_last_v_ton, t_since_last_v_last, slew_t_to)
            field.set_visit_var(0, t_last_visit)

    def init_state(self, state, manual = False): # TODO Feasibility of the initial field needs to be checked
        """Choose the first field of the night: the given state if manual,
        otherwise the currently highest-altitude field."""
        if manual:
            self.init_id = state.id
            return state
        else:
            init_state = max(self.fields, key = attrgetter('alt'))
            self.init_id = init_state.id
            return init_state

    def init_filter(self):
        """Initial filter index (fixed at 0 for now)."""
        return 0

    def reset_feedback(self):
        """Zero out the micro-training bookkeeping for a new night."""
        self.old_c_r = 0
        self.new_c_r = 0
        self.AllF_weight = self.f_weight
        self.old_cost = 0
        self.new_cost = 0
        self.avg_rwd = 0

    # Feature calculation
    def calculate_f1(self, id): # slew time
        return self.slew_t[int(id -1), int(self.tonight_telescope.state.id) -1]

    def calculate_f2(self, t_last_v, t_last_v_last):# time since last visit
        if t_last_v_last != inf:
            t_since_last_v_last = self.__t - t_last_v_last
        else:
            t_since_last_v_last = inf
        if t_last_v != inf:
            t_since_last_v_ton = self.__t - t_last_v
        else:
            t_since_last_v_ton = inf
        return t_since_last_v_ton, t_since_last_v_last

    def calculate_f3(self, id): # altitude
        return self.altitudes[self.__n, int(id) -1]

    def calculate_f4(self, id): # hour angle
        return self.hour_angs[self.__n, int(id) -1]

    def calculate_f6(self, set_t): # time to become effectively invisible- temporarily until setting below airmass horizon
        if set_t == 0:
            # set_t == 0 encodes "never sets"
            return inf
        else:
            return set_t - self.__t

    def calculate_f7(self, id): # normalized sky brightness
        # crude moon model: brightness penalty grows with moon illumination
        # and shrinks with moon separation
        moon_size = 0.5 - np.abs(self.tonight_telescope.moon_phase - 0.5)
        moon_sep = self.moon_sep[self.__n, int(id) -1] / np.pi
        if moon_sep < 10 * np.pi/180:
            return inf
        if moon_size <0.2:
            return np.exp(-10 * moon_sep)
        elif moon_size < 0.5:
            return np.exp(-2 * moon_sep)
        elif moon_size < 0.8:
            return np.exp(-1 * moon_sep)
        else:
            return 1 - 0.5 * moon_sep

    def calculate_f8(self, id): # visibility for rest of the year
        return 0

    def calculate_f9(self, id): # science program identifier
        return 0

    def calculate_f10(self, id): # 0/1 temporary coverage
        return 0

    def is_feasible(self, any_next_state):
        """Hard feasibility screen; also stores the 'hard' features on the
        candidate field when it passes."""
        rise_t = any_next_state.rise_t
        n_ton_visits = any_next_state.n_ton_visits
        t_last_v_last = any_next_state.t_last_v_last
        t_last_visit = any_next_state.t_last_visit
        t_since_last_v_ton, t_since_last_v_last = self.calculate_f2(t_last_visit, t_last_v_last)
        current_field = self.tonight_telescope.state.id
        id = any_next_state.id
        alt = self.calculate_f3(id)
        slew_t = self.calculate_f1(id) #TODO change the slew_t and bri features from soft to hard variables
        bri = self.calculate_f7(id)
        # outside its airmass-visibility window
        if rise_t != 0 and (any_next_state.rise_t > self.__t or any_next_state.set_t < self.__t):
            return False
        # circumpolar-style fields (rise_t == 0): require altitude > 45 deg
        if rise_t == 0 and alt < np.pi/4:
            return False
        # revisits must land inside the visit window
        if t_since_last_v_ton != inf and (t_since_last_v_ton < self.visit_window[0] or t_since_last_v_ton > self.visit_window[1]):
            return False
        if n_ton_visits >= self.max_n_ton_visits:
            return False
        # never re-observe the current field immediately
        if current_field == any_next_state.id:
            return False
        # long slews are only allowed for first visits of the night
        if slew_t > 20 *ephem.second and t_since_last_v_ton != inf:
            return False
        # too close to the moon
        if bri == inf:
            return False
        any_next_state.set_hard_var(t_since_last_v_ton, t_since_last_v_last, alt)
        return True

    def record_visit(self):
        """Append the current telescope state to the night log (array + file)."""
        self.__NightOutput[self.__step]['Field_id'] = self.tonight_telescope.state.id
        self.__NightOutput[self.__step]['ephemDate'] = self.__t
        self.__NightOutput[self.__step]['Filter'] = self.tonight_telescope.the_filter
        self.__NightOutput[self.__step]['n_ton'] = self.tonight_telescope.state.n_ton_visits
        self.__NightOutput[self.__step]['n_last'] = self.tonight_telescope.state.n_tot_visits
        self.__NightOutput[self.__step]['Cost'] = self.minimum_cost
        self.__NightOutput[self.__step]['Slew_t'] = self.tonight_telescope.state.slew_t_to
        self.__NightOutput[self.__step]['t_since_v_ton'] = self.tonight_telescope.state.t_since_last_v_ton
        self.__NightOutput[self.__step]['t_since_v_last'] = self.tonight_telescope.state.t_since_last_v_last
        self.__NightOutput[self.__step]['Alt'] = self.tonight_telescope.state.alt
        self.__NightOutput[self.__step]['HA'] = self.tonight_telescope.state.ha
        self.__NightOutput[self.__step]['t_to_invis'] = self.tonight_telescope.state.t_to_invis
        self.__NightOutput[self.__step]['Sky_bri'] = self.tonight_telescope.state.normalized_bri
        self.__NightOutput[self.__step]['Temp_coverage']= self.tonight_telescope.state.cov
        self.op_log.write(json.dumps(self.__NightOutput[self.__step].tolist())+"\n")

    def record_night(self):
        """Persist the night's outputs, summary and basis-function trace."""
        self.__NightSummary[0] = self.t_start
        self.__NightSummary[1] = self.t_end
        self.__NightSummary[2] = self.init_id
        np.save("Output/Schedule{}.npy".format(int(ephem.julian_date(self.Date))), self.__NightOutput)
        np.save("Output/Summary{}.npy".format(int(ephem.julian_date(self.Date))), self.__NightSummary)
        np.save("Output/Watch{}.npy".format(int(ephem.julian_date(self.Date))), self.tonight_telescope.watch)

    def performance(self):
        """Scalar objective for the night, combining averages and per-hour
        single/double/triple visit counts weighted by self.preferences."""
        duration = (self.t_end - self.t_start) /ephem.hour
        # linear
        # NOTE(review): cost_avg reads the 'Alt' column — 'Cost' looks
        # intended; confirm.
        cost_avg = np.average(self.__NightOutput[0:self.__step]['Alt'])
        slew_avg = np.average(self.__NightOutput[0:self.__step]['Slew_t'])
        alt_avg = np.average(self.__NightOutput[0:self.__step]['Alt'])
        # non-linear
        u, c = np.unique(self.__NightOutput['Field_id'], return_counts=True)
        unique, counts = np.unique(c, return_counts=True)
        # bare except: absent multiplicity simply counts as zero
        try:
            N_triple = counts[unique == 3][0] / duration # per hour
        except:
            N_triple = 0
        try:
            N_double = counts[unique == 2][0] / duration
        except:
            N_double = 0
        try:
            N_single = counts[unique == 1][0] / duration
        except:
            N_single = 0
        # objective function
        p = self.preferences[0] * cost_avg * -1 +\
            self.preferences[1] * slew_avg * -1 +\
            self.preferences[2] * alt_avg * 1 +\
            self.preferences[3] * N_triple * 1 +\
            self.preferences[4] * N_double * 1 +\
            self.preferences[5] * N_single * -1
        return p

    def cum_reward(self):
        """Cumulative reward used by micro training; currently only the
        single/double/triple visit counts contribute (sums are zeroed)."""
        cost_sum = 0 #np.sum(self.__NightOutput[0:self.__step]['Alt'])
        slew_sum = 0 #np.sum(self.__NightOutput[0:self.__step]['Slew_t'])
        alt_sum = 0 #np.sum(self.__NightOutput[0:self.__step]['Alt'])
        # non-linear
        u, c = np.unique(self.__NightOutput['Field_id'], return_counts=True)
        unique, counts = np.unique(c, return_counts=True)
        try:
            N_triple = counts[unique == 3][0]
        except:
            N_triple = 0
        try:
            N_double = counts[unique == 2][0]
        except:
            N_double = 0
        try:
            N_single = counts[unique == 1][0]
        except:
            N_single = 0
        # cumulative reward
        c_r = self.preferences[0] * cost_sum * -1 +\
              self.preferences[1] * slew_sum * -1 +\
              self.preferences[2] * alt_sum * 1 +\
              self.preferences[3] * N_triple * 1 +\
              self.preferences[4] * N_double * 1 +\
              self.preferences[5] * N_single * -1
        return c_r
class TelescopeState(object):
    """Mutable state of the telescope over one night: current time/slot/step,
    the field being observed, the active filter, the night boundaries and a
    per-visit trace of the basis-function values (watch)."""

    def __init__(self):
        # variables (updated after every visit)
        self.t = None           # current time
        self.n = None           # current time slot
        # FIX: was `self.__step`, which name-mangles to _TelescopeState__step
        # and is never read — every other method uses `self.step`.
        self.step = None        # current decision number
        self.state = None       # current field
        self.the_filter = None
        # parameters (fixed for the night)
        self.t_start = None
        self.t_end = None
        # Moon
        self.moon_phase = None
        # temporary: per-visit snapshot of the 7 basis functions
        # FIX: np.int / np.float aliases were removed in NumPy >= 1.24;
        # the builtins give identical dtypes on every NumPy version.
        self.watch = np.zeros((1200,), dtype = [('Field_id', int),
                                                ('ephemDate', float),
                                                ('F1', float),
                                                ('F2', float),
                                                ('F3', float),
                                                ('F4', float),
                                                ('F5', float),
                                                ('F6', float),
                                                ('F7', float)]) # at most 1200 visits per night

    def set_param(self, t_start, t_end):
        """Set night boundaries and derive an approximate moon phase (0~1)."""
        self.t_start = t_start
        self.t_end = t_end
        self.moon_phase = (t_start - ephem.previous_new_moon(t_start))/30

    def set_t_n(self, t, n, step):
        """Update the clock: current time, slot index and decision counter."""
        self.t = t
        self.n = n
        self.step = step

    def set_state(self, state):
        """Set the currently observed field."""
        self.state = state

    def set_filter(self,the_filter):
        """Set the active filter."""
        self.the_filter = the_filter

    def update(self, t, n, step, state, the_filter):
        """Advance to a new visit: clock, field and filter in one call."""
        self.set_t_n(t, n, step)
        self.set_state(state)
        self.set_filter(the_filter)

    def watch_fcn(self, watch = True):
        """Record the current field's basis-function values (read from the
        module-global AllF) into the watch trace; no-op when watch=False."""
        if not watch:
            return
        else:
            F = AllF[int(self.state.id) -1]
            self.watch[self.step]['Field_id'] = self.state.id
            self.watch[self.step]['ephemDate'] = self.t
            self.watch[self.step]['F1'] = F[0]
            self.watch[self.step]['F2'] = F[1]
            self.watch[self.step]['F3'] = F[2]
            self.watch[self.step]['F4'] = F[3]
            self.watch[self.step]['F5'] = F[4]
            self.watch[self.step]['F6'] = F[5]
            self.watch[self.step]['F7'] = F[6]
            return
class FiledState(object):
    """Per-field state ('Filed' sic — class name kept for API compatibility):
    constant night parameters, per-step feature variables, and per-visit
    counters."""

    def __init__(self, id=None, ra=None, dec=None, lbl=None, **param):
        """Create a field.

        FIX: the original signature was ``__init__(self, **param)`` which made
        the positional call ``FiledState(id, ra, dec)`` in Scheduler.__init__
        raise TypeError.  id/ra/dec/lbl are now accepted positionally or as
        keywords; unknown extra keywords are still silently ignored, exactly
        as the old **param.get(...) lookup did.
        """
        # parameters (constant during the night)
        self.id = id
        self.dec = dec
        self.ra = ra
        self.label = lbl
        self.rise_t = None
        self.set_t = None
        self.n_tot_visits = None    # total number of visits before tonight
        self.coadded_depth = None   # coadded depth before tonight
        self.vis_of_year = None
        self.sci_prog = None
        self.t_last_v_last = None
        # variables (gets updated after each time step)
        self.slew_t_to = None
        self.t_since_last_v_ton = None
        self.t_since_last_v_last = None
        self.alt = None
        self.ha = None
        self.t_to_invis = None
        self.normalized_bri = None
        self.cov = None
        # visit variables (gets updated only after a visit)
        self.n_ton_visits = None    # total number of tonight's visits
        self.t_last_visit = None    # time of the last visit

    def set_param(self, rise_t, set_t, n_tot_visits, coad_depth, vis_of_year, sci_prog, t_last_v_last):
        """Set the night-constant parameters of the field."""
        self.rise_t = rise_t
        self.set_t = set_t
        self.n_tot_visits = n_tot_visits
        self.coadded_depth = coad_depth
        self.vis_of_year = vis_of_year
        self.sci_prog = sci_prog
        self.t_last_v_last = t_last_v_last

    def set_variables(self, alt, ha, cov, bri, t_to_invis, t_since_last_v_ton, t_since_last_v_last, slew_t_to):
        """Set all per-step feature variables at once (used at night reset)."""
        self.slew_t_to = slew_t_to
        self.alt = alt
        self.ha = ha
        self.t_to_invis = t_to_invis
        self.normalized_bri = bri
        self.cov = cov
        self.t_since_last_v_ton = t_since_last_v_ton
        self.t_since_last_v_last = t_since_last_v_last

    def set_visit_var(self, n_ton_visits, t_new_visit):
        """Reset tonight's visit counter and last-visit time."""
        self.n_ton_visits = n_ton_visits
        self.t_last_visit = t_new_visit

    def update_visit_var(self,t_new_visit):
        """Register one more visit of this field at time t_new_visit."""
        self.n_ton_visits = self.n_ton_visits +1
        self.t_last_visit = t_new_visit

    # variables can be grouped as updated before the feasibility check (hard) or after (soft)
    def set_hard_var(self, t_since_last_v, t_since_last_v_last, alt):
        """Store the features computed during the feasibility check."""
        self.t_since_last_v_ton = t_since_last_v
        self.t_since_last_v_last = t_since_last_v_last
        self.alt = alt

    def set_soft_var(self, slew_t_to, ha, t_to_invis, normalized_bri, cov):
        """Store the features computed only for feasible fields."""
        self.slew_t_to = slew_t_to
        self.ha = ha
        self.t_to_invis = t_to_invis
        self.normalized_bri = normalized_bri
        self.cov = cov
# Basis function calculation
def calculate_F1(slew_t_to): # slew time cost 0~2
    """Slew-time cost: the slew duration expressed in seconds, scaled by 5."""
    seconds = slew_t_to /ephem.second
    return seconds /5
def calculate_F2(t_since_last_v_ton, n_ton_visits, t_to_invis): # night urgency -1~1
    """Urgency of revisiting a field tonight.

    NOTE(review): despite the '-1~1' note, values returned are in 0~5; and
    when n_ton_visits is neither 1 nor 2 while the field was already visited
    tonight, no branch matches and the function falls through returning
    None — confirm intended.
    """
    if t_since_last_v_ton == inf or n_ton_visits == 2:
        return 5
    elif n_ton_visits == 1:
        if t_to_invis < 30 * ephem.minute:
            return 0
        else:
            # NOTE(review): precedence — this computes (t/20)*ephem.minute;
            # t / (20 * ephem.minute) looks intended. Confirm before changing.
            return 5 * (1 - np.exp(-1* t_since_last_v_ton / 20 * ephem.minute))
def calculate_F3(t_since_last_v_last): # overall urgency 0~1
    """Overall urgency: reciprocal of the time since the last-night visit,
    or 0 when the field was never visited (sentinel inf)."""
    return 0 if t_since_last_v_last == inf else 1/t_since_last_v_last
def calculate_F4(alt): # altitude cost 0~1
    """Altitude cost: 1 at the horizon, 0 at the zenith (alt = pi/2 rad)."""
    scale = 2/np.pi
    return 1 - scale * alt
def calculate_F5(ha): # hour angle cost 0~1
    """Hour-angle cost: |HA| in hours scaled by 12 (0 on the meridian)."""
    magnitude = np.abs(ha)
    return magnitude/12
def calculate_F6(coadded_depth): # coadded depth cost 0~1
    """Coadded-depth cost: passes through the already-normalized depth."""
    return coadded_depth
def calculate_F7(normalized_bri): # normalized brightness 0~1
    """Brightness cost: passes through the normalized sky brightness."""
    return normalized_bri
# cost function
def cost_fcn(weight, F):
    """Linear cost model: inner product of feature weights and basis values."""
    return np.dot(weight, F)
def calculate_cost(possible_next_field, tonight_telescope, f_weight):
    """Evaluate the weighted cost of visiting a candidate field next.

    Side effect: stores the field's 7 basis-function values in the module
    global AllF (row id - 1) for later inspection/training.
    """
    fld = possible_next_field
    F = np.zeros(7) # 7 is the number of basis functions
    F[0] = calculate_F1(fld.slew_t_to)
    F[1] = calculate_F2(fld.t_since_last_v_ton, fld.n_ton_visits, fld.t_to_invis)
    F[2] = calculate_F3(fld.t_since_last_v_last)
    F[3] = calculate_F4(fld.alt)
    F[4] = calculate_F5(fld.ha)
    F[5] = calculate_F6(fld.coadded_depth)
    F[6] = calculate_F7(fld.normalized_bri)
    global AllF
    AllF[int(fld.id) -1] = F
    return cost_fcn(f_weight, F)
def decision_fcn(all_costs, feasibility_idx):
    """Return (field index, cost) of the cheapest feasible field."""
    feasible_costs = [all_costs[i] for i in feasibility_idx]
    best = np.argmin(feasible_costs)
    return feasibility_idx[best], feasible_costs[best]
# feasibility check and update some of the features that are used to check feasibility
# other functions
def find_n(t, t_start, t_end, n_time_slots, time_slots):
    """Return the index of the time slot containing time t.

    Clamps to 0 for t at/before t_start and to n_time_slots - 1 for t
    at/after t_end; otherwise returns the first slot whose time is >= t.
    """
    if t <= t_start:
        return 0
    if t >= t_end:
        return n_time_slots -1
    # binary search replaces the original linear scan: identical result
    # (first index with time_slots[n] >= t) in O(log n) instead of O(n)
    return bisect.bisect_left(time_slots, t)
class Trainer(object):
    """Generates feature-weight corrections from per-visit feedback."""

    def micro_feedback(self, **options):
        """TD-style update: return alpha * delta * F.

        Keywords: R (reward), o_C (previous cost), n_C (new cost),
        F (basis-function vector); av_R is accepted by callers but unused.
        """
        reward = options.get("R")
        old_cost = options.get("o_C")
        new_cost = options.get("n_C")
        F = options.get("F")
        alpha = 0.1   # learning rate
        gamma = 0.1   # discount factor
        delta = - old_cost - (reward - gamma * new_cost)
        return alpha * delta * F
'''
Date = ephem.Date('2016/09/01 12:00:00.00') # times are in UT
Site = ephem.Observer()
Site.lon = -1.2320792
Site.lat = -0.517781017
Site.elevation = 2650
Site.pressure = 0.
Site.horizon = 0.
F_weight = np.array([ 1, 1, 1, 1, 1, 1, 1])
# create scheduler
scheduler = Scheduler(Date, Site, F_weight)
# schedule
scheduler.schedule()
'''
|
elahesadatnaghib/feature-based-scheduler
|
FBDE.py
|
Python
|
mit
| 27,590
|
[
"VisIt"
] |
3dc1e93f8ad8208f538d4c0f2b6aaffd7ba9006428e1442ae05175e9ece651ac
|
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
# Jun Yang
#
import numpy
from pyscf import lib
#einsum = numpy.einsum
einsum = lib.einsum
def _gamma1_intermediates(mycc, t1, t2, l1, l2):
    '''Blocks of the spin-orbital CCSD 1-RDM from amplitudes and Lambda.

    Returns the (doo, dov, dvo, dvv) blocks; they are assembled and
    hermitized later in _make_rdm1.
    '''
    # occupied-occupied block
    doo =-einsum('ie,je->ij', l1, t1)
    doo -= einsum('imef,jmef->ij', l2, t2) * .5
    # virtual-virtual block
    dvv = einsum('ma,mb->ab', t1, l1)
    dvv += einsum('mnea,mneb->ab', t2, l2) * .5
    # intermediates reused when building the vir-occ block below
    xt1 = einsum('mnef,inef->mi', l2, t2) * .5
    xt2 = einsum('mnfa,mnfe->ae', t2, l2) * .5
    xt2 += einsum('ma,me->ae', t1, l1)
    # virtual-occupied block
    dvo = einsum('imae,me->ai', t2, l1)
    dvo -= einsum('mi,ma->ai', xt1, t1)
    dvo -= einsum('ie,ae->ai', t1, xt2)
    dvo += t1.T
    # occupied-virtual block is just the Lambda singles
    dov = l1
    return doo, dov, dvo, dvv
# gamma2 intermediates in Chemist's notation
# When computing intermediates, the convention
# dm2[q,p,s,r] = <p^\dagger r^\dagger s q> is assumed in this function.
# It changes to dm2[p,q,r,s] = <p^\dagger r^\dagger s q> in _make_rdm2
def _gamma2_intermediates(mycc, t1, t2, l1, l2):
    '''Blocks of the spin-orbital CCSD 2-RDM (Chemist's-notation intermediates).

    Assumes dm2[q,p,s,r] = <p^\dagger r^\dagger s q> while accumulating;
    _make_rdm2 transposes to dm2[p,q,r,s] at the end.
    '''
    # tau = t2 + 2 * t1.t1 (effective doubles amplitude)
    tau = t2 + einsum('ia,jb->ijab', t1, t1) * 2
    miajb = einsum('ikac,kjcb->iajb', l2, t2)
    # occ-occ-vir-vir intermediate
    goovv = 0.25 * (l2.conj() + tau)
    tmp = einsum('kc,kica->ia', l1, t2)
    goovv += einsum('ia,jb->ijab', tmp, t1)
    tmp = einsum('kc,kb->cb', l1, t1)
    goovv += einsum('cb,ijca->ijab', tmp, t2) * .5
    tmp = einsum('kc,jc->kj', l1, t1)
    goovv += einsum('kiab,kj->ijab', tau, tmp) * .5
    tmp = numpy.einsum('ldjd->lj', miajb)
    goovv -= einsum('lj,liba->ijab', tmp, tau) * .25
    tmp = numpy.einsum('ldlb->db', miajb)
    goovv -= einsum('db,jida->ijab', tmp, tau) * .25
    goovv -= einsum('ldia,ljbd->ijab', miajb, tau) * .5
    tmp = einsum('klcd,ijcd->ijkl', l2, tau) * .25**2
    goovv += einsum('ijkl,klab->ijab', tmp, tau)
    goovv = goovv.conj()
    # all-virtual and all-occupied intermediates
    gvvvv = einsum('ijab,ijcd->abcd', tau, l2) * 0.125
    goooo = einsum('klab,ijab->klij', l2, tau) * 0.125
    # occ-occ-occ-vir intermediate
    gooov = einsum('jkba,ib->jkia', tau, l1) * -0.25
    gooov += einsum('iljk,la->jkia', goooo, t1)
    tmp = numpy.einsum('icjc->ij', miajb) * .25
    gooov -= einsum('ij,ka->jkia', tmp, t1)
    gooov += einsum('icja,kc->jkia', miajb, t1) * .5
    gooov = gooov.conj()
    gooov += einsum('jkab,ib->jkia', l2, t1) * .25
    # occ-vir-vir-occ intermediate
    govvo = einsum('ia,jb->ibaj', l1, t1)
    govvo += numpy.einsum('iajb->ibaj', miajb)
    govvo -= einsum('ikac,jc,kb->ibaj', l2, t1, t1)
    # occ-vir-vir-vir intermediate
    govvv = einsum('ja,ijcb->iacb', l1, tau) * .25
    govvv += einsum('bcad,id->iabc', gvvvv, t1)
    tmp = numpy.einsum('kakb->ab', miajb) * .25
    govvv += einsum('ab,ic->iacb', tmp, t1)
    govvv += einsum('kaib,kc->iabc', miajb, t1) * .5
    govvv = govvv.conj()
    govvv += einsum('ijbc,ja->iabc', l2, t1) * .25
    # antisymmetrize each g-intermediate into its d-counterpart
    dovov = goovv.transpose(0,2,1,3) - goovv.transpose(0,3,1,2)
    dvvvv = gvvvv.transpose(0,2,1,3) - gvvvv.transpose(0,3,1,2)
    doooo = goooo.transpose(0,2,1,3) - goooo.transpose(0,3,1,2)
    dovvv = govvv.transpose(0,2,1,3) - govvv.transpose(0,3,1,2)
    dooov = gooov.transpose(0,2,1,3) - gooov.transpose(1,2,0,3)
    dovvo = govvo.transpose(0,2,1,3)
    # enforce hermiticity/particle-exchange symmetry
    dovov =(dovov + dovov.transpose(2,3,0,1)) * .5
    dvvvv = dvvvv + dvvvv.transpose(1,0,3,2).conj()
    doooo = doooo + doooo.transpose(1,0,3,2).conj()
    dovvo =(dovvo + dovvo.transpose(3,2,1,0).conj()) * .5
    # these two blocks are reconstructed from dovvo in _make_rdm2
    doovv = None # = -dovvo.transpose(0,3,2,1)
    dvvov = None
    return (dovov, dvvvv, doooo, doovv, dovvo, dvvov, dovvv, dooov)
def make_rdm1(mycc, t1, t2, l1, l2, ao_repr=False):
    r'''Spin-orbital one-particle density matrix.

    Builds the gamma1 intermediates from the CC amplitudes and Lambda
    vectors, then assembles dm1[p,q] = <q^\dagger p> (McWeeney's
    convention, Eq (5.4.20)), so that E = einsum('pq,qp', h1, rdm1).
    The occupied-virtual blocks from the orbital response contribution
    are not included.
    '''
    intermediates = _gamma1_intermediates(mycc, t1, t2, l1, l2)
    return _make_rdm1(mycc, intermediates, with_frozen=True, ao_repr=ao_repr)
def make_rdm2(mycc, t1, t2, l1, l2, ao_repr=False):
    r'''Spin-orbital two-particle density matrix.

    dm2[p,q,r,s] = <p^\dagger r^\dagger s q> where p,q belong to one
    particle and r,s to the other; the contraction with Chemist's-notation
    ERIs is E = einsum('pqrs,pqrs', eri, rdm2).
    '''
    gamma1 = _gamma1_intermediates(mycc, t1, t2, l1, l2)
    gamma2 = _gamma2_intermediates(mycc, t1, t2, l1, l2)
    return _make_rdm2(mycc, gamma1, gamma2, with_dm1=True, with_frozen=True,
                      ao_repr=ao_repr)
def _make_rdm1(mycc, d1, with_frozen=True, ao_repr=False):
    r'''
    Assemble the one-particle density matrix from the (doo, dov, dvo, dvv)
    blocks (the occupied-virtual blocks from the orbital response
    contribution are not included).
    dm1[p,q] = <q^\dagger p> (p,q are spin-orbitals)
    The convention of 1-pdm is based on McWeeney's book, Eq (5.4.20).
    The contraction between 1-particle Hamiltonian and rdm1 is
    E = einsum('pq,qp', h1, rdm1)
    '''
    doo, dov, dvo, dvv = d1
    nocc, nvir = dov.shape
    nmo = nocc + nvir
    # Fill the four blocks, symmetrizing each with its conjugate transpose.
    dm1 = numpy.empty((nmo,nmo), dtype=doo.dtype)
    dm1[:nocc,:nocc] = doo + doo.conj().T
    dm1[:nocc,nocc:] = dov + dvo.conj().T
    dm1[nocc:,:nocc] = dm1[:nocc,nocc:].conj().T
    dm1[nocc:,nocc:] = dvv + dvv.conj().T
    dm1 *= .5
    # Add the mean-field (reference) occupation on the occupied diagonal.
    dm1[numpy.diag_indices(nocc)] += 1
    if with_frozen and mycc.frozen is not None:
        # Embed the correlated-space dm1 into the full MO space; frozen
        # occupied orbitals keep occupation 1, frozen virtuals 0.
        nmo = mycc.mo_occ.size
        nocc = numpy.count_nonzero(mycc.mo_occ > 0)
        rdm1 = numpy.zeros((nmo,nmo), dtype=dm1.dtype)
        rdm1[numpy.diag_indices(nocc)] = 1
        moidx = numpy.where(mycc.get_frozen_mask())[0]
        rdm1[moidx[:,None],moidx] = dm1
        dm1 = rdm1
    if ao_repr:
        # Back-transform MO-basis dm1 to the AO basis.
        mo = mycc.mo_coeff
        dm1 = lib.einsum('pi,ij,qj->pq', mo, dm1, mo.conj())
    return dm1
def _make_rdm2(mycc, d1, d2, with_dm1=True, with_frozen=True, ao_repr=False):
    r'''
    Assemble the two-particle density matrix from the gamma2 block
    intermediates.
    dm2[p,q,r,s] = <p^\dagger r^\dagger s q>
    Note the contraction between ERIs (in Chemist's notation) and rdm2 is
    E = einsum('pqrs,pqrs', eri, rdm2)
    '''
    dovov, dvvvv, doooo, doovv, dovvo, dvvov, dovvv, dooov = d2
    nocc, nvir = dovov.shape[:2]
    nmo = nocc + nvir
    dm2 = numpy.empty((nmo,nmo,nmo,nmo), dtype=doooo.dtype)
    # Intermediates are dropped (set to None) as soon as they are copied in,
    # to cap peak memory on the nmo^4 array.
    dovov = numpy.asarray(dovov)
    dm2[:nocc,nocc:,:nocc,nocc:] = dovov
    dm2[nocc:,:nocc,nocc:,:nocc] = dm2[:nocc,nocc:,:nocc,nocc:].transpose(1,0,3,2).conj()
    dovov = None
    dovvo = numpy.asarray(dovvo)
    # doovv/dvvov were returned as None; reconstruct them from dovvo here.
    dm2[:nocc,:nocc,nocc:,nocc:] =-dovvo.transpose(0,3,2,1)
    dm2[nocc:,nocc:,:nocc,:nocc] =-dovvo.transpose(2,1,0,3)
    dm2[:nocc,nocc:,nocc:,:nocc] = dovvo
    dm2[nocc:,:nocc,:nocc,nocc:] = dovvo.transpose(1,0,3,2).conj()
    dovvo = None
    dm2[nocc:,nocc:,nocc:,nocc:] = dvvvv
    dm2[:nocc,:nocc,:nocc,:nocc] = doooo
    dovvv = numpy.asarray(dovvv)
    dm2[:nocc,nocc:,nocc:,nocc:] = dovvv
    dm2[nocc:,nocc:,:nocc,nocc:] = dovvv.transpose(2,3,0,1)
    dm2[nocc:,nocc:,nocc:,:nocc] = dovvv.transpose(3,2,1,0).conj()
    dm2[nocc:,:nocc,nocc:,nocc:] = dovvv.transpose(1,0,3,2).conj()
    dovvv = None
    dooov = numpy.asarray(dooov)
    dm2[:nocc,:nocc,:nocc,nocc:] = dooov
    dm2[:nocc,nocc:,:nocc,:nocc] = dooov.transpose(2,3,0,1)
    dm2[:nocc,:nocc,nocc:,:nocc] = dooov.transpose(1,0,3,2).conj()
    dm2[nocc:,:nocc,:nocc,:nocc] = dooov.transpose(3,2,1,0).conj()
    if with_frozen and mycc.frozen is not None:
        # Scatter the correlated-space dm2 into the full MO space.
        nmo, nmo0 = mycc.mo_occ.size, nmo
        nocc = numpy.count_nonzero(mycc.mo_occ > 0)
        rdm2 = numpy.zeros((nmo,nmo,nmo,nmo), dtype=dm2.dtype)
        moidx = numpy.where(mycc.get_frozen_mask())[0]
        idx = (moidx.reshape(-1,1) * nmo + moidx).ravel()
        lib.takebak_2d(rdm2.reshape(nmo**2,nmo**2),
                       dm2.reshape(nmo0**2,nmo0**2), idx, idx)
        dm2 = rdm2
    if with_dm1:
        # Fold in the 1-RDM (minus reference occupation) and the
        # reference-determinant contribution.
        dm1 = _make_rdm1(mycc, d1, with_frozen)
        dm1[numpy.diag_indices(nocc)] -= 1
        for i in range(nocc):
            # Be careful with the convention of dm1 and dm2.transpose
            # at the end
            dm2[i,i,:,:] += dm1
            dm2[:,:,i,i] += dm1
            dm2[:,i,i,:] -= dm1
            dm2[i,:,:,i] -= dm1.T
        for i in range(nocc):
            for j in range(nocc):
                dm2[i,i,j,j] += 1
                dm2[i,j,j,i] -= 1
    # dm2 was computed as dm2[p,q,r,s] = < p^\dagger r^\dagger s q > in the
    # above. Transposing it so that it be contracted with ERIs (in Chemist's
    # notation):
    # E = einsum('pqrs,pqrs', eri, rdm2)
    dm2 = dm2.transpose(1,0,3,2)
    if ao_repr:
        from pyscf.cc import ccsd_rdm
        dm2 = ccsd_rdm._rdm2_mo2ao(dm2, mycc.mo_coeff)
    return dm2
if __name__ == '__main__':
    # Smoke test: compare the CCSD energy recomputed from the 1- and 2-RDMs
    # against mycc.e_tot for a small open-shell water molecule.
    from functools import reduce
    from pyscf import gto
    from pyscf import scf
    from pyscf import ao2mo
    from pyscf.cc import gccsd
    mol = gto.Mole()
    mol.atom = [
        [8 , (0. , 0. , 0.)],
        [1 , (0. , -0.757 , 0.587)],
        [1 , (0. , 0.757 , 0.587)]]
    mol.basis = '631g'
    mol.spin = 2
    mol.build()
    # NOTE(review): conv_tol=1. deliberately leaves the SCF unconverged —
    # presumably to exercise the RDM code on a non-trivial reference.
    mf = scf.UHF(mol).run(conv_tol=1.)
    mf = scf.addons.convert_to_ghf(mf)
    mycc = gccsd.GCCSD(mf)
    ecc, t1, t2 = mycc.kernel()
    l1, l2 = mycc.solve_lambda()
    dm1 = make_rdm1(mycc, t1, t2, l1, l2)
    dm2 = make_rdm2(mycc, t1, t2, l1, l2)
    nao = mol.nao_nr()
    # alpha/beta AO blocks of the generalized (spin-orbital) MO coefficients
    mo_a = mf.mo_coeff[:nao]
    mo_b = mf.mo_coeff[nao:]
    nmo = mo_a.shape[1]
    eri = ao2mo.kernel(mf._eri, mo_a+mo_b, compact=False).reshape([nmo]*4)
    # zero out spin-forbidden ERI blocks (mixed alpha/beta indices)
    orbspin = mf.mo_coeff.orbspin
    sym_forbid = (orbspin[:,None] != orbspin)
    eri[sym_forbid,:,:] = 0
    eri[:,:,sym_forbid] = 0
    hcore = scf.RHF(mol).get_hcore()
    h1 = reduce(numpy.dot, (mo_a.T.conj(), hcore, mo_a))
    h1+= reduce(numpy.dot, (mo_b.T.conj(), hcore, mo_b))
    # E = tr(h1 dm1) + 1/2 sum eri*dm2 + E_nuc; should match mycc.e_tot
    e1 = numpy.einsum('ij,ji', h1, dm1)
    e1+= numpy.einsum('ijkl,ijkl', eri, dm2) * .5
    e1+= mol.energy_nuc()
    print(e1 - mycc.e_tot)
    #TODO: test 1pdm, 2pdm against FCI
|
sunqm/pyscf
|
pyscf/cc/gccsd_rdm.py
|
Python
|
apache-2.0
| 10,624
|
[
"PySCF"
] |
10ceb99a91e6e5d0f6a1a38baac3c2902f82fcf443da972e63aec3468342f8ba
|
""" Module that contains simple client access to Matcher service
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from DIRAC.Core.Base.Client import Client, createClient
from DIRAC.Core.Utilities.DEncode import ignoreEncodeWarning
from DIRAC.Core.Utilities.JEncode import strToIntDict
@createClient('WorkloadManagement/Matcher')
class MatcherClient(Client):
  """ Exposes the functionality available in the WorkloadManagement/MatcherHandler

      This inherits the DIRAC base Client for direct execution of server
      functionality. The following methods are available (although not
      visible here).
  """

  def __init__(self, **kwargs):
    """ Simple constructor
    """
    super(MatcherClient, self).__init__(**kwargs)
    self.setServer('WorkloadManagement/Matcher')

  def _intKeyedResult(self, res):
    """ Cast the 'Value' dictionary keys of a successful RPC result back to int.

        JEncode serialises integer dictionary keys as strings; both
        task-queue getters share this post-processing step.
    """
    if res["OK"]:
      res['Value'] = strToIntDict(res['Value'])
    return res

  @ignoreEncodeWarning
  def getMatchingTaskQueues(self, resourceDict):
    """ Return all task queues that match the resourceDict
    """
    return self._intKeyedResult(self._getRPC().getMatchingTaskQueues(resourceDict))

  @ignoreEncodeWarning
  def getActiveTaskQueues(self):
    """ Return all active task queues
    """
    return self._intKeyedResult(self._getRPC().getActiveTaskQueues())
|
yujikato/DIRAC
|
src/DIRAC/WorkloadManagementSystem/Client/MatcherClient.py
|
Python
|
gpl-3.0
| 1,451
|
[
"DIRAC"
] |
f2fd51fc608a0d79e128e846d737f590207d57988ca53b6f8c13a11b6ad8a1fc
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Custom astroid checker for config calls."""
import sys
import os
import os.path
import astroid
from pylint import interfaces, checkers
from pylint.checkers import utils
sys.path.insert(
0, os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
os.pardir))
from qutebrowser.config import configdata
class ConfigChecker(checkers.BaseChecker):

    """Custom astroid checker for config calls.

    Flags config.get/config.set calls whose (section, option) pair does not
    exist in qutebrowser's configdata.
    """

    __implements__ = interfaces.IAstroidChecker
    name = 'config'
    msgs = {
        'E0000': ('"%s -> %s" is no valid config option.', # flake8: disable=S001
                  'bad-config-call',
                  None),
    }
    priority = -1

    @utils.check_messages('bad-config-call')
    def visit_call(self, node):
        """Visit a Call node.

        Only calls whose inferred root module is qutebrowser.config.config
        and whose attribute is 'get' or 'set' are inspected further.
        """
        if hasattr(node, 'func'):
            infer = utils.safe_infer(node.func)
            if infer and infer.root().name == 'qutebrowser.config.config':
                if getattr(node.func, 'attrname', None) in ('get', 'set'):
                    self._check_config(node)

    def _check_config(self, node):
        """Check that the arguments to config.get(...) are valid.

        FIXME: We should check all ConfigManager calls.
        https://github.com/The-Compiler/qutebrowser/issues/107
        """
        try:
            sect_arg = utils.get_argument_from_call(node, position=0,
                                                    keyword='sectname')
            opt_arg = utils.get_argument_from_call(node, position=1,
                                                   keyword='optname')
        except utils.NoSuchArgumentError:
            return
        sect_arg = utils.safe_infer(sect_arg)
        opt_arg = utils.safe_infer(opt_arg)
        # Only constant (literal) arguments can be validated statically.
        if not (isinstance(sect_arg, astroid.Const) and
                isinstance(opt_arg, astroid.Const)):
            return
        try:
            configdata.DATA[sect_arg.value][opt_arg.value]
        except KeyError:
            self.add_message('bad-config-call', node=node,
                             args=(sect_arg.value, opt_arg.value))
def register(linter):
    """Entry point used by pylint to hook up this plugin's checker."""
    checker = ConfigChecker(linter)
    linter.register_checker(checker)
|
Konubinix/qutebrowser
|
scripts/dev/pylint_checkers/qute_pylint/config.py
|
Python
|
gpl-3.0
| 3,009
|
[
"VisIt"
] |
5eedb43f8ad05da882dd6b93de99bd88b0b2ea6cde094be67eba119a24cf2086
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
:mod:`MDAnalysis.lib` --- Access to lower level routines
================================================================
"""
__all__ = ['log', 'transformations', 'util', 'mdamath', 'distances',
'NeighborSearch', 'formats']
from . import log
from . import transformations
from . import util
from . import mdamath
from . import distances # distances relies on mdamath
from . import NeighborSearch
from . import formats
|
alejob/mdanalysis
|
package/MDAnalysis/lib/__init__.py
|
Python
|
gpl-2.0
| 1,457
|
[
"MDAnalysis"
] |
493f5ddea1ba308ec2ac2ae9cd6960fe258f6ce37365f369e3bbdab15cf89851
|
#!/usr/bin/python
# Auto-visit a list of target URLs with a CSP-instrumented browser.
# URLs come from repeated -u flags, or from stdin (one per line) when
# no -u flag is given.
import sys
import argparse
from CSPBrowser import *
parser = argparse.ArgumentParser(description="Auto-visit target URLs")
parser.add_argument('-p', '--port', metavar='num', type=int, help='Port to listen on', dest='port')
parser.add_argument('-d', '--domain', metavar='d', help='IP address/hostname to listen on', dest='domain')
parser.add_argument('-u', '--url', metavar='u', action='append', help='url to visit', dest='url')
args = parser.parse_args()
# Fall back to reading URLs from stdin when none were given on the CLI.
if not args.url:
    args.url=[x.strip() for x in sys.stdin.readlines()]
ff = CSPBrowser(args.port, args.domain)
ff.load(args.url)
ff.run()
|
Kennysan/CSPTools
|
browser/run.py
|
Python
|
mit
| 617
|
[
"VisIt"
] |
d0e18334a7806b10de872610936073cbae25dbd937a626f621205665dae4ad65
|
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/Window')
from data_800ms import Fmat_original
# Returns mu,sigma for 10 hidden-states from feature-vectors(121,35) for RF,SF,RM,SM models
def feature_to_mu_sigma(fvec):
    """Compute per-hidden-state Gaussian emission parameters.

    Splits the rows of *fvec* into 10 equal chunks (one per hidden state)
    and returns two (10, 1) matrices: the mean and standard deviation of
    each chunk (taken over all columns of the chunk).

    Fixes vs. the original: ``m // 10`` (true division ``m / 10`` yields a
    float and breaks slicing on Python 3) and ``np.mean``/``np.std``
    (the ``scipy.mean``/``scipy.std`` aliases were removed from SciPy).
    """
    m, n = np.shape(fvec)
    mu = np.matrix(np.zeros((10, 1)))
    sigma = np.matrix(np.zeros((10, 1)))
    DIVS = m // 10  # rows per hidden state
    for index in range(10):
        m_init = index * DIVS
        temp_fvec = fvec[m_init:(m_init + DIVS), 0:]
        mu[index] = np.mean(temp_fvec)
        sigma[index] = np.std(temp_fvec)
    return mu, sigma
# Returns sequence given raw data
def create_seq(fvec):
    """Collapse raw feature rows into a 10-state observation sequence.

    For each column of *fvec*, the rows are split into 10 equal chunks and
    each chunk is replaced by its mean, giving a (10, n) matrix with one
    observation sequence per column.

    Fixes vs. the original: ``m // 10`` (true division ``m / 10`` yields a
    float and breaks slicing on Python 3) and ``np.mean`` (the
    ``scipy.mean`` alias was removed from SciPy).
    """
    m, n = np.shape(fvec)
    seq = np.matrix(np.zeros((10, n)))
    DIVS = m // 10  # rows per hidden state
    for i in range(n):
        for index in range(10):
            m_init = index * DIVS
            seq[index, i] = np.mean(fvec[m_init:(m_init + DIVS), i])
    return seq
if __name__ == '__main__':
    # 5-fold cross-validation of four Gaussian-emission HMMs (rigid-fixed,
    # rigid-movable, soft-fixed, soft-movable) on contact-area sequences,
    # followed by a confusion-matrix plot.
    Fmat = Fmat_original
    # Checking the Data-Matrix
    m_tot, n_tot = np.shape(Fmat)
    #print " "
    #print 'Total_Matrix_Shape:',m_tot,n_tot
    # Emission parameters per class: columns 0:35 RF, 35:70 RM, 70:105 SF,
    # 105:140 SM; rows 81:162 are the contact-area feature band.
    mu_rf,sigma_rf = feature_to_mu_sigma(Fmat[81:162,0:35])
    mu_rm,sigma_rm = feature_to_mu_sigma(Fmat[81:162,35:70])
    mu_sf,sigma_sf = feature_to_mu_sigma(Fmat[81:162,70:105])
    mu_sm,sigma_sm = feature_to_mu_sigma(Fmat[81:162,105:140])
    mu_obj1,sigma_obj1 = feature_to_mu_sigma(Fmat[81:162,140:141])
    mu_obj2,sigma_obj2 = feature_to_mu_sigma(Fmat[81:162,141:142])
    #print [mu_rf, sigma_rf]
    # HMM - Implementation:
    # 10 Hidden States
    # Max. Force(Not now), Contact Area(For now), and Contact Motion(Not Now) as Continuous Gaussian Observations from each hidden state
    # Four HMM-Models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable
    # Transition probabilities obtained as upper diagonal matrix (to be trained using Baum_Welch)
    # For new objects, it is classified according to which model it represents the closest..
    F = ghmm.Float() # emission domain of this model
    # A - Transition Matrix (left-to-right: upper triangular, absorbing last state)
    A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
         [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
         [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
         [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
         [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
    # B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
    B_rf = np.zeros((10,2))
    B_rm = np.zeros((10,2))
    B_sf = np.zeros((10,2))
    B_sm = np.zeros((10,2))
    for num_states in range(10):
        B_rf[num_states,0] = mu_rf[num_states]
        B_rf[num_states,1] = sigma_rf[num_states]
        B_rm[num_states,0] = mu_rm[num_states]
        B_rm[num_states,1] = sigma_rm[num_states]
        B_sf[num_states,0] = mu_sf[num_states]
        B_sf[num_states,1] = sigma_sf[num_states]
        B_sm[num_states,0] = mu_sm[num_states]
        B_sm[num_states,1] = sigma_sm[num_states]
    B_rf = B_rf.tolist()
    B_rm = B_rm.tolist()
    B_sf = B_sf.tolist()
    B_sm = B_sm.tolist()
    # pi - initial probabilities per state
    pi = [0.1] * 10
    # generate RF, RM, SF, SM models from parameters
    model_rf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf, pi) # Will be Trained
    model_rm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm, pi) # Will be Trained
    model_sf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf, pi) # Will be Trained
    model_sm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm, pi) # Will be Trained
    # Per-class vote accumulators over the 5 cross-validation folds.
    trial_number = 1
    rf_final = np.matrix(np.zeros((28,1)))
    rm_final = np.matrix(np.zeros((28,1)))
    sf_final = np.matrix(np.zeros((28,1)))
    sm_final = np.matrix(np.zeros((28,1)))
    while (trial_number < 6):
        # For Training
        # Each object contributes 5 trials; fold k holds trial k out for
        # testing and trains on the remaining 4 (per class).
        total_seq = Fmat[121:242,:]
        m_total, n_total = np.shape(total_seq)
        #print 'Total_Sequence_Shape:', m_total, n_total
        if (trial_number == 1):
            j = 5
            total_seq_rf = total_seq[0:81,1:5]
            total_seq_rm = total_seq[0:81,36:40]
            total_seq_sf = total_seq[0:81,71:75]
            total_seq_sm = total_seq[0:81,106:110]
            while (j < 35):
                total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:81,j+1:j+5]))
                total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:81,j+36:j+40]))
                total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:81,j+71:j+75]))
                total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:81,j+106:j+110]))
                j = j+5
        if (trial_number == 2):
            j = 5
            total_seq_rf = np.column_stack((total_seq[0:81,0],total_seq[0:81,2:5]))
            total_seq_rm = np.column_stack((total_seq[0:81,35],total_seq[0:81,37:40]))
            total_seq_sf = np.column_stack((total_seq[0:81,70],total_seq[0:81,72:75]))
            total_seq_sm = np.column_stack((total_seq[0:81,105],total_seq[0:81,107:110]))
            while (j < 35):
                total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:81,j+0],total_seq[0:81,j+2:j+5]))
                total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:81,j+35],total_seq[0:81,j+37:j+40]))
                total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:81,j+70],total_seq[0:81,j+72:j+75]))
                total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:81,j+105],total_seq[0:81,j+107:j+110]))
                j = j+5
        if (trial_number == 3):
            j = 5
            total_seq_rf = np.column_stack((total_seq[0:81,0:2],total_seq[0:81,3:5]))
            total_seq_rm = np.column_stack((total_seq[0:81,35:37],total_seq[0:81,38:40]))
            total_seq_sf = np.column_stack((total_seq[0:81,70:72],total_seq[0:81,73:75]))
            total_seq_sm = np.column_stack((total_seq[0:81,105:107],total_seq[0:81,108:110]))
            while (j < 35):
                total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:81,j+0:j+2],total_seq[0:81,j+3:j+5]))
                total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:81,j+35:j+37],total_seq[0:81,j+38:j+40]))
                total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:81,j+70:j+72],total_seq[0:81,j+73:j+75]))
                total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:81,j+105:j+107],total_seq[0:81,j+108:j+110]))
                j = j+5
        if (trial_number == 4):
            j = 5
            total_seq_rf = np.column_stack((total_seq[0:81,0:3],total_seq[0:81,4:5]))
            total_seq_rm = np.column_stack((total_seq[0:81,35:38],total_seq[0:81,39:40]))
            total_seq_sf = np.column_stack((total_seq[0:81,70:73],total_seq[0:81,74:75]))
            total_seq_sm = np.column_stack((total_seq[0:81,105:108],total_seq[0:81,109:110]))
            while (j < 35):
                total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:81,j+0:j+3],total_seq[0:81,j+4:j+5]))
                total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:81,j+35:j+38],total_seq[0:81,j+39:j+40]))
                total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:81,j+70:j+73],total_seq[0:81,j+74:j+75]))
                total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:81,j+105:j+108],total_seq[0:81,j+109:j+110]))
                j = j+5
        if (trial_number == 5):
            j = 5
            total_seq_rf = total_seq[0:81,0:4]
            total_seq_rm = total_seq[0:81,35:39]
            total_seq_sf = total_seq[0:81,70:74]
            total_seq_sm = total_seq[0:81,105:109]
            while (j < 35):
                total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:81,j+0:j+4]))
                total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:81,j+35:j+39]))
                total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:81,j+70:j+74]))
                total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:81,j+105:j+109]))
                j = j+5
        # ghmm expects one sequence per row (python lists).
        train_seq_rf = (np.array(total_seq_rf).T).tolist()
        train_seq_rm = (np.array(total_seq_rm).T).tolist()
        train_seq_sf = (np.array(total_seq_sf).T).tolist()
        train_seq_sm = (np.array(total_seq_sm).T).tolist()
        #print train_seq_rf
        final_ts_rf = ghmm.SequenceSet(F,train_seq_rf)
        final_ts_rm = ghmm.SequenceSet(F,train_seq_rm)
        final_ts_sf = ghmm.SequenceSet(F,train_seq_sf)
        final_ts_sm = ghmm.SequenceSet(F,train_seq_sm)
        # Baum-Welch re-estimation of each class model on its training fold.
        model_rf.baumWelch(final_ts_rf)
        model_rm.baumWelch(final_ts_rm)
        model_sf.baumWelch(final_ts_sf)
        model_sm.baumWelch(final_ts_sm)
        # For Testing
        # The held-out trial (one column per object, per class).
        if (trial_number == 1):
            j = 5
            total_seq_rf = total_seq[0:81,0]
            total_seq_rm = total_seq[0:81,35]
            total_seq_sf = total_seq[0:81,70]
            total_seq_sm = total_seq[0:81,105]
            while (j < 35):
                total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:81,j]))
                total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:81,j+35]))
                total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:81,j+70]))
                total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:81,j+105]))
                j = j+5
        if (trial_number == 2):
            j = 5
            total_seq_rf = total_seq[0:81,1]
            total_seq_rm = total_seq[0:81,36]
            total_seq_sf = total_seq[0:81,71]
            total_seq_sm = total_seq[0:81,106]
            while (j < 35):
                total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:81,j+1]))
                total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:81,j+36]))
                total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:81,j+71]))
                total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:81,j+106]))
                j = j+5
        if (trial_number == 3):
            j = 5
            total_seq_rf = total_seq[0:81,2]
            total_seq_rm = total_seq[0:81,37]
            total_seq_sf = total_seq[0:81,72]
            total_seq_sm = total_seq[0:81,107]
            while (j < 35):
                total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:81,j+2]))
                total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:81,j+37]))
                total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:81,j+72]))
                total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:81,j+107]))
                j = j+5
        if (trial_number == 4):
            j = 5
            total_seq_rf = total_seq[0:81,3]
            total_seq_rm = total_seq[0:81,38]
            total_seq_sf = total_seq[0:81,73]
            total_seq_sm = total_seq[0:81,108]
            while (j < 35):
                total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:81,j+3]))
                total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:81,j+38]))
                total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:81,j+73]))
                total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:81,j+108]))
                j = j+5
        if (trial_number == 5):
            j = 5
            total_seq_rf = total_seq[0:81,4]
            total_seq_rm = total_seq[0:81,39]
            total_seq_sf = total_seq[0:81,74]
            total_seq_sm = total_seq[0:81,109]
            while (j < 35):
                total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:81,j+4]))
                total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:81,j+39]))
                total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:81,j+74]))
                total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:81,j+109]))
                j = j+5
        total_seq_obj = np.matrix(np.column_stack((total_seq_rf,total_seq_rm,total_seq_sf,total_seq_sm)))
        # One-hot vote per test sequence: which model's Viterbi path wins.
        rf = np.matrix(np.zeros(np.size(total_seq_obj,1)))
        rm = np.matrix(np.zeros(np.size(total_seq_obj,1)))
        sf = np.matrix(np.zeros(np.size(total_seq_obj,1)))
        sm = np.matrix(np.zeros(np.size(total_seq_obj,1)))
        k = 0
        while (k < np.size(total_seq_obj,1)):
            test_seq_obj = (np.array(total_seq_obj[0:81,k]).T).tolist()
            new_test_seq_obj = np.array(sum(test_seq_obj,[]))
            ts_obj = new_test_seq_obj
            final_ts_obj = ghmm.EmissionSequence(F,ts_obj.tolist())
            # Find Viterbi Path
            path_rf_obj = model_rf.viterbi(final_ts_obj)
            path_rm_obj = model_rm.viterbi(final_ts_obj)
            path_sf_obj = model_sf.viterbi(final_ts_obj)
            path_sm_obj = model_sm.viterbi(final_ts_obj)
            # Classify by the highest Viterbi log-likelihood ([1] element).
            obj = max(path_rf_obj[1],path_rm_obj[1],path_sf_obj[1],path_sm_obj[1])
            if obj == path_rf_obj[1]:
                rf[0,k] = 1
            elif obj == path_rm_obj[1]:
                rm[0,k] = 1
            elif obj == path_sf_obj[1]:
                sf[0,k] = 1
            else:
                sm[0,k] = 1
            k = k+1
        #print rf.T
        rf_final = rf_final + rf.T
        rm_final = rm_final + rm.T
        sf_final = sf_final + sf.T
        sm_final = sm_final + sm.T
        trial_number = trial_number + 1
    #print rf_final
    #print rm_final
    #print sf_final
    #print sm_final
    # Confusion Matrix
    cmat = np.zeros((4,4))
    # Sum votes in groups of 7 (one group per true class).
    arrsum_rf = np.zeros((4,1))
    arrsum_rm = np.zeros((4,1))
    arrsum_sf = np.zeros((4,1))
    arrsum_sm = np.zeros((4,1))
    k = 7
    i = 0
    while (k < 29):
        arrsum_rf[i] = np.sum(rf_final[k-7:k,0])
        arrsum_rm[i] = np.sum(rm_final[k-7:k,0])
        arrsum_sf[i] = np.sum(sf_final[k-7:k,0])
        arrsum_sm[i] = np.sum(sm_final[k-7:k,0])
        i = i+1
        k = k+7
    i=0
    while (i < 4):
        j=0
        while (j < 4):
            if (i == 0):
                cmat[i][j] = arrsum_rf[j]
            elif (i == 1):
                cmat[i][j] = arrsum_rm[j]
            elif (i == 2):
                cmat[i][j] = arrsum_sf[j]
            else:
                cmat[i][j] = arrsum_sm[j]
            j = j+1
        i = i+1
    #print cmat
    # Plot Confusion Matrix
    Nlabels = 4
    fig = pp.figure()
    ax = fig.add_subplot(111)
    figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels])
    ax.set_title('Performance of HMM Models')
    pp.xlabel("Targets")
    pp.ylabel("Predictions")
    ax.set_xticks([0.5,1.5,2.5,3.5])
    ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
    ax.set_yticks([3.5,2.5,1.5,0.5])
    ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
    figbar = fig.colorbar(figplot)
    # Annotate each cell with its raw count.
    i = 0
    while (i < 4):
        j = 0
        while (j < 4):
            pp.text(j+0.5,3.5-i,cmat[i][j])
            j = j+1
        i = i+1
    pp.show()
|
tapomayukh/projects_in_python
|
classification/Classification_with_HMM/Single_Contact_Classification/area_codes/time_window/hmm_crossvalidation_area_800ms.py
|
Python
|
mit
| 16,122
|
[
"Gaussian",
"Mayavi"
] |
713c5f9a0122b56e7232c8917501cfb8649a0e8a7e72d4b5bec221fb8b69e1be
|
"""
Tests for regression-based dispersion tests (Cameron & Trivedi, 2013)
Cameron, Colin A. & Trivedi, Pravin K. (2013) Regression Analysis of Count Data.
Cambridge University Press: New York, New York.
"""
__author__ = 'Taylor Oshan tayoshan@gmail.com'
import unittest
import numpy as np
import libpysal
from spglm.family import Poisson
from ..count_model import CountModel
from ..dispersion import phi_disp, alpha_disp
class TestDispersion(unittest.TestCase):
    """Regression tests for the phi/alpha dispersion statistics."""

    def setUp(self):
        """Load the columbus sample data: HOVAL counts vs INC and CRIME."""
        db = libpysal.io.open(libpysal.examples.get_path('columbus.dbf'), 'r')
        y = np.array(db.by_col("HOVAL"))
        y = np.reshape(y, (49, 1))
        # Poisson models need integer counts.
        self.y = np.round(y).astype(int)
        X = []
        X.append(db.by_col("INC"))
        X.append(db.by_col("CRIME"))
        self.X = np.array(X).T

    def test_Dispersion(self):
        """Check phi and both alpha variants against known reference values."""
        model = CountModel(self.y, self.X, family=Poisson())
        results = model.fit('GLM')
        phi = phi_disp(results)
        alpha1 = alpha_disp(results)
        # alpha test with a quadratic variance transform
        alpha2 = alpha_disp(results, lambda x: x**2)
        # Each statistic is (estimate, z-value, p-value).
        np.testing.assert_allclose(phi, [5.39968689, 2.3230411, 0.01008847],
                                   atol=1.0e-8)
        np.testing.assert_allclose(alpha1, [4.39968689, 2.3230411,
                                            0.01008847], atol=1.0e-8)
        np.testing.assert_allclose(alpha2, [0.10690133, 2.24709978,
                                            0.01231683], atol=1.0e-8)
if __name__ == '__main__':
unittest.main()
|
TaylorOshan/spint
|
spint/tests/test_dispersion.py
|
Python
|
bsd-3-clause
| 1,507
|
[
"COLUMBUS"
] |
19ccf758cd8027afeb1f3dd259224b50f6d0cf9069a7411a47f9f39e97c88b07
|
"""
=================================
Map data to a normal distribution
=================================
.. currentmodule:: sklearn.preprocessing
This example demonstrates the use of the Box-Cox and Yeo-Johnson transforms
through :class:`~PowerTransformer` to map data from various
distributions to a normal distribution.
The power transform is useful as a transformation in modeling problems where
homoscedasticity and normality are desired. Below are examples of Box-Cox and
Yeo-Johnson applied to six different probability distributions: Lognormal,
Chi-squared, Weibull, Gaussian, Uniform, and Bimodal.
Note that the transformations successfully map the data to a normal
distribution when applied to certain datasets, but are ineffective with others.
This highlights the importance of visualizing the data before and after
transformation.
Also note that even though Box-Cox seems to perform better than Yeo-Johnson for
lognormal and chi-squared distributions, keep in mind that Box-Cox does not
support inputs with negative values.
For comparison, we also add the output from
:class:`~QuantileTransformer`. It can force any arbitrary
distribution into a gaussian, provided that there are enough training samples
(thousands). Because it is a non-parametric method, it is harder to interpret
than the parametric ones (Box-Cox and Yeo-Johnson).
On "small" datasets (less than a few hundred points), the quantile transformer
is prone to overfitting. The use of the power transform is then recommended.
"""
# Author: Eric Chang <ericchang2017@u.northwestern.edu>
# Nicolas Hug <contact@nicolas-hug.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PowerTransformer
from sklearn.preprocessing import QuantileTransformer
from sklearn.model_selection import train_test_split
N_SAMPLES = 1000
FONT_SIZE = 6
BINS = 30
rng = np.random.RandomState(304)
bc = PowerTransformer(method="box-cox")
yj = PowerTransformer(method="yeo-johnson")
# n_quantiles is set to the training set size rather than the default value
# to avoid a warning being raised by this example
qt = QuantileTransformer(
n_quantiles=500, output_distribution="normal", random_state=rng
)
size = (N_SAMPLES, 1)
# lognormal distribution
X_lognormal = rng.lognormal(size=size)
# chi-squared distribution
df = 3
X_chisq = rng.chisquare(df=df, size=size)
# weibull distribution
a = 50
X_weibull = rng.weibull(a=a, size=size)
# gaussian distribution
loc = 100
X_gaussian = rng.normal(loc=loc, size=size)
# uniform distribution
X_uniform = rng.uniform(low=0, high=1, size=size)
# bimodal distribution
loc_a, loc_b = 100, 105
X_a, X_b = rng.normal(loc=loc_a, size=size), rng.normal(loc=loc_b, size=size)
X_bimodal = np.concatenate([X_a, X_b], axis=0)
# create plots
distributions = [
("Lognormal", X_lognormal),
("Chi-squared", X_chisq),
("Weibull", X_weibull),
("Gaussian", X_gaussian),
("Uniform", X_uniform),
("Bimodal", X_bimodal),
]
colors = ["#D81B60", "#0188FF", "#FFC107", "#B7A2FF", "#000000", "#2EC5AC"]
fig, axes = plt.subplots(nrows=8, ncols=3, figsize=plt.figaspect(2))
axes = axes.flatten()
axes_idxs = [
(0, 3, 6, 9),
(1, 4, 7, 10),
(2, 5, 8, 11),
(12, 15, 18, 21),
(13, 16, 19, 22),
(14, 17, 20, 23),
]
axes_list = [(axes[i], axes[j], axes[k], axes[l]) for (i, j, k, l) in axes_idxs]
for distribution, color, axes in zip(distributions, colors, axes_list):
name, X = distribution
X_train, X_test = train_test_split(X, test_size=0.5)
# perform power transforms and quantile transform
X_trans_bc = bc.fit(X_train).transform(X_test)
lmbda_bc = round(bc.lambdas_[0], 2)
X_trans_yj = yj.fit(X_train).transform(X_test)
lmbda_yj = round(yj.lambdas_[0], 2)
X_trans_qt = qt.fit(X_train).transform(X_test)
ax_original, ax_bc, ax_yj, ax_qt = axes
ax_original.hist(X_train, color=color, bins=BINS)
ax_original.set_title(name, fontsize=FONT_SIZE)
ax_original.tick_params(axis="both", which="major", labelsize=FONT_SIZE)
for ax, X_trans, meth_name, lmbda in zip(
(ax_bc, ax_yj, ax_qt),
(X_trans_bc, X_trans_yj, X_trans_qt),
("Box-Cox", "Yeo-Johnson", "Quantile transform"),
(lmbda_bc, lmbda_yj, None),
):
ax.hist(X_trans, color=color, bins=BINS)
title = "After {}".format(meth_name)
if lmbda is not None:
title += "\n$\\lambda$ = {}".format(lmbda)
ax.set_title(title, fontsize=FONT_SIZE)
ax.tick_params(axis="both", which="major", labelsize=FONT_SIZE)
ax.set_xlim([-3.5, 3.5])
plt.tight_layout()
plt.show()
|
manhhomienbienthuy/scikit-learn
|
examples/preprocessing/plot_map_data_to_normal.py
|
Python
|
bsd-3-clause
| 4,665
|
[
"Gaussian"
] |
6a11e70c05b5fbd481255b6aa21defd1438d325592c0da57bd9a0d1ffe0fdf4d
|
#!/usr/bin/env python
"""
dirac-my-great-script
This script prints out how great is it, shows raw queries and sets the
number of pings.
Usage:
dirac-my-great-script [option|cfgfile] <Arguments>
Arguments:
<service1> [<service2> ...]
"""
from DIRAC import S_OK, S_ERROR, gLogger, exit as DIRACExit
from DIRAC.Core.Base import Script
__RCSID__ = '$Id$'
# Module-level holders populated in the __main__ section at startup:
# cliParams holds the Params instance wired to the switch callbacks,
# switchDict the dictionary returned by parseSwitches().
cliParams = None
switchDict = None
class Params:
    '''
    Container for the command-line parameters (raw and pingsToDo) together
    with the callback methods invoked by the corresponding switches.
    '''

    def __init__(self, value=None):
        # Defaults: no raw output, a single ping.
        self.raw = False
        self.pingsToDo = 1

    def setRawResult(self, value):
        '''Switch callback: enable raw query output.'''
        self.raw = True
        return S_OK()

    def setNumOfPingsToDo(self, value):
        '''Switch callback: set the ping count, clamped to at least 1.'''
        try:
            requested = int(value)
        except ValueError:
            return S_ERROR("Number of pings to do has to be a number")
        self.pingsToDo = requested if requested > 1 else 1
        return S_OK()
def registerSwitches():
    '''
    Registers all switches that can be used while calling the script from the
    command line interface.
    '''
    # BUG FIX: without the global statement the assignment below created a
    # function-local variable, leaving the module-level cliParams as None for
    # the rest of the script's lifetime.
    global cliParams
    # Some of the switches have an associated callback, defined on Params.
    cliParams = Params()
    switches = [
        ('', 'text=', 'Text to be printed'),
        ('u', 'upper', 'Print text on upper case'),
        ('r', 'showRaw', 'Show raw result from the query', cliParams.setRawResult),
        ('p:', 'numPings=', 'Number of pings to do (by default 1)', cliParams.setNumOfPingsToDo)
    ]
    # Register switches
    for switch in switches:
        Script.registerSwitch(*switch)
    # Define a help message from the module docstring
    Script.setUsageMessage(__doc__)
def parseSwitches():
    '''
    Parse switches and positional arguments given to the script.

    Returns:
        dict: the unprocessed switches, plus the positional arguments stored
        under the 'servicesList' key.
    '''
    # Parse the command line and initialize DIRAC
    Script.parseCommandLine(ignoreErrors=False)
    # Get the list of services (positional arguments)
    servicesList = Script.getPositionalArgs()
    gLogger.info('This is the servicesList %s:' % servicesList)
    # Gets the rest of the switches (those without a registered callback)
    switches = dict(Script.getUnprocessedSwitches())
    gLogger.debug("The switches used are:")
    # BUG FIX: dict.iteritems() is Python 2 only (AttributeError on Python 3)
    # and a bare map() is lazy on Python 3, so nothing would ever be logged.
    for switchPair in switches.items():
        gLogger.debug(switchPair)
    switches['servicesList'] = servicesList
    return switches
def main():
    '''
    Script entry point holding all the logic: validates that at least one
    service was given on the command line, then reports completion.
    '''
    # Bail out early when no positional service arguments were supplied.
    services = switchDict['servicesList']
    if not services:
        gLogger.error('No services defined')
        DIRACExit(1)
    gLogger.notice('We are done')
if __name__ == "__main__":
# Script initialization
registerSwitches()
switchDict = parseSwitches()
#Import the required DIRAC modules
from DIRAC.Interfaces.API.Dirac import Dirac
# Run the script
main()
# Bye
DIRACExit( 0 )
|
DIRACGrid/DIRACDocs
|
source/DeveloperGuide/AddingNewComponents/DevelopingCommands/dirac-my-great-script.py
|
Python
|
gpl-3.0
| 2,711
|
[
"DIRAC"
] |
06befd9ee2f9e0b201f852e24700bdca4e5e126cd6dfc3b78968ed478ce6eab6
|
../../../../../../../share/pyshared/orca/scripts/apps/notify-osd/script.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/notify-osd/script.py
|
Python
|
gpl-3.0
| 74
|
[
"ORCA"
] |
b663627d7ef9cf2f45099cfa4c71e627a66757205383fd802a129155658b617a
|
from collections import OrderedDict
from scipy.signal.signaltools import convolve2d
from scipy.interpolate.interpolate import interp1d
from scipy.optimize import nnls
from numpy import power, max, square, ones, arange, exp, zeros, transpose, diag, linalg, dot, outer, empty, ceil
# Reddening law from CCM89 # TODO Replace this methodology to an Import of Pyneb
def CCM89_Bal07(Rv, wave):
x = 1e4 / wave # Assuming wavelength is in Amstrongs
ax = zeros(len(wave))
bx = zeros(len(wave))
idcs = x > 1.1
y = (x[idcs] - 1.82)
ax[idcs] = 1 + 0.17699 * y - 0.50447 * y ** 2 - 0.02427 * y ** 3 + 0.72085 * y ** 4 + 0.01979 * y ** 5 - 0.77530 * y ** 6 + 0.32999 * y ** 7
bx[idcs] = 1. * y + 2.28305 * y ** 2 + 1.07233 * y ** 3 - 5.38434 * y ** 4 - 0.62251 * y ** 5 + 5.30260 * y ** 6 - 2.09002 * y ** 7
ax[~idcs] = 0.574 * x[~idcs] ** 1.61
bx[~idcs] = -0.527 * x[~idcs] ** 1.61
Xx = ax + bx / Rv # WARNING better to check this definition
return Xx
class SspFitter():
    """Helpers for fitting stellar population synthesis (SSP) continua."""

    def __init__(self):
        # Configuration storage for the fitter (populated by callers).
        self.ssp_conf_dict = OrderedDict()

    def physical_SED_model(self, bases_wave_rest, obs_wave, bases_flux, Av_star, z_star, sigma_star, Rv_coeff=3.4):
        """Build the physical SED grid: redshift, velocity-broaden, resample
        onto the observed wavelengths and apply CCM89 dust attenuation."""
        # Shift the base wavelengths to the observed frame
        wave_z = bases_wave_rest * (1 + z_star)
        # Gaussian kernel sized to +/- 3 sigma (in pixels)
        half_width = int(ceil(max(3 * sigma_star)))
        n_kernel = 2 * half_width + 1
        offsets = arange(0, n_kernel)
        gauss_kernel = empty((1, n_kernel))
        # Gaussian weights, normalised so the kernel sums to one
        gauss_kernel[0, :] = exp(-0.5 * (square((offsets - half_width) / sigma_star)))
        gauss_kernel /= sum(gauss_kernel[0, :])
        # Broaden the bases according to the stellar velocity dispersion
        convolved = convolve2d(bases_flux, gauss_kernel, mode='same', boundary='symm')
        # Resample each base onto the observed wavelength grid
        interpolated = (interp1d(wave_z, convolved, axis=1, bounds_error=True)(obs_wave)).T
        # Apply the reddening curve at the rest-frame wavelengths
        Av_vector = Av_star * ones(interpolated.shape[1])
        rest_wave = obs_wave / (1 + z_star)
        reddening_curve = CCM89_Bal07(Rv_coeff, rest_wave)
        dust_attenuation = power(10, -0.4 * outer(reddening_curve, Av_vector))
        return interpolated * dust_attenuation

    def ssp_fitting(self, ssp_grid_masked, obs_flux_masked):
        """Non-negative least-squares fit of the SSP grid to the observation."""
        coefficients, _residual = nnls(ssp_grid_masked, obs_flux_masked)
        return coefficients

    def linfit1d(self, obsFlux_norm, obsFlux_mean, basesFlux, weight):
        """Weighted linear least-squares fit via the normal equations,
        rescaled by the mean observed flux."""
        n_pixels, n_bases = basesFlux.shape
        # If there are fewer pixels than bases, work with the transpose
        if n_pixels < n_bases:
            basesFlux = transpose(basesFlux)
            n_pixels = n_bases
        design = basesFlux
        target = obsFlux_norm
        # Weight definition #WARNING: Do we need to use the diag?
        if weight.shape[0] == n_pixels:
            weight_matrix = diag(weight)
            design = dot(weight_matrix, design)
            target = dot(weight_matrix, transpose(target))
        else:
            target = transpose(target)
        normal_matrix = dot(design.T, design)
        coeffs_0 = dot(linalg.inv(normal_matrix), dot(design.T, target)) * obsFlux_mean
        return coeffs_0
|
Delosari/dazer
|
bin/lib/Astro_Libraries/spectrum_fitting/starContinuum_functions.py
|
Python
|
mit
| 3,244
|
[
"Gaussian"
] |
07e58c49a5961fc0e4a34218e7d82b20ab08e8725b87d80cd13d94c1e032a966
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Akamai Technologies
# Author: Daniel Garcia
"""
Usage:
c2e [options]
c2e -h | --help
c2e --version
Options:
-h, --help Print this help
--version Show version number
-v, --verbose Print verbose output
-d, --dry Don't produce source code
-C DIR, --codec-dir DIR Directory of codecs [default: ./codecs]
-T DIR, --template-dir DIR Directory of templates [default: ./templates]
-l LANG, --language LANG Language to output in
c2e will search DIR for codec files ending in .c2e
CODECS:
The top object TARGET defines the target of a codec (html, css, etc)
The main top object is RULES, it contains an ordered list of rules
A rule is an object with a left part (called a guard) and
a right part (called an emitter)
A character in a string that is being encoded is called a candidate
If a candidate matches a guard then the candidate is passed to the
corresponding to emitter which produces characters for the output string
GUARDS can be specified in two ways:
- a single character which will match that character
- or a range of characters which will match any character within the range
inclusively. Ranges have the form (a-z) where a and z are characters
Characters (codepoints really) can be specified in three ways:
- by literal character
- by codepoint in the form U+HHHH if the codepoint is in the
basic multilingual plane
- or by codepoint in the form U+HHHHHH
where H are hex digits
for example:
"a", "U+0061", and "U+000061" are equivalent
EMITTERS can be specified in two ways:
- a string literal which will produce that string
- a named emitter (in the form {emitter: "EMITTER-NAME"})
c2e provides four builtin named emitters:
- DEC: which emits the decimal representation of a codepoint
- HEX: which emits the hexadecimal representation of a codepoint
- IDENTITY: which emits its input
- NOP: which emits nothing
New emitters can be defined as an ordered list of other emitters
where the candidate will be passed to each emitter and the
results concatenated
All top objects besides TARGET, RULES, and DEFAULT-EMITTER
are assumed to be emitter definitions
DEFAULT-EMITTER is a special emitter that will be used when
a candidate does not match a guard
If not defined DEFAULT-EMITTER defaults to the NOP emitter
"""
import os
import time
import re
import unicodedata
from docopt import docopt
from clint.textui import progress, colored, indent, puts, columns, STDOUT, STDERR
from c2e_cog import C2Ecog
from c2e_codec import *
from codec2ast import AstFormatter
class ast2str(ast.NodeVisitor):
    """Pretty-printer that renders a codec AST as a colored string.

    Walks the tree via the NodeVisitor protocol, accumulating the textual
    representation in ``self.out``; retrieve the result with ``str(instance)``.
    """
    def __init__(self, node):
        # Accumulated output string and current nesting depth of IF nodes.
        self.out = ''
        self.indent = 0
        self.visit(node)
    def visit_If(self, node):
        # Each nested IF starts on a new line with an arrow marking its depth.
        if len(self.out) > 0:
            self.out += '\n'
        self.out += ' '*(self.indent-1) + colored.cyan('⤷ ') if self.indent != 0 else ''
        self.indent += 1
        self.out += colored.blue('IF (')
        self.visit(node.condition)
        self.out += colored.blue(') THEN ')
        self.visit(node.iftrue)
        self.out += colored.blue(' ELSE ')
        self.visit(node.iffalse)
    def visit_Candidate(self, node):
        # The character being encoded is rendered as a red alpha.
        self.out += '{}'.format(colored.red('α'))
    def visit_Codepoint(self, node):
        cp = node.codepoint
        # Whitespace codepoints: prefer the Unicode name when one exists,
        # otherwise fall back to the U+HHHH form.
        if re.match(r'\s', cp):
            if unicodedata.name(cp, False):
                self.out += '\'{}\''.format(colored.white(unicodedata.name(cp)))
            else:
                self.out += CODEPOINT_FORMAT.format(ord(cp))
        elif ord(cp) > 255:
            self.out += CODEPOINT_FORMAT.format(ord(cp))
        else:
            self.out += '"{}"'.format(colored.white(cp))
    def visit_Bool(self, node):
        if node.value:
            self.out += colored.green('True')
        else:
            self.out += colored.red('False')
    def visit_Nop(self, node):
        self.out += colored.red('nop')
    def visit_Builtin(self, node):
        self.out += '{}({})'.format(colored.yellow(node.builtin), colored.red('α'))
    def visit_ConstantEmitter(self, node):
        # Rendered as a lambda mapping the candidate to a fixed string.
        self.out += '{}({} {} \"{}\")'.format(colored.yellow('λ'), colored.red('α'), colored.yellow('↦'), colored.white(node.string))
    def visit_EmitterList(self, node):
        # Emitter composition rendered as [e1 ∙ e2 ∙ ...].
        self.out += '['
        for index, e in enumerate(node.emitters):
            if index != 0:
                self.out += colored.yellow(' ∙ ')
            self.visit(e)
        self.out += ']'
    def visit_BinOp(self, node):
        # Infix rendering: operand1 OP operand2 with mathematical symbols.
        self.visit(node.operand1)
        if node.operation is ast.BinOp.OPS.land:
            self.out += ' {} '.format(colored.yellow('∧'))
        elif node.operation is ast.BinOp.OPS.lor:
            self.out += ' {} '.format(colored.yellow('∨'))
        elif node.operation is ast.BinOp.OPS.eq:
            self.out += ' {} '.format(colored.yellow('=='))
        elif node.operation is ast.BinOp.OPS.lt:
            self.out += ' {} '.format(colored.yellow('<'))
        elif node.operation is ast.BinOp.OPS.gt:
            self.out += ' {} '.format(colored.yellow('>'))
        elif node.operation is ast.BinOp.OPS.lte:
            self.out += ' {} '.format(colored.yellow('≤'))
        elif node.operation is ast.BinOp.OPS.gte:
            self.out += ' {} '.format(colored.yellow('≥'))
        self.visit(node.operand2)
    def __str__(self):
        return self.out
def main():
    """Command-line entry point.

    Locates codec files under --codec-dir, parses them into an Encoder and,
    unless --dry is given, generates encoder source code from the templates.
    Exits with status 1 when a required directory is missing or the requested
    output language has no template subdirectory.
    """
    CODEC_EXT = '.c2e'
    C2E_VERSION = 'C2E 0.1'
    # get command line arguments
    global args, verbose
    args = docopt(__doc__, version=C2E_VERSION, options_first=True)
    verbose = args['--verbose']
    # verify --codec-dir and --template-dir exist
    if not os.path.isdir(args['--codec-dir']):
        puts('Error: {dir} is not a directory'.format(dir=colored.red(args['--codec-dir'])), stream=STDERR)
        exit(1)
    if not os.path.isdir(args['--template-dir']):
        puts('Error: {dir} is not a directory'.format(dir=colored.red(args['--template-dir'])), stream=STDERR)
        exit(1)
    # verify output language
    if args['--language']:
        path = '{}/{}'.format(args['--template-dir'], args['--language'])
        if not os.path.isdir(path):
            # BUG FIX: stream=STDERR was previously passed to str.format()
            # (which silently ignores unused keyword arguments) instead of
            # puts(), so this error message went to stdout.
            puts('Error: the template dir ({dir}) has no subdirectory named {lang}'.format(dir=colored.red(args['--template-dir']), lang=colored.red(args['--language'])), stream=STDERR)
            exit(1)
    # enumerate codecs
    codec_paths = []
    with progress.Bar(label=colored.green('traversing {}: '.format(args['--codec-dir'])), expected_size=len(os.listdir(args['--codec-dir'])), hide=not verbose) as bar:
        for i, codec_file in enumerate(os.listdir(args['--codec-dir'])):
            if verbose: time.sleep(.1)
            bar.show(i+1)
            path = '{0}/{1}'.format(args['--codec-dir'], codec_file)
            if codec_file.endswith(CODEC_EXT) and os.path.isfile(path):
                codec_paths.append(path)
    if verbose:
        puts(colored.green('\nfound {} codec(s)'.format(len(codec_paths))), stream=STDERR)
        with indent(3, quote=colored.blue(' •')):
            for path in codec_paths:
                puts(path, stream=STDERR)
    # construct encoder from every parsed codec
    encoder = Encoder()
    if verbose:
        puts(colored.green('\nparsing codecs:'), stream=STDERR)
    for path in codec_paths:
        c = parseCodec(path)
        if verbose:
            puts('{} {} {}'.format(colored.blue(' •'), path, colored.green('✔') if c else colored.red('✘')), stream=STDERR)
        if verbose and c:
            with indent(3):
                puts(colored.green('target: ') + c.target, stream=STDERR)
                puts(colored.green('emitters: '), stream=STDERR)
                with indent(3):
                    for e in c.emitters:
                        puts('{}({})'.format(colored.yellow(e), colored.red('α')), stream=STDERR)
                puts(colored.green('syntax tree: '), stream=STDERR)
                with indent(4, quote=colored.cyan(" ┆")):
                    puts(str(ast2str(c.ast)), stream=STDERR)
                puts('', stream=STDERR)
        encoder.add(c)
    # emit generated source unless a dry run was requested
    if not args['--dry']:
        cog = C2Ecog(encoder)
        puts(cog('templates/Java/Encode.java'))
# Entry point: generate encoders from the codec definitions.
if __name__ == '__main__':
    main()
|
dagarcia-akamai/c2e
|
c2e/c2e.py
|
Python
|
bsd-3-clause
| 8,530
|
[
"VisIt"
] |
0d41015bf40b95c13424fa570ddb8c5b32d5656a63ca52d26a8f3d9867b21256
|
#!/usr/bin/env python
import sys
import os
import unittest
import glob
import shutil
import vtk
import time
from PyQt5 import QtWidgets
import chigger
from peacock.ExodusViewer.plugins.VTKWindowPlugin import main
from peacock.utils import Testing
class TestVTKWindowPlugin(Testing.PeacockImageTestCase):
    """
    Testing for VTKWindowPlugin
    """
    #: QApplication: The main App for QT, this must be static to work correctly.
    qapp = QtWidgets.QApplication(sys.argv)
    #: str: The filename to load.
    _filename = Testing.get_chigger_input('mug_blocks_out.e')
    #: str: Temporary filename for testing delayed load (see testFilename)
    _temp_file = 'TestVTKWindowPlugin.e'
    @classmethod
    def setUpClass(cls):
        """Remove leftovers from a previous run so the delayed-load test starts clean."""
        super(TestVTKWindowPlugin, cls).setUpClass()
        if os.path.exists(cls._temp_file):
            os.remove(cls._temp_file)
    def setUp(self):
        """
        Loads an Exodus file in the VTKWindowWidget object using a structure similar to the ExodusViewer widget.
        """
        self.sleepIfSlow()
        # main() returns the plugin widget and the window under test.
        self._widget, self._window = main(size=[600,600])
    def testInitialize(self):
        """
        Test the result open and are initialized.
        """
        self._window.onFileChanged(self._filename)
        self._window.onResultOptionsChanged({'variable':'diffused'})
        self._window.onWindowRequiresUpdate()
        self.assertTrue(self._window._initialized)
        self.assertImage('testInitialize.png', allowed=0.98)
    def testCamera(self):
        """
        Test that the camera can be modified.
        """
        camera = vtk.vtkCamera()
        camera.SetViewUp(-0.7786, 0.2277, 0.5847)
        camera.SetPosition(9.2960, -0.4218, 12.6685)
        camera.SetFocalPoint(0.0000, 0.0000, 0.1250)
        self._window.onFileChanged(self._filename)
        self._window.onCameraChanged(camera)
        self._window.onResultOptionsChanged({'variable':'diffused'})
        self._window.onWindowRequiresUpdate()
        # The active renderer camera must reflect the one passed in.
        self.assertEqual(camera.GetViewUp(), self._window._result.getVTKRenderer().GetActiveCamera().GetViewUp())
        self.assertImage('testCamera.png')
    def testReader(self):
        """
        Test that reader settings may be changed.
        """
        self._window.onFileChanged(self._filename)
        self._window._reader.setOptions(timestep=1)
        self._window.onResultOptionsChanged({'variable':'diffused'})
        self._window.onWindowRequiresUpdate()
        tdata = self._window._reader.getTimeData()
        self.assertEqual(1, tdata.timestep)
        self.assertEqual(0.1, tdata.time)
        self.assertImage('testReader.png')
    def testResult(self):
        """
        Test that result settings may be changed.
        """
        self._window.onFileChanged(self._filename)
        self._window._result.setOptions(cmap='viridis')
        self._window.onResultOptionsChanged({'variable':'diffused'})
        self._window.onWindowRequiresUpdate()
        self.assertEqual('viridis', self._window._result.getOption('cmap'))
        self.assertImage('testResult.png')
    def testHighlight(self):
        """
        Test the highlighting is working.
        """
        self._window.onFileChanged(self._filename)
        self._window.onResultOptionsChanged({'variable':'diffused'})
        self._window.onWindowRequiresUpdate()
        # Toggle highlight on (block '76') and back off again.
        self._window.onHighlight(block=['76'])
        self.assertImage('testHighlightOn.png')
        self._window.onHighlight()
        self.assertImage('testHighlightOff.png')
    def testFilename(self):
        """
        Tests that non-existent files, new files, and removed files do not break window.
        """
        # The source and destination filenames
        filename = Testing.get_chigger_input('step10_micro_out.e')
        newfile = self._temp_file
        # Remove any existing files
        for fname in glob.glob(newfile + '*'):
            os.remove(fname)
        # Supply a non-existent file
        self._window.onFileChanged(newfile)
        self.assertImage('testFilenameEmpty.png')
        # Create the files and simulate the initialization timer timeout call
        shutil.copy(filename, newfile)
        for i in range(2, 6):
            ext = '-s00' + str(i)
            shutil.copy(filename + ext, newfile + ext)
        time.sleep(1.5) # sleep so modified times differ
        self._window._timers["initialize"].timeout.emit()
        self._window.onResultOptionsChanged({'variable':'phi'})
        self._window.onWindowRequiresUpdate()
        self.assertImage('testFilenameCreated.png', allowed=0.98)
        # Add new files and simulate the update timer timeout call
        for i in range(6, 10):
            ext = '-s00' + str(i)
            shutil.copy(filename + ext, newfile + ext)
        self._window.onWindowRequiresUpdate()
        self.assertImage('testFilenameUpdated.png', allowed=0.98)
        # Remove the files and simulate a call to the update timer
        for fname in glob.glob(newfile + '*'):
            os.remove(fname)
        self._window.onWindowRequiresUpdate()
        self.assertImage('testFilenameEmpty.png') # the window should be empty again
    def testIteractorStyle(self):
        """
        Tests interaction style matches the mesh dimensionality
        """
        # 3D mesh: expect the custom key-press interactor style.
        self._window.onFileChanged(self._filename)
        self.assertIsNone(self._window._window.getOption('style'))
        self.assertIsInstance(self._window._window.getVTKInteractor().GetInteractorStyle(), chigger.base.KeyPressInteractorStyle)
        # 2D mesh: expect the VTK image interactor style.
        self._window.onFileChanged(Testing.get_chigger_input('displace.e'))
        self.assertIsNone(self._window._window.getOption('style'))
        self.assertIsInstance(self._window._window.getVTKInteractor().GetInteractorStyle(), vtk.vtkInteractorStyleImage)
    def testNoFile(self):
        """
        Test that window shows up with peacock image.
        """
        self._window.onFileChanged()
        self.assertImage('testPeacockMessage.png')
# Run this plugin's test suite directly with verbose output.
if __name__ == '__main__':
    unittest.main(module=__name__, verbosity=2)
|
liuwenf/moose
|
python/peacock/tests/exodus_tab/test_VTKWindowPlugin.py
|
Python
|
lgpl-2.1
| 6,082
|
[
"VTK"
] |
bc4d0d505a8c2e792c60954b8b43e6409bab6d9b3e04c250196eb9aa233e6f45
|
# coding: utf-8
"""
Generators of Firework workflows
"""
import logging
import sys
import abc
import os
import six
import datetime
import json
import numpy as np
import abipy.abio.input_tags as atags
import traceback
from collections import defaultdict
from abipy.abio.factories import HybridOneShotFromGsFactory, ScfFactory, IoncellRelaxFromGsFactory
from abipy.abio.factories import PhononsFromGsFactory, ScfForPhononsFactory, InputFactory
from abipy.abio.factories import ion_ioncell_relax_input, scf_input, dte_from_gsinput, scf_for_phonons
from abipy.abio.factories import dfpt_from_gsinput
from abipy.abio.inputs import AbinitInput, AnaddbInput
from abipy.abio.abivars_db import get_abinit_variables
from abipy.dfpt.anaddbnc import AnaddbNcFile
from abipy.flowtk.abiobjects import KSampling
from fireworks.core.firework import Firework, Workflow, FWorker
from fireworks.core.launchpad import LaunchPad
from monty.json import MontyDecoder
from abiflows.core.mastermind_abc import ControlProcedure
from abiflows.core.controllers import AbinitController, WalltimeController, MemoryController
from abiflows.fireworks.tasks.abinit_tasks import AbiFireTask, ScfFWTask, RelaxFWTask, NscfFWTask, PhononTask, BecTask, DteTask
from abiflows.fireworks.tasks.abinit_tasks_src import AbinitSetupTask, AbinitRunTask, AbinitControlTask
from abiflows.fireworks.tasks.abinit_tasks_src import ScfTaskHelper, NscfTaskHelper, DdkTaskHelper
from abiflows.fireworks.tasks.abinit_tasks_src import RelaxTaskHelper
from abiflows.fireworks.tasks.abinit_tasks_src import GeneratePiezoElasticFlowFWSRCAbinitTask
from abiflows.fireworks.tasks.abinit_tasks_src import Cut3DAbinitTask
from abiflows.fireworks.tasks.abinit_tasks_src import BaderTask
from abiflows.fireworks.tasks.abinit_tasks import HybridFWTask, RelaxDilatmxFWTask, GeneratePhononFlowFWAbinitTask
from abiflows.fireworks.tasks.abinit_tasks import AutoparalTask, DdeTask
from abiflows.fireworks.tasks.abinit_tasks import AnaDdbAbinitTask, StrainPertTask, DdkTask, MergeDdbAbinitTask
from abiflows.fireworks.tasks.abinit_tasks import NscfWfqFWTask
from abiflows.fireworks.tasks.src_tasks_abc import createSRCFireworks
from abiflows.fireworks.tasks.utility_tasks import FinalCleanUpTask, DatabaseInsertTask, MongoEngineDBInsertionTask
#from abiflows.fireworks.tasks.utility_tasks import createSRCFireworksOld
from abiflows.fireworks.utils.fw_utils import append_fw_to_wf, get_short_single_core_spec, links_dict_update
from abiflows.fireworks.utils.fw_utils import set_short_single_core_to_spec, get_last_completed_launch
from abiflows.fireworks.utils.fw_utils import get_time_report_for_wf, FWTaskManager
from abiflows.database.mongoengine.abinit_results import RelaxResult, PhononResult, DteResult, DfptResult
from abiflows.fireworks.utils.task_history import TaskEvent
# logging.basicConfig()
# Module logger; messages are also mirrored to stdout via an extra handler.
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stdout))
#TODO AbstractFWWorkflow should not be a subclass of Workflow, should be removed.
@six.add_metaclass(abc.ABCMeta)
class AbstractFWWorkflow(Workflow):
    """
    Abstract class of the workflow generators.
    Subclasses should define a "wf" attribute at the end of the init, containing an instance of a fireworks Workflow.
    Normally subclasses should also define attributes containing the different Fireworks that have been generated.
    """
    def add_to_db(self, lpad=None):
        """
        Add the workflows to the fireworks DB.
        Args:
            lpad: A LaunchPad. If None the LaunchPad will be generated with auto_load.
        Returns:
            dict: mapping between old and new Firework ids
        """
        if not lpad:
            lpad = LaunchPad.auto_load()
        return lpad.add_wf(self.wf)
    def append_fw(self, fw, short_single_spec=False):
        """
        Append a Firework at the end of the workflow.
        Args:
            fw: A Firework object
            short_single_spec: if True the _queueadapter parameters will be set to a single core for a short time.
        """
        if short_single_spec:
            fw.spec.update(self.set_short_single_core_to_spec())
        append_fw_to_wf(fw, self.wf)
    @staticmethod
    def set_short_single_core_to_spec(spec=None, master_mem_overhead=0):
        """
        Sets the _queueadapter parameter in the spec for a single process job with a short run time.
        Args:
            spec: A spec. If None a new dictionary will be created.
            master_mem_overhead: memory overhead (passed through to the queue adapter spec).
        Returns:
            The spec with the _queueadapter parameters set.
        """
        if spec is None:
            spec = {}
        # Shallow copy so the caller's dict is not mutated.
        spec = dict(spec)
        qadapter_spec = get_short_single_core_spec(master_mem_overhead=master_mem_overhead)
        spec['mpi_ncpus'] = 1
        spec['_queueadapter'] = qadapter_spec
        return spec
    def add_mongoengine_db_insertion(self, db_data):
        """
        Adds a Firework containing a task for the insertion of the results in the database, based on mongoengine.
        Args:
            db_data: A DatabaseData with the connection information to the database.
        """
        fw = Firework([MongoEngineDBInsertionTask(db_data=db_data)],
                      spec={'_add_launchpad_and_fw_id': True}, name="DB_insertion")
        self.append_fw(fw, short_single_spec=True)
    def add_final_cleanup(self, out_exts=None, additional_spec=None):
        """
        Adds a Firework with a FinalCleanUpTask.
        _queueadapter parameter in the spec are set for a single process job with a short run time
        Args:
            out_exts: list of extensions that should be cleaned
            additional_spec: dict with additional keys to be added to the spec
        """
        if out_exts is None:
            out_exts = ["WFK", "1WF", "DEN"]
        spec = self.set_short_single_core_to_spec()
        if additional_spec:
            spec.update(additional_spec)
        # high priority
        #TODO improve the handling of the priorities
        spec['_priority'] = 100
        spec['_add_launchpad_and_fw_id'] = True
        cleanup_fw = Firework(FinalCleanUpTask(out_exts=out_exts), spec=spec,
                              name=("final_cleanup")[:15])
        append_fw_to_wf(cleanup_fw, self.wf)
    def add_db_insert_and_cleanup(self, mongo_database, out_exts=None, insertion_data=None,
                                  criteria=None):
        """
        Appends a Firework with a DatabaseInsertTask and a FinalCleanUpTask. N.B. this does not add a
        MongoEngineDBInsertionTask.
        Args:
            mongo_database: a MongoDatabase object describing the connection to the database.
            out_exts: list of extensions that should be cleaned.
            insertion_data: dictionary describing the functions that should be called to insert the data.
            criteria: identifies the entry that should be updated. If None a new entry will be created.
        """
        if out_exts is None:
            out_exts = ["WFK", "1WF", "DEN"]
        if insertion_data is None:
            insertion_data = {'structure': 'get_final_structure_and_history'}
        spec = self.set_short_single_core_to_spec()
        spec['mongo_database'] = mongo_database.as_dict()
        spec['_add_launchpad_and_fw_id'] = True
        insert_and_cleanup_fw = Firework([DatabaseInsertTask(insertion_data=insertion_data, criteria=criteria),
                                          FinalCleanUpTask(out_exts=out_exts)],
                                         spec=spec,
                                         name=(self.wf.name+"_insclnup")[:15])
        append_fw_to_wf(insert_and_cleanup_fw, self.wf)
    def add_cut3d_den_to_cube_task(self, den_task_type_source=None):
        """
        Appends a FW with a task to convert a DEN file to cube format using cut3d.
        Args:
            den_task_type_source: task type from which the DEN file should be taken; only the
                default (None) is currently implemented.
        """
        spec = self.set_short_single_core_to_spec()
        spec['_add_launchpad_and_fw_id'] = True
        if den_task_type_source is None:
            cut3d_fw = Firework(Cut3DAbinitTask.den_to_cube(deps=['DEN']), spec=spec,
                                name=(self.wf.name+"_cut3d")[:15])
        else:
            raise NotImplementedError('Cut3D from specified task_type source not yet implemented')
        append_fw_to_wf(cut3d_fw, self.wf)
    def add_bader_task(self, den_task_type_source=None):
        """
        Appends a FW with a task to calculate the bader charges.
        Args:
            den_task_type_source: The task type from which the DEN file should be taken. If None the default is 'scf'.
        """
        spec = self.set_short_single_core_to_spec()
        spec['_add_launchpad_and_fw_id'] = True
        if den_task_type_source is None:
            den_task_type_source = 'scf'
        # Find the Firework that should compute the DEN file.
        # NOTE(review): control_fw_id is resolved by walking the chain
        # setup -> run -> control; each link is required to have exactly one child.
        den_fw = None
        control_fw_id = None
        for fw_id, fw in self.wf.id_fw.items():
            for task in fw.tasks:
                if isinstance(task, AbinitSetupTask):
                    if task.task_type == den_task_type_source:
                        if den_fw is None:
                            den_fw = fw
                            # The setup task must forward its input: the Bader
                            # analysis needs the pseudo valence electrons.
                            if not task.pass_input:
                                raise ValueError('Abinit task with task_type "{}" should pass the input to the '
                                                 'Bader task'.format(den_task_type_source))
                            den_fw_id = fw_id
                            if len(self.wf.links[den_fw_id]) != 1:
                                raise ValueError('AbinitSetupTask has {:d} children while it should have exactly '
                                                 'one'.format(len(self.wf.links[den_fw_id])))
                            run_fw_id = self.wf.links[den_fw_id][0]
                            if len(self.wf.links[run_fw_id]) != 1:
                                raise ValueError('AbinitRunTask has {:d} children while it should have exactly '
                                                 'one'.format(len(self.wf.links[run_fw_id])))
                            control_fw_id = self.wf.links[run_fw_id][0]
                        else:
                            raise ValueError('Found more than one Firework with Abinit '
                                             'task_type "{}".'.format(den_task_type_source))
        if den_fw is None:
            raise ValueError('Firework with Abinit task_type "{}" not found.'.format(den_task_type_source))
        # # Set the pass_input variable of the task to True (needed to get the pseudo valence electrons)
        # for task in den_fw.tasks:
        #     if isinstance(task, AbinitSetupTask):
        #         task.pass_input = True
        spec['den_task_type_source'] = den_task_type_source
        cut3d_task = Cut3DAbinitTask.den_to_cube(deps=['DEN'])
        bader_task = BaderTask()
        bader_fw = Firework([cut3d_task, bader_task], spec=spec,
                            name=("bader")[:15])
        self.wf.append_wf(new_wf=Workflow.from_Firework(bader_fw), fw_ids=[control_fw_id],
                          detour=False, pull_spec_mods=False)
    @classmethod
    def get_bader_charges(cls, wf):
        """Extract the Bader analysis results from a completed workflow.

        Returns a dict with the pseudo valence charges, the Bader charges and
        the charge transfer per atom. Raises if the 'bader' Firework is missing
        or ambiguous.
        """
        # I dont think we need that here ...
        # assert wf.metadata['workflow_class'] == self.workflow_class
        # assert wf.metadata['workflow_module'] == self.workflow_module
        final_fw_id = None
        for fw_id, fw in wf.id_fw.items():
            if fw.name == 'bader':
                if not final_fw_id:
                    final_fw_id = fw_id
                else:
                    raise ValueError('Multiple Fireworks found with name equal to "bader"')
        if final_fw_id is None:
            raise RuntimeError('Bader analysis not found ...')
        myfw = wf.id_fw[final_fw_id]
        #TODO add a check on the state of the launches
        last_launch = (myfw.archived_launches + myfw.launches)[-1]
        #TODO add a cycle to find the instance of AbiFireTask?
        myfw.tasks[-1].setup_rundir(rundir=last_launch.launch_dir)
        bader_data = myfw.tasks[-1].get_bader_data()
        if len(myfw.spec['previous_fws'][myfw.spec['den_task_type_source']]) != 1:
            raise ValueError('Found "{:d}" previous fws with task_type "{}" while there should be only '
                             'one.'.format(len(myfw.spec['previous_fws'][myfw.spec['den_task_type_source']]),
                                           myfw.spec['den_task_type_source']))
        abinit_input = myfw.spec['previous_fws'][myfw.spec['den_task_type_source']][0]['input']
        psp_valences = abinit_input.valence_electrons_per_atom
        bader_charges = [atom['charge'] for atom in bader_data]
        # Charge transfer = Bader charge minus the pseudopotential valence.
        bader_charges_transfer = [bader_charges[iatom]-psp_valences[iatom] for iatom in range(len(psp_valences))]
        return {'bader_analysis': {'pseudo_valence_charges': psp_valences,
                                   'bader_charges': bader_charges,
                                   'bader_charges_transfer': bader_charges_transfer}}
    def add_metadata(self, structure=None, additional_metadata=None):
        """Update wf.metadata with the workflow type and, when a structure is
        given, its composition summary; additional_metadata takes precedence."""
        if additional_metadata is None:
            additional_metadata = {}
        metadata = dict(wf_type=self.__class__.__name__)
        if structure:
            composition = structure.composition
            metadata['nsites'] = len(structure)
            metadata['elements'] = [el.symbol for el in composition.elements]
            metadata['reduced_formula'] = composition.reduced_formula
        metadata.update(additional_metadata)
        self.wf.metadata.update(metadata)
    def get_reduced_formula(self, input):
        """
        Gets the reduced formula of the structure used in the workflow.
        Args:
            input: An |AbinitInput| object or a |Structure|
        """
        structure = None
        try:
            if isinstance(input, AbinitInput):
                structure = input.structure
            elif 'structure' in input.kwargs:
                structure = input.kwargs['structure']
            elif isinstance(input, InputFactory):
                try:
                    structure = input.args[0]
                except IndexError:
                    structure = input.kwargs.get("structure")
        except Exception:
            # Best effort: an empty formula is acceptable for metadata purposes.
            logger.warning("Couldn't get the structure from the input: {}".format(traceback.format_exc()))
        return structure.composition.reduced_formula if structure else ""
    def add_spec_to_all_fws(self, spec):
        """
        Updates the spec of all the Fireworks with the input dictionary
        """
        for fw in self.wf.fws:
            fw.spec.update(spec)
    def set_preserve_fworker(self):
        """
        Sets the _preserve_fworker key in the spec of all the Fireworks
        """
        self.add_spec_to_all_fws(dict(_preserve_fworker=True))
    def fix_fworker(self, name=None):
        """
        Sets the _fworker key to the name specified and adds _preserve_fworker to the spec of all the fws.
        If name is None the name is taken from the FWorker loaded with FWorker.auto_load (the default being the
        file ~/.fireworks/my_fworker.yaml)
        """
        if name is None:
            name = FWorker.auto_load().name
        self.add_spec_to_all_fws(dict(_preserve_fworker=True, _fworker=name))
class InputFWWorkflow(AbstractFWWorkflow):
    """
    Generator of a fireworks workflow with a single abinit task based on a generic |AbinitInput|.
    """

    def __init__(self, abiinput, task_type=AbiFireTask, autoparal=False, spec=None, initialization_info=None):
        """
        Args:
            abiinput: an |AbinitInput| object
            task_type: the class of the task created
            autoparal: if True autoparal will be used at runtime to optimize the number of processes.
            spec: a dict with additional spec for the Firework.
            initialization_info: a dict defining additional information about the initialization of the workflow.
        """
        # work on a copy so the caller's spec is never modified
        spec = dict(spec) if spec is not None else {}
        spec['initialization_info'] = initialization_info if initialization_info is not None else {}
        if autoparal:
            spec = self.set_short_single_core_to_spec(spec)
        self.fw = Firework(task_type(abiinput, is_autoparal=autoparal), spec=spec)
        self.wf = Workflow([self.fw])
class ScfFWWorkflow(AbstractFWWorkflow):
    """
    Generator of a fireworks workflow with a single abinit task performing a SCF calculation
    """

    def __init__(self, abiinput, autoparal=False, spec=None, initialization_info=None):
        """
        Args:
            abiinput: an |AbinitInput| object for a SCF calculation.
            autoparal: if True autoparal will be used at runtime to optimize the number of processes.
            spec: a dict with additional spec for the Firework.
            initialization_info: a dict defining additional information about the initialization of the workflow.
        """
        # work on a copy so the caller's spec is never modified
        spec = dict(spec) if spec is not None else {}
        spec['initialization_info'] = initialization_info if initialization_info is not None else {}
        task_index = 1
        if autoparal:
            spec = self.set_short_single_core_to_spec(spec)
            task_index = 'autoparal'
        spec['wf_task_index'] = 'scf_' + str(task_index)
        self.scf_fw = Firework(ScfFWTask(abiinput, is_autoparal=autoparal), spec=spec)
        self.wf = Workflow([self.scf_fw])

    @classmethod
    def from_factory(cls, structure, pseudos, kppa=None, ecut=None, pawecutdg=None, nband=None, accuracy="normal",
                     spin_mode="polarized", smearing="fermi_dirac:0.1 eV", charge=0.0, scf_algorithm=None,
                     shift_mode="Monkhorst-Pack", extra_abivars=None, decorators=None, autoparal=False, spec=None,
                     initialization_info=None):
        """
        Creates an instance of ScfFWWorkflow using the scf_input factory function. See the description
        of the factory for the definition of the arguments.
        """
        abiinput = scf_input(structure, pseudos, kppa=kppa, ecut=ecut, pawecutdg=pawecutdg, nband=nband,
                             accuracy=accuracy, spin_mode=spin_mode, smearing=smearing, charge=charge,
                             scf_algorithm=scf_algorithm, shift_mode=shift_mode)
        abiinput.set_vars(extra_abivars if extra_abivars is not None else {})
        for decorator in (decorators if decorators is not None else []):
            decorator(abiinput)
        return cls(abiinput, autoparal=autoparal,
                   spec=spec if spec is not None else {},
                   initialization_info=initialization_info if initialization_info is not None else {})
class ScfFWWorkflowSRC(AbstractFWWorkflow):
    """
    Generator of a fireworks workflow running a single SCF calculation with the
    setup-run-control (SRC) scheme.
    """
    workflow_class = 'ScfFWWorkflowSRC'
    workflow_module = 'abiflows.fireworks.workflows.abinit_workflows'

    def __init__(self, abiinput, spec=None, initialization_info=None, pass_input=False):
        """
        Args:
            abiinput: an |AbinitInput| object for a SCF calculation.
            spec: a dict with additional spec for the Fireworks.
            initialization_info: a dict defining additional information about the initialization of the workflow.
            pass_input: forwarded to the AbinitSetupTask.
        """
        spec = {} if spec is None else spec
        initialization_info = {} if initialization_info is None else initialization_info
        helper = ScfTaskHelper()
        control_procedure = ControlProcedure(
            controllers=[AbinitController.from_helper(helper), WalltimeController(), MemoryController()])
        src_fws = createSRCFireworks(
            setup_task=AbinitSetupTask(abiinput=abiinput, task_helper=helper, pass_input=pass_input),
            run_task=AbinitRunTask(control_procedure=control_procedure, task_helper=helper),
            control_task=AbinitControlTask(control_procedure=control_procedure, task_helper=helper),
            spec=spec, task_index='scf', initialization_info=initialization_info)
        self.wf = Workflow(fireworks=src_fws['fws'], links_dict=src_fws['links_dict'],
                           metadata={'workflow_class': self.workflow_class,
                                     'workflow_module': self.workflow_module})

    @classmethod
    def from_factory(cls, structure, pseudos, kppa=None, ecut=None, pawecutdg=None, nband=None, accuracy="normal",
                     spin_mode="polarized", smearing="fermi_dirac:0.1 eV", charge=0.0, scf_algorithm=None,
                     shift_mode="Monkhorst-Pack", extra_abivars=None, decorators=None, autoparal=False, spec=None,
                     initialization_info=None, pass_input=False):
        """
        Creates an instance of ScfFWWorkflowSRC using the scf_input factory function. See the description
        of the factory for the definition of the arguments.

        Note: autoparal is accepted for interface compatibility but is not used by this SRC workflow.
        """
        abiinput = scf_input(structure, pseudos, kppa=kppa, ecut=ecut, pawecutdg=pawecutdg, nband=nband,
                             accuracy=accuracy, spin_mode=spin_mode, smearing=smearing, charge=charge,
                             scf_algorithm=scf_algorithm, shift_mode=shift_mode)
        abiinput.set_vars(extra_abivars if extra_abivars is not None else {})
        for decorator in (decorators if decorators is not None else []):
            decorator(abiinput)
        return cls(abiinput,
                   spec=spec if spec is not None else {},
                   initialization_info=initialization_info if initialization_info is not None else {},
                   pass_input=pass_input)
class RelaxFWWorkflow(AbstractFWWorkflow):
    """
    Generator of a firework workflow performing a relax of structure (atomic positions, cell shape and size).
    Can converge the dilatmx during the cell relaxtion up to a custom value.
    """
    workflow_class = 'RelaxFWWorkflow'
    workflow_module = 'abiflows.fireworks.workflows.abinit_workflows'
    def __init__(self, ion_input, ioncell_input, autoparal=False, spec=None, initialization_info=None,
                 target_dilatmx=None, skip_ion=False):
        """
        Args:
            ion_input: an AbinitInput for the relax of the atomic position calculation.
            ioncell_input: an AbinitInput for the relax of both atomic position and cell size and shape.
            autoparal: if True autoparal will be used at runtime to optimize the number of processes.
            spec: a dict with additional spec for the Firework.
            initialization_info: a dict defining additional information about the initialization of the workflow.
            target_dilatmx: target value for the dilatmx. The workflow will progressively reduce the value of
                dilatmx and relax the structure again until this value is reached.
            skip_ion: if True the first step with relax of the atomic position will not be performed.
        """
        if spec is None:
            spec = {}
        if initialization_info is None:
            initialization_info = {}
        start_task_index = 1
        # copy so the caller's dict is not modified
        spec = dict(spec)
        spec['initialization_info'] = initialization_info
        if autoparal:
            spec = self.set_short_single_core_to_spec(spec)
            start_task_index = 'autoparal'
        fws = []
        deps = {}
        if not skip_ion:
            rf = self.get_reduced_formula(ion_input)
            spec['wf_task_index'] = 'ion_' + str(start_task_index)
            ion_task = RelaxFWTask(ion_input, is_autoparal=autoparal)
            self.ion_fw = Firework(ion_task, spec=spec, name=rf + "_" + "relax_ion")
            # the ioncell step will start from the structure relaxed in the ion step
            deps = {ion_task.task_type: '@structure'}
            fws.append(self.ion_fw)
        else:
            rf = self.get_reduced_formula(ioncell_input)
        spec['wf_task_index'] = 'ioncell_' + str(start_task_index)
        if target_dilatmx:
            # RelaxDilatmxFWTask handles the progressive reduction of dilatmx down to target_dilatmx
            ioncell_task = RelaxDilatmxFWTask(ioncell_input, is_autoparal=autoparal, target_dilatmx=target_dilatmx,
                                              deps=deps)
        else:
            ioncell_task = RelaxFWTask(ioncell_input, is_autoparal=autoparal, deps=deps)
        self.ioncell_fw = Firework(ioncell_task, spec=spec, name=rf + "_" + "relax_ioncell")
        fws.append(self.ioncell_fw)
        # the ioncell firework is a child of the ion firework, unless the latter was skipped
        fw_deps = None if skip_ion else {self.ion_fw: [self.ioncell_fw]}
        self.wf = Workflow(fws, fw_deps,
                           metadata={'workflow_class': self.workflow_class,
                                     'workflow_module': self.workflow_module})
    @classmethod
    def get_final_structure_and_history(cls, wf):
        """
        Extracts the final structure and the history of the last 'ioncell' step from a completed workflow.

        Args:
            wf: the fireworks Workflow instance generated by this class.

        Returns:
            dict with keys 'structure' (as_dict of the final structure) and 'history'
            (decoded content of the history.json file of the last launch).

        Raises:
            RuntimeError: if no 'ioncell' firework with an integer index is found.
        """
        assert wf.metadata['workflow_class'] == cls.workflow_class
        assert wf.metadata['workflow_module'] == cls.workflow_module
        ioncell = -1
        final_fw_id = None
        # find the firework with the highest ioncell index, i.e. the last restart
        for fw_id, fw in wf.id_fw.items():
            if 'wf_task_index' in fw.spec and fw.spec['wf_task_index'][:8] == 'ioncell_':
                try:
                    this_ioncell = int(fw.spec['wf_task_index'].split('_')[-1])
                except ValueError:
                    # skip if the index is not an int (e.g. the 'autoparal' firework)
                    continue
                if this_ioncell > ioncell:
                    ioncell = this_ioncell
                    final_fw_id = fw_id
        if final_fw_id is None:
            raise RuntimeError('Final structure not found ...')
        myfw = wf.id_fw[final_fw_id]
        #TODO add a check on the state of the launches
        # the last launch (archived or not) is assumed to be the completed one
        last_launch = (myfw.archived_launches + myfw.launches)[-1]
        #TODO add a cycle to find the instance of AbiFireTask?
        myfw.tasks[-1].set_workdir(workdir=last_launch.launch_dir)
        structure = myfw.tasks[-1].get_final_structure()
        with open(os.path.join(last_launch.launch_dir, 'history.json'), "rt") as fh:
            history = json.load(fh, cls=MontyDecoder)
        return {'structure': structure.as_dict(), 'history': history}
    @classmethod
    def get_runtime_secs(cls, wf):
        """
        Sums the runtimes of the fireworks in the workflow.

        Autoparal fireworks contribute their bare runtime, while ion/ioncell fireworks are
        weighted by the 'mpi_ncpus' value in their spec (i.e. total core-seconds for those runs).

        Args:
            wf: the fireworks Workflow instance generated by this class.

        Returns:
            The accumulated time in seconds (float).
        """
        assert wf.metadata['workflow_class'] == cls.workflow_class
        assert wf.metadata['workflow_module'] == cls.workflow_module
        time_secs = 0.0
        for fw_id, fw in wf.id_fw.items():
            if 'wf_task_index' in fw.spec:
                if fw.spec['wf_task_index'][-9:] == 'autoparal':
                    time_secs += fw.launches[-1].runtime_secs
                elif fw.spec['wf_task_index'][:4] == 'ion_':
                    time_secs += fw.launches[-1].runtime_secs * fw.spec['mpi_ncpus']
                elif fw.spec['wf_task_index'][:8] == 'ioncell_':
                    time_secs += fw.launches[-1].runtime_secs * fw.spec['mpi_ncpus']
        return time_secs
    @classmethod
    def get_mongoengine_results(cls, wf):
        """
        Generates the RelaxResult mongoengine document containing the results of the calculation.
        The workflow should have been generated from this class and requires an open connection to the
        fireworks database and access to the file system containing the calculations.
        Args:
            wf: the fireworks Workflow instance of the workflow.
        Returns:
            A RelaxResult document.
        """
        assert wf.metadata['workflow_class'] == cls.workflow_class
        assert wf.metadata['workflow_module'] == cls.workflow_module
        # collect the 'ioncell' fireworks (excluding autoparal ones), sorted by restart index
        ioncell_fws = [fw for fw in wf.fws if fw.spec.get('wf_task_index', '').startswith('ioncell_')
                       and not fw.spec.get('wf_task_index', '').endswith('autoparal')]
        ioncell_fws.sort(key=lambda l: int(l.spec.get('wf_task_index', '0').split('_')[-1]))
        last_ioncell_fw = ioncell_fws[-1]
        last_ioncell_launch = get_last_completed_launch(last_ioncell_fw)
        # same for the 'ion' fireworks; may be empty if the workflow was generated with skip_ion
        ion_fws = [fw for fw in wf.fws if fw.spec.get('wf_task_index', '').startswith('ion_')
                   and not fw.spec.get('wf_task_index', '').endswith('autoparal')]
        ion_fws.sort(key=lambda l: int(l.spec.get('wf_task_index', '0').split('_')[-1]))
        if ion_fws:
            first_fw = ion_fws[0]
        else:
            first_fw = ioncell_fws[0]
        relax_task = last_ioncell_fw.tasks[-1]
        relax_task.set_workdir(workdir=last_ioncell_launch.launch_dir)
        structure = relax_task.get_final_structure()
        with open(os.path.join(last_ioncell_launch.launch_dir, 'history.json'), "rt") as fh:
            history_ioncell = json.load(fh, cls=MontyDecoder)
        document = RelaxResult()
        document.abinit_output.structure = structure.as_dict()
        document.set_material_data_from_structure(structure)
        # the input actually used by the last (finalized) ioncell step
        final_input = history_ioncell.get_events_by_types(TaskEvent.FINALIZED)[0].details['final_input']
        document.abinit_input.last_input = final_input.as_dict()
        document.abinit_input.set_abinit_basic_from_abinit_input(final_input)
        # need to set the structure as the initial one
        document.abinit_input.structure = first_fw.tasks[0].abiinput.structure.as_dict()
        document.history = history_ioncell.as_dict()
        document.set_dir_names_from_fws_wf(wf)
        initialization_info = history_ioncell.get_events_by_types(TaskEvent.INITIALIZED)[0].details.get('initialization_info', {})
        document.abinit_input.kppa = initialization_info.get('kppa', None)
        document.mp_id = initialization_info.get('mp_id', None)
        document.custom = initialization_info.get("custom", None)
        document.abinit_input.pseudopotentials.set_pseudos_from_files_file(relax_task.files_file.path,
                                                                           len(structure.composition.elements))
        document.time_report = get_time_report_for_wf(wf).as_dict()
        document.fw_id = last_ioncell_fw.fw_id
        document.created_on = datetime.datetime.now()
        document.modified_on = datetime.datetime.now()
        with open(relax_task.gsr_path, "rb") as f:
            document.abinit_output.gsr.put(f)
        # first get all the file paths. If something goes wrong in this loop no file is left dangling in the db
        hist_files_path = {}
        for fw in ion_fws + ioncell_fws:
            task_index = fw.spec.get('wf_task_index')
            last_launch = get_last_completed_launch(fw)
            task = fw.tasks[0]
            task.set_workdir(workdir=last_launch.launch_dir)
            hist_files_path[task_index] = task.hist_nc_path
        # now save all the files in the db
        #TODO I would prefer to avoid the import of mongoengine related objects here and delegate to some other specific module
        #from abiflows.core.models import AbiGridFSProxy
        # This is an alternative from importing the object explicitely. Still quite a dirty hack
        proxy_class = RelaxResult.abinit_output.default.hist_files.field.proxy_class
        collection_name = RelaxResult.abinit_output.default.hist_files.field.collection_name
        hist_files = {}
        for task_index, file_path in six.iteritems(hist_files_path):
            with open(file_path, "rb") as f:
                file_field = proxy_class(collection_name=collection_name)
                file_field.put(f)
                hist_files[task_index] = file_field
        # read in binary for py3k compatibility with mongoengine
        with open(relax_task.output_file.path, 'rb') as f:
            document.abinit_output.outfile_ioncell.put(f)
        document.abinit_output.hist_files = hist_files
        return document
    @classmethod
    def from_factory(cls, structure, pseudos, kppa=None, nband=None, ecut=None, pawecutdg=None, accuracy="normal",
                     spin_mode="polarized", smearing="fermi_dirac:0.1 eV", charge=0.0, scf_algorithm=None,
                     extra_abivars=None, decorators=None, autoparal=False, spec=None, initialization_info=None,
                     target_dilatmx=None, skip_ion=False, shift_mode="Monkhorst-Pack"):
        """
        Creates an instance of RelaxFWWorkflow using the ion_ioncell_relax_input factory function. See the description
        of the factory for the definition of the arguments.
        """
        if extra_abivars is None:
            extra_abivars = {}
        if decorators is None:
            decorators = []
        if spec is None:
            spec = {}
        if initialization_info is None:
            initialization_info = {}
        ion_ioncell_input = ion_ioncell_relax_input(structure=structure, pseudos=pseudos, kppa=kppa, nband=nband, ecut=ecut,
                                                    pawecutdg=pawecutdg, accuracy=accuracy, spin_mode=spin_mode,
                                                    smearing=smearing, charge=charge, scf_algorithm=scf_algorithm,
                                                    shift_mode=shift_mode)
        ion_ioncell_input.set_vars(**extra_abivars)
        for d in decorators:
            ion_ioncell_input = d(ion_ioncell_input)
        ion_input = ion_ioncell_input[0]
        if skip_ion:
            # no ion step: the ioncell calculation starts directly from the generated input
            ioncell_input = ion_ioncell_input[1]
        else:
            # the ioncell input is built at runtime from the output of the ion step
            ioncell_input = IoncellRelaxFromGsFactory(accuracy=accuracy, extra_abivars=extra_abivars,
                                                      decorators=decorators)
        return cls(ion_input, ioncell_input, autoparal=autoparal, spec=spec, initialization_info=initialization_info,
                   target_dilatmx=target_dilatmx,skip_ion=skip_ion)
class RelaxFWWorkflowSRC(AbstractFWWorkflow):
    """
    Generator of a fireworks workflow performing a two-step structure relaxation (atomic positions
    first, then positions plus cell) with the setup-run-control (SRC) scheme.
    """
    workflow_class = 'RelaxFWWorkflowSRC'
    workflow_module = 'abiflows.fireworks.workflows.abinit_workflows'

    def __init__(self, ion_input, ioncell_input, spec=None, initialization_info=None, additional_controllers=None):
        """
        Args:
            ion_input: an |AbinitInput| for the relaxation of the atomic positions.
            ioncell_input: an |AbinitInput| for the relaxation of atomic positions and cell.
            spec: a dict with additional spec for the Fireworks.
            initialization_info: a dict defining additional information about the initialization of the workflow.
            additional_controllers: controllers used on top of the AbinitController. Defaults to
                walltime and memory controllers.
        """
        if spec is None:
            spec = {}
        if initialization_info is None:
            initialization_info = {}
        if additional_controllers is None:
            additional_controllers = [WalltimeController(), MemoryController()]
        fws = []
        links_dict = {}

        #1. Relax run at fixed cell
        relax_helper = RelaxTaskHelper()
        relax_controllers = [AbinitController.from_helper(relax_helper)]
        relax_controllers.extend(additional_controllers)
        relax_control_procedure = ControlProcedure(controllers=relax_controllers)
        setup_relax_ions_task = AbinitSetupTask(abiinput=ion_input, task_helper=relax_helper)
        run_relax_ions_task = AbinitRunTask(control_procedure=relax_control_procedure, task_helper=relax_helper,
                                            task_type='ion')
        control_relax_ions_task = AbinitControlTask(control_procedure=relax_control_procedure,
                                                    task_helper=relax_helper)
        relax_ions_fws = createSRCFireworks(setup_task=setup_relax_ions_task, run_task=run_relax_ions_task,
                                            control_task=control_relax_ions_task,
                                            spec=spec, initialization_info=initialization_info)
        fws.extend(relax_ions_fws['fws'])
        links_dict_update(links_dict=links_dict, links_update=relax_ions_fws['links_dict'])

        #2. Relax run with cell relaxation, starting from the structure relaxed at fixed cell
        setup_relax_ions_cell_task = AbinitSetupTask(abiinput=ioncell_input, task_helper=relax_helper,
                                                     deps={run_relax_ions_task.task_type: '@structure'})
        run_relax_ions_cell_task = AbinitRunTask(control_procedure=relax_control_procedure, task_helper=relax_helper,
                                                 task_type='ioncell')
        control_relax_ions_cell_task = AbinitControlTask(control_procedure=relax_control_procedure,
                                                         task_helper=relax_helper)
        relax_ions_cell_fws = createSRCFireworks(setup_task=setup_relax_ions_cell_task,
                                                 run_task=run_relax_ions_cell_task,
                                                 control_task=control_relax_ions_cell_task,
                                                 spec=spec, initialization_info=initialization_info)
        fws.extend(relax_ions_cell_fws['fws'])
        links_dict_update(links_dict=links_dict, links_update=relax_ions_cell_fws['links_dict'])
        # chain the ioncell setup after the control step of the ion relaxation
        links_dict.update({relax_ions_fws['control_fw']: relax_ions_cell_fws['setup_fw']})

        self.wf = Workflow(fireworks=fws,
                           links_dict=links_dict,
                           metadata={'workflow_class': self.workflow_class,
                                     'workflow_module': self.workflow_module})

    @classmethod
    def _get_final_ioncell_helper(cls, wf):
        """
        Locates the 'run' firework of the last 'ioncell' step and returns a RelaxTaskHelper
        set up in its run directory.

        Factors out the lookup logic previously triplicated in get_final_structure,
        get_final_structure_and_history and get_computed_entry.

        Args:
            wf: the fireworks Workflow instance generated by this class.

        Returns:
            A RelaxTaskHelper bound to the task of the last ioncell 'run' firework.

        Raises:
            RuntimeError: if no ioncell run firework is found.
        """
        assert wf.metadata['workflow_class'] == cls.workflow_class
        assert wf.metadata['workflow_module'] == cls.workflow_module
        ioncell = -1
        final_fw_id = None
        for fw_id, fw in wf.id_fw.items():
            if 'SRC_task_index' in fw.spec:
                # only the 'run' fireworks hold the actual calculation results
                if fw.tasks[-1].src_type != 'run':
                    continue
                task_index = fw.spec['SRC_task_index']
                if task_index.task_type == 'ioncell':
                    if task_index.index > ioncell:
                        ioncell = task_index.index
                        final_fw_id = fw_id
        if final_fw_id is None:
            raise RuntimeError('Final structure not found ...')
        myfw = wf.id_fw[final_fw_id]
        mytask = myfw.tasks[-1]
        #TODO add a check on the state of the launches
        # the last launch (archived or not) is assumed to be the completed one
        last_launch = (myfw.archived_launches + myfw.launches)[-1]
        helper = RelaxTaskHelper()
        helper.set_task(mytask)
        helper.task.setup_rundir(last_launch.launch_dir, create_dirs=False)
        return helper

    @classmethod
    def get_final_structure(cls, wf):
        """Returns a dict with the final relaxed 'structure' (as_dict) from a completed workflow."""
        helper = cls._get_final_ioncell_helper(wf)
        return {'structure': helper.get_final_structure().as_dict()}

    @classmethod
    def get_final_structure_and_history(cls, wf):
        """
        Returns a dict with the final relaxed 'structure' (as_dict) from a completed workflow.

        NOTE: despite the name, the history is currently not extracted (its extraction was
        already commented out in the original implementation).
        """
        helper = cls._get_final_ioncell_helper(wf)
        return {'structure': helper.get_final_structure().as_dict()}

    @classmethod
    def get_computed_entry(cls, wf):
        """Returns a dict with the 'computed_entry' (as_dict) of the last ioncell step."""
        helper = cls._get_final_ioncell_helper(wf)
        return {'computed_entry': helper.get_computed_entry().as_dict()}
class NscfFWWorkflow(AbstractFWWorkflow):
    """
    Generator of a fireworks workflow with a SCF followed by a NSCF calculation. For the calculation of
    band structure or DOS.
    """
    workflow_class = 'NscfFWWorkflow'
    workflow_module = 'abiflows.fireworks.workflows.abinit_workflows'

    def __init__(self, scf_input, nscf_input, autoparal=False, spec=None, initialization_info=None):
        """
        Args:
            scf_input: an AbinitInput for the SCF calculation.
            nscf_input: an AbinitInput for the NSCF calculation.
            autoparal: if True autoparal will be used at runtime to optimize the number of processes.
            spec: a dict with additional spec for the Firework.
            initialization_info: a dict defining additional information about the initialization of the workflow.
        """
        if spec is None:
            spec = {}
        if initialization_info is None:
            initialization_info = {}
        start_task_index = 1
        spec = dict(spec)
        spec['initialization_info'] = initialization_info
        if autoparal:
            spec = self.set_short_single_core_to_spec(spec)
            start_task_index = 'autoparal'
        # Give each Firework its own spec copy: previously the same mutable dict was passed to
        # the scf Firework and then mutated ('wf_task_index' overwritten with the nscf value),
        # so correctness depended on Firework copying its spec internally.
        scf_spec = dict(spec, wf_task_index='scf_' + str(start_task_index))
        scf_task = ScfFWTask(scf_input, is_autoparal=autoparal)
        self.scf_fw = Firework(scf_task, spec=scf_spec)
        nscf_spec = dict(spec, wf_task_index='nscf_' + str(start_task_index))
        # the NSCF task reads the density (DEN) produced by the SCF task
        nscf_task = NscfFWTask(nscf_input, deps={scf_task.task_type: 'DEN'}, is_autoparal=autoparal)
        self.nscf_fw = Firework(nscf_task, spec=nscf_spec)
        self.wf = Workflow([self.scf_fw, self.nscf_fw], {self.scf_fw: [self.nscf_fw]},
                           metadata={'workflow_class': self.workflow_class,
                                     'workflow_module': self.workflow_module})
class NscfFWWorkflowSRC(AbstractFWWorkflow):
    """
    Generator of a fireworks workflow with a SCF followed by a NSCF calculation, both executed
    with the setup-run-control (SRC) scheme.
    """
    workflow_class = 'NscfFWWorkflowSRC'
    workflow_module = 'abiflows.fireworks.workflows.abinit_workflows'

    def __init__(self, scf_input, nscf_input, spec=None, initialization_info=None):
        """
        Args:
            scf_input: an |AbinitInput| for the SCF calculation.
            nscf_input: an |AbinitInput| for the NSCF calculation.
            spec: a dict with additional spec for the Fireworks. An 'additional_controllers' key,
                if present, is popped and used instead of the default walltime/memory controllers.
            initialization_info: a dict defining additional information about the initialization of the workflow.
        """
        spec = {} if spec is None else spec
        initialization_info = {} if initialization_info is None else initialization_info
        # controllers can be customized through the spec; default to walltime and memory control
        additional_controllers = spec.pop('additional_controllers', [WalltimeController(), MemoryController()])
        fws = []
        links_dict = {}

        # Self-consistent calculation
        scf_helper = ScfTaskHelper()
        scf_control_procedure = ControlProcedure(
            controllers=[AbinitController.from_helper(scf_helper)] + additional_controllers)
        setup_scf_task = AbinitSetupTask(abiinput=scf_input, task_helper=scf_helper)
        run_scf_task = AbinitRunTask(control_procedure=scf_control_procedure, task_helper=scf_helper)
        control_scf_task = AbinitControlTask(control_procedure=scf_control_procedure, task_helper=scf_helper)
        scf_fws = createSRCFireworks(setup_task=setup_scf_task, run_task=run_scf_task, control_task=control_scf_task,
                                     task_index=scf_helper.task_type,
                                     spec=spec, initialization_info=initialization_info)
        fws.extend(scf_fws['fws'])
        links_dict_update(links_dict=links_dict, links_update=scf_fws['links_dict'])

        # Non self-consistent calculation, reading the density (DEN) from the SCF step
        nscf_helper = NscfTaskHelper()
        nscf_control_procedure = ControlProcedure(
            controllers=[AbinitController.from_helper(nscf_helper)] + additional_controllers)
        setup_nscf_task = AbinitSetupTask(abiinput=nscf_input, task_helper=nscf_helper,
                                          deps={run_scf_task.task_type: 'DEN'})
        run_nscf_task = AbinitRunTask(control_procedure=nscf_control_procedure, task_helper=nscf_helper)
        control_nscf_task = AbinitControlTask(control_procedure=nscf_control_procedure, task_helper=nscf_helper)
        nscf_fws = createSRCFireworks(setup_task=setup_nscf_task, run_task=run_nscf_task,
                                      control_task=control_nscf_task, task_index=nscf_helper.task_type, spec=spec,
                                      initialization_info=initialization_info)
        fws.extend(nscf_fws['fws'])
        links_dict_update(links_dict=links_dict, links_update=nscf_fws['links_dict'])
        # chain the NSCF setup after the control step of the SCF
        links_dict_update(links_dict=links_dict,
                          links_update={scf_fws['control_fw'].fw_id: nscf_fws['setup_fw'].fw_id})

        self.wf = Workflow(fireworks=fws, links_dict=links_dict,
                           metadata={'workflow_class': self.workflow_class,
                                     'workflow_module': self.workflow_module})

    @classmethod
    def from_factory(cls, structure, pseudos, kppa=None, ecut=None, pawecutdg=None, nband=None, accuracy="normal",
                     spin_mode="polarized", smearing="fermi_dirac:0.1 eV", charge=0.0, scf_algorithm=None,
                     shift_mode="Monkhorst-Pack", extra_abivars=None, decorators=None, autoparal=False, spec=None,
                     initialization_info=None):
        """
        Not yet implemented. The signature mirrors the from_factory methods of the other workflows.
        """
        raise NotImplementedError('from_factory class method not yet implemented for NscfWorkflowSRC')
class HybridOneShotFWWorkflow(AbstractFWWorkflow):
    """
    Generator of a fireworks workflow that performs a SCF calculation with hybrid functional based on GW.
    """

    def __init__(self, scf_inp, hybrid_input, autoparal=False, spec=None, initialization_info=None):
        """
        Args:
            scf_inp: an AbinitInput (or input factory) for the SCF calculation.
            hybrid_input: an AbinitInput (or input factory) for the hybrid functional calculation.
            autoparal: if True autoparal will be used at runtime to optimize the number of processes.
            spec: a dict with additional spec for the Firework.
            initialization_info: a dict defining additional information about the initialization of the workflow.
        """
        # NOTE: docstring previously referred to a non-existent 'scf_input' parameter; fixed to 'scf_inp'.
        if spec is None:
            spec = {}
        if initialization_info is None:
            initialization_info = {}
        rf = self.get_reduced_formula(scf_inp)
        scf_task = ScfFWTask(scf_inp, is_autoparal=autoparal)
        spec = dict(spec)
        spec['initialization_info'] = initialization_info
        if autoparal:
            spec = self.set_short_single_core_to_spec(spec)
        self.scf_fw = Firework(scf_task, spec=spec, name=rf+"_"+scf_task.task_type)
        # the hybrid task reads the wavefunctions (WFK) produced by the SCF step
        hybrid_task = HybridFWTask(hybrid_input, is_autoparal=autoparal, deps=["WFK"])
        self.hybrid_fw = Firework(hybrid_task, spec=spec, name=rf+"_"+hybrid_task.task_type)
        # children given as a list, consistently with the other workflows in this module
        self.wf = Workflow([self.scf_fw, self.hybrid_fw], {self.scf_fw: [self.hybrid_fw]})

    @classmethod
    def from_factory(cls, structure, pseudos, kppa=None, ecut=None, pawecutdg=None, nband=None, accuracy="normal",
                     spin_mode="polarized", smearing="fermi_dirac:0.1 eV", charge=0.0, scf_algorithm=None,
                     shift_mode="Monkhorst-Pack", hybrid_functional="hse06", ecutsigx=None, gw_qprange=1,
                     extra_abivars=None, decorators=None, autoparal=False, spec=None, initialization_info=None):
        """
        Creates an instance of HybridOneShotFWWorkflow using the scf_input and hybrid_oneshot_input factory functions.
        See the description of the factories for the definition of the arguments.
        """
        if extra_abivars is None:
            extra_abivars = {}
        if decorators is None:
            decorators = []
        if spec is None:
            spec = {}
        if initialization_info is None:
            initialization_info = {}
        scf_fact = ScfFactory(structure=structure, pseudos=pseudos, kppa=kppa, ecut=ecut, pawecutdg=pawecutdg,
                              nband=nband, accuracy=accuracy, spin_mode=spin_mode, smearing=smearing, charge=charge,
                              scf_algorithm=scf_algorithm, shift_mode=shift_mode, extra_abivars=extra_abivars,
                              decorators=decorators)
        hybrid_fact = HybridOneShotFromGsFactory(functional=hybrid_functional, ecutsigx=ecutsigx, gw_qprange=gw_qprange,
                                                 decorators=decorators, extra_abivars=extra_abivars)
        return cls(scf_fact, hybrid_fact, autoparal=autoparal, spec=spec, initialization_info=initialization_info)
class PhononFWWorkflow(AbstractFWWorkflow):
"""
Generator of a fireworks workflow for the calculation of phonon properties with DFPT.
Parallelization over all the perturbations. The phononic perturbations will be generated at runtime based on the
last input used in the SCF calculation.
Warning: for calculations requiring a large number of perturbations (>1000 perturbations) the generation step will
fail due to limitations in the size of documents in mongodb database. Use PhononFullFWWorkflow if this may
be an issue.
"""
workflow_class = 'PhononFWWorkflow'
workflow_module = 'abiflows.fireworks.workflows.abinit_workflows'
def __init__(self, scf_inp, phonon_factory, autoparal=False, spec=None, initialization_info=None):
"""
Args:
scf_inp: an |AbinitInput| object for the SCF calculation.
phonon_factory: an PhononsFromGsFactory for the generation of the inputs for phonon perturbations.
autoparal: if True autoparal will be used at runtime to optimize the number of processes.
spec: a dict with additional spec for the Firework.
initialization_info: a dict defining additional information about the initialization of the workflow.
"""
if spec is None:
spec = {}
if initialization_info is None:
initialization_info = {}
start_task_index = 1
rf = self.get_reduced_formula(scf_inp)
scf_task = ScfFWTask(scf_inp, is_autoparal=autoparal)
spec = dict(spec)
spec['initialization_info'] = initialization_info
if autoparal:
spec = self.set_short_single_core_to_spec(spec)
start_task_index = 'autoparal'
spec['wf_task_index'] = 'scf_' + str(start_task_index)
self.scf_fw = Firework(scf_task, spec=spec, name=rf+"_"+scf_task.task_type)
ph_generation_task = GeneratePhononFlowFWAbinitTask(phonon_factory, previous_task_type=scf_task.task_type,
with_autoparal=autoparal)
spec['wf_task_index'] = 'gen_ph'
self.ph_generation_fw = Firework(ph_generation_task, spec=spec, name=rf+"_gen_ph")
self.wf = Workflow([self.scf_fw, self.ph_generation_fw], {self.scf_fw: self.ph_generation_fw},
metadata={'workflow_class': self.workflow_class,
'workflow_module': self.workflow_module})
    @classmethod
    def from_factory(cls, structure, pseudos, kppa=None, ecut=None, pawecutdg=None, nband=None, accuracy="normal",
                     spin_mode="polarized", smearing="fermi_dirac:0.1 eV", charge=0.0, scf_algorithm=None,
                     shift_mode="Symmetric", ph_ngqpt=None, qpoints=None, qppa=None, with_ddk=True, with_dde=True,
                     with_bec=False, scf_tol=None, ph_tol=None, ddk_tol=None, dde_tol=None, wfq_tol=None,
                     qpoints_to_skip=None, extra_abivars=None, decorators=None, autoparal=False, spec=None,
                     initialization_info=None, manager=None):
        """
        Creates an instance of PhononFWWorkflow using the scf_for_phonons and phonons_from_gsinput factory functions.
        See the description of the factories for the definition of the arguments.
        The manager can be a TaskManager or a FWTaskManager.

        Raises:
            ValueError: if no TaskManager is available or if qppa is given together
                with ph_ngqpt or qpoints.
        """
        # Normalize the mutable/optional arguments.
        if extra_abivars is None:
            extra_abivars = {}
        if decorators is None:
            decorators = []
        if spec is None:
            spec = {}
        if initialization_info is None:
            initialization_info = {}
        # A FWTaskManager wraps an abipy TaskManager: extract the latter, which is
        # what the input factories expect. A manager is mandatory here.
        if isinstance(manager, FWTaskManager):
            manager = manager.task_manager
        if manager is None:
            raise ValueError("A TaskManager is required in the FWTaskManager.")
        # The ways of specifying the q-point sampling are mutually exclusive.
        if qppa is not None and (ph_ngqpt is not None or qpoints is not None):
            raise ValueError("qppa is incompatible with ph_ngqpt and qpoints")
        if qppa is not None:
            initialization_info['qppa'] = qppa
            # Convert the q-point density into an explicit grid.
            ph_ngqpt = KSampling.automatic_density(structure, qppa, chksymbreak=0).kpts[0]
        # Record the sampling choices for later retrieval from the results document.
        initialization_info['ngqpt'] = ph_ngqpt
        initialization_info['qpoints'] = qpoints
        if 'kppa' not in initialization_info:
            initialization_info['kppa'] = kppa
        extra_abivars_scf = dict(extra_abivars)
        # Tight default tolerance on the wavefunctions: the WFK file is reused by the DFPT steps.
        extra_abivars_scf['tolwfr'] = scf_tol if scf_tol else 1.e-22
        scf_fact = ScfForPhononsFactory(structure=structure, pseudos=pseudos, kppa=kppa, ecut=ecut, pawecutdg=pawecutdg,
                                        nband=nband, accuracy=accuracy, spin_mode=spin_mode, smearing=smearing,
                                        charge=charge, scf_algorithm=scf_algorithm, shift_mode=shift_mode,
                                        extra_abivars=extra_abivars_scf, decorators=decorators)
        phonon_fact = PhononsFromGsFactory(ph_ngqpt=ph_ngqpt, with_ddk=with_ddk, with_dde=with_dde, with_bec=with_bec,
                                           ph_tol=ph_tol, ddk_tol=ddk_tol, dde_tol=dde_tol, wfq_tol=wfq_tol,
                                           qpoints_to_skip=qpoints_to_skip, extra_abivars=extra_abivars,
                                           qpoints=qpoints, decorators=decorators, manager=manager)
        ph_wf = cls(scf_fact, phonon_fact, autoparal=autoparal, spec=spec, initialization_info=initialization_info)
        return ph_wf
    @classmethod
    def from_gs_input(cls, gs_input, structure=None, ph_ngqpt=None, qpoints=None, qppa=None, with_ddk=True,
                      with_dde=True, with_bec=False, scf_tol=None, ph_tol=None, ddk_tol=None, dde_tol=None, wfq_tol=None,
                      qpoints_to_skip=None, extra_abivars=None, decorators=None, autoparal=False, spec=None,
                      initialization_info=None, manager=None):
        """
        Creates an instance of PhononFWWorkflow using a custom |AbinitInput| for a ground state calculation and the
        phonons_from_gsinput factory function. Tolerances for the scf will be set accordingly to scf_tol (with
        default 1e-22) and keys relative to relaxation and parallelization will be removed from gs_input.
        See the description of the phonons_from_gsinput factory for the definition of the arguments.
        The manager can be a TaskManager or a FWTaskManager.

        Raises:
            ValueError: if no TaskManager is available or if qppa is given together
                with ph_ngqpt or qpoints.
        """
        # Normalize the mutable/optional arguments.
        if extra_abivars is None:
            extra_abivars = {}
        if decorators is None:
            decorators = []
        if spec is None:
            spec = {}
        if initialization_info is None:
            initialization_info = {}
        # A FWTaskManager wraps an abipy TaskManager: extract the latter. A manager is mandatory here.
        if isinstance(manager, FWTaskManager):
            manager = manager.task_manager
        if manager is None:
            raise ValueError("A TaskManager is required in the FWTaskManager.")
        # The ways of specifying the q-point sampling are mutually exclusive.
        if qppa is not None and (ph_ngqpt is not None or qpoints is not None):
            raise ValueError("qppa is incompatible with ph_ngqpt and qpoints")
        if qppa is not None:
            if structure is None:
                structure = gs_input.structure
            initialization_info['qppa'] = qppa
            # Convert the q-point density into an explicit grid.
            ph_ngqpt = KSampling.automatic_density(structure, qppa, chksymbreak=0).kpts[0]
        initialization_info['ngqpt'] = ph_ngqpt
        initialization_info['qpoints'] = qpoints
        # Work on a copy; gs_input is not modified.
        scf_inp = gs_input.deepcopy()
        if structure:
            scf_inp.set_structure(structure)
        # Tight tolerance on the wavefunctions: the WFK file is reused by the DFPT steps.
        if scf_tol:
            scf_inp.update(scf_tol)
        else:
            scf_inp['tolwfr'] = 1.e-22
        scf_inp['chksymbreak'] = 1
        if not scf_inp.get('nbdbuf', 0):
            # add a buffer of unconverged bands and enlarge nband accordingly
            scf_inp['nbdbuf'] = 4
            scf_inp['nband'] = scf_inp['nband'] + 4
        abi_vars = get_abinit_variables()
        # remove relaxation variables in case gs_input is a relaxation
        for v in abi_vars.vars_with_varset('rlx'):
            scf_inp.pop(v.name, None)
        # remove parallelization variables in case gs_input is coming from a previous run with parallelization
        for v in abi_vars.vars_with_varset('paral'):
            scf_inp.pop(v.name, None)
        scf_inp.set_vars(extra_abivars)
        phonon_fact = PhononsFromGsFactory(ph_ngqpt=ph_ngqpt, with_ddk=with_ddk, with_dde=with_dde, with_bec=with_bec,
                                           ph_tol=ph_tol, ddk_tol=ddk_tol, dde_tol=dde_tol, wfq_tol=wfq_tol,
                                           qpoints_to_skip=qpoints_to_skip, extra_abivars=extra_abivars,
                                           qpoints=qpoints, decorators=decorators, manager=manager)
        ph_wf = cls(scf_inp, phonon_fact, autoparal=autoparal, spec=spec, initialization_info=initialization_info)
        return ph_wf
def add_anaddb_ph_bs_fw(self, structure, ph_ngqpt, ndivsm=20, nqsmall=15):
"""
Appends a Firework with a task for the calculation of phonon band structure and dos with anaddb.
Args:
structure: the input structure
ngqpt: Monkhorst-Pack divisions for the phonon Q-mesh (coarse one)
nqsmall: Used to generate the (dense) mesh for the DOS.
It defines the number of q-points used to sample the smallest lattice vector.
ndivsm: Used to generate a normalized path for the phonon bands.
If gives the number of divisions for the smallest segment of the path.
"""
anaddb_input = AnaddbInput.phbands_and_dos(structure=structure, ngqpt=ph_ngqpt, ndivsm=ndivsm,nqsmall=nqsmall,
asr=2, chneut=1, dipdip=1, lo_to_splitting=True)
anaddb_task = AnaDdbAbinitTask(anaddb_input, deps={MergeDdbAbinitTask.task_type: "DDB"})
spec = dict(self.scf_fw.spec)
spec['wf_task_index'] = 'anaddb'
anaddb_fw = Firework(anaddb_task, spec=spec, name='anaddb')
self.append_fw(anaddb_fw, short_single_spec=True)
    @classmethod
    def get_mongoengine_results(cls, wf):
        """
        Generates the PhononResult mongoengine document containing the results of the calculation.
        The workflow should have been generated from this class and requires an open connection to the
        fireworks database and access to the file system containing the calculations.

        Args:
            wf: the fireworks Workflow instance of the workflow.
        Returns:
            A PhononResult document.
        """
        # Sanity check: the workflow must have been produced by this generator.
        assert wf.metadata['workflow_class'] == cls.workflow_class
        assert wf.metadata['workflow_module'] == cls.workflow_module
        # Highest restart index seen so far for each task type; the firework with
        # the largest index holds the final (possibly restarted) calculation.
        scf_index = 0
        ph_index = 0
        ddk_index = 0
        dde_index = 0
        wfq_index = 0
        anaddb_task = None
        # Scan all the fireworks: keep, for each task type, the firework with the
        # highest restart index, plus the anaddb/mrgddb tasks with their workdirs set.
        for fw in wf.fws:
            task_index = fw.spec.get('wf_task_index', '')
            if task_index == 'anaddb':
                anaddb_launch = get_last_completed_launch(fw)
                anaddb_task = fw.tasks[-1]
                anaddb_task.set_workdir(workdir=anaddb_launch.launch_dir)
            elif task_index == 'mrgddb':
                mrgddb_launch = get_last_completed_launch(fw)
                mrgddb_task = fw.tasks[-1]
                mrgddb_task.set_workdir(workdir=mrgddb_launch.launch_dir)
            elif task_index.startswith('scf_') and not task_index.endswith('autoparal'):
                current_index = int(task_index.split('_')[-1])
                if current_index > scf_index:
                    scf_index = current_index
                    scf_fw = fw
            elif task_index.startswith('phonon_0') and not task_index.endswith('autoparal'):
                current_index = int(task_index.split('_')[-1])
                if current_index > ph_index:
                    ph_index = current_index
                    ph_fw = fw
            elif task_index.startswith('ddk_0') and not task_index.endswith('autoparal'):
                current_index = int(task_index.split('_')[-1])
                if current_index > ddk_index:
                    ddk_index = current_index
                    ddk_fw = fw
            elif task_index.startswith('dde_0') and not task_index.endswith('autoparal'):
                current_index = int(task_index.split('_')[-1])
                if current_index > dde_index:
                    dde_index = current_index
                    dde_fw = fw
            elif task_index.startswith('nscf_wfq_0') and not task_index.endswith('autoparal'):
                current_index = int(task_index.split('_')[-1])
                if current_index > wfq_index:
                    wfq_index = current_index
                    wfq_fw = fw
        # NOTE(review): scf_fw and mrgddb_task are assumed to have been found in the
        # loop above; a malformed workflow would raise NameError here.
        scf_launch = get_last_completed_launch(scf_fw)
        with open(os.path.join(scf_launch.launch_dir, 'history.json'), "rt") as fh:
            scf_history = json.load(fh, cls=MontyDecoder)
        scf_task = scf_fw.tasks[-1]
        scf_task.set_workdir(workdir=scf_launch.launch_dir)
        document = PhononResult()
        # The final input actually used by abinit, as recorded in the task history.
        gs_input = scf_history.get_events_by_types(TaskEvent.FINALIZED)[0].details['final_input']
        document.abinit_input.gs_input = gs_input.as_dict()
        document.abinit_input.set_abinit_basic_from_abinit_input(gs_input)
        structure = gs_input.structure
        document.abinit_output.structure = structure.as_dict()
        document.set_material_data_from_structure(structure)
        # Metadata stored at workflow creation time (see __init__/from_factory).
        initialization_info = scf_history.get_events_by_types(TaskEvent.INITIALIZED)[0].details.get('initialization_info', {})
        document.mp_id = initialization_info.get('mp_id', None)
        document.custom = initialization_info.get("custom", None)
        document.relax_db = initialization_info['relax_db'].as_dict() if 'relax_db' in initialization_info else None
        document.relax_id = initialization_info.get('relax_id', None)
        document.abinit_input.ngqpt = initialization_info.get('ngqpt', None)
        document.abinit_input.qpoints = initialization_info.get('qpoints', None)
        document.abinit_input.qppa = initialization_info.get('qppa', None)
        document.abinit_input.kppa = initialization_info.get('kppa', None)
        document.abinit_input.pseudopotentials.set_pseudos_from_files_file(scf_task.files_file.path,
                                                                           len(structure.composition.elements))
        document.created_on = datetime.datetime.now()
        document.modified_on = datetime.datetime.now()
        document.set_dir_names_from_fws_wf(wf)
        # read in binary for py3k compatibility with mongoengine
        with open(mrgddb_task.merged_ddb_path, "rb") as f:
            document.abinit_output.ddb.put(f)
        # Store the input of one representative perturbation of each kind, if present.
        if ph_index > 0:
            ph_task = ph_fw.tasks[-1]
            document.abinit_input.phonon_input = ph_task.abiinput.as_dict()
        if ddk_index > 0:
            ddk_task = ddk_fw.tasks[-1]
            document.abinit_input.ddk_input = ddk_task.abiinput.as_dict()
        if dde_index > 0:
            dde_task = dde_fw.tasks[-1]
            document.abinit_input.dde_input = dde_task.abiinput.as_dict()
        if wfq_index > 0:
            wfq_task = wfq_fw.tasks[-1]
            document.abinit_input.wfq_input = wfq_task.abiinput.as_dict()
        # Results of the anaddb post-processing, when an anaddb FW was appended.
        if anaddb_task is not None:
            with open(anaddb_task.phbst_path, "rb") as f:
                document.abinit_output.phonon_bs.put(f)
            with open(anaddb_task.phdos_path, "rb") as f:
                document.abinit_output.phonon_dos.put(f)
            with open(anaddb_task.anaddb_nc_path, "rb") as f:
                document.abinit_output.anaddb_nc.put(f)
        document.fw_id = scf_fw.fw_id
        document.time_report = get_time_report_for_wf(wf).as_dict()
        with open(scf_task.gsr_path, "rb") as f:
            document.abinit_output.gs_gsr.put(f)
        # read in binary for py3k compatibility with mongoengine
        with open(scf_task.output_file.path, "rb") as f:
            document.abinit_output.gs_outfile.put(f)
        return document
class PhononFullFWWorkflow(PhononFWWorkflow):
    """
    Generator of a fireworks workflow for the calculation of phonon properties with DFPT.
    Parallelization over all the perturbations.
    Subclass of PhononFWWorkflow, but the Fireworks with the phonon perturbations are
    generated immediately rather than at runtime, avoiding the mongodb document-size
    limit hit by the runtime-generation step for large numbers of perturbations.
    """
    workflow_class = 'PhononFullFWWorkflow'
    workflow_module = 'abiflows.fireworks.workflows.abinit_workflows'

    def __init__(self, scf_inp, phonon_factory, autoparal=False, spec=None, initialization_info=None):
        """
        Args:
            scf_inp: an |AbinitInput| for the SCF calculation.
            phonon_factory: a PhononsFromGsFactory for the generation of the inputs for the phonon
                perturbations, or an object that already provides the perturbation inputs.
            autoparal: if True autoparal will be used at runtime to optimize the number of processes.
            spec: a dict with additional spec for the Firework.
            initialization_info: a dict defining additional information about the initialization of the workflow.
        """
        spec = spec or {}
        initialization_info = initialization_info or {}
        start_task_index = 1
        rf = self.get_reduced_formula(scf_inp)
        scf_task = ScfFWTask(scf_inp, is_autoparal=autoparal)
        spec = dict(spec)
        spec['initialization_info'] = initialization_info
        if autoparal:
            # the first step of the SCF firework runs the autoparal instead of the calculation
            spec = self.set_short_single_core_to_spec(spec)
            start_task_index = 'autoparal'
        spec['wf_task_index'] = 'scf_' + str(start_task_index)
        self.scf_fw = Firework(scf_task, spec=spec, name=rf+"_"+scf_task.task_type)
        # Generate the phononic part of the workflow immediately.
        # Since everything is being generated here, factories should be used to generate the AbinitInput.
        if isinstance(scf_inp, InputFactory):
            scf_inp = scf_inp.build_input()
        if isinstance(phonon_factory, InputFactory):
            initialization_info['input_factory'] = phonon_factory.as_dict()
            spec['initialization_info']['input_factory'] = phonon_factory.as_dict()
            ph_inputs = phonon_factory.build_input(scf_inp)
        else:
            ph_inputs = phonon_factory
        ph_q_pert_inputs = ph_inputs.filter_by_tags(atags.PH_Q_PERT)
        ddk_inputs = ph_inputs.filter_by_tags(atags.DDK)
        dde_inputs = ph_inputs.filter_by_tags(atags.DDE)
        bec_inputs = ph_inputs.filter_by_tags(atags.BEC)
        nscf_inputs = ph_inputs.filter_by_tags(atags.NSCF)
        previous_task_type = scf_task.task_type
        # Initialize all the fw/dependency containers up front so each one is always
        # defined even when the corresponding perturbation type is absent.
        # (Previously ph_fw_deps was defined only inside the "if ph_q_pert_inputs"
        # branch but referenced unconditionally below, raising a NameError when no
        # phonon perturbations were present.)
        nscf_fws, nscf_fw_deps = [], {}
        ph_fws, ph_fw_deps = [], {}
        ddk_fws = []
        dde_fws = []
        bec_fws = []
        if nscf_inputs is not None:
            nscf_fws, nscf_fw_deps = self.get_fws(nscf_inputs, NscfWfqFWTask,
                                                  {previous_task_type: "DEN"}, spec)
        if ph_q_pert_inputs:
            # the first-order wavefunctions are not needed afterwards
            ph_q_pert_inputs.set_vars(prtwf=-1)
            ph_fws, ph_fw_deps = self.get_fws(ph_q_pert_inputs, PhononTask, {previous_task_type: "WFK"}, spec,
                                              nscf_fws)
        if ddk_inputs:
            ddk_fws, ddk_fw_deps = self.get_fws(ddk_inputs, DdkTask, {previous_task_type: "WFK"}, spec)
        if dde_inputs:
            dde_inputs.set_vars(prtwf=-1)
            dde_fws, dde_fw_deps = self.get_fws(dde_inputs, DdeTask,
                                                {previous_task_type: "WFK", DdkTask.task_type: "DDK"}, spec)
        if bec_inputs:
            bec_inputs.set_vars(prtwf=-1)
            bec_fws, bec_fw_deps = self.get_fws(bec_inputs, BecTask,
                                                {previous_task_type: "WFK", DdkTask.task_type: "DDK"}, spec)
        # Firework merging all the partial DDB files produced by the DFPT steps; single core.
        mrgddb_spec = dict(spec)
        mrgddb_spec['wf_task_index'] = 'mrgddb'
        self.set_short_single_core_to_spec(mrgddb_spec)
        mrgddb_spec['mpi_ncpus'] = 1
        # Set a higher priority to favour the end of the WF
        #TODO improve the handling of the priorities
        mrgddb_spec['_priority'] = 10
        num_ddbs_to_be_merged = len(ph_fws) + len(dde_fws) + len(bec_fws)
        mrgddb_fw = Firework(MergeDdbAbinitTask(num_ddbs=num_ddbs_to_be_merged, delete_source_ddbs=False),
                             spec=mrgddb_spec,
                             name=ph_inputs[0].structure.composition.reduced_formula+'_mergeddb')
        fws_deps = defaultdict(list)
        autoparal_fws = []
        if autoparal:
            # add an AutoparalTask for each type and relative dependencies
            # NOTE(review): this assumes at least one phonon perturbation is present;
            # ph_q_pert_inputs[0] would fail otherwise -- confirm with callers.
            dfpt_autoparal_fw = self.get_autoparal_fw(ph_q_pert_inputs[0], 'dfpt', {previous_task_type: "WFK"}, spec,
                                                      nscf_fws)[0]
            autoparal_fws.append(dfpt_autoparal_fw)
            fws_deps[dfpt_autoparal_fw] = ph_fws + ddk_fws + dde_fws + bec_fws
            fws_deps[self.scf_fw].append(dfpt_autoparal_fw)
            if nscf_fws:
                nscf_autoparal_fw = self.get_autoparal_fw(nscf_inputs[0], 'nscf',
                                                          {previous_task_type: "DEN"}, spec)[0]
                fws_deps[nscf_autoparal_fw] = nscf_fws
                autoparal_fws.append(nscf_autoparal_fw)
                fws_deps[self.scf_fw].append(nscf_autoparal_fw)
        # DDE and BEC calculations both require the DDK files, so each DDK firework is a
        # parent of both sets of children. (The original code assigned the DDE children
        # and then overwrote them with the BEC ones when both were present, silently
        # losing the DDK -> DDE links.)
        for ddk_fw in ddk_fws:
            ddk_children = dde_fws + bec_fws
            if ddk_children:
                fws_deps[ddk_fw] = ddk_children
        # all the abinit related FWs should depend on the scf calculation for the WFK
        abinit_fws = nscf_fws + ddk_fws + dde_fws + ph_fws + bec_fws
        fws_deps[self.scf_fw].extend(abinit_fws)
        # every firework producing a partial DDB feeds the merge step
        ddb_fws = dde_fws + ph_fws + bec_fws
        #TODO pass all the tasks to the MergeDdbTask for logging or easier retrieve of the DDK?
        for ddb_fw in ddb_fws:
            fws_deps[ddb_fw].append(mrgddb_fw)
        total_list_fws = [self.scf_fw] + ddb_fws + ddk_fws + [mrgddb_fw] + nscf_fws + autoparal_fws
        # links between the NSCF WFQ calculations and the phonon perturbations at the same q-point
        fws_deps.update(ph_fw_deps)
        self.wf = Workflow(total_list_fws, links_dict=fws_deps,
                           metadata={'workflow_class': self.workflow_class,
                                     'workflow_module': self.workflow_module})

    def get_fws(self, multi_inp, task_class, deps, spec, nscf_fws=None):
        """
        Prepares the fireworks for a specific type of calculation.

        Args:
            multi_inp: |MultiDataset| with the inputs that should be run.
            deps: dict with the dependencies already set for this type of task.
            task_class: class of the tasks that should be generated.
            spec: spec for the new Fireworks that will be created.
            nscf_fws: list of NSCF fws for the calculation of WFQ files, in case they are present.
                Will be linked if needed.

        Returns:
            (tuple): tuple containing:
                - fws (list): The list of new Fireworks.
                - fw_deps (dict): The dependencies related to these fireworks.
                  Should be used when generating the workflow.
        """
        formula = multi_inp[0].structure.composition.reduced_formula
        fws = []
        fw_deps = defaultdict(list)
        for i, inp in enumerate(multi_inp):
            spec = dict(spec)
            start_task_index = 1
            current_deps = dict(deps)
            parent_fw = None
            if nscf_fws:
                # link to the NSCF calculation providing the WFQ at the same q-point
                qpt = inp['qpt']
                for nscf_fw in nscf_fws:
                    if np.allclose(nscf_fw.tasks[0].abiinput['qpt'], qpt):
                        parent_fw = nscf_fw
                        current_deps[nscf_fw.tasks[0].task_type] = "WFQ"
                        break
            task = task_class(inp, deps=current_deps, is_autoparal=False)
            # this index is for the different task, each performing a different perturbation
            indexed_task_type = task_class.task_type + '_' + str(i)
            # this index is to index the restarts of the single task
            spec['wf_task_index'] = indexed_task_type + '_' + str(start_task_index)
            fw = Firework(task, spec=spec, name=(formula + '_' + indexed_task_type)[:15])
            fws.append(fw)
            if parent_fw is not None:
                fw_deps[parent_fw].append(fw)
        return fws, fw_deps

    def get_autoparal_fw(self, inp, task_type, deps, spec, nscf_fws=None):
        """
        Prepares a single Firework containing an AutoparalTask to perform the autoparal
        for each group of calculations.

        Args:
            inp: one of the inputs for which the autoparal should be run.
            task_type: task_type of the class for the current type of calculation.
            deps: dict with the dependencies already set for this type of task.
            spec: spec for the new Firework that will be created.
            nscf_fws: list of NSCF fws for the calculation of WFQ files, in case they are present.
                Will be linked if needed.

        Returns:
            (tuple): tuple containing:
                - fw (Firework): The new Firework.
                - fw_deps (dict): The dependencies related to this firework.
                  Should be used when generating the workflow.
        """
        formula = inp.structure.composition.reduced_formula
        fw_deps = defaultdict(list)
        spec = dict(spec)
        current_deps = dict(deps)
        parent_fw = None
        if nscf_fws:
            # link to the NSCF calculation providing the WFQ at the same q-point
            qpt = inp['qpt']
            for nscf_fw in nscf_fws:
                if np.allclose(nscf_fw.tasks[0].abiinput['qpt'], qpt):
                    parent_fw = nscf_fw
                    current_deps[nscf_fw.tasks[0].task_type] = "WFQ"
                    break
        task = AutoparalTask(inp, deps=current_deps, forward_spec=True)
        # one autoparal per calculation type: index by the task_type label
        indexed_task_type = AutoparalTask.task_type
        spec['wf_task_index'] = indexed_task_type + '_' + task_type
        fw = Firework(task, spec=spec, name=(formula + '_' + indexed_task_type)[:15])
        if parent_fw is not None:
            fw_deps[parent_fw].append(fw)
        return fw, fw_deps
class DteFWWorkflow(AbstractFWWorkflow):
"""
Generator of a fireworks workflow for the calculation of third derivatives with respect to electric field and
atomic position to calculate non-linear optical susceptibilities of a material using DFPT.
Parallelization over all the perturbations. The phonon perturbations are optional.
"""
workflow_class = 'DteFWWorkflow'
workflow_module = 'abiflows.fireworks.workflows.abinit_workflows'
    def __init__(self, scf_inp, ddk_inp, dde_inp, dte_inp, ph_inp=None, autoparal=False, spec=None,
                 initialization_info=None):
        """
        Args:
            scf_inp: an |AbinitInput| for the SCF calculation.
            ddk_inp: a |MultiDataset| with the inputs for the DDK perturbations.
            dde_inp: a |MultiDataset| with the inputs for the DDE perturbations.
            dte_inp: a |MultiDataset| with the inputs for the DTE perturbations.
            ph_inp: a |MultiDataset| with the inputs for the phonon perturbations. If None phonon perturbations
                will not be considered.
            autoparal: if True autoparal will be used at runtime to optimize the number of processes.
            spec: a dict with additional spec for the Firework.
            initialization_info: a dict defining additional information about the initialization of the workflow.
        """
        if spec is None:
            spec = {}
        if initialization_info is None:
            initialization_info = {}
        start_task_index = 1
        rf = self.get_reduced_formula(scf_inp)
        scf_task = ScfFWTask(scf_inp, is_autoparal=autoparal)
        spec = dict(spec)
        spec['initialization_info'] = initialization_info
        if autoparal:
            # the first step of the SCF firework runs the autoparal instead of the calculation
            spec = self.set_short_single_core_to_spec(spec)
            start_task_index = 'autoparal'
        spec['wf_task_index'] = 'scf_' + str(start_task_index)
        self.scf_fw = Firework(scf_task, spec=spec, name=rf+"_"+scf_task.task_type)
        # One Firework per perturbation; all depend on the WFK of the SCF step.
        self.ph_fws = []
        if ph_inp:
            self.ph_fws = self.get_fws(ph_inp, PhononTask, {ScfFWTask.task_type: "WFK"}, spec)
        self.ddk_fws = self.get_fws(ddk_inp, DdkTask, {ScfFWTask.task_type: "WFK"}, spec)
        self.dde_fws = self.get_fws(dde_inp, DdeTask, {ScfFWTask.task_type: "WFK", DdkTask.task_type: "DDK"}, spec)
        # DTE tasks need the first-order WF/density from the DDE (and phonon) steps.
        dte_deps = {ScfFWTask.task_type: ["WFK", "DEN"], DdeTask.task_type: ["1WF", "1DEN"]}
        if ph_inp:
            dte_deps[PhononTask.task_type] = ["1WF", "1DEN"]
        self.dte_fws = self.get_fws(dte_inp, DteTask, dte_deps, spec)
        # Firework merging all the partial DDB files; runs on a single core.
        mrgddb_spec = dict(spec)
        mrgddb_spec['wf_task_index'] = 'mrgddb'
        self.set_short_single_core_to_spec(mrgddb_spec)
        mrgddb_spec['mpi_ncpus'] = 1
        num_ddbs_to_be_merged = len(self.ph_fws) + len(self.dde_fws) + len(self.dte_fws)
        self.mrgddb_fw = Firework(MergeDdbAbinitTask(num_ddbs=num_ddbs_to_be_merged, delete_source_ddbs=False),
                                  spec=mrgddb_spec,name=scf_inp.structure.composition.reduced_formula+'_mergeddb')
        fws_deps = defaultdict(list)
        self.autoparal_fws = []
        # non-linear calculations do not support autoparal
        if autoparal:
            # add an AutoparalTask for each type and relative dependencies
            dfpt_autoparal_fw = self.get_autoparal_fw(ddk_inp[0], 'dfpt', {ScfFWTask.task_type: "WFK"}, spec)
            self.autoparal_fws.append(dfpt_autoparal_fw)
            fws_deps[dfpt_autoparal_fw] = self.ph_fws + self.ddk_fws + self.dde_fws
            # being a fake autoparal it doesn't need to follow the dde tasks. No other dependencies are enforced.
            dte_autoparal_fw = self.get_autoparal_fw(None, 'dte', None, spec,
                                                     ddk_inp[0].structure.composition.reduced_formula)
            self.autoparal_fws.append(dte_autoparal_fw)
            fws_deps[dte_autoparal_fw] = self.dte_fws
        # DDK -> DDE/DTE, DDE -> DTE and (optionally) phonon -> DTE dependency links.
        for ddk_fw in self.ddk_fws:
            fws_deps[ddk_fw] = list(self.dde_fws + self.dte_fws)
        for dde_fw in self.dde_fws:
            fws_deps[dde_fw] = list(self.dte_fws)
        if self.ph_fws:
            for ph_fw in self.ph_fws:
                fws_deps[ph_fw] = list(self.dte_fws)
        # every firework producing a partial DDB feeds the merge step
        ddb_fws = self.dde_fws + self.ph_fws + self.dte_fws
        for ddb_fw in ddb_fws:
            fws_deps[ddb_fw].append(self.mrgddb_fw)
        # all the perturbations (and the autoparal fws) depend on the SCF step
        fws_deps[self.scf_fw] = list(ddb_fws+self.ddk_fws + self.autoparal_fws)
        total_list_fws = [self.scf_fw] + ddb_fws+self.ddk_fws+[self.mrgddb_fw] + self.autoparal_fws
        self.wf = Workflow(total_list_fws, fws_deps,
                           metadata={'workflow_class': self.workflow_class, 'workflow_module': self.workflow_module})
def get_fws(self, multi_inp, task_class, deps, spec):
"""
Prepares the fireworks for a specific type of calculation
Args:
multi_inp: |MultiDataset| with the inputs that should be run
task_class: class of the tasks that should be generated
deps: dict with the dependencies already set for this type of task
spec: spec for the new Fireworks that will be created
Returns:
The list of new Fireworks.
"""
formula = multi_inp[0].structure.composition.reduced_formula
fws = []
autoparal_spec = {}
for i, inp in enumerate(multi_inp):
spec = dict(spec)
start_task_index = 1
task = task_class(inp, deps=dict(deps), is_autoparal=False)
# this index is for the different task, each performing a different perturbation
indexed_task_type = task_class.task_type + '_' + str(i)
# this index is to index the restarts of the single task
spec['wf_task_index'] = indexed_task_type + '_' + str(start_task_index)
fw = Firework(task, spec=spec, name=(formula + '_' + indexed_task_type)[:15])
fws.append(fw)
return fws
def get_autoparal_fw(self, inp, task_type, deps, spec, formula=None):
"""
Prepares a single Firework conatining an AutoparalTask to perform the autoparal for each group of calculations.
Args:
inp: one of the inputs for which the autoparal should be run
task_type: task_type of the class for the current type of calculation
deps: dict with the dependencies already set for this type of task
spec: spec for the new Fireworks that will be created
Returns:
The Firework with the autoparal.
"""
if deps is None:
deps = {}
if formula is None:
formula = inp.structure.composition.reduced_formula
spec = dict(spec)
task = AutoparalTask(inp, deps=dict(deps), forward_spec=True)
# this index is for the different task, each performing a different perturbation
indexed_task_type = AutoparalTask.task_type
# this index is to index the restarts of the single task
spec['wf_task_index'] = indexed_task_type + '_' + task_type
fw = Firework(task, spec=spec, name=(formula + '_' + indexed_task_type)[:15])
return fw
    @classmethod
    def from_factory(cls, structure, pseudos, kppa=None, ecut=None, pawecutdg=None, nband=None, accuracy="normal",
                     spin_mode="polarized", smearing="fermi_dirac:0.1 eV", charge=0.0, scf_algorithm=None,
                     shift_mode="Symmetric", scf_tol=None, ph_tol=None, ddk_tol=None, dde_tol=None,
                     use_phonons=False, extra_abivars=None, decorators=None, autoparal=False, spec=None,
                     initialization_info=None, skip_dte_permutations=False, manager=None):
        """
        Creates an instance of DteFWWorkflow using the scf_for_phonons and dte_from_gsinput factory functions.
        See the description of the factories for the definition of the arguments.
        The manager can be a TaskManager or a FWTaskManager.

        Raises:
            ValueError: if no TaskManager is available.
        """
        # Normalize the mutable/optional arguments.
        if extra_abivars is None:
            extra_abivars = {}
        if decorators is None:
            decorators = []
        if spec is None:
            spec = {}
        if initialization_info is None:
            initialization_info = {}
        # A FWTaskManager wraps an abipy TaskManager: extract the latter, which is
        # what the input factories expect. A manager is mandatory here.
        if isinstance(manager, FWTaskManager):
            manager = manager.task_manager
        if manager is None:
            raise ValueError("A TaskManager is required in the FWTaskManager.")
        initialization_info['use_phonons'] = use_phonons
        if 'kppa' not in initialization_info:
            initialization_info['kppa'] = kppa
        # Tight default tolerance on the wavefunctions: the WFK is reused by the DFPT steps.
        if scf_tol is None:
            scf_tol = {'tolwfr': 1e-22}
        scf_inp = scf_for_phonons(structure=structure, pseudos=pseudos, kppa=kppa, ecut=ecut, pawecutdg=pawecutdg,
                                  nband=nband, accuracy=accuracy, spin_mode=spin_mode, smearing=smearing,
                                  charge=charge, scf_algorithm=scf_algorithm, shift_mode=shift_mode)
        # Set the additional variables coming from the user to the scf_inp before passing it to the factory.
        # Here is mandatory since often it would be needed to set manually the ixc, if the proper pseudopotential
        # are not available and the generation of the dte inputs will fail if an unsupported ixc is used.
        scf_inp.set_vars(extra_abivars)
        scf_inp.update(scf_tol)
        for d in decorators:
            d(scf_inp)
        inp = dte_from_gsinput(scf_inp, use_phonons=use_phonons, dde_tol=dde_tol, ddk_tol=ddk_tol, ph_tol=ph_tol,
                               skip_dte_permutations=skip_dte_permutations, manager=manager)
        # set the additional variables coming from the user
        # NOTE(review): this second set_vars on scf_inp is redundant (already applied
        # above); it may have been intended for `inp` -- confirm before changing.
        scf_inp.set_vars(extra_abivars)
        for d in decorators:
            d(inp)
        dte_wf = cls(scf_inp, ddk_inp=inp.filter_by_tags(atags.DDK), dde_inp=inp.filter_by_tags(atags.DDE),
                     dte_inp=inp.filter_by_tags(atags.DTE), ph_inp=inp.filter_by_tags(atags.PH_Q_PERT), autoparal=autoparal,
                     spec=spec, initialization_info=initialization_info)
        return dte_wf
@classmethod
def from_gs_input(cls, gs_input, structure=None, scf_tol=None, ph_tol=None, ddk_tol=None, dde_tol=None,
use_phonons=False, extra_abivars=None, decorators=None, autoparal=False, spec=None,
initialization_info=None, skip_dte_permutations=False, manager=None):
"""
Creates an instance of DteFWWorkflow using a custom AbinitInput for a ground state calculation and the
dte_from_gsinput factory function. Tolerances for the scf will be set accordingly to scf_tol (with
default 1e-22) and keys relative to relaxation and parallelization will be removed from gs_input.
See the description of the dte_from_gsinput factory for the definition of the arguments.
The manager can be a TaskManager or a FWTaskManager.
"""
if extra_abivars is None:
extra_abivars = {}
if decorators is None:
decorators = []
if spec is None:
spec = {}
if initialization_info is None:
initialization_info = {}
if isinstance(manager, FWTaskManager):
manager = manager.task_manager
if manager is None:
raise ValueError("A TaskManager is required in the FWTaskManager.")
initialization_info['use_phonos'] = use_phonons
scf_inp = gs_input.deepcopy()
if structure:
scf_inp.set_structure(structure)
if scf_tol:
scf_inp.update(scf_tol)
else:
scf_inp['tolwfr'] = 1.e-22
scf_inp['chksymbreak'] = 1
if not scf_inp.get('nbdbuf', 0):
scf_inp['nbdbuf'] = 4
scf_inp['nband'] = scf_inp['nband'] + 4
abi_vars = get_abinit_variables()
# remove relaxation variables in case gs_input is a relaxation
for v in abi_vars.vars_with_varset('rlx'):
scf_inp.pop(v.name, None)
# remove parallelization variables in case gs_input is coming from a previous run with parallelization
for v in abi_vars.vars_with_varset('paral'):
scf_inp.pop(v.name, None)
# Set the additional variables coming from the user to the scf_inp before passing it to the factory.
# Here is mandatory since often it would be needed to set manually the ixc, if the proper pseudopotential
# are not available and the generation of the dte inputs will fail if an unsupported ixc is used.
scf_inp.set_vars(extra_abivars)
for d in decorators:
d(scf_inp)
inp = dte_from_gsinput(scf_inp, use_phonons=use_phonons, dde_tol=dde_tol, ddk_tol=ddk_tol, ph_tol=ph_tol,
skip_dte_permutations=skip_dte_permutations, manager=manager)
# set the additional variables coming from the user
scf_inp.set_vars(extra_abivars)
for d in decorators:
d(inp)
dte_wf = cls(scf_inp, ddk_inp=inp.filter_by_tags(atags.DDK), dde_inp=inp.filter_by_tags(atags.DDE),
dte_inp=inp.filter_by_tags(atags.DTE), ph_inp=inp.filter_by_tags(atags.PH_Q_PERT), autoparal=autoparal,
spec=spec, initialization_info=initialization_info)
return dte_wf
def add_anaddb_dte_fw(self, structure, dieflag=1, nlflag=1, ramansr=1, alphon=1, prtmbm=1):
"""
Appends a Firework with a task for the calculation of the third derivatives with anaddb.
Args:
structure: the input structure.
dieflag: see anaddb documentation.
nlflag: see anaddb documentation.
ramansr: see anaddb documentation.
alphon: see anaddb documentation.
prtmbm: see anaddb documentation.
Returns:
"""
anaddb_input = AnaddbInput(structure=structure)
anaddb_input.set_vars(dieflag=dieflag, nlflag=nlflag, ramansr=ramansr, alphon=alphon, prtmbm=prtmbm)
anaddb_task = AnaDdbAbinitTask(anaddb_input, deps={MergeDdbAbinitTask.task_type: "DDB"})
spec = dict(self.scf_fw.spec)
spec['wf_task_index'] = 'anaddb'
anaddb_fw = Firework(anaddb_task, spec=spec, name='anaddb')
self.append_fw(anaddb_fw, short_single_spec=True)
@classmethod
def get_mongoengine_results(cls, wf):
    """
    Generates the DteResult mongoengine document containing the results of the calculation.
    The workflow should have been generated from this class and requires an open connection to the
    fireworks database and access to the file system containing the calculations.

    Args:
        wf: the fireworks Workflow instance of the workflow.

    Returns:
        A DteResult document.
    """
    # Refuse workflows that were not generated by this class.
    assert wf.metadata['workflow_class'] == cls.workflow_class
    assert wf.metadata['workflow_module'] == cls.workflow_module
    # Highest restart index seen so far for each task type (0 means "not found yet").
    scf_index = 0
    ph_index = 0
    ddk_index = 0
    dde_index = 0
    dte_index = 0
    anaddb_task = None
    # Scan all fireworks and keep, for each task type, the FW with the highest
    # restart index (i.e. the last restart), skipping autoparal fireworks.
    for fw in wf.fws:
        task_index = fw.spec.get('wf_task_index', '')
        if task_index == 'anaddb':
            anaddb_launch = get_last_completed_launch(fw)
            anaddb_task = fw.tasks[-1]
            anaddb_task.set_workdir(workdir=anaddb_launch.launch_dir)
        elif task_index == 'mrgddb':
            mrgddb_launch = get_last_completed_launch(fw)
            mrgddb_task = fw.tasks[-1]
            mrgddb_task.set_workdir(workdir=mrgddb_launch.launch_dir)
        elif task_index.startswith('scf_') and not task_index.endswith('autoparal'):
            # wf_task_index has the form '<type>_<restart index>'.
            current_index = int(task_index.split('_')[-1])
            if current_index > scf_index:
                scf_index = current_index
                scf_fw = fw
        elif task_index.startswith('phonon_0') and not task_index.endswith('autoparal'):
            current_index = int(task_index.split('_')[-1])
            if current_index > ph_index:
                ph_index = current_index
                ph_fw = fw
        elif task_index.startswith('ddk_0') and not task_index.endswith('autoparal'):
            current_index = int(task_index.split('_')[-1])
            if current_index > ddk_index:
                ddk_index = current_index
                ddk_fw = fw
        elif task_index.startswith('dde_0') and not task_index.endswith('autoparal'):
            current_index = int(task_index.split('_')[-1])
            if current_index > dde_index:
                dde_index = current_index
                dde_fw = fw
        elif task_index.startswith('dte_0') and not task_index.endswith('autoparal'):
            current_index = int(task_index.split('_')[-1])
            if current_index > dte_index:
                dte_index = current_index
                dte_fw = fw
    # NOTE(review): scf_fw, ddk_fw, dde_fw, dte_fw and mrgddb_task are assumed to
    # always be found in a workflow generated by this class; a NameError below
    # would indicate an incomplete/foreign workflow.
    scf_launch = get_last_completed_launch(scf_fw)
    # The task history contains the finalized input and the initialization info.
    with open(os.path.join(scf_launch.launch_dir, 'history.json'), "rt") as fh:
        scf_history = json.load(fh, cls=MontyDecoder)
    scf_task = scf_fw.tasks[-1]
    scf_task.set_workdir(workdir=scf_launch.launch_dir)
    document = DteResult()
    # Input data: use the final input actually run (after possible restarts).
    gs_input = scf_history.get_events_by_types(TaskEvent.FINALIZED)[0].details['final_input']
    document.abinit_input.gs_input = gs_input.as_dict()
    document.abinit_input.set_abinit_basic_from_abinit_input(gs_input)
    structure = gs_input.structure
    document.abinit_output.structure = structure.as_dict()
    document.set_material_data_from_structure(structure)
    initialization_info = scf_history.get_events_by_types(TaskEvent.INITIALIZED)[0].details.get('initialization_info', {})
    document.mp_id = initialization_info.get('mp_id', None)
    document.custom = initialization_info.get("custom", None)
    document.relax_db = initialization_info['relax_db'].as_dict() if 'relax_db' in initialization_info else None
    document.relax_id = initialization_info.get('relax_id', None)
    document.abinit_input.kppa = initialization_info.get('kppa', None)
    # True if set in the initialization_info. Otherwise True if phonon inputs are present.
    document.abinit_input.with_phonons = initialization_info.get('use_phonons', ph_index > 0)
    document.abinit_input.pseudopotentials.set_pseudos_from_files_file(scf_task.files_file.path,
                                                                       len(structure.composition.elements))
    document.created_on = datetime.datetime.now()
    document.modified_on = datetime.datetime.now()
    document.set_dir_names_from_fws_wf(wf)
    # read in binary for py3k compatibility with mongoengine
    with open(mrgddb_task.merged_ddb_path, "rb") as f:
        document.abinit_output.ddb.put(f)
    # Store the inputs of the perturbation tasks; phonons only if actually present.
    if ph_index > 0:
        ph_task = ph_fw.tasks[-1]
        document.abinit_input.phonon_input = ph_task.abiinput.as_dict()
    ddk_task = ddk_fw.tasks[-1]
    document.abinit_input.ddk_input = ddk_task.abiinput.as_dict()
    dde_task = dde_fw.tasks[-1]
    document.abinit_input.dde_input = dde_task.abiinput.as_dict()
    dte_task = dte_fw.tasks[-1]
    document.abinit_input.dte_input = dte_task.abiinput.as_dict()
    # Optional anaddb post-processing results (dielectric/non-linear tensors).
    if anaddb_task is not None:
        # read in binary for py3k compatibility with mongoengine
        with open(anaddb_task.anaddb_nc_path, "rb") as f:
            document.abinit_output.anaddb_nc.put(f)
        anc = AnaddbNcFile(anaddb_task.anaddb_nc_path)
        # the result is None if missing from the anaddb.nc
        epsinf = anc.epsinf
        if epsinf is not None:
            document.abinit_output.epsinf = epsinf.tolist()
        eps0 = anc.eps0
        if eps0 is not None:
            document.abinit_output.eps0 = eps0.tolist()
        dchide = anc.dchide
        if dchide is not None:
            document.abinit_output.dchide = dchide.tolist()
        dchidt = anc.dchidt
        if dchidt is not None:
            # dchidt is a nested sequence of arrays: convert each element to a plain list.
            dchidt_list = []
            for i in dchidt:
                dd = []
                for j in i:
                    dd.append(j.tolist())
                dchidt_list.append(dd)
            document.abinit_output.dchidt = dchidt_list
    document.fw_id = scf_fw.fw_id
    document.time_report = get_time_report_for_wf(wf).as_dict()
    with open(scf_task.gsr_path, "rb") as f:
        document.abinit_output.gs_gsr.put(f)
    # read in binary for py3k compatibility with mongoengine
    with open(scf_task.output_file.path, "rb") as f:
        document.abinit_output.gs_outfile.put(f)
    return document
class PiezoElasticFWWorkflow(AbstractFWWorkflow):
    """
    Generator of a fireworks workflow for the calculation of elastic (and piezoelectric)
    properties with DFPT: SCF -> DDK -> strain response function, followed by anaddb.
    """

    workflow_class = 'PiezoElasticFWWorkflow'
    workflow_module = 'abiflows.fireworks.workflows.abinit_workflows'

    def __init__(self, scf_inp, ddk_inp, rf_inp, autoparal=False, spec=None, initialization_info=None):
        """
        Args:
            scf_inp: an AbinitInput for the SCF calculation.
            ddk_inp: an AbinitInput for the DDK perturbation.
            rf_inp: an AbinitInput for the strain response-function perturbations.
            autoparal: if True autoparal will be used at runtime to optimize the number of processes.
            spec: a dict with additional spec for the Fireworks.
            initialization_info: a dict defining additional information about the initialization of the workflow.
        """
        if spec is None:
            spec = {}
        if initialization_info is None:
            initialization_info = {}

        rf = self.get_reduced_formula(scf_inp)

        scf_task = ScfFWTask(scf_inp, is_autoparal=autoparal)
        spec = dict(spec)
        spec['initialization_info'] = initialization_info
        if autoparal:
            spec = self.set_short_single_core_to_spec(spec)
        self.scf_fw = Firework(scf_task, spec=spec, name=rf+"_"+scf_task.task_type)

        # DDK depends on the WFK produced by the SCF run.
        ddk_task = DdkTask(ddk_inp, is_autoparal=autoparal, deps={scf_task.task_type: 'WFK'})
        # Firework names are truncated to 8 characters.
        ddk_fw_name = rf+ddk_task.task_type
        ddk_fw_name = ddk_fw_name[:8]
        self.ddk_fw = Firework(ddk_task, spec=spec, name=ddk_fw_name)

        # Strain perturbation depends on both the SCF WFK and the DDK output.
        rf_task = StrainPertTask(rf_inp, is_autoparal=autoparal, deps={scf_task.task_type: 'WFK', ddk_task.task_type: 'DDK'})
        rf_fw_name = rf+rf_task.task_type
        rf_fw_name = rf_fw_name[:8]
        self.rf_fw = Firework(rf_task, spec=spec, name=rf_fw_name)

        self.wf = Workflow(fireworks=[self.scf_fw, self.ddk_fw, self.rf_fw],
                           links_dict={self.scf_fw: self.ddk_fw, self.ddk_fw: self.rf_fw},
                           metadata={'workflow_class': self.workflow_class,
                                     'workflow_module': self.workflow_module})

        self.add_anaddb_task(scf_inp.structure)

    def add_anaddb_task(self, structure):
        """Appends an anaddb Firework extracting the elastic/piezoelectric tensors to the workflow."""
        spec = self.set_short_single_core_to_spec()
        anaddb_task = AnaDdbAbinitTask(AnaddbInput.piezo_elastic(structure))
        anaddb_fw = Firework([anaddb_task],
                             spec=spec,
                             name='anaddb')
        append_fw_to_wf(anaddb_fw, self.wf)

    def add_mrgddb_task(self, structure):
        """Appends a mrgddb Firework merging the DDBs of the scf and strain_pert tasks."""
        spec = self.set_short_single_core_to_spec()
        spec['ddb_files_task_types'] = ['scf', 'strain_pert']
        mrgddb_task = MergeDdbAbinitTask()
        mrgddb_fw = Firework([mrgddb_task], spec=spec, name='mrgddb')
        append_fw_to_wf(mrgddb_fw, self.wf)

    @classmethod
    def get_elastic_tensor_and_history(cls, wf):
        """
        Extracts the elastic tensor and the task history from the final anaddb firework.

        Args:
            wf: the fireworks Workflow instance of the workflow.

        Returns:
            A dict with keys 'elastic_properties' and 'history'.

        Raises:
            RuntimeError: if no anaddb firework is found in the workflow.
        """
        assert wf.metadata['workflow_class'] == cls.workflow_class
        assert wf.metadata['workflow_module'] == cls.workflow_module

        final_fw_id = None
        for fw_id, fw in wf.id_fw.items():
            if fw.name == 'anaddb':
                final_fw_id = fw_id
        if final_fw_id is None:
            raise RuntimeError('Final anaddb task not found ...')
        myfw = wf.id_fw[final_fw_id]
        #TODO add a check on the state of the launches
        last_launch = (myfw.archived_launches + myfw.launches)[-1]
        #TODO add a cycle to find the instance of AbiFireTask?
        myfw.tasks[-1].set_workdir(workdir=last_launch.launch_dir)
        elastic_tensor = myfw.tasks[-1].get_elastic_tensor()
        with open(os.path.join(last_launch.launch_dir, 'history.json'), "rt") as fh:
            history = json.load(fh, cls=MontyDecoder)

        return {'elastic_properties': elastic_tensor.extended_dict(), 'history': history}

    @classmethod
    def get_all_elastic_tensors(cls, wf):
        """
        Equivalent to get_elastic_tensor_and_history. Kept as a separate entry point for
        backward compatibility; the previous implementation was a verbatim duplicate, so
        it now simply delegates.
        """
        return cls.get_elastic_tensor_and_history(wf)

    @classmethod
    def from_factory(cls):
        raise NotImplementedError('from factory method not yet implemented for piezoelasticworkflow')
# TODO old version based on GeneratePiezoElasticFlowFWAbinitTask with SRC. Should be removed after adapting.
# class PiezoElasticFWWorkflowSRCOld(AbstractFWWorkflow):
# workflow_class = 'PiezoElasticFWWorkflowSRC'
# workflow_module = 'abiflows.fireworks.workflows.abinit_workflows'
#
# STANDARD_HANDLERS = {'_all': [MemoryHandler(), WalltimeHandler()]}
# STANDARD_VALIDATORS = {'_all': []}
#
# def __init__(self, scf_inp_ibz, ddk_inp, rf_inp, spec=None, initialization_info=None,
# handlers=None, validators=None, ddk_split=False, rf_split=False):
# if spec is None:
# spec = {}
# if initialization_info is None:
# initialization_info = {}
# if handlers is None:
# handlers = self.STANDARD_HANDLERS
# if validators is None:
# validators = self.STANDARD_VALIDATORS
#
# fws = []
# links_dict = {}
#
# if 'queue_adapter_update' in initialization_info:
# queue_adapter_update = initialization_info['queue_adapter_update']
# else:
# queue_adapter_update = None
#
# # If handlers are passed as a list, they should be applied on all task_types
# if isinstance(handlers, (list, tuple)):
# handlers = {'_all': handlers}
# # If validators are passed as a list, they should be applied on all task_types
# if isinstance(validators, (list, tuple)):
# validators = {'_all': validators}
#
# #1. First SCF run in the irreducible Brillouin Zone
# SRC_scf_ibz_fws = createSRCFireworksOld(task_class=ScfFWTask, task_input=scf_inp_ibz, SRC_spec=spec,
# initialization_info=initialization_info,
# wf_task_index_prefix='scfibz', task_type='scfibz',
# handlers=handlers['_all'], validators=validators['_all'],
# queue_adapter_update=queue_adapter_update)
# fws.extend(SRC_scf_ibz_fws['fws'])
# links_dict_update(links_dict=links_dict, links_update=SRC_scf_ibz_fws['links_dict'])
#
# #2. Second SCF run in the full Brillouin Zone with kptopt 3 in order to allow merging 1st derivative DDB's with
# #2nd derivative DDB's from the DFPT RF run
# scf_inp_fbz = scf_inp_ibz.deepcopy()
# scf_inp_fbz['kptopt'] = 2
# SRC_scf_fbz_fws = createSRCFireworksOld(task_class=ScfFWTask, task_input=scf_inp_fbz, SRC_spec=spec,
# initialization_info=initialization_info,
# wf_task_index_prefix='scffbz', task_type='scffbz',
# handlers=handlers['_all'], validators=validators['_all'],
# deps={SRC_scf_ibz_fws['run_fw'].tasks[0].task_type: ['DEN', 'WFK']},
# queue_adapter_update=queue_adapter_update)
# fws.extend(SRC_scf_fbz_fws['fws'])
# links_dict_update(links_dict=links_dict, links_update=SRC_scf_fbz_fws['links_dict'])
# #Link with previous SCF
# links_dict_update(links_dict=links_dict,
# links_update={SRC_scf_ibz_fws['check_fw'].fw_id: SRC_scf_fbz_fws['setup_fw'].fw_id})
#
# #3. DDK calculation
# if ddk_split:
# raise NotImplementedError('Split Ddk to be implemented in PiezoElasticWorkflow ...')
# else:
# SRC_ddk_fws = createSRCFireworksOld(task_class=DdkTask, task_input=ddk_inp, SRC_spec=spec,
# initialization_info=initialization_info,
# wf_task_index_prefix='ddk',
# handlers=handlers['_all'], validators=validators['_all'],
# deps={SRC_scf_ibz_fws['run_fw'].tasks[0].task_type: 'WFK'},
# queue_adapter_update=queue_adapter_update)
# fws.extend(SRC_ddk_fws['fws'])
# links_dict_update(links_dict=links_dict, links_update=SRC_ddk_fws['links_dict'])
# #Link with the IBZ SCF run
# links_dict_update(links_dict=links_dict,
# links_update={SRC_scf_ibz_fws['check_fw'].fw_id: SRC_ddk_fws['setup_fw'].fw_id})
#
# #4. Response-Function calculation(s) of the elastic constants
# if rf_split:
# rf_ddb_source_task_type = 'mrgddb-strains'
# scf_task_type = SRC_scf_ibz_fws['run_fw'].tasks[0].task_type
# ddk_task_type = SRC_ddk_fws['run_fw'].tasks[0].task_type
# gen_task = GeneratePiezoElasticFlowFWAbinitTask(previous_scf_task_type=scf_task_type,
# previous_ddk_task_type=ddk_task_type,
# handlers=handlers, validators=validators,
# mrgddb_task_type=rf_ddb_source_task_type)
# genrfstrains_spec = set_short_single_core_to_spec(spec)
# gen_fw = Firework([gen_task], spec=genrfstrains_spec, name='gen-piezo-elast')
# fws.append(gen_fw)
# links_dict_update(links_dict=links_dict,
# links_update={SRC_scf_ibz_fws['check_fw'].fw_id: gen_fw.fw_id,
# SRC_ddk_fws['check_fw'].fw_id: gen_fw.fw_id})
# rf_ddb_src_fw = gen_fw
# else:
# SRC_rf_fws = createSRCFireworksOld(task_class=StrainPertTask, task_input=rf_inp, SRC_spec=spec,
# initialization_info=initialization_info,
# wf_task_index_prefix='rf',
# handlers=handlers['_all'], validators=validators['_all'],
# deps={SRC_scf_ibz_fws['run_fw'].tasks[0].task_type: 'WFK',
# SRC_ddk_fws['run_fw'].tasks[0].task_type: 'DDK'},
# queue_adapter_update=queue_adapter_update)
# fws.extend(SRC_rf_fws['fws'])
# links_dict_update(links_dict=links_dict, links_update=SRC_rf_fws['links_dict'])
# #Link with the IBZ SCF run and the DDK run
# links_dict_update(links_dict=links_dict,
# links_update={SRC_scf_ibz_fws['check_fw'].fw_id: SRC_rf_fws['setup_fw'].fw_id,
# SRC_ddk_fws['check_fw'].fw_id: SRC_rf_fws['setup_fw'].fw_id})
# rf_ddb_source_task_type = SRC_rf_fws['run_fw'].tasks[0].task_type
# rf_ddb_src_fw = SRC_rf_fws['check_fw']
#
# #5. Merge DDB files from response function (second derivatives for the elastic constants) and from the
# # SCF run on the full Brillouin zone (first derivatives for the stress tensor, to be used for the
# # stress-corrected elastic constants)
# mrgddb_task = MergeDdbAbinitTask(ddb_source_task_types=[rf_ddb_source_task_type,
# SRC_scf_fbz_fws['run_fw'].tasks[0].task_type],
# delete_source_ddbs=False, num_ddbs=2)
# mrgddb_spec = set_short_single_core_to_spec(spec)
# mrgddb_fw = Firework(tasks=[mrgddb_task], spec=mrgddb_spec, name='mrgddb')
# fws.append(mrgddb_fw)
# links_dict_update(links_dict=links_dict,
# links_update={rf_ddb_src_fw.fw_id: mrgddb_fw.fw_id,
# SRC_scf_fbz_fws['check_fw'].fw_id: mrgddb_fw.fw_id})
#
# #6. Anaddb task to get elastic constants based on the RF run (no stress correction)
# anaddb_tag = 'anaddb-piezo-elast'
# spec = set_short_single_core_to_spec(spec)
# anaddb_task = AnaDdbAbinitTask(AnaddbInput.piezo_elastic(structure=scf_inp_ibz.structure,
# stress_correction=False),
# deps={rf_ddb_source_task_type: ['DDB']},
# task_type=anaddb_tag)
# anaddb_fw = Firework([anaddb_task],
# spec=spec,
# name=anaddb_tag)
# fws.append(anaddb_fw)
# links_dict_update(links_dict=links_dict,
# links_update={rf_ddb_src_fw.fw_id: anaddb_fw.fw_id})
#
# #7. Anaddb task to get elastic constants based on the RF run and the SCF run (with stress correction)
# anaddb_tag = 'anaddb-piezo-elast-stress-corrected'
# spec = set_short_single_core_to_spec(spec)
# anaddb_stress_task = AnaDdbAbinitTask(AnaddbInput.piezo_elastic(structure=scf_inp_ibz.structure,
# stress_correction=True),
# deps={mrgddb_task.task_type: ['DDB']},
# task_type=anaddb_tag)
# anaddb_stress_fw = Firework([anaddb_stress_task],
# spec=spec,
# name=anaddb_tag)
# fws.append(anaddb_stress_fw)
# links_dict_update(links_dict=links_dict,
# links_update={mrgddb_fw.fw_id: anaddb_stress_fw.fw_id})
#
# self.wf = Workflow(fireworks=fws,
# links_dict=links_dict,
# metadata={'workflow_class': self.workflow_class,
# 'workflow_module': self.workflow_module})
#
# @classmethod
# def get_all_elastic_tensors(cls, wf):
# assert wf.metadata['workflow_class'] == cls.workflow_class
# assert wf.metadata['workflow_module'] == cls.workflow_module
#
# anaddb_no_stress_id = None
# anaddb_stress_id = None
# for fw_id, fw in wf.id_fw.items():
# if fw.name == 'anaddb-piezo-elast':
# anaddb_no_stress_id = fw_id
# if fw.name == 'anaddb-piezo-elast-stress-corrected':
# anaddb_stress_id = fw_id
# if anaddb_no_stress_id is None or anaddb_stress_id is None:
# raise RuntimeError('Final anaddb tasks not found ...')
# myfw_nostress = wf.id_fw[anaddb_no_stress_id]
# last_launch_nostress = (myfw_nostress.archived_launches + myfw_nostress.launches)[-1]
# myfw_nostress.tasks[-1].set_workdir(workdir=last_launch_nostress.launch_dir)
#
# myfw_stress = wf.id_fw[anaddb_stress_id]
# last_launch_stress = (myfw_stress.archived_launches + myfw_stress.launches)[-1]
# myfw_stress.tasks[-1].set_workdir(workdir=last_launch_stress.launch_dir)
#
# ec_nostress_clamped = myfw_nostress.tasks[-1].get_elastic_tensor(tensor_type='clamped_ion')
# ec_nostress_relaxed = myfw_nostress.tasks[-1].get_elastic_tensor(tensor_type='relaxed_ion')
# ec_stress_relaxed = myfw_stress.tasks[-1].get_elastic_tensor(tensor_type='relaxed_ion_stress_corrected')
#
# ec_dicts = {'clamped_ion': ec_nostress_clamped.extended_dict(),
# 'relaxed_ion': ec_nostress_relaxed.extended_dict(),
# 'relaxed_ion_stress_corrected': ec_stress_relaxed.extended_dict()}
#
# return {'elastic_properties': ec_dicts}
#
# @classmethod
# def from_factory(cls):
# raise NotImplementedError('from factory method not yet implemented for piezoelasticworkflow')
class PiezoElasticFWWorkflowSRC(AbstractFWWorkflow):
    """
    Generator of a fireworks workflow for the calculation of elastic (and piezoelectric)
    properties using the SRC (Setup-Run-Control) scheme:
    SCF (IBZ) -> nSCF (full BZ) -> DDK -> strain/phonon RF generation -> mrgddb -> anaddb.
    """

    workflow_class = 'PiezoElasticFWWorkflowSRC'
    workflow_module = 'abiflows.fireworks.workflows.abinit_workflows'

    def __init__(self, scf_inp_ibz, ddk_inp, rf_inp, spec=None, initialization_info=None,
                 ddk_split=False, rf_split=False, additional_controllers=None, additional_input_vars=None,
                 allow_parallel_perturbations=True, do_ddk=True, do_phonons=True):
        """
        Args:
            scf_inp_ibz: AbinitInput for the SCF run in the irreducible Brillouin zone.
            ddk_inp: AbinitInput for the DDK perturbation.
            rf_inp: AbinitInput for the strain response-function perturbations.
            spec: a dict with additional spec for the Fireworks.
            initialization_info: a dict defining additional information about the initialization of the workflow.
            ddk_split: whether the DDK calculation should be split (not implemented).
            rf_split: whether the RF calculations should be split per perturbation.
            additional_controllers: extra controllers added to the ControlProcedure of each step.
                Defaults to walltime and memory controllers.
            additional_input_vars: abinit variables added to all the inputs.
            allow_parallel_perturbations: if True the RF perturbations may run in parallel.
            do_ddk: if True the DDK step is performed, otherwise irdddk is set to 0 in the RF input.
            do_phonons: if True phonon perturbations are generated along with the strain ones.
        """
        if spec is None:
            spec = {}
        if initialization_info is None:
            initialization_info = {}

        fws = []
        links_dict = {}

        # Default controllers check walltime and memory of each run.
        # (The previous no-op "else: additional_controllers = additional_controllers"
        # branch has been removed.)
        if additional_controllers is None:
            additional_controllers = [WalltimeController(), MemoryController()]

        if additional_input_vars is None:
            additional_input_vars = {}

        # Dependencies for the ngfft grid (for some reason, the fft grid can change between SCF and nSCF runs
        # even when all other parameters are the same ...)
        ngfft_deps = ['#outnc.ngfft']
        if scf_inp_ibz.ispaw:
            ngfft_deps.append('#outnc.ngfftdg')

        scf_inp_ibz.set_vars(additional_input_vars)
        if do_ddk:
            ddk_inp.set_vars(additional_input_vars)
        rf_inp.set_vars(additional_input_vars)
        if not do_ddk:
            # No DDK will be produced: make sure the RF input does not try to read it.
            rf_inp.set_vars(irdddk=0)

        #1. SCF run in the irreducible Brillouin Zone
        scf_helper = ScfTaskHelper()
        scf_controllers = [AbinitController.from_helper(scf_helper)]
        scf_controllers.extend(additional_controllers)
        scf_control_procedure = ControlProcedure(controllers=scf_controllers)
        setup_scf_task = AbinitSetupTask(abiinput=scf_inp_ibz, task_helper=scf_helper, pass_input=True)
        run_scf_task = AbinitRunTask(control_procedure=scf_control_procedure, task_helper=scf_helper,
                                     task_type='scfibz')
        control_scf_task = AbinitControlTask(control_procedure=scf_control_procedure, task_helper=scf_helper)

        scf_fws = createSRCFireworks(setup_task=setup_scf_task, run_task=run_scf_task, control_task=control_scf_task,
                                     spec=spec, initialization_info=initialization_info)

        fws.extend(scf_fws['fws'])
        links_dict_update(links_dict=links_dict, links_update=scf_fws['links_dict'])

        #2. nSCF run in the full Brillouin Zone with kptopt 3
        # (the previous comment said "kptopt 2" but the input is actually set to kptopt 3 below)
        nscf_helper = NscfTaskHelper()
        nscf_controllers = [AbinitController.from_helper(nscf_helper)]
        nscf_controllers.extend(additional_controllers)
        nscf_control_procedure = ControlProcedure(controllers=nscf_controllers)
        nscf_inp_fbz = scf_inp_ibz.deepcopy()
        nscf_inp_fbz.set_vars({'tolwfr': 1.0e-20,
                               'kptopt': 3,
                               'iscf': -2,
                               'istwfk': '*1'})
        # Adding buffer to help convergence ...
        if 'nbdbuf' not in nscf_inp_fbz:
            nbdbuf = max(int(0.1*nscf_inp_fbz['nband']), 4)
            nscf_inp_fbz.set_vars(nband=nscf_inp_fbz['nband']+nbdbuf, nbdbuf=nbdbuf)
        nscffbz_deps = {run_scf_task.task_type: ['DEN']}
        nscffbz_deps[run_scf_task.task_type].extend(ngfft_deps)
        nscf_inp_fbz['prtvol'] = 10
        setup_nscffbz_task = AbinitSetupTask(abiinput=nscf_inp_fbz, task_helper=nscf_helper,
                                             deps=nscffbz_deps, pass_input=True)
        run_nscffbz_task = AbinitRunTask(control_procedure=nscf_control_procedure, task_helper=nscf_helper,
                                         task_type='nscffbz')
        control_nscffbz_task = AbinitControlTask(control_procedure=nscf_control_procedure, task_helper=nscf_helper)

        nscffbz_fws = createSRCFireworks(setup_task=setup_nscffbz_task, run_task=run_nscffbz_task,
                                         control_task=control_nscffbz_task,
                                         spec=spec, initialization_info=initialization_info)

        fws.extend(nscffbz_fws['fws'])
        links_dict_update(links_dict=links_dict, links_update=nscffbz_fws['links_dict'])
        #Link with the IBZ SCF run
        links_dict_update(links_dict=links_dict,
                          links_update={scf_fws['control_fw'].fw_id: nscffbz_fws['setup_fw'].fw_id})

        #3. DDK calculation
        if do_ddk:
            if ddk_split:
                raise NotImplementedError('Split Ddk to be implemented in PiezoElasticWorkflow ...')
            else:
                ddk_helper = DdkTaskHelper()
                ddk_controllers = [AbinitController.from_helper(ddk_helper)]
                ddk_controllers.extend(additional_controllers)
                ddk_control_procedure = ControlProcedure(controllers=ddk_controllers)
                ddk_inp.set_vars({'kptopt': 3})
                ddk_deps = {run_nscffbz_task.task_type: ['WFK']}
                ddk_deps[run_nscffbz_task.task_type].extend(ngfft_deps)
                setup_ddk_task = AbinitSetupTask(abiinput=ddk_inp, task_helper=ddk_helper,
                                                 deps=ddk_deps)
                run_ddk_task = AbinitRunTask(control_procedure=ddk_control_procedure, task_helper=ddk_helper,
                                             task_type='ddk')
                control_ddk_task = AbinitControlTask(control_procedure=ddk_control_procedure, task_helper=ddk_helper)

                ddk_fws = createSRCFireworks(setup_task=setup_ddk_task, run_task=run_ddk_task,
                                             control_task=control_ddk_task,
                                             spec=spec, initialization_info=initialization_info)

                fws.extend(ddk_fws['fws'])
                links_dict_update(links_dict=links_dict, links_update=ddk_fws['links_dict'])
                #Link with the FBZ nSCF run
                links_dict_update(links_dict=links_dict,
                                  links_update={nscffbz_fws['control_fw'].fw_id: ddk_fws['setup_fw'].fw_id})

        #4. Response-Function calculation(s) of the elastic constants
        rf_ddb_source_task_type = 'mrgddb-strains'
        rf_tolvar, value = rf_inp.scf_tolvar
        rf_tol = {rf_tolvar: value}
        rf_deps = {run_nscffbz_task.task_type: ['WFK']}
        if do_ddk:
            rf_deps[run_ddk_task.task_type] = ['DDK']
            previous_ddk_task_type = run_ddk_task.task_type
        else:
            previous_ddk_task_type = None
        rf_deps[run_nscffbz_task.task_type].extend(ngfft_deps)

        # The RF fireworks are generated at runtime by this task (one per perturbation),
        # which also adds the mrgddb step merging the strain DDBs.
        gen_task = GeneratePiezoElasticFlowFWSRCAbinitTask(previous_scf_task_type=run_nscffbz_task.task_type,
                                                           previous_ddk_task_type=previous_ddk_task_type,
                                                           mrgddb_task_type=rf_ddb_source_task_type,
                                                           additional_controllers=additional_controllers,
                                                           rf_tol=rf_tol, additional_input_vars=additional_input_vars,
                                                           rf_deps=rf_deps,
                                                           allow_parallel_perturbations=allow_parallel_perturbations,
                                                           do_phonons=do_phonons)
        genrfstrains_spec = set_short_single_core_to_spec(spec)
        gen_fw = Firework([gen_task], spec=genrfstrains_spec, name='gen-piezo-elast')
        fws.append(gen_fw)
        linkupdate = {nscffbz_fws['control_fw'].fw_id: gen_fw.fw_id}
        if do_ddk:
            linkupdate[ddk_fws['control_fw'].fw_id] = gen_fw.fw_id
        links_dict_update(links_dict=links_dict,
                          links_update=linkupdate)

        rf_ddb_src_fw = gen_fw

        #5. Merge DDB files from response function (second derivatives for the elastic constants) and from the
        #   SCF run on the full Brillouin zone (first derivatives for the stress tensor, to be used for the
        #   stress-corrected elastic constants)
        mrgddb_task = MergeDdbAbinitTask(ddb_source_task_types=[rf_ddb_source_task_type,
                                                                run_scf_task.task_type],
                                         delete_source_ddbs=False, num_ddbs=2)
        mrgddb_spec = set_short_single_core_to_spec(spec)
        if scf_inp_ibz.ispaw:
            mrgddb_spec['PAW_datasets_description_correction'] = 'yes'
        mrgddb_fw = Firework(tasks=[mrgddb_task], spec=mrgddb_spec, name='mrgddb')
        fws.append(mrgddb_fw)
        links_dict_update(links_dict=links_dict,
                          links_update={rf_ddb_src_fw.fw_id: mrgddb_fw.fw_id,
                                        scf_fws['control_fw'].fw_id: mrgddb_fw.fw_id})

        #6. Anaddb task to get elastic constants based on the RF run (no stress correction)
        anaddb_tag = 'anaddb-piezo-elast'
        spec = set_short_single_core_to_spec(spec)
        anaddb_task = AnaDdbAbinitTask(AnaddbInput.piezo_elastic(structure=scf_inp_ibz.structure,
                                                                 stress_correction=False),
                                       deps={rf_ddb_source_task_type: ['DDB']},
                                       task_type=anaddb_tag)
        anaddb_fw = Firework([anaddb_task],
                             spec=spec,
                             name=anaddb_tag)
        fws.append(anaddb_fw)
        links_dict_update(links_dict=links_dict,
                          links_update={rf_ddb_src_fw.fw_id: anaddb_fw.fw_id})

        #7. Anaddb task to get elastic constants based on the RF run and the SCF run (with stress correction)
        anaddb_tag = 'anaddb-piezo-elast-stress-corrected'
        spec = set_short_single_core_to_spec(spec)
        anaddb_stress_task = AnaDdbAbinitTask(AnaddbInput.piezo_elastic(structure=scf_inp_ibz.structure,
                                                                        stress_correction=True),
                                              deps={mrgddb_task.task_type: ['DDB']},
                                              task_type=anaddb_tag)
        anaddb_stress_fw = Firework([anaddb_stress_task],
                                    spec=spec,
                                    name=anaddb_tag)
        fws.append(anaddb_stress_fw)
        links_dict_update(links_dict=links_dict,
                          links_update={mrgddb_fw.fw_id: anaddb_stress_fw.fw_id})

        self.wf = Workflow(fireworks=fws,
                           links_dict=links_dict,
                           metadata={'workflow_class': self.workflow_class,
                                     'workflow_module': self.workflow_module})

    def add_anaddb_task(self, structure):
        """Appends an anaddb Firework extracting the elastic/piezoelectric tensors to the workflow."""
        spec = self.set_short_single_core_to_spec()
        anaddb_task = AnaDdbAbinitTask(AnaddbInput.piezo_elastic(structure))
        anaddb_fw = Firework([anaddb_task],
                             spec=spec,
                             name='anaddb')
        append_fw_to_wf(anaddb_fw, self.wf)

    @classmethod
    def get_all_elastic_tensors(cls, wf):
        """
        Extracts the clamped-ion, relaxed-ion and stress-corrected relaxed-ion elastic
        tensors from the two final anaddb fireworks.

        Args:
            wf: the fireworks Workflow instance of the workflow.

        Returns:
            A dict with key 'elastic_properties' containing the three tensors.

        Raises:
            RuntimeError: if the anaddb fireworks are not found in the workflow.
        """
        assert wf.metadata['workflow_class'] == cls.workflow_class
        assert wf.metadata['workflow_module'] == cls.workflow_module

        anaddb_no_stress_id = None
        anaddb_stress_id = None
        for fw_id, fw in wf.id_fw.items():
            if fw.name == 'anaddb-piezo-elast':
                anaddb_no_stress_id = fw_id
            if fw.name == 'anaddb-piezo-elast-stress-corrected':
                anaddb_stress_id = fw_id
        if anaddb_no_stress_id is None or anaddb_stress_id is None:
            raise RuntimeError('Final anaddb tasks not found ...')
        myfw_nostress = wf.id_fw[anaddb_no_stress_id]
        last_launch_nostress = (myfw_nostress.archived_launches + myfw_nostress.launches)[-1]
        myfw_nostress.tasks[-1].set_workdir(workdir=last_launch_nostress.launch_dir)

        myfw_stress = wf.id_fw[anaddb_stress_id]
        last_launch_stress = (myfw_stress.archived_launches + myfw_stress.launches)[-1]
        myfw_stress.tasks[-1].set_workdir(workdir=last_launch_stress.launch_dir)

        ec_nostress_clamped = myfw_nostress.tasks[-1].get_elastic_tensor(tensor_type='clamped_ion')
        ec_nostress_relaxed = myfw_nostress.tasks[-1].get_elastic_tensor(tensor_type='relaxed_ion')
        ec_stress_relaxed = myfw_stress.tasks[-1].get_elastic_tensor(tensor_type='relaxed_ion_stress_corrected')

        ec_dicts = {'clamped_ion': ec_nostress_clamped.extended_dict(),
                    'relaxed_ion': ec_nostress_relaxed.extended_dict(),
                    'relaxed_ion_stress_corrected': ec_stress_relaxed.extended_dict()}

        return {'elastic_properties': ec_dicts}

    @classmethod
    def from_factory(cls):
        raise NotImplementedError('from factory method not yet implemented for piezoelasticworkflow')
class DfptFWWorkflow(AbstractFWWorkflow):
"""
Generator of a fireworks workflow for the calculation of various properties with DFPT.
Parallelization over all the perturbations.
N.B. Currently (version 8.8.3) anaddb does not support a DDB containing both 2nd order derivatives with qpoints
different from gamma AND 3rd order derivatives. The calculations could be run, but the global DDB will not
be directly usable as is in anaddb.
"""
workflow_class = 'DfptFWWorkflow'
workflow_module = 'abiflows.fireworks.workflows.abinit_workflows'
def __init__(self, scf_inp, ph_inp=None, nscf_inp=None, ddk_inp=None, dde_inp=None, strain_inp=None, dte_inp=None,
             autoparal=False, spec=None, initialization_info=None):
    """
    Args:
        scf_inp: an |AbinitInput| for the SCF calculation.
        ph_inp: a |MultiDataset| with the phonon inputs
        nscf_inp: a |MultiDataset| with the wfq nscf inputs
        ddk_inp: a |MultiDataset| with the ddk inputs
        dde_inp: a |MultiDataset| with the dde inputs
        strain_inp: a |MultiDataset| with the strain inputs
        dte_inp: a |MultiDataset| with the non-linear inputs
        autoparal: if True autoparal will be used at runtime to optimize the number of processes.
        spec: a dict with additional spec for the Firework.
        initialization_info: a dict defining additional information about the initialization of the workflow.

    Raises:
        ValueError: if dte_inp is given without dde_inp, or dde_inp without ddk_inp.
    """
    # Validate the perturbation chain: DTE requires DDE, DDE requires DDK.
    if dte_inp is not None and dde_inp is None:
        raise ValueError("non-linear calculations require at least the DDE")
    if dde_inp is not None and ddk_inp is None:
        raise ValueError("DDE calculations require the DDK")
    spec = spec or {}
    initialization_info = initialization_info or {}
    start_task_index = 1
    rf = self.get_reduced_formula(scf_inp)
    scf_task = ScfFWTask(scf_inp, is_autoparal=autoparal)
    spec = dict(spec)
    spec['initialization_info'] = initialization_info
    if autoparal:
        spec = self.set_short_single_core_to_spec(spec)
        start_task_index = 'autoparal'
    spec['wf_task_index'] = 'scf_' + str(start_task_index)
    self.scf_fw = Firework(scf_task, spec=spec, name=rf+"_"+scf_task.task_type)
    previous_task_type = scf_task.task_type
    # nSCF fireworks producing the WFQ files for non-gamma q-points.
    nscf_fws = []
    if nscf_inp is not None:
        nscf_fws, nscf_fw_deps = self.get_fws(nscf_inp, NscfWfqFWTask,
                                              {previous_task_type: "DEN"}, spec)
    # True if a gamma-point phonon perturbation is present.
    self.has_gamma = False
    ph_fws = []
    ph_fw_deps = {}
    if ph_inp:
        ph_inp.set_vars(prtwf=-1)
        # 1WF files from gamma are needed for non linear perturbations
        if dte_inp:
            for inp in ph_inp:
                if np.array_equal(inp["qpt"], [0, 0, 0]):
                    inp['prtwf'] = 1
        ph_fws, ph_fw_deps = self.get_fws(ph_inp, PhononTask, {previous_task_type: "WFK"}, spec, nscf_fws)
        if any(np.array_equal(ph_fw.tasks[0].abiinput["qpt"], [0, 0, 0]) for ph_fw in ph_fws):
            self.has_gamma = True
    ddk_fws = []
    if ddk_inp:
        ddk_fws, ddk_fw_deps = self.get_fws(ddk_inp, DdkTask, {previous_task_type: "WFK"}, spec)
    dde_fws = []
    if dde_inp:
        # The DDE 1WF files are only needed by the non-linear (DTE) step.
        if not dte_inp:
            dde_inp.set_vars(prtwf=-1)
        dde_fws, dde_fw_deps = self.get_fws(dde_inp, DdeTask,
                                            {previous_task_type: "WFK", DdkTask.task_type: "DDK"}, spec)
    strain_fws = []
    if strain_inp:
        strain_inp.set_vars(prtwf=-1)
        strain_file_deps = {previous_task_type: "WFK"}
        if ddk_inp:
            strain_file_deps[DdkTask.task_type] = "DDK"
        strain_fws, strain_fw_deps = self.get_fws(strain_inp, StrainPertTask,
                                                  strain_file_deps, spec)
    dte_fws = []
    if dte_inp:
        dte_deps = {ScfFWTask.task_type: ["WFK", "DEN"], DdeTask.task_type: ["1WF", "1DEN"]}
        if ph_inp:
            dte_deps[PhononTask.task_type] = ["1WF", "1DEN"]
        dte_fws, dte_fw_deps = self.get_fws(dte_inp, DteTask, dte_deps, spec)
    # Final firework merging all the partial DDB files.
    mrgddb_spec = dict(spec)
    mrgddb_spec['wf_task_index'] = 'mrgddb'
    self.set_short_single_core_to_spec(mrgddb_spec)
    mrgddb_spec['mpi_ncpus'] = 1
    # Set a higher priority to favour the end of the WF
    mrgddb_spec['_priority'] = 10
    # One DDB per perturbation firework plus one from the SCF run.
    num_ddbs_to_be_merged = len(ph_fws) + len(dde_fws) + len(strain_fws) + len(dte_fws) + 1
    mrgddb_fw = Firework(MergeDdbAbinitTask(num_ddbs=num_ddbs_to_be_merged, delete_source_ddbs=False), spec=mrgddb_spec,
                         name=scf_inp.structure.composition.reduced_formula+'_mergeddb')
    # Dependencies between fireworks, keyed by the parent Firework object.
    fws_deps = defaultdict(list)
    autoparal_fws = []
    if autoparal:
        # add an AutoparalTask for each type and relative dependencies
        ref_inp = None
        for inp in [ph_inp, ddk_inp, dde_inp, strain_inp]:
            if inp:
                ref_inp = inp[0]
                break
        dfpt_autoparal_fw = self.get_autoparal_fw(ref_inp, 'dfpt', {previous_task_type: "WFK"}, spec,
                                                  nscf_fws)[0]
        autoparal_fws.append(dfpt_autoparal_fw)
        fws_deps[dfpt_autoparal_fw] = ph_fws + ddk_fws + dde_fws + strain_fws
        fws_deps[self.scf_fw].append(dfpt_autoparal_fw)
        if dte_fws:
            # being a fake autoparal it doesn't need to follow the dde tasks. No other dependencies are enforced.
            dte_autoparal_fw = self.get_autoparal_fw(None, 'dte', None, spec,
                                                     formula=dte_inp[0].structure.composition.reduced_formula)[0]
            autoparal_fws.append(dte_autoparal_fw)
            fws_deps[dte_autoparal_fw] = dte_fws
        if nscf_fws:
            nscf_autoparal_fw = self.get_autoparal_fw(nscf_inp[0], 'nscf',
                                                      {previous_task_type: "DEN"}, spec)[0]
            fws_deps[nscf_autoparal_fw] = nscf_fws
            autoparal_fws.append(nscf_autoparal_fw)
            fws_deps[self.scf_fw].append(nscf_autoparal_fw)
    # DDE and strain tasks need the DDK output.
    if ddk_fws and (dde_fws or strain_fws):
        for ddk_fw in ddk_fws:
            fws_deps[ddk_fw] = dde_fws + strain_fws
    # DTE tasks need the 1WF/1DEN files from the DDE and gamma-point phonon runs.
    if dte_fws:
        for dde_fw in dde_fws:
            fws_deps[dde_fw] = list(dte_fws)
        if ph_fws:
            for ph_fw in ph_fws:
                if np.array_equal(ph_fw.tasks[0].abiinput["qpt"], [0, 0, 0]):
                    fws_deps[ph_fw] = list(dte_fws)
    # all the abinit related FWs should depend on the scf calculation for the WFK
    abinit_fws = nscf_fws + ddk_fws + dde_fws + ph_fws + strain_fws + dte_fws
    fws_deps[self.scf_fw].extend(abinit_fws)
    # Every firework producing a DDB must precede the merge step.
    ddb_fws = [self.scf_fw] + dde_fws + ph_fws + strain_fws + dte_fws
    #TODO pass all the tasks to the MergeDdbTask for logging or easier retrieve of the DDK?
    for ddb_fw in ddb_fws:
        if ddb_fw in fws_deps:
            fws_deps[ddb_fw].append(mrgddb_fw)
        else:
            fws_deps[ddb_fw] = mrgddb_fw
    total_list_fws = ddb_fws+ddk_fws+[mrgddb_fw] + nscf_fws + autoparal_fws
    fws_deps.update(ph_fw_deps)
    # Keep references to the fireworks for later inspection/extension of the workflow.
    self.ph_fws = ph_fws
    self.ddk_fws = ddk_fws
    self.dde_fws = dde_fws
    self.strain_fws = strain_fws
    self.dte_fws = dte_fws
    self.mrgddb_fw = mrgddb_fw
    self.wf = Workflow(total_list_fws, links_dict=fws_deps,
                       metadata={'workflow_class': self.workflow_class,
                                 'workflow_module': self.workflow_module})
def get_fws(self, multi_inp, task_class, deps, spec, nscf_fws=None):
"""
Prepares the fireworks for a specific type of calculation
Args:
multi_inp: |MultiDataset| with the inputs that should be run
task_class: class of the tasks that should be generated
deps: dict with the dependencies already set for this type of task
spec: spec for the new Fireworks that will be created
nscf_fws: list of NSCF fws for the calculation of WFQ files, in case they are present.
Will be linked if needed.
Returns:
(tuple): tuple containing:
- fws (list): The list of new Fireworks.
- fw_deps (dict): The dependencies related to these fireworks.
Should be used when generating the workflow.
"""
formula = multi_inp[0].structure.composition.reduced_formula
fws = []
fw_deps = defaultdict(list)
for i, inp in enumerate(multi_inp):
spec = dict(spec)
start_task_index = 1
current_deps = dict(deps)
parent_fw = None
if nscf_fws:
qpt = inp['qpt']
for nscf_fw in nscf_fws:
if np.allclose(nscf_fw.tasks[0].abiinput['qpt'], qpt):
parent_fw = nscf_fw
current_deps[nscf_fw.tasks[0].task_type] = "WFQ"
break
task = task_class(inp, deps=current_deps, is_autoparal=False)
# this index is for the different task, each performing a different perturbation
indexed_task_type = task_class.task_type + '_' + str(i)
# this index is to index the restarts of the single task
spec['wf_task_index'] = indexed_task_type + '_' + str(start_task_index)
fw = Firework(task, spec=spec, name=(formula + '_' + indexed_task_type)[:15])
fws.append(fw)
if parent_fw is not None:
fw_deps[parent_fw].append(fw)
return fws, fw_deps
def get_autoparal_fw(self, inp, task_type, deps, spec, nscf_fws=None, formula=None):
"""
Prepares a single Firework containing an AutoparalTask to perform the autoparal for each group of calculations.
Args:
inp: one of the inputs for which the autoparal should be run
task_type: task_type of the class for the current type of calculation
deps: dict with the dependencies already set for this type of task
spec: spec for the new Fireworks that will be created
nscf_fws: list of NSCF fws for the calculation of WFQ files, in case they are present.
Will be linked if needed.
Returns:
(tuple): tuple containing:
- fws (list): The new Firework.
- fw_deps (dict): The dependencies between related to this firework.
Should be used when generating the workflow.
"""
if formula is None:
formula = inp.structure.composition.reduced_formula
if deps is None:
deps = {}
fw_deps = defaultdict(list)
spec = dict(spec)
current_deps = dict(deps)
parent_fw = None
if nscf_fws:
qpt = inp['qpt']
for nscf_fw in nscf_fws:
if np.allclose(nscf_fw.tasks[0].abiinput['qpt'], qpt):
parent_fw = nscf_fw
current_deps[nscf_fw.tasks[0].task_type] = "WFQ"
break
task = AutoparalTask(inp, deps=current_deps, forward_spec=True)
# this index is for the different task, each performing a different perturbation
indexed_task_type = AutoparalTask.task_type
# this index is to index the restarts of the single task
spec['wf_task_index'] = indexed_task_type + '_' + task_type
fw = Firework(task, spec=spec, name=(formula + '_' + indexed_task_type)[:15])
if parent_fw is not None:
fw_deps[parent_fw].append(fw)
return fw, fw_deps
@classmethod
def from_factory(cls, structure, pseudos, kppa=None, ecut=None, pawecutdg=None, nband=None, accuracy="normal",
spin_mode="polarized", smearing="fermi_dirac:0.1 eV", charge=0.0, scf_algorithm=None,
shift_mode="Symmetric", ph_ngqpt=None, qpoints=None, qppa=None, do_ddk=True, do_dde=True,
do_strain=True, do_dte=False, scf_tol=None, ph_tol=None, ddk_tol=None, dde_tol=None, wfq_tol=None,
strain_tol=None, skip_dte_permutations=False, extra_abivars=None, decorators=None, autoparal=False,
spec=None, initialization_info=None, manager=None):
"""
Creates an instance of DfptFWWorkflow using the scf_for_phonons and dfpt_from_gsinput factory functions.
See the description of the factories for the definition of the arguments.
The manager can be a TaskManager or a FWTaskManager.
"""
if initialization_info is None:
initialization_info = {}
if 'kppa' not in initialization_info:
initialization_info['kppa'] = kppa
scf_inp = scf_for_phonons(structure=structure, pseudos=pseudos, kppa=kppa, ecut=ecut, pawecutdg=pawecutdg,
nband=nband, accuracy=accuracy, spin_mode=spin_mode, smearing=smearing,
charge=charge, scf_algorithm=scf_algorithm, shift_mode=shift_mode)
return cls.from_gs_input(scf_inp, ph_ngqpt=ph_ngqpt, qpoints=qpoints, qppa=qppa, do_ddk=do_ddk, do_dde=do_dde,
do_strain=do_strain, do_dte=do_dte, scf_tol=scf_tol, ph_tol=ph_tol, ddk_tol=ddk_tol,
dde_tol=dde_tol, strain_tol=strain_tol, skip_dte_permutations=skip_dte_permutations,
extra_abivars=extra_abivars, decorators=decorators, autoparal=autoparal, spec=spec,
initialization_info=initialization_info, manager=manager)
    @classmethod
    def from_gs_input(cls, gs_input, structure=None, ph_ngqpt=None, qpoints=None, qppa=None, do_ddk=True,
                      do_dde=True, do_strain=True, do_dte=False, scf_tol=None, ph_tol=None, ddk_tol=None, dde_tol=None,
                      wfq_tol=None, strain_tol=None, skip_dte_permutations=False, extra_abivars=None, decorators=None,
                      autoparal=False, spec=None, initialization_info=None, manager=None):
        """
        Creates an instance of DfptFWWorkflow using a custom |AbinitInput| for a ground state calculation and the
        dfpt_from_gsinput factory function. Tolerances for the scf will be set accordingly to scf_tol (with
        default 1e-22) and keys relative to relaxation and parallelization will be removed from gs_input.
        See the description of the dfpt_from_gsinput factory for the definition of the arguments.
        The manager can be a TaskManager or a FWTaskManager.
        """
        # normalize optional/mutable arguments
        if extra_abivars is None:
            extra_abivars = {}
        if decorators is None:
            decorators = []
        if spec is None:
            spec = {}
        if initialization_info is None:
            initialization_info = {}
        # accept either a FWTaskManager (unwrap it) or a bare TaskManager
        if isinstance(manager, FWTaskManager):
            manager = manager.task_manager
        if manager is None:
            raise ValueError("A TaskManager is required in the FWTaskManager.")
        # the q-grid is given either explicitly (ph_ngqpt/qpoints) or via a density (qppa), never both
        if qppa is not None and (ph_ngqpt is not None or qpoints is not None):
            raise ValueError("qppa is incompatible with ph_ngqpt and qpoints")
        if qppa is not None:
            if structure is None:
                structure = gs_input.structure
            initialization_info['qppa'] = qppa
            # derive the phonon q-point grid from the requested q-point density
            ph_ngqpt = KSampling.automatic_density(structure, qppa, chksymbreak=0).kpts[0]
        initialization_info['ngqpt'] = ph_ngqpt
        initialization_info['qpoints'] = qpoints
        initialization_info['do_strain'] = do_strain
        initialization_info['do_dte'] = do_dte
        # work on a copy: gs_input is cleaned up below and must not be mutated
        scf_inp = gs_input.deepcopy()
        if structure:
            scf_inp.set_structure(structure)
        # scf convergence: either the caller-provided tolerance dict or a tight default tolwfr
        if scf_tol:
            scf_inp.update(scf_tol)
        else:
            scf_inp['tolwfr'] = 1.e-22
        scf_inp['chksymbreak'] = 1
        # ensure a buffer of bands is present, enlarging nband accordingly
        if not scf_inp.get('nbdbuf', 0):
            scf_inp['nbdbuf'] = 4
            scf_inp['nband'] = scf_inp['nband'] + 4
        abi_vars = get_abinit_variables()
        # remove relaxation variables in case gs_input is a relaxation
        for v in abi_vars.vars_with_varset('rlx'):
            scf_inp.pop(v.name, None)
        # remove parallelization variables in case gs_input is coming from a previous run with parallelization
        for v in abi_vars.vars_with_varset('paral'):
            scf_inp.pop(v.name, None)
        scf_inp.set_vars(extra_abivars)
        for d in decorators:
            d(scf_inp)
        # generate all the DFPT inputs from the prepared scf input
        multi = dfpt_from_gsinput(scf_inp, ph_ngqpt=ph_ngqpt, qpoints=qpoints, do_ddk=do_ddk, do_dde=do_dde,
                                  do_strain=do_strain, do_dte=do_dte, ph_tol=ph_tol, ddk_tol=ddk_tol, dde_tol=dde_tol,
                                  wfq_tol=wfq_tol, strain_tol=strain_tol, skip_dte_permutations=skip_dte_permutations,
                                  manager=manager)
        # split the generated inputs by calculation type using their tags
        ph_inp = multi.filter_by_tags(atags.PH_Q_PERT)
        nscf_inp = multi.filter_by_tags(atags.NSCF)
        ddk_inp = multi.filter_by_tags(atags.DDK)
        dde_inp = multi.filter_by_tags(atags.DDE)
        strain_inp = multi.filter_by_tags(atags.STRAIN)
        dte_inp = multi.filter_by_tags(atags.DTE)
        dfpt_wf = cls(scf_inp, ph_inp, nscf_inp, ddk_inp, dde_inp, strain_inp, dte_inp, autoparal=autoparal,
                      spec=spec, initialization_info=initialization_info)
        return dfpt_wf
    @classmethod
    def get_mongoengine_results(cls, wf):
        """
        Generates the DfptResult mongoengine document containing the results of the calculation.
        The workflow should have been generated from this class and requires an open connection to the
        fireworks database and access to the file system containing the calculations.
        Args:
            wf: the fireworks Workflow instance of the workflow.
        Returns:
            A DfptResult document.
        """
        assert wf.metadata['workflow_class'] == cls.workflow_class
        assert wf.metadata['workflow_module'] == cls.workflow_module
        # highest restart index seen so far for each task type (0 means "not found")
        scf_index = 0
        ph_index = 0
        ddk_index = 0
        dde_index = 0
        wfq_index = 0
        strain_index = 0
        dte_index = 0
        scf_fw, ph_fw, wfq_fw, ddk_fw, dde_fw, strain_fw, dte_fw = None, None, None, None, None, None, None
        anaddb_task = None
        # Scan the workflow and, for each task type, keep the firework with the
        # largest restart index (i.e. the last run of that task). Autoparal
        # fireworks are skipped everywhere.
        for fw in wf.fws:
            task_index = fw.spec.get('wf_task_index', '')
            if task_index == 'anaddb':
                anaddb_launch = get_last_completed_launch(fw)
                anaddb_task = fw.tasks[-1]
                anaddb_task.set_workdir(workdir=anaddb_launch.launch_dir)
            elif task_index == 'mrgddb':
                mrgddb_launch = get_last_completed_launch(fw)
                mrgddb_task = fw.tasks[-1]
                mrgddb_task.set_workdir(workdir=mrgddb_launch.launch_dir)
            elif task_index.startswith('scf_') and not task_index.endswith('autoparal'):
                current_index = int(task_index.split('_')[-1])
                if current_index > scf_index:
                    scf_index = current_index
                    scf_fw = fw
            elif task_index.startswith('phonon_0') and not task_index.endswith('autoparal'):
                current_index = int(task_index.split('_')[-1])
                if current_index > ph_index:
                    ph_index = current_index
                    ph_fw = fw
            elif task_index.startswith('ddk_0') and not task_index.endswith('autoparal'):
                current_index = int(task_index.split('_')[-1])
                if current_index > ddk_index:
                    ddk_index = current_index
                    ddk_fw = fw
            elif task_index.startswith('dde_0') and not task_index.endswith('autoparal'):
                current_index = int(task_index.split('_')[-1])
                if current_index > dde_index:
                    dde_index = current_index
                    dde_fw = fw
            elif task_index.startswith('nscf_wfq_0') and not task_index.endswith('autoparal'):
                current_index = int(task_index.split('_')[-1])
                if current_index > wfq_index:
                    wfq_index = current_index
                    wfq_fw = fw
            elif task_index.startswith('strain_pert_0') and not task_index.endswith('autoparal'):
                current_index = int(task_index.split('_')[-1])
                if current_index > strain_index:
                    strain_index = current_index
                    strain_fw = fw
            elif task_index.startswith('dte_0') and not task_index.endswith('autoparal'):
                current_index = int(task_index.split('_')[-1])
                if current_index > dte_index:
                    dte_index = current_index
                    dte_fw = fw
        # load the history of the final scf run to recover inputs and metadata
        scf_launch = get_last_completed_launch(scf_fw)
        with open(os.path.join(scf_launch.launch_dir, 'history.json'), "rt") as fh:
            scf_history = json.load(fh, cls=MontyDecoder)
        scf_task = scf_fw.tasks[-1]
        scf_task.set_workdir(workdir=scf_launch.launch_dir)
        document = DfptResult()
        # flags describing which perturbations were actually part of the workflow
        document.has_phonons = ph_fw is not None
        document.has_ddk = ddk_fw is not None
        document.has_dde = dde_fw is not None
        document.has_strain = strain_fw is not None
        document.has_dte = dte_fw is not None
        gs_input = scf_history.get_events_by_types(TaskEvent.FINALIZED)[0].details['final_input']
        document.abinit_input.gs_input = gs_input.as_dict()
        document.abinit_input.set_abinit_basic_from_abinit_input(gs_input)
        structure = gs_input.structure
        document.abinit_output.structure = structure.as_dict()
        document.set_material_data_from_structure(structure)
        initialization_info = scf_history.get_events_by_types(TaskEvent.INITIALIZED)[0].details.get('initialization_info', {})
        document.mp_id = initialization_info.get('mp_id', None)
        document.custom = initialization_info.get("custom", None)
        document.relax_db = initialization_info['relax_db'].as_dict() if 'relax_db' in initialization_info else None
        document.relax_id = initialization_info.get('relax_id', None)
        document.abinit_input.ngqpt = initialization_info.get('ngqpt', None)
        document.abinit_input.qpoints = initialization_info.get('qpoints', None)
        document.abinit_input.qppa = initialization_info.get('qppa', None)
        document.abinit_input.kppa = initialization_info.get('kppa', None)
        document.abinit_input.pseudopotentials.set_pseudos_from_files_file(scf_task.files_file.path,
                                                                           len(structure.composition.elements))
        document.created_on = datetime.datetime.now()
        document.modified_on = datetime.datetime.now()
        document.set_dir_names_from_fws_wf(wf)
        # read in binary for py3k compatibility with mongoengine
        with open(mrgddb_task.merged_ddb_path, "rb") as f:
            document.abinit_output.ddb.put(f)
        # store one representative input per perturbation type, when present
        if ph_index > 0:
            ph_task = ph_fw.tasks[-1]
            document.abinit_input.phonon_input = ph_task.abiinput.as_dict()
        if ddk_index > 0:
            ddk_task = ddk_fw.tasks[-1]
            document.abinit_input.ddk_input = ddk_task.abiinput.as_dict()
        if dde_index > 0:
            dde_task = dde_fw.tasks[-1]
            document.abinit_input.dde_input = dde_task.abiinput.as_dict()
        if wfq_index > 0:
            wfq_task = wfq_fw.tasks[-1]
            document.abinit_input.wfq_input = wfq_task.abiinput.as_dict()
        if strain_index > 0:
            strain_task = strain_fw.tasks[-1]
            document.abinit_input.strain_input = strain_task.abiinput.as_dict()
        if dte_index > 0:
            dte_task = dte_fw.tasks[-1]
            document.abinit_input.dte_input = dte_task.abiinput.as_dict()
        # anaddb outputs (netcdf plus optional phonon band structure and DOS files)
        if anaddb_task is not None:
            with open(anaddb_task.anaddb_nc_path, "rb") as f:
                document.abinit_output.anaddb_nc.put(f)
            if os.path.isfile(anaddb_task.phbst_path):
                with open(anaddb_task.phbst_path, "rb") as f:
                    document.abinit_output.phonon_bs.put(f)
            if os.path.isfile(anaddb_task.phdos_path):
                with open(anaddb_task.phdos_path, "rb") as f:
                    document.abinit_output.phonon_dos.put(f)
        document.fw_id = scf_fw.fw_id
        document.time_report = get_time_report_for_wf(wf).as_dict()
        with open(scf_task.gsr_path, "rb") as f:
            document.abinit_output.gs_gsr.put(f)
        # read in binary for py3k compatibility with mongoengine
        with open(scf_task.output_file.path, "rb") as f:
            document.abinit_output.gs_outfile.put(f)
        return document
    def add_anaddb_dfpt_fw(self, structure, ph_ngqpt=None, ndivsm=20, nqsmall=15, qppa=None, line_density=None):
        """
        Appends a Firework with a task for the calculation of various properties with anaddb.
        Notice that in the current abinit version, the presence of the third order derivatives is incompatible with
        the presence of q-points different from gamma in the DDB and first order derivative. Anaddb calculation can
        either fail or produce meaningless results.
        Args:
            structure: the input structure
            ph_ngqpt: Monkhorst-Pack divisions for the phonon Q-mesh (coarse one)
            nqsmall: Used to generate the (dense) mesh for the DOS.
                It defines the number of q-points used to sample the smallest lattice vector.
            ndivsm: Used to generate a normalized path for the phonon bands.
                It gives the number of divisions for the smallest segment of the path.
            qppa: q-points per reciprocal atom; alternative to nqsmall for the DOS mesh.
            line_density: density of points along the band path; alternative to ndivsm.
        """
        # piezo requires both the strain and the electric field perturbations to be available
        anaddb_input = AnaddbInput.dfpt(structure, ngqpt=ph_ngqpt, relaxed_ion=self.has_gamma,
                                        piezo=len(self.strain_fws) > 0 and len(self.dde_fws) > 0,
                                        dde=len(self.dde_fws) > 0, strain=len(self.strain_fws) > 0,
                                        dte=len(self.dte_fws) > 0, stress_correction=True,
                                        nqsmall=nqsmall, qppa=qppa, ndivsm=ndivsm, line_density=line_density,
                                        q1shft=(0, 0, 0), asr=2, chneut=1, dipdip=1, dos_method="tetra")
        # the anaddb task consumes the merged DDB produced by the mrgddb firework
        anaddb_task = AnaDdbAbinitTask(anaddb_input, deps={MergeDdbAbinitTask.task_type: "DDB"})
        spec = dict(self.scf_fw.spec)
        spec['wf_task_index'] = 'anaddb'
        anaddb_fw = Firework(anaddb_task, spec=spec, name='anaddb')
        self.append_fw(anaddb_fw, short_single_spec=True)
|
davidwaroquiers/abiflows
|
abiflows/fireworks/workflows/abinit_workflows.py
|
Python
|
gpl-2.0
| 154,521
|
[
"ABINIT"
] |
f3ac8b3d6a2b138bd0413e9e7dc766cfb07dd646fadad2571517ab67c34ab5ef
|
"""Redis Key Commands Mixin"""
from tornado import concurrent
# Python 2 support for ascii()
if 'ascii' not in dir(__builtins__): # pragma: nocover
from tredis.compat import ascii
class KeysMixin(object):
"""Redis Key Commands Mixin"""
def delete(self, *keys):
"""Removes the specified keys. A key is ignored if it does not exist.
Returns :data:`True` if all keys are removed.
.. note::
**Time complexity**: ``O(N)`` where ``N`` is the number of keys that
will be removed. When a key to remove holds a value other than a
string, the individual complexity for this key is ``O(M)`` where
``M`` is the number of elements in the list, set, sorted set or
hash. Removing a single key that holds a string value is ``O(1)``.
:param keys: One or more keys to remove
:type keys: :class:`str`, :class:`bytes`
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'DEL'] + list(keys), len(keys))
def dump(self, key):
"""Serialize the value stored at key in a Redis-specific format and
return it to the user. The returned value can be synthesized back into
a Redis key using the :meth:`~tredis.RedisClient.restore` command.
The serialization format is opaque and non-standard, however it has a
few semantic characteristics:
- It contains a 64-bit checksum that is used to make sure errors
will be detected. The :meth:`~tredis.RedisClient.restore` command
makes sure to check the checksum before synthesizing a key using
the serialized value.
- Values are encoded in the same format used by RDB.
- An RDB version is encoded inside the serialized value, so that
different Redis versions with incompatible RDB formats will
refuse to process the serialized value.
- The serialized value does NOT contain expire information. In
order to capture the time to live of the current value the
:meth:`~tredis.RedisClient.pttl` command should be used.
If key does not exist :data:`None` is returned.
.. note::
**Time complexity**: ``O(1)`` to access the key and additional
``O(N*M)`` to serialized it, where N is the number of Redis objects
composing the value and ``M`` their average size. For small string
values the time complexity is thus ``O(1)+O(1*M)`` where ``M`` is
small, so simply ``O(1)``.
:param key: The key to dump
:type key: :class:`str`, :class:`bytes`
:rtype: bytes, None
"""
return self._execute([b'DUMP', key])
def exists(self, key):
"""Returns :data:`True` if the key exists.
.. note::
**Time complexity**: ``O(1)``
**Command Type**: String
:param key: One or more keys to check for
:type key: :class:`str`, :class:`bytes`
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'EXISTS', key], 1)
def expire(self, key, timeout):
"""Set a timeout on key. After the timeout has expired, the key will
automatically be deleted. A key with an associated timeout is often
said to be volatile in Redis terminology.
The timeout is cleared only when the key is removed using the
:meth:`~tredis.RedisClient.delete` method or overwritten using the
:meth:`~tredis.RedisClient.set` or :meth:`~tredis.RedisClient.getset`
methods. This means that all the operations that conceptually alter the
value stored at the key without replacing it with a new one will leave
the timeout untouched. For instance, incrementing the value of a key
with :meth:`~tredis.RedisClient.incr`, pushing a new value into a
list with :meth:`~tredis.RedisClient.lpush`, or altering the field
value of a hash with :meth:`~tredis.RedisClient.hset` are all
operations that will leave the timeout untouched.
The timeout can also be cleared, turning the key back into a persistent
key, using the :meth:`~tredis.RedisClient.persist` method.
If a key is renamed with :meth:`~tredis.RedisClient.rename`,
the associated time to live is transferred to the new key name.
If a key is overwritten by :meth:`~tredis.RedisClient.rename`, like in
the case of an existing key ``Key_A`` that is overwritten by a call
like ``client.rename(Key_B, Key_A)`` it does not matter if the original
``Key_A`` had a timeout associated or not, the new key ``Key_A`` will
inherit all the characteristics of ``Key_B``.
.. note::
**Time complexity**: ``O(1)``
:param key: The key to set an expiration for
:type key: :class:`str`, :class:`bytes`
:param int timeout: The number of seconds to set the timeout to
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute(
[b'EXPIRE', key, ascii(timeout).encode('ascii')], 1)
def expireat(self, key, timestamp):
""":meth:`~tredis.RedisClient.expireat` has the same effect and
semantic as :meth:`~tredis.RedisClient.expire`, but instead of
specifying the number of seconds representing the TTL (time to live),
it takes an absolute Unix timestamp (seconds since January 1, 1970).
Please for the specific semantics of the command refer to the
documentation of :meth:`~tredis.RedisClient.expire`.
.. note::
**Time complexity**: ``O(1)``
:param key: The key to set an expiration for
:type key: :class:`str`, :class:`bytes`
:param int timestamp: The UNIX epoch value for the expiration
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute(
[b'EXPIREAT', key,
ascii(timestamp).encode('ascii')], 1)
def keys(self, pattern):
"""Returns all keys matching pattern.
While the time complexity for this operation is ``O(N)``, the constant
times are fairly low. For example, Redis running on an entry level
laptop can scan a 1 million key database in 40 milliseconds.
.. warning:: Consider :meth:`~tredis.RedisClient.keys` as a
command that should only be used in production environments with
extreme care. It may ruin performance when it is executed against
large databases. This command is intended for debugging and special
operations, such as changing your keyspace layout. Don't use
:meth:`~tredis.RedisClient.keys` in your regular application code.
If you're looking for a way to find keys in a subset of your
keyspace, consider using :meth:`~tredis.RedisClient.scan` or sets.
Supported glob-style patterns:
- ``h?llo`` matches ``hello``, ``hallo`` and ``hxllo``
- ``h*llo`` matches ``hllo`` and ``heeeello``
- ``h[ae]llo`` matches ``hello`` and ``hallo``, but not ``hillo``
- ``h[^e]llo`` matches ``hallo``, ``hbllo``, but not ``hello``
- ``h[a-b]llo`` matches ``hallo`` and ``hbllo``
Use a backslash (``\``) to escape special characters if you want to
match them verbatim.
.. note::
**Time complexity**: ``O(N)``
:param pattern: The pattern to use when looking for keys
:type pattern: :class:`str`, :class:`bytes`
:rtype: list
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'KEYS', pattern])
def migrate(self,
host,
port,
key,
destination_db,
timeout,
copy=False,
replace=False):
"""Atomically transfer a key from a source Redis instance to a
destination Redis instance. On success the key is deleted from the
original instance and is guaranteed to exist in the target instance.
The command is atomic and blocks the two instances for the time
required to transfer the key, at any given time the key will appear to
exist in a given instance or in the other instance, unless a timeout
error occurs.
.. note::
**Time complexity**: This command actually executes a DUMP+DEL in
the source instance, and a RESTORE in the target instance. See the
pages of these commands for time complexity. Also an ``O(N)`` data
transfer between the two instances is performed.
:param host: The host to migrate the key to
:type host: bytes, str
:param int port: The port to connect on
:param key: The key to migrate
:type key: bytes, str
:param int destination_db: The database number to select
:param int timeout: The maximum idle time in milliseconds
:param bool copy: Do not remove the key from the local instance
:param bool replace: Replace existing key on the remote instance
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
"""
command = [
b'MIGRATE', host,
ascii(port).encode('ascii'), key,
ascii(destination_db).encode('ascii'),
ascii(timeout).encode('ascii')
]
if copy is True:
command.append(b'COPY')
if replace is True:
command.append(b'REPLACE')
return self._execute(command, b'OK')
def move(self, key, db):
"""Move key from the currently selected database (see
:meth:`~tredis.RedisClient.select`) to the specified destination
database. When key already exists in the destination database, or it
does not exist in the source database, it does nothing. It is possible
to use :meth:`~tredis.RedisClient.move` as a locking primitive because
of this.
.. note::
**Time complexity**: ``O(1)``
:param key: The key to move
:type key: :class:`str`, :class:`bytes`
:param int db: The database number
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'MOVE', key, ascii(db).encode('ascii')], 1)
def object_encoding(self, key):
"""Return the kind of internal representation used in order to store
the value associated with a key
.. note::
**Time complexity**: ``O(1)``
:param key: The key to get the encoding for
:type key: :class:`str`, :class:`bytes`
:rtype: bytes
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'OBJECT', b'ENCODING', key])
def object_idle_time(self, key):
"""Return the number of seconds since the object stored at the
specified key is idle (not requested by read or write operations).
While the value is returned in seconds the actual resolution of this
timer is 10 seconds, but may vary in future implementations of Redis.
.. note::
**Time complexity**: ``O(1)``
:param key: The key to get the idle time for
:type key: :class:`str`, :class:`bytes`
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'OBJECT', b'IDLETIME', key])
def object_refcount(self, key):
"""Return the number of references of the value associated with the
specified key. This command is mainly useful for debugging.
.. note::
**Time complexity**: ``O(1)``
:param key: The key to get the refcount for
:type key: :class:`str`, :class:`bytes`
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'OBJECT', b'REFCOUNT', key])
def persist(self, key):
"""Remove the existing timeout on key, turning the key from volatile
(a key with an expire set) to persistent (a key that will never expire
as no timeout is associated).
.. note::
**Time complexity**: ``O(1)``
:param key: The key to move
:type key: :class:`str`, :class:`bytes`
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'PERSIST', key], 1)
    def pexpire(self, key, timeout):
        """This command works exactly like :meth:`~tredis.RedisClient.expire`
        but the time to live of the key is specified in milliseconds instead
        of seconds.

        .. note::

           **Time complexity**: ``O(1)``

        :param key: The key to set an expiration for
        :type key: :class:`str`, :class:`bytes`
        :param int timeout: The number of milliseconds to set the timeout to
        :rtype: bool
        :raises: :exc:`~tredis.exceptions.RedisError`

        """
        return self._execute(
            [b'PEXPIRE', key, ascii(timeout).encode('ascii')], 1)
def pexpireat(self, key, timestamp):
""":meth:`~tredis.RedisClient.pexpireat` has the same effect and
semantic as :meth:`~tredis.RedisClient.expireat`, but the Unix time
at which the key will expire is specified in milliseconds instead of
seconds.
.. note::
**Time complexity**: ``O(1)``
:param key: The key to set an expiration for
:type key: :class:`str`, :class:`bytes`
:param int timestamp: The expiration UNIX epoch value in milliseconds
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute(
[b'PEXPIREAT', key,
ascii(timestamp).encode('ascii')], 1)
def pttl(self, key):
"""Like :meth:`~tredis.RedisClient.ttl` this command returns the
remaining time to live of a key that has an expire set, with the sole
difference that :meth:`~tredis.RedisClient.ttl` returns the amount of
remaining time in seconds while :meth:`~tredis.RedisClient.pttl`
returns it in milliseconds.
In Redis 2.6 or older the command returns ``-1`` if the key does not
exist or if the key exist but has no associated expire.
Starting with Redis 2.8 the return value in case of error changed:
- The command returns ``-2`` if the key does not exist.
- The command returns ``-1`` if the key exists but has no associated
expire.
.. note::
**Time complexity**: ``O(1)``
:param key: The key to get the PTTL for
:type key: :class:`str`, :class:`bytes`
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'PTTL', key])
def randomkey(self):
"""Return a random key from the currently selected database.
.. note::
**Time complexity**: ``O(1)``
:rtype: bytes
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'RANDOMKEY'])
def rename(self, key, new_key):
"""Renames ``key`` to ``new_key``. It returns an error when the source
and destination names are the same, or when ``key`` does not exist.
If ``new_key`` already exists it is overwritten, when this happens
:meth:`~tredis.RedisClient.rename` executes an implicit
:meth:`~tredis.RedisClient.delete` operation, so if the deleted key
contains a very big value it may cause high latency even if
:meth:`~tredis.RedisClient.rename` itself is usually a constant-time
operation.
.. note::
**Time complexity**: ``O(1)``
:param key: The key to rename
:type key: :class:`str`, :class:`bytes`
:param new_key: The key to rename it to
:type new_key: :class:`str`, :class:`bytes`
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'RENAME', key, new_key], b'OK')
def renamenx(self, key, new_key):
"""Renames ``key`` to ``new_key`` if ``new_key`` does not yet exist.
It returns an error under the same conditions as
:meth:`~tredis.RedisClient.rename`.
.. note::
**Time complexity**: ``O(1)``
:param key: The key to rename
:type key: :class:`str`, :class:`bytes`
:param new_key: The key to rename it to
:type new_key: :class:`str`, :class:`bytes`
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'RENAMENX', key, new_key], 1)
    def restore(self, key, ttl, value, replace=False):
        """Create a key associated with a value that is obtained by
        deserializing the provided serialized value (obtained via
        :meth:`~tredis.RedisClient.dump`).

        If ``ttl`` is ``0`` the key is created without any expire, otherwise
        the specified expire time (in milliseconds) is set.

        :meth:`~tredis.RedisClient.restore` will return a
        ``Target key name is busy`` error when key already exists unless you
        use the ``replace`` flag, which issues the ``REPLACE`` modifier
        (Redis 3.0 or greater).

        :meth:`~tredis.RedisClient.restore` checks the RDB version and data
        checksum. If they don't match an error is returned.

        .. note::

           **Time complexity**: ``O(1)`` to create the new key and additional
           ``O(N*M)`` to reconstruct the serialized value, where ``N`` is the
           number of Redis objects composing the value and ``M`` their average
           size. For small string values the time complexity is thus
           ``O(1)+O(1*M)`` where ``M`` is small, so simply ``O(1)``. However
           for sorted set values the complexity is ``O(N*M*log(N))`` because
           inserting values into sorted sets is ``O(log(N))``.

        :param key: The key to restore the serialized value to
        :type key: :class:`str`, :class:`bytes`
        :param int ttl: The expire time in milliseconds (``0`` for no expire)
        :param value: The value to restore to the key
        :type value: :class:`str`, :class:`bytes`
        :param bool replace: Replace a pre-existing key
        :rtype: bool
        :raises: :exc:`~tredis.exceptions.RedisError`

        """
        command = [b'RESTORE', key, ascii(ttl).encode('ascii'), value]
        if replace:
            command.append(b'REPLACE')
        return self._execute(command, b'OK')
def scan(self, cursor=0, pattern=None, count=None):
"""The :meth:`~tredis.RedisClient.scan` command and the closely related
commands :meth:`~tredis.RedisClient.sscan`,
:meth:`~tredis.RedisClient.hscan` and :meth:`~tredis.RedisClient.zscan`
are used in order to incrementally iterate over a collection of
elements.
- :meth:`~tredis.RedisClient.scan` iterates the set of keys in the
currently selected Redis database.
- :meth:`~tredis.RedisClient.sscan` iterates elements of Sets types.
- :meth:`~tredis.RedisClient.hscan` iterates fields of Hash types and
their associated values.
- :meth:`~tredis.RedisClient.zscan` iterates elements of Sorted Set
types and their associated scores.
**Basic usage**
:meth:`~tredis.RedisClient.scan` is a cursor based iterator.
This means that at every call of the command, the server returns an
updated cursor that the user needs to use as the cursor argument in
the next call.
An iteration starts when the cursor is set to ``0``, and terminates
when the cursor returned by the server is ``0``.
For more information on :meth:`~tredis.RedisClient.scan`,
visit the `Redis docs on scan <http://redis.io/commands/scan>`_.
.. note::
**Time complexity**: ``O(1)`` for every call. ``O(N)`` for a
complete iteration, including enough command calls for the cursor to
return back to ``0``. ``N`` is the number of elements inside the
collection.
:param int cursor: The server specified cursor value or ``0``
:param pattern: An optional pattern to apply for key matching
:type pattern: :class:`str`, :class:`bytes`
:param int count: An optional amount of work to perform in the scan
:rtype: int, list
:returns: A tuple containing the cursor and the list of keys
:raises: :exc:`~tredis.exceptions.RedisError`
"""
def format_response(value):
"""Format the response from redis
:param tuple value: The return response from redis
:rtype: tuple(int, list)
"""
return int(value[0]), value[1]
command = [b'SCAN', ascii(cursor).encode('ascii')]
if pattern:
command += [b'MATCH', pattern]
if count:
command += [b'COUNT', ascii(count).encode('ascii')]
return self._execute(command, format_callback=format_response)
def sort(self,
key,
by=None,
external=None,
offset=0,
limit=None,
order=None,
alpha=False,
store_as=None):
"""Returns or stores the elements contained in the list, set or sorted
set at key. By default, sorting is numeric and elements are compared by
their value interpreted as double precision floating point number.
The ``external`` parameter is used to specify the
`GET <http://redis.io/commands/sort#retrieving-external-keys>_`
parameter for retrieving external keys. It can be a single string
or a list of strings.
.. note::
**Time complexity**: ``O(N+M*log(M))`` where ``N`` is the number of
elements in the list or set to sort, and ``M`` the number of
returned elements. When the elements are not sorted, complexity is
currently ``O(N)`` as there is a copy step that will be avoided in
next releases.
:param key: The key to get the refcount for
:type key: :class:`str`, :class:`bytes`
:param by: The optional pattern for external sorting keys
:type by: :class:`str`, :class:`bytes`
:param external: Pattern or list of patterns to return external keys
:type external: :class:`str`, :class:`bytes`, list
:param int offset: The starting offset when using limit
:param int limit: The number of elements to return
:param order: The sort order - one of ``ASC`` or ``DESC``
:type order: :class:`str`, :class:`bytes`
:param bool alpha: Sort the results lexicographically
:param store_as: When specified, the key to store the results as
:type store_as: :class:`str`, :class:`bytes`, None
:rtype: list|int
:raises: :exc:`~tredis.exceptions.RedisError`
:raises: :exc:`ValueError`
"""
if order and order not in [b'ASC', b'DESC', 'ASC', 'DESC']:
raise ValueError('invalid sort order "{}"'.format(order))
command = [b'SORT', key]
if by:
command += [b'BY', by]
if external and isinstance(external, list):
for entry in external:
command += [b'GET', entry]
elif external:
command += [b'GET', external]
if limit:
command += [
b'LIMIT',
ascii(offset).encode('utf-8'),
ascii(limit).encode('utf-8')
]
if order:
command.append(order)
if alpha is True:
command.append(b'ALPHA')
if store_as:
command += [b'STORE', store_as]
return self._execute(command)
def ttl(self, key):
"""Returns the remaining time to live of a key that has a timeout.
This introspection capability allows a Redis client to check how many
seconds a given key will continue to be part of the dataset.
.. note::
**Time complexity**: ``O(1)``
:param key: The key to get the TTL for
:type key: :class:`str`, :class:`bytes`
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'TTL', key])
def type(self, key):
"""Returns the string representation of the type of the value stored at
key. The different types that can be returned are: ``string``,
``list``, ``set``, ``zset``, and ``hash``.
.. note::
**Time complexity**: ``O(1)``
:param key: The key to get the type for
:type key: :class:`str`, :class:`bytes`
:rtype: bytes
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute([b'TYPE', key])
def wait(self, num_slaves, timeout=0):
"""his command blocks the current client until all the previous write
commands are successfully transferred and acknowledged by at least the
specified number of slaves. If the timeout, specified in milliseconds,
is reached, the command returns even if the specified number of slaves
were not yet reached.
The command will always return the number of slaves that acknowledged
the write commands sent before the :meth:`~tredis.RedisClient.wait`
command, both in the case where the specified number of slaves are
reached, or when the timeout is reached.
.. note::
**Time complexity**: ``O(1)``
:param int num_slaves: Number of slaves to acknowledge previous writes
:param int timeout: Timeout in milliseconds
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`
"""
command = [
b'WAIT',
ascii(num_slaves).encode('ascii'),
ascii(timeout).encode('ascii')
]
return self._execute(command)
|
gmr/tredis
|
tredis/keys.py
|
Python
|
bsd-3-clause
| 26,277
|
[
"VisIt"
] |
47a669fd705fa8627606b9555acef04bd249886ff878f030523f339875a00d97
|
from __future__ import print_function
"""Function-like object creating hexagonal lattices.
The following lattice creators are defined:
* Hexagonal
* HexagonalClosedPacked
* Graphite
* Graphene
Example for using Graphene to create atoms object gra::
from ase.lattice.hexagonal import *
import ase.io as io
from ase import Atoms, Atom
index1=6
index2=7
mya = 2.45
myc = 20.0
gra = Graphene(symbol = 'C',latticeconstant={'a':mya,'c':myc},
size=(index1,index2,1))
io.write('test.xyz', gra, format='xyz')
"""
from ase.lattice.triclinic import TriclinicFactory
import numpy as np
from ase.data import reference_states as _refstate
class HexagonalFactory(TriclinicFactory):
"A factory for creating simple hexagonal lattices."
# The name of the crystal structure in ChemicalElements
xtal_name = "hexagonal"
def make_crystal_basis(self):
"Make the basis matrix for the crystal unit cell and the system unit cell."
# First convert the basis specification to a triclinic one
if isinstance(self.latticeconstant, type({})):
self.latticeconstant['alpha'] = 90
self.latticeconstant['beta'] = 90
self.latticeconstant['gamma'] = 120
self.latticeconstant['b/a'] = 1.0
else:
if len(self.latticeconstant) == 2:
a,c = self.latticeconstant
self.latticeconstant = (a,a,c,90,90,120)
else:
raise ValueError("Improper lattice constants for hexagonal crystal.")
TriclinicFactory.make_crystal_basis(self)
def find_directions(self, directions, miller):
"""Find missing directions and miller indices from the specified ones.
Also handles the conversion of hexagonal-style 4-index notation to
the normal 3-index notation.
"""
directions = list(directions)
miller = list(miller)
for obj in (directions,miller):
for i in range(3):
if obj[i] is not None:
(a,b,c,d) = obj[i]
if a + b + c != 0:
raise ValueError(
("(%d,%d,%d,%d) is not a valid hexagonal Miller " +
"index, as the sum of the first three numbers " +
"should be zero.") % (a,b,c,d))
x = 4*a + 2*b
y = 2*a + 4*b
z = 3*d
obj[i] = (x,y,z)
TriclinicFactory.find_directions(self, directions, miller)
def print_directions_and_miller(self, txt=""):
"Print direction vectors and Miller indices."
print("Direction vectors of unit cell%s:" % (txt,))
for i in (0,1,2):
self.print_four_vector("[]", self.directions[i])
print("Miller indices of surfaces%s:" % (txt,))
for i in (0,1,2):
self.print_four_vector("()", self.miller[i])
def print_four_vector(self, bracket, numbers):
bra, ket = bracket
(x,y,z) = numbers
a = 2*x - y
b = -x + 2*y
c = -x -y
d = 2*z
print(" %s%d, %d, %d%s ~ %s%d, %d, %d, %d%s" % \
(bra,x,y,z,ket, bra,a,b,c,d,ket))
Hexagonal = HexagonalFactory()
class HexagonalClosedPackedFactory(HexagonalFactory):
"A factory for creating HCP lattices."
xtal_name = "hcp"
bravais_basis = [[0,0,0], [1.0/3.0, 2.0/3.0, 0.5]]
HexagonalClosedPacked = HexagonalClosedPackedFactory()
class GraphiteFactory(HexagonalFactory):
"A factory for creating graphite lattices."
xtal_name = "graphite"
bravais_basis = [[0,0,0], [1.0/3.0, 2.0/3.0, 0], [1.0/3.0,2.0/3.0,0.5], [2.0/3.0,1.0/3.0,0.5]]
Graphite = GraphiteFactory()
class GrapheneFactory(HexagonalFactory):
"A factory for creating graphene lattices."
xtal_name = "graphene"
bravais_basis = [[0,0,0], [1.0/3.0, 2.0/3.0, 0]]
Graphene = GrapheneFactory()
|
suttond/MODOI
|
ase/lattice/hexagonal.py
|
Python
|
lgpl-3.0
| 4,016
|
[
"ASE",
"CRYSTAL"
] |
5095577322089b4b6bcdafd551cd7a557f02406bcf53a1884aef2d2e99700e37
|
tests=[
("python","testMatricCalc.py",{}),
]
longTests=[]
if __name__=='__main__':
import sys
from rdkit import TestRunner
failed,tests = TestRunner.RunScript('test_list.py',0,1)
sys.exit(len(failed))
|
adalke/rdkit
|
Code/DataManip/MetricMatrixCalc/Wrap/test_list.py
|
Python
|
bsd-3-clause
| 220
|
[
"RDKit"
] |
f1265bed919ad40c1f99e0150f5b7c59f1e085318e63adc49c90136d6aaf29fa
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# title : scanco.py
# description : provides support for Scanco (TM) files
# copyright : (c) 2017 I3MTO laboratory. All Rights Reserved
# author(s) : Thomas Janvier
# creation : 01 December 2017
# modification : 19 December 2017
#
# TODO:
# - finish *.aim cases
# - test with several files (isq works, other haven't been tested)
# - switch docstring to Numpy style for module-scale normalization
import os
import struct
import numpy as np
__all__ = ['Scanco, scanco']
# Normalized header of Scanco(TM) .ISQ; .RSQ; .RAD files
_ISQ_ = "CTDATA-HEADER_"
_AIM_ = "AIMDATA_" # Normalized header of Scanco(TM) .AIM files
class Scanco(object):
"""Class to read Scanco(TM) files
Notes
-----
Only the filename and the header are loaded into RAM, voxels data are read on specific request.
See Also
--------
bitk.io.scanco : abstract class for script-style use
References
----------
.. [1] vtk-dicom C++ library,
https://github.com/dgobbi/vtk-dicom/blob/master/Source/vtkScancoCTReader.cxx
Examples
--------
>>> volume = Scanco('phantom.isq')
"""
file = ''
info = {}
def __init__(self, file=''):
if os.path.isfile(file):
self.file = file
self.open()
else:
self.file = ''
self.info = scanco.header()
def ext(self, format='list'):
"""Return the supported extensions"""
return scanco.ext(format)
def open(self, file=''):
"""Open file and try to read the header"""
if file:
self.info = scanco.info(file)
self.file = file
else:
self.info = scanco.info(self.file)
def data(self, filter=True):
"""Return the raw data (voxels)"""
return scanco.data(self.file, filter)
class scanco():
"""Abstract class to read Scanco(TM) files
Notes
-----
Only the filename and the header are loaded into RAM, voxels data are read on specific request.
See Also
--------
bitk.io.Scanco : class wrapper for object-oriented use
References
----------
.. [1] vtk-dicom C++ library,
https://github.com/dgobbi/vtk-dicom/blob/master/Source/vtkScancoCTReader.cxx
Examples
--------
>>> metadata = scanco.header('phantom.isq')
>>> volume = scanco.data('phantom.isq')
>>> metadata, volume = scanco.read('phantom.isq')
"""
@staticmethod
def ext(format='list'):
if format == 'list':
return ['.isq', '.aim', '.rsq', '.rad']
elif format == 'filter':
return '*' + ' *'.join(scanco.ext())
@staticmethod
def info(file):
"""Read the header (metadata) and stop before raw data (voxels)"""
# file must be a string
if not isinstance(file, str):
raise TypeError(
"Attribute 'file' must be a String not: {}".format(type(file)))
# check if the file exists
if not os.path.isfile(file):
raise IOError("File not found: {}".format(file))
# open the file
fid = open(file, 'rb')
# handle IO errors
try:
# Scanco header begins with at least a 512 bytes block
# in which first 16 bytes block indicate file mod
buff = fid.read(16)
# deduce the reader modality
mod = scanco.__mod(buff)
# decode the header properly
if mod == 'isq':
return scanco.__headerISQ(fid)
elif mod == 'aim':
return scanco.__headerAIM(fid)
else:
raise IOError('Incorrect file: ' + fname)
except Exception as err:
raise (err)
finally:
fid.close()
@staticmethod
def data(file, filter=True):
"""Return raw data (voxels)"""
data = scanco.read(file, filter=filter)[1]
return data
@staticmethod
def read(file, filter=True):
"""Read the header (metadata) AND raw data (voxels)"""
# read header (include all file verifications)
header = scanco.info(file)
# exctract the offset
offset = header["Header Size [bytes]"]
# exctract the size
x, y, z = header["Scan Dimensions [px]"]
# open the file (errors have been handled in @header)
fid = open(file, 'rb')
try:
# deduce the number of voxels to read
length = (x * y * z)
# skip the file header
fid.seek(offset)
# find & read raw data as little-endian uint16
data = np.fromfile(fid, dtype='<H', count=length)
except Exception as err:
raise (err)
finally:
fid.close()
# reshape data to a 3D numpy array
data = data.reshape((x, y, z), order='F')
if filter:
data[data > 0.99 * np.max(data)] = 0
return (header, data)
@staticmethod
def header():
"""Return default empty header"""
header = {}
header["Version"] = ""
header["Patient Name"] = ""
header["Creation Date"] = ""
header["Modification Date"] = ""
header["Scan Dimensions [px]"] = (0, 0, 0)
header["Scan Dimensions [mm]"] = (0.0, 0.0, 0.0)
header["Patient ID"] = 0
header["Scanner ID"] = 0
header["Slice Thickness"] = 0
header["Slice Increment"] = 0
header["Start Position"] = 0
header["End Position"] = 0
header["Z Position"] = 0
header["Data Range"] = (0, 0)
header["Mu Scaling"] = 1.0
header["Number of Samples"] = 0
header["Number of Projections"] = 0
header["Scan Distance"] = 0
header["Sample Time"] = 0
header["Scanner Type"] = 0
header["Measurement Index"] = 0
header["Site"] = 0
header["Reconstruction Algorithm"] = 0
header["Reference Line"] = 0
header["Energy"] = 0
header["Intensity"] = 0
header["Rescale Type"] = 0
header["Rescale Units"] = ""
header["CalibrationData"] = ""
header["Rescale Slope"] = 1.0
header["Rescale Intercept"] = 0.0
header["Mu Water"] = 0
header["Compression"] = 0
header["Header Size [bytes]"] = 0
return header
def __mod(buff):
version = buff.decode()
if version.startswith(_ISQ_):
return 'isq'
elif version.startswith(_AIM_):
return 'aim'
else:
pi_size, ii_size = struct.unpack('<II', buff[0:8])
if pi_size == 20 and ii_size == 140:
return 'aim'
else:
return 'unknown'
def __headerISQ(fid):
header = scanco.header()
fid.seek(0, 0)
# 'Version' is a clear string
header["Version"] = fid.read(16).decode()
data_type = struct.unpack('<I', fid.read(4))[0]
num_bytes = struct.unpack('<I', fid.read(4))[0]
num_blocks = struct.unpack('<I', fid.read(4))[0]
header["Patient ID"] = struct.unpack('<I', fid.read(4))[0]
header["Scanner ID"] = struct.unpack('<I', fid.read(4))[0]
fid.seek(8, 1) # date coded with 8 bytes
header["Scan Dimensions [px]"] = struct.unpack('<III', fid.read(12))
header["Scan Dimensions [mm]"] = struct.unpack('<III', fid.read(12))
# check if the file is a proper .RAD ...
rad = (data_type == 9 or header["Scan Dimensions [mm]"][2] == 0)
# ... then read it as a .RAD file ...
if rad:
header["Measurement ID"] = struct.unpack('<I', fid.read(4))[0]
header["Data Range"] = struct.unpack('<II', fid.read(8))
header["Mu Scaling"] = struct.unpack('<I', fid.read(4))[0]
header["Patient Name"] = fid.read(40).decode()
header["Z Position"] = struct.unpack('<I', fid.read(4))[0] * 1.e-3
fid.seek(4, 1) # unknown field
header["Sample Time"] = struct.unpack('<I', fid.read(4))[0] * 1.e-3
header["Energy"] = struct.unpack('<I', fid.read(4))[0] * 1.e-3
header["Intensity"] = struct.unpack('<I', fid.read(4))[0] * 1.e-3
header["Reference Line"] = struct.unpack(
'<I', fid.read(4))[0] * 1.e-3
header["Start Position"] = struct.unpack(
'<I', fid.read(4))[0] * 1.e-3
header["End Position"] = struct.unpack(
'<I', fid.read(4))[0] * 1.e-3
fid.seek(88 * 4, 1) # skip to data
# ... otherwise consider it as .ISQ or .RSQ
else:
header["Slice Thickness"] = struct.unpack(
'<I', fid.read(4))[0] * 1.e-3
header["Slice Increment"] = struct.unpack(
'<I', fid.read(4))[0] * 1.e-3
header["Start Position"] = struct.unpack(
'<I', fid.read(4))[0] * 1.e-3
header["End Position"] = header["Start Position"] + \
header["Scan Dimensions [px]"][2] / 1000 * \
(header["Scan Dimensions [px]"][2] - 1) / \
header["Scan Dimensions [px]"][2]
header["Data Range"] = struct.unpack('<II', fid.read(8))
header["Mu Scaling"] = struct.unpack('<I', fid.read(4))[0]
header["Number of Samples"] = struct.unpack('<I', fid.read(4))[0]
header["Number of Projections"] = struct.unpack('<I', fid.read(4))[
0]
header["Scan Distance"] = struct.unpack(
'<I', fid.read(4))[0] * 1.e-3
header["Scanner Type"] = struct.unpack('<I', fid.read(4))[0]
header["Sample Time"] = struct.unpack('<I', fid.read(4))[0] * 1.e-3
header["Measurement Index"] = struct.unpack('<I', fid.read(4))[0]
header["Site"] = struct.unpack('<I', fid.read(4))[0]
header["Reference Line"] = struct.unpack(
'<I', fid.read(4))[0] * 1.e-3
header["Reconstruction Algorithm"] = struct.unpack('<I', fid.read(4))[
0]
header["Patient Name"] = fid.read(40).decode()
header["Energy"] = struct.unpack('<I', fid.read(4))[0] * 1.e-3
header["Intensity"] = struct.unpack('<I', fid.read(4))[0] * 1.e-3
fid.seek(83 * 4, 1) # skip to data
# fix 'Slice Thickness' and 'Slice Increment' if they were truncated
if (header["Scan Dimensions [mm]"][2] != 0):
spacing = header["Scan Dimensions [mm]"][2] * \
1.1e-3 / header["Scan Dimensions [px]"][2]
if abs(spacing - header["Slice Thickness"]) < 1.1e-3:
header["Slice Thickness"] = spacing
if abs(spacing - header["Slice Increment"]) < 1.1e-3:
header["Slice Increment"] = spacing
# check the Scan Dimensions and set default values if needed
header["Scan Dimensions [px]"] = tuple(
1 if el < 1 else el for el in header["Scan Dimensions [px]"])
header["Scan Dimensions [mm]"] = tuple(
el * 1e-6 if rad else el * 1e-3 for el in header["Scan Dimensions [mm]"])
header["Scan Dimensions [mm]"] = tuple(
1 if el == 0 else el for el in header["Scan Dimensions [mm]"])
# read the offset
offset = struct.unpack('<I', fid.read(4))[0]
header["Header Size [bytes]"] = (offset + 1) * 512
return header
def __headerAIM(fid):
header = scanco.header()
header["Header Size [bytes]"] = 0
fid.seek(0, 0)
# 'Version' is a clear string
header["Version"] = fid.read(16).decode()
# header uses little endian 32-bit ints (8 bytes)
if header["Version"].startswith("AIMDATA_V030"):
itype = '<Q'
isize = 8
header["Header Size [bytes]"] += 16
# header uses little endian 32-bit ints (4 bytes)
else:
itype = '<I'
isize = 4
fid.seek(header["Header Size [bytes]"], 1)
# read header sections sizes
preheader_size = struct.unpack(itype, fid.read(isize))[0]
struct_size = struct.unpack(itype, fid.read(isize))[0]
log_size = struct.unpack(itype, fid.read(isize))[0]
# update header total size
header["Header Size [bytes]"] += preheader_size + \
struct_size + log_size
# ignore pre-header data
fid.seek(preheader_size, 11)
fid.seek(20, 1) # unknown field
data_type = struct.unpack('<I', fid.read(4))[0]
struct_val = []
for i in range(0, 21):
struct_val.append(struct.unpack(itype, fid.read(isize))[0])
el_size = struct.unpack('<III', fid.read(12))
return header
|
Bone-Imaging-ToolKit/BItk
|
bitk/io/scanco.py
|
Python
|
gpl-3.0
| 12,859
|
[
"VTK"
] |
b3c726e8d2c43ff821fb533c2652291eb8647129b64a767b61b6bec15bc25c59
|
#coding:utf-8
import os, sys
import timeit
import numpy, math
import scipy.spatial.distance as sp
import common_functions
################################################
# Parameters
################################################
# define the default parameters
train = "DR1"
test = "DR2"
lesions = ["exsudato-duro","hemorragia-superficial","hemorragia-profunda","lesoes-vermelhas","mancha-algodonosa","drusas-maculares"]
techniquesLow = ["sparse","dense"]
techniquesMid = ["hard","soft"]
image = ""
# ShowOptions function
def showOptions():
print "-h : show options"
print "-train dataset : define the training dataset (default DR1)\n\tDR1 -- DR1 as the training dataset\n\tDR2 -- DR2 as the training dataset"
print "-test dataset : define test dataset (default DR2)\n\tDR1 -- DR1 as the test dataset\n\tDR2 -- DR2 as the test dataset"
print "-l lesion : define a specific DR-related lesion (default [exsudato-duro, hemorragia-superficial, hemorragia-profunda, lesoes-vermelhas, mancha-algodonosa, drusas-maculares)\n\texsudato-duro\t\t -- Hard Exudates\n\themorragia-superficial\t -- Superficial Hemorrhages\n\themorragia-profunda\t -- Deep Hemorrhages\n\tlesoes-vermelhas\t -- Red Lesions\n\tmancha-algodonosa\t -- Cotton-wool Spots\n\tdrusas-maculares\t -- Drusen"
print "-low technique : define a specific low-level technique (default [sparse, dense])\n\tsparse -- Sparse low-level feature extraction\n\tdense -- Dense low-level feature extraction"
print "-mid technique : define a specific mid-level technique (default [hard, soft])\n\thard -- Hard-Sum coding/pooling\n\tsoft -- Soft-Max coding/pooling"
print "-i image : define the image name (used only for cases where we are interested in describing only one image)"
quit()
# take the parameters
if len(sys.argv) > 1:
for i in range(1, len(sys.argv),2):
op = sys.argv[i]
if op == "-h": showOptions()
elif op == "-train": train = sys.argv[i+1]
elif op == "-test": test = sys.argv[i+1]
elif op == "-l": lesions = [sys.argv[i+1]]
elif op == "-low": techniquesLow = [sys.argv[i+1]]
elif op == "-mid": techniquesMid = [sys.argv[i+1]]
elif op == "-i": image = sys.argv[i+1]
################################################
################################################
# create directories
################################################
directory = "mid-level/"
for techniqueMid in techniquesMid:
for techniqueLow in techniquesLow:
for type in [train,test]:
for lesion in lesions:
if not os.path.exists(directory + techniqueLow + "/" + type + "/" + techniqueMid + "/" + lesion):
os.makedirs(directory + techniqueLow + "/" + type + "/" + techniqueMid + "/" + lesion)
################################################
################################################
# HARD-SUM
################################################
def hardSum(PoIs, Words, ArqOut, numeroPalavras, label):
ArqOut = open(ArqOut,"wb")
ArqOut.write(label + " ")
histograma = [0 for i in range(numeroPalavras)]
distMatrix = sp.cdist(PoIs, Words, 'euclidean') # first points, after codewords. Return a len(PoIs) x len(Words) matrix of distances
for i in range(len(PoIs)):
minimum = min(distMatrix[i])
ind = numpy.where(distMatrix[i]==minimum)[0][0]
histograma[ind] += 1
histograma = common_functions.l1norm(histograma)
for h in histograma:
ArqOut.write(str(h) + " ")
ArqOut.close()
################################################
################################################
# SOFT-MAX
################################################
def gaussiankernel(sigma, x):
return (1.0/(math.sqrt(sigma*2*math.pi)))*math.exp(-(x)**2/(2.0*sigma**2))
def softMax(PoIs, Words, ArqOut, numeroPalavras, label):
ArqOut = open(ArqOut,"wb")
ArqOut.write(label + " ")
# distances - Matriz n * V (número de pontos * número de palavras) que calculará apenas uma vez as distâncias
distances = sp.cdist(PoIs, Words, 'euclidean') # first points, after codewords. Return a len(PoIs) x len(Words) matrix of distances
distances = [ gaussiankernel(45.0, dist) for dist in numpy.reshape(distances, (1,distances.size))[0] ] # apply the gaussian kernel
distances = numpy.reshape(distances, (len(PoIs), len(Words))) # put again in the format len(PoIs) x len(Words)
# distToAll - Vetor que armazenará para cada ponto o somatório das distâncias para todas as palavras
distToAll = []
for point in distances:
distToAll.append(sum(point))
distToAll = numpy.asarray(distToAll)
features = []
distances = numpy.transpose(distances) # transpose. Format len(Words) x len(PoIs)
division = numpy.divide(distances, distToAll) # Equivalent to divide the distance of codeword i to PoI j by the summation of the distances of PoI j to all codewords
features = [ max(dist) for dist in division ] # get the maximum activation for each codeword
features = common_functions.l1norm(features)
for f in features:
ArqOut.write(str(f) + " ")
ArqOut.close()
################################################
################################################
# MAIN
################################################
en = dict(zip(["exsudato-duro","hemorragia-superficial","hemorragia-profunda","lesoes-vermelhas","mancha-algodonosa","drusas-maculares","imagem-normal"], ["Hard Exudates","Superficial Hemorrhages","Deep Hemorrhages","Red Lesions","Cotton-wool Spots","Drusen","Normal Images"]))
print "################################################"
print "# Mid-level feature extraction"
print "################################################"
for techniqueMid in techniquesMid:
for techniqueLow in techniquesLow:
if techniqueLow == "sparse": size = 500
else: size = 1500
for type in [train,test]:
for lesion in lesions:
print "Extracting features for " + en[lesion] + "\nLow-level: " + techniqueLow + "\nMid-level: " + techniqueMid
start = timeit.default_timer()
sys.stdout.write(". ")
sys.stdout.flush()
# get the codebook
CodebookTemp = open("codebooks/" + techniqueLow + "/complete-codebook-" + lesion + ".cb", "rb").readlines()
Codebook = []
for cb in CodebookTemp[1:]:
Codebook.append([ float(c) for c in cb.split() ])
Codebook = numpy.asarray(Codebook)
# define the directory of the input file (points of interest)
if image == "":
PoIsDir = "low-level/" + techniqueLow + "/" + type + "/"
else:
PoIsDir = "low-level/" + techniqueLow + "/DR2/"
# define the directory of the output file (histogram)
if image == "":
OutDir = "mid-level/" + techniqueLow + "/" + type + "/" + techniqueMid + "/" + lesion + "/"
else: # Interest in describing only one image
if not os.path.exists("mid-level/" + techniqueLow + "/DR2/" + techniqueMid + "/additional/" + lesion):
os.makedirs("mid-level/" + techniqueLow + "/DR2/" + techniqueMid + "/additional/" + lesion)
OutDir = "mid-level/" + techniqueLow + "/DR2/" + techniqueMid + "/additional/" + lesion + "/"
for label in ["+1","-1"]:
# describe the normal images
if label == "-1": lesion = "imagem-normal"
lesion_en = en[lesion]
if image == "":
if type == "DR2" and (lesion == "hemorragia-superficial" or lesion == "hemorragia-profunda"):
listImages = os.listdir("datasets/" + type + "-images-by-lesions/Red Lesions")
else:
listImages = os.listdir("datasets/" + type + "-images-by-lesions/" + lesion_en)
else: listImages = [image]
for im in listImages:
im_special = common_functions.specialName(im)
if os.path.exists(OutDir + im[:-3] + "hist"): continue
# define the output file (histogram)
OutFile = OutDir + im[:-3] + "hist"
f = open(OutFile,"wb")
# get the points of interest
PoIsTemp = open(PoIsDir + im[:-3] + "key","rb").readlines()
PoIs = []
for i in range(2,len(PoIsTemp),2):
PoIs.append([ float(p) for p in PoIsTemp[i].split() ])
PoIs = numpy.asarray(PoIs)
sys.stdout.write(". ")
sys.stdout.flush()
if techniqueMid == "hard":
hardSum(PoIs, Codebook, OutFile, size, label)
else: # techniqueMid == "soft":
softMax(PoIs, Codebook, OutFile, size, label)
stop = timeit.default_timer()
sys.stdout.write(" Done in " + common_functions.convertTime(stop - start) + "\n")
################################################
|
piresramon/pires.ramon.msc
|
source/mid_level_script.py
|
Python
|
gpl-3.0
| 8,459
|
[
"Gaussian"
] |
93364d079a00892c29a3e4c412835b2897329ecb23b477419fcefe42c2ddff64
|
#!/usr/bin/env python
'''
This tool installs the macports package management infrastructure in a
custom directory which allows you to create a USB or portable drive
with your favorite macports tools. It also makes removal easier because
no system directories are affected.
It has a couple of important features.
1. It automatically selects the latest release.
2. It automatically handles the case where port 873 (rsync) is
blocked by the firewall by changing the configuration to use
port 80 (HTTP).
3. It is isolated in its own directory tree.
Here is how you might use it:
$ # If this is not your first installation, grab the existing
$ # packages that you have installed.
$ port installed requested >/tmp/existing-pkgs.txt
$ # Install and capture the output to a log file (-t).
$ sudo ./mpinstall.py -t -b /tmp/macports -r /opt/macports
$ # Update your ~/.bashrc file.
$ cat >>~/.bashrc <<EOF
export MP_PATH="/opt/macports"
export PATH="${MP_PATH}/bin:${PATH}"
export MANPATH="${MP_PATH}/share/man:${MANPATH}"
EOF
$ source ~/.bashrc
$ # Run it.
$ port list
$ sudo port install org-server
$ # Clean up the build data.
$ sudo rm -rf /tmp/macports
$ # If this is not your first installation, reinstall
$ # the packages.
$ grep '^ ' /tmp/existing-pkgs.txt | grep '(active)' | awk '{print $1;}' | xargs -L 1 sudo port install
$ # If it is your first installation, install packages:
$ sudo port install htop
$ sudo port install nodejs
$ sudo port install wireshark
$ # Update periodically.
$ sudo port -v selfupdate # if rsync is not blocked
$ sudo port -v sync # if rsync is blocked
$ sudo port upgrade outdated
If you do not specify -b (build directory) or -r (release directory),
macports will be built and installed in the current directory. The
build data will be in the "bld" subdirectory. The release data will be
in the "rel" (release to field) subdirectory.
For more information about the macports project, visit
https://macports.org.
'''
# MIT License
#
# Copyright (c) 2015 Joe Linoff
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function
import argparse
import datetime
import logging
import os
import re
import shutil
import subprocess
import sys
import tarfile
import time
import urllib2
VERSION = '1.0'
class Tee(object):
    '''
    Duplicate stdout output into a log file.

    Anything written through a Tee instance goes to the real stdout
    and, while file writes are enabled, is also appended to the log
    file. The enable/disable switch is class-wide so transient output
    (e.g. progress bars) can be kept out of the log.
    '''
    # Class-wide switch controlling whether writes also hit the file.
    s_enable_file_writes = True

    def __init__(self, logfile):
        self.stdout = sys.stdout
        self.ofp = open(logfile, 'a')

    def write(self, msg):
        # Always echo to the terminal; only log when enabled.
        self.stdout.write(msg)
        if Tee.s_enable_file_writes:
            self.ofp.write(msg)
        self.flush()

    def flush(self):
        for stream in (self.stdout, self.ofp):
            stream.flush()

    @classmethod
    def disable_file_writes(cls):
        cls.s_enable_file_writes = False

    @classmethod
    def enable_file_writes(cls):
        cls.s_enable_file_writes = True
def __runcmd(opts, logger, cmd, show_output=True, exit_on_error=True):
    '''
    Execute a shell command with no inputs.
    Capture output and exit status.
    For long running commands, this implementation displays output
    information as it is captured.
    For fast running commands it would be better to use
    subprocess.check_output.

    Args:
        opts: parsed command line options (unused here, kept for a
            uniform call signature across the module).
        logger: logger used for status and error messages.
        cmd (str): shell command line, executed with shell=True.
        show_output (bool): echo output to stdout as it is captured.
        exit_on_error (bool): call sys.exit(1) if the command fails.

    Returns:
        (sts, out): process exit status and the captured output.
    '''
    logger.info('Running command: {0}'.format(cmd))
    # stderr is folded into stdout so a single stream captures everything.
    proc = subprocess.Popen(cmd,
                            shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    # Read the output 1 character at a time so that it can be
    # displayed in real time.
    # NOTE(review): this module targets Python 2 (see urllib2 import);
    # under Python 3 proc.stdout yields bytes and `out += char` would
    # fail — confirm before porting.
    out = ''
    # returncode is None until the child exits, so this loops until EOF
    # on the pipe or process completion.
    while not proc.returncode:
        char = proc.stdout.read(1)
        if not char:
            # all done, wait for returncode to get populated
            break
        else:
            out += char
            if show_output:
                sys.stdout.write(char)
                sys.stdout.flush()
    proc.wait()
    sts = proc.returncode
    if sts != 0:
        logger.error('Command failed with exit status {0}.'.format(sts))
        if exit_on_error:
            # If output was suppressed, dump it now to aid debugging.
            if show_output is False:
                sys.stdout.write(out)
            sys.exit(1)
    return sts, out
def runcmd(opts, logger, cmd):
    '''
    Execute a shell command with no inputs, echoing output.
    Exit the program if the command fails.

    Args:
        opts: parsed command line options (passed through).
        logger: logger for status and error messages.
        cmd (str): shell command line to run.

    Returns:
        (sts, out): exit status and captured output from __runcmd, for
        callers that want them (previously the result was discarded).
    '''
    return __runcmd(opts, logger, cmd, show_output=True, exit_on_error=True)
def init_logger(name):
    '''
    Initialize and return a stdout logger at INFO level.

    Args:
        name (str): logger name (the script base name).

    Returns:
        logging.Logger: the configured logger.
    '''
    logger = logging.getLogger(name)
    # Guard against adding a duplicate handler when this is called more
    # than once for the same name (would double every log line).
    if not logger.handlers:
        lch = logging.StreamHandler(stream=sys.stdout)
        fmt = '%(asctime)s %(levelname)-7s %(filename)s %(lineno)5d %(message)s'
        formatter = logging.Formatter(fmt)
        lch.setFormatter(formatter)
        logger.addHandler(lch)
    logger.setLevel(logging.INFO)
    return logger
def xcode_check(opts, logger):
    '''
    Verify that the Xcode command line tools are available.

    When the expected developer directory is not reported by
    xcode-select, attempt the install; afterwards run clang once so
    the user is prompted to accept the Xcode license if needed.
    '''
    _, out = __runcmd(opts, logger, 'sudo xcode-select -p',
                      show_output=True, exit_on_error=False)
    expected = '/Applications/Xcode.app/Contents/Developer'
    if expected in out:
        logger.info('Xcode installed.')
    else:
        logger.info('Could not find expected output: "{0}".'.format(expected))
        logger.info('Installing xcode.')
        runcmd(opts, logger, 'sudo xcode-select --install')
    # Make sure that the xcode license has been agreed to.
    runcmd(opts, logger, 'sudo clang --version')
def get_content_length(url):
    '''
    Return the Content-Length reported by the server for url.

    Returns 0 when no content-length header is present.
    '''
    pattern = re.compile(r'content-length:\s+(\d+)', flags=re.IGNORECASE)
    response = urllib2.urlopen(url)
    for header in response.info().headers:
        found = pattern.search(header)
        if found:
            return int(found.group(1))
    return 0
def get_all_pkgs(opts, logger):
    '''
    Return the list of (tarfile_name, url) release tuples available
    from the official release area, sorted so that the last entry is
    the latest release.
    '''
    logger.info('Get all releases from "{0}".'.format(opts.url))
    html = urllib2.urlopen(opts.url).read()
    href_re = re.compile(r'href="(\d+)\.(\d+)\.(\d+)/"')
    vermap = {}
    for line in html.split('\n'):
        found = href_re.search(line)
        if found is None:
            continue
        parts = found.groups()
        ver = '{0}.{1}.{2}'.format(*parts)
        # Zero-padded key so lexicographic sort orders versions numerically.
        key = '-'.join(p.zfill(5) for p in parts)
        tarfile_name = 'MacPorts-{0}.tar.bz2'.format(ver)
        vermap[key] = {'tarfile': tarfile_name,
                       'url': opts.url + ver + '/' + tarfile_name}
    return [(vermap[k]['tarfile'], vermap[k]['url'])
            for k in sorted(vermap, key=str.lower)]  # last one is the latest
def list_pkgs(opts, logger, pkgs):
    '''
    Print the available releases with their download sizes and URLs.
    '''
    logger.info('List available releases from "{0}".'.format(opts.url))
    for name, url in pkgs:
        print('{0:<10} {1:>10} {2}'.format(name, get_content_length(url), url))
    print('{0} items'.format(len(pkgs)))
def download(opts, logger, tarfile_name, url):
    '''
    Download the source tarball unless it already exists locally.

    Progress is printed to stdout but excluded from the Tee log file.

    Args:
        opts: parsed command line options (unused here, kept for a
            uniform call signature).
        logger: logger for status messages.
        tarfile_name (str): local file name to create.
        url (str): download URL for the tarball.
    '''
    if os.path.exists(tarfile_name) is False:
        # download the tar data and create the tar file
        clen = get_content_length(url)
        logger.info('Downloading "{0}".'.format(url))
        # Show progress as the data is downloaded.
        response = urllib2.urlopen(url)
        # NOTE(review): integer division under Python 2; if clen < 100
        # this reads 0 bytes per call and the loop never advances —
        # presumably the tarballs are always large, but confirm.
        chunk_size = clen / 100 # read 1% at a time.
        tardata = ''
        read = 0
        # Keep the progress-bar churn out of the log file.
        Tee.disable_file_writes()
        while read < clen:
            cdata = response.read(chunk_size)
            tardata += cdata
            read += len(cdata)
            per = 100. * float(read) / float(clen)
            # Backspace over the previous progress line, then rewrite it.
            sys.stdout.write('\b'*(32 * len(url)))
            sys.stdout.write('{0:>10} of {1} {2:5.1f}% {3}'.format(read, clen, per, url))
            sys.stdout.flush()
        # Erase the progress line completely before normal logging resumes.
        sys.stdout.write('\b'*(32 * len(url)))
        sys.stdout.write(' '*(32 * len(url)))
        sys.stdout.write('\b'*(32 * len(url)))
        Tee.enable_file_writes()
        logger.info('Read {0} bytes.'.format(len(tardata)))
        # Create the tar file.
        with open(tarfile_name, 'wb') as ofp:
            ofp.write(tardata)
    else:
        logger.info('Downloaded "{0}".'.format(tarfile_name))
def build(opts, logger, base, tarfile_name):
    '''
    Extract the source tarball and configure/build/install macports.

    Skips all work when the extracted directory already exists.
    '''
    if os.path.exists(base):
        logger.info('Already built "{0}".'.format(base))
        return
    # extract the tar contents
    logger.info('Extracting "{0}".'.format(tarfile_name))
    with tarfile.open(tarfile_name) as tar:
        tar.extractall()
    # build mac ports
    os.chdir(base)  # change the working directory
    logger.info('Changed working directory to "{0}".'.format(os.getcwd()))
    logger.info('Building "{0}".'.format(base))
    cmds = ['sudo find /Library/ -type f -name \'*macports*\' -delete',
            './configure --help > configure.help',
            './configure --prefix="{0}" --with-applications-dir={0}/Applications'.format(opts.reldir),
            'make',
            'sudo make install',
            ]
    for cmd in cmds:
        runcmd(opts, logger, cmd)
    os.chdir('..')  # change the working directory
    logger.info('Changed working directory to "{0}".'.format(os.getcwd()))
def update(opts, logger):
    '''
    Update the macports installation.

    Runs "port selfupdate"; when that fails (typically because rsync is
    blocked by a firewall), rewrites sources.conf to use http access
    and falls back to "port sync".

    Args:
        opts: parsed command line options; opts.reldir is the release
            (install) directory.
        logger: logger for status messages.

    Returns:
        str: the sync command the user should run for future updates
        ('sudo port -v selfupdate' or 'sudo port -v sync').
    '''
    logger.info('Updating mac ports.')
    # Setup the path so that commands like "port" work correctly.
    os.environ['PATH'] = os.path.join(opts.reldir, 'bin') + os.pathsep + os.environ['PATH']
    # BUGFIX: extend MANPATH with the previous MANPATH, not PATH; it
    # may also be unset, hence .get() with an empty default.
    os.environ['MANPATH'] = os.path.join(opts.reldir, 'share', 'man') + os.pathsep + os.environ.get('MANPATH', '')
    runcmd(opts, logger, 'which port')  # verify PATH setup
    sync_cmd = 'sudo port -v selfupdate'
    sts, out = __runcmd(opts, logger, sync_cmd, show_output=True, exit_on_error=False)
    if sts != 0:
        # The update failed: sudo port -v selfupdate.
        # This may be because you cannot run rsync from behind your firewall.
        # Automatically configure to run behind your firewall by using http access.
        logger.info('Rsync update failed. Rsync operations may be blocked. Trying another option.')
        conf = os.path.join(opts.reldir, 'etc', 'macports', 'sources.conf')
        orig = conf + '.orig'
        # Keep a pristine copy of sources.conf before rewriting it.
        if os.path.exists(orig) is False:
            runcmd(opts, logger, 'sudo cp -v {0} {1}'.format(conf, orig))
        runcmd(opts, logger, 'sudo chmod 0666 {0}'.format(conf))
        # Comment out the rsync: access line and insert the http access line.
        with open(conf, 'r') as ifp:
            data = ifp.read()
        updated = re.sub(r'^rsync:',
                        'http://distfiles.macports.org/ports.tar.gz [default]\n##rsync:',
                        data,
                        flags=re.MULTILINE)
        with open(conf, 'w') as ofp:
            ofp.write(updated)
        runcmd(opts, logger, 'sudo chmod 0644 {0}'.format(conf))
        sync_cmd = 'sudo port -v sync'
        runcmd(opts, logger, sync_cmd)
        logger.info('Alternative approach worked! You must use "sync" to update instead of "selfupdate".')
        logger.info('To allow the use of "selfupdate", open up port 873 for rsync on your firewall.')
    logger.info('Macports has successfully been installed in "{0}".'.format(opts.reldir))
    return sync_cmd
def alldone(opts, logger, sync_cmd):
    '''
    Print the final installation message with usage instructions.

    Args:
        opts: parsed options; opts.reldir and opts.blddir appear in
            the message.
        logger: unused, kept for a uniform call signature.
        sync_cmd (str): the update command reported by update().
    '''
    # BUGFIX: the message previously read 'run the "port: command' —
    # mismatched quote/colon typo corrected to 'run the "port" command'.
    # NOTE(review): 'org-server' below may be a typo for 'xorg-server'
    # — confirm before changing a user-visible example.
    sys.stdout.write('''
The macports installation has been successfully installed in
{1}.
To use it please update the PATH and MANPATH environment variables in
your ~/.bashrc file as follows:
export MP_PATH="{1}"
export PATH="${{MP_PATH}}/bin:${{PATH}}"
export MANPATH="${{MP_PATH}}/share/man:${{MANPATH}}"
Once that is done and you have sourced ~/.bashrc, you will be able to
run the "port" command directly.
$ source ~/.bashrc
$ port list
If that works you can start installing packages like this:
$ sudo port install org-server
You can update your installation like this:
$ {0}
To clean up the build data now that it is no longer needed:
$ sudo rm -rf {2}
To delete this installation simply remove the build, installation and
release areas as follows. Then remove the MP_PATH data from ~/.bashrc.
$ sudo rm -rf {1} {2}
'''.format(sync_cmd, opts.reldir, opts.blddir))
def install(opts, logger, pkgs):
    '''
    Install macports end to end: verify xcode, download the latest
    release tarball into the build area, build/install it, then update
    the port tree and print final instructions.

    Args:
        opts: parsed command line options (blddir, reldir, url).
        logger: logger for status messages.
        pkgs: list of (tarfile_name, url) tuples from get_all_pkgs();
            the last entry is the latest release.
    '''
    logger.info('Install macports.')
    xcode_check(opts, logger)
    # Get the configuration data.
    latest = pkgs[-1] # could be selectable but is that needed?
    tarfile_name = latest[0]
    # Strip the archive extension to get the extracted directory name.
    base = tarfile_name[:-len('.tar.bz2')]
    url = latest[1]
    logger.info(' Base : "{0}".'.format(base))
    logger.info(' BldDir : "{0}".'.format(opts.blddir))
    logger.info(' RelDir : "{0}".'.format(opts.reldir))
    logger.info(' TarFile : "{0}".'.format(tarfile_name))
    logger.info(' URL : "{0}".'.format(url))
    # create the installation (bld) area
    if os.path.exists(opts.blddir) is False:
        logger.info('Creating build directory tree: "{0}".'.format(opts.blddir))
        os.makedirs(opts.blddir)
    os.chdir(opts.blddir) # change the working directory
    logger.info('Changed working directory to "{0}".'.format(os.getcwd()))
    download(opts, logger, tarfile_name, url)
    build(opts, logger, base, tarfile_name)
    sync_cmd = update(opts, logger)
    alldone(opts, logger, sync_cmd)
def getopts():
    '''
    Parse the command line options and initialize logging.

    Returns:
        (opts, logger): the parsed argparse namespace (with blddir and
        reldir normalized to absolute paths) and the configured logger.
    '''
    base = os.path.basename(sys.argv[0])
    def usage():
        'usage'
        usage = '{0} [OPTIONS]'.format(base)
        return usage
    def epilog():
        'epilogue'
        epilog = r'''
examples:
$ # Example 1. Help
$ {0} -h
$ {0} --help
$ # Example 2. Build and install in the current directory
$ sudo {0}
$ sudo {0} -b ./bld -r ./rel
$ # Example 3. Build and install in a specific directory
$ sudo {0} -b /tmp/macports -r /opt/macports
$ sudo {0} --blddir /tmp/macports --reldir /opt/macports
$ # Example 4. Build and install in a specific directory,
$ # and capture everything in a log file.
$ sudo {0} -t -b /tmp/macports -r /opt/macports
$ sudo {0} --tee --blddir /opt/macports --reldir /opt/macports
$ ls -l {0}-*.log
'''.format(base)
        return epilog
    # Timestamped log file name, e.g. mpinstall-201501011200.log.
    now = datetime.datetime.now()
    dts = now.strftime('%Y%m%d%H%M')
    log = '{0}-{1}.log'.format(base[:base.find('.')], dts)
    afc = argparse.RawDescriptionHelpFormatter
    # Re-use the module docstring as the program description.
    desc = 'description:{0}'.format('\n'.join(__doc__.split('\n')))
    parser = argparse.ArgumentParser(formatter_class=afc,
                                     description=desc[:-2],
                                     usage=usage(),
                                     epilog=epilog())
    parser.add_argument('-b', '--blddir',
                        action='store',
                        type=str,
                        metavar=('DIR'),
                        default=os.path.join(os.path.abspath(os.getcwd()), 'bld'),
                        help='build directory (%(default)s)')
    parser.add_argument('-r', '--reldir',
                        action='store',
                        type=str,
                        metavar=('DIR'),
                        default=os.path.join(os.path.abspath(os.getcwd()), 'rel'),
                        help='release directory (%(default)s)')
    parser.add_argument('-t', '--tee',
                        action='store_true',
                        help='tee output to stdout and to a logfile named {0}'.format(log))
    # NOTE(review): action='version' normally requires a version=
    # argument; as written, -V may fail when invoked — confirm.
    parser.add_argument('-V', '--version',
                        action='version',
                        help='%(prog)s v{0}'.format(VERSION))
    parser.add_argument('-u', '--url',
                        action='store',
                        type=str,
                        default='http://iweb.dl.sourceforge.net/project/macports/MacPorts/',
                        help='macports download url (%(default)s)')
    opts = parser.parse_args()
    # Tee stdout into the log file when requested (must happen before
    # the logger is created so the logger's stream is the Tee).
    if opts.tee:
        sys.stdout = Tee(log)
    logger = init_logger(base)
    if opts.tee:
        logger.info('Logging to "{0}".'.format(log))
    # Normalize the directories to absolute paths.
    if os.path.isabs(opts.reldir) is False:
        opts.reldir = os.path.abspath(opts.reldir)
    if os.path.isabs(opts.blddir) is False:
        opts.blddir = os.path.abspath(opts.blddir)
    return opts, logger
def main():
    '''
    Program entry point.

    Parses options, enumerates the available macports releases, lists
    them, and installs the latest one.
    '''
    opts, logger = getopts()
    pkgs = get_all_pkgs(opts, logger)
    list_pkgs(opts, logger, pkgs)
    install(opts, logger, pkgs)
    logger.info('Done.')
# Script entry point guard: run main() only when executed directly.
if __name__ == '__main__':
    main()
|
jlinoff/mpinstall
|
mpinstall.py
|
Python
|
mit
| 18,426
|
[
"VisIt"
] |
05fe3af57af4706d08e8870269cbc8237e0c8b148d1e29e38eb1969642a4d9ec
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import numpy as np
from fractions import Fraction
from math import gcd, floor, cos
from functools import reduce
from pymatgen import Structure, Lattice
from pymatgen.core.sites import PeriodicSite
from monty.fractions import lcm
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
import itertools
import logging
import warnings
# This module implements representations of grain boundaries, as well as
# algorithms for generating them.
__author__ = "Xiang-Guo Li"
__copyright__ = "Copyright 2018, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Xiang-Guo Li"
__email__ = "xil110@ucsd.edu"
__date__ = "7/30/18"
logger = logging.getLogger(__name__)
class GrainBoundary(Structure):
    """
    Subclass of Structure representing a GrainBoundary (gb) object.
    Implements additional attributes pertaining to gbs, but the
    init method does not actually implement any algorithm that
    creates a gb. This is a DUMMY class whose init method only holds
    information about the gb. Also has additional methods that return
    other information about a gb such as sigma value.
    Note that all gbs have the gb surface normal oriented in the c-direction.
    This means the lattice vectors a and b are in the gb surface plane (at
    least for one grain) and the c vector is out of the surface plane
    (though not necessary perpendicular to the surface.)
    """
    def __init__(self, lattice, species, coords, rotation_axis, rotation_angle,
                 gb_plane, join_plane, init_cell, vacuum_thickness, ab_shift,
                 site_properties, oriented_unit_cell, validate_proximity=False,
                 coords_are_cartesian=False):
        """
        Makes a gb structure, a structure object with additional information
        and methods pertaining to gbs.
        Args:
            lattice (Lattice/3x3 array): The lattice, either as a
                :class:`pymatgen.core.lattice.Lattice` or
                simply as any 2D array. Each row should correspond to a lattice
                vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
                lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
            species ([Specie]): Sequence of species on each site. Can take in
                flexible input, including:
                i.  A sequence of element / specie specified either as string
                    symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
                    e.g., (3, 56, ...) or actual Element or Specie objects.
                ii. List of dict of elements/species and occupancies, e.g.,
                    [{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
                    disordered structures.
            coords (Nx3 array): list of fractional/cartesian coordinates of
                each species.
            rotation_axis (list): Rotation axis of GB in the form of a list of
                integers, e.g. [1, 1, 0].
            rotation_angle (float, in unit of degree): rotation angle of GB.
            gb_plane (list): Grain boundary plane in the form of a list of integers
                e.g.: [1, 2, 3].
            join_plane (list): Joining plane of the second grain in the form of a list of
                integers. e.g.: [1, 2, 3].
            init_cell (Structure): initial bulk structure to form the GB.
            site_properties (dict): Properties associated with the sites as a
                dict of sequences, The sequences have to be the same length as
                the atomic species and fractional_coords. For gb, you should
                have the 'grain_label' properties to classify the sites as 'top',
                'bottom', 'top_incident', or 'bottom_incident'.
            vacuum_thickness (float in angstrom): The thickness of vacuum inserted
                between two grains of the GB.
            ab_shift (list of float, in unit of crystal vector a, b): The relative
                shift along a, b vectors.
            oriented_unit_cell (Structure): oriented unit cell of the bulk init_cell.
                Help to accurate calculate the bulk properties that are consistent
                with gb calculations.
            validate_proximity (bool): Whether to check if there are sites
                that are less than 0.01 Ang apart. Defaults to False.
            coords_are_cartesian (bool): Set to True if you are providing
                coordinates in cartesian coordinates. Defaults to False.
        """
        # Store GB-specific metadata first, then delegate the actual
        # structure construction to the Structure base class.
        self.oriented_unit_cell = oriented_unit_cell
        self.rotation_axis = rotation_axis
        self.rotation_angle = rotation_angle
        self.gb_plane = gb_plane
        self.join_plane = join_plane
        self.init_cell = init_cell
        self.vacuum_thickness = vacuum_thickness
        self.ab_shift = ab_shift
        super().__init__(
            lattice, species, coords, validate_proximity=validate_proximity,
            coords_are_cartesian=coords_are_cartesian,
            site_properties=site_properties)

    def copy(self):
        """
        Convenience method to get a copy of the structure, with options to add
        site properties.
        Returns:
            A copy of the Structure, with optionally new site_properties and
            optionally sanitized.
        """
        return GrainBoundary(self.lattice, self.species_and_occu, self.frac_coords,
                             self.rotation_axis, self.rotation_angle, self.gb_plane,
                             self.join_plane, self.init_cell, self.vacuum_thickness,
                             self.ab_shift, self.site_properties, self.oriented_unit_cell)

    def get_sorted_structure(self, key=None, reverse=False):
        """
        Get a sorted copy of the structure. The parameters have the same
        meaning as in list.sort. By default, sites are sorted by the
        electronegativity of the species. Note that Slab has to override this
        because of the different __init__ args.
        Args:
            key: Specifies a function of one argument that is used to extract
                a comparison key from each list element: key=str.lower. The
                default value is None (compare the elements directly).
            reverse (bool): If set to True, then the list elements are sorted
                as if each comparison were reversed.
        """
        sites = sorted(self, key=key, reverse=reverse)
        s = Structure.from_sites(sites)
        # Rebuild a GrainBoundary so the GB metadata is preserved.
        return GrainBoundary(s.lattice, s.species_and_occu, s.frac_coords,
                             self.rotation_axis, self.rotation_angle, self.gb_plane,
                             self.join_plane, self.init_cell, self.vacuum_thickness,
                             self.ab_shift, self.site_properties, self.oriented_unit_cell)

    @property
    def sigma(self):
        """
        This method returns the sigma value of the gb, computed as the
        volume ratio of the oriented unit cell to the initial cell.
        If using 'quick_gen' to generate GB, this value is not valid.
        """
        return int(round(self.oriented_unit_cell.volume / self.init_cell.volume))

    @property
    def sigma_from_site_prop(self):
        """
        This method returns the sigma value of the gb from site properties
        (ratio of total sites to coincident sites).
        If the GB structure merged some atoms because they were too close to
        each other, this property will not work.
        """
        num_coi = 0
        if None in self.site_properties['grain_label']:
            raise RuntimeError('Site were merged, this property do not work')
        for tag in self.site_properties['grain_label']:
            if 'incident' in tag:
                num_coi += 1
        return int(round(self.num_sites / num_coi))

    @property
    def top_grain(self):
        """
        return the top grain (Structure) of the GB.
        """
        top_sites = []
        for i, tag in enumerate(self.site_properties['grain_label']):
            # matches both 'top' and 'top_incident'
            if 'top' in tag:
                top_sites.append(self.sites[i])
        return Structure.from_sites(top_sites)

    @property
    def bottom_grain(self):
        """
        return the bottom grain (Structure) of the GB.
        """
        bottom_sites = []
        for i, tag in enumerate(self.site_properties['grain_label']):
            # matches both 'bottom' and 'bottom_incident'
            if 'bottom' in tag:
                bottom_sites.append(self.sites[i])
        return Structure.from_sites(bottom_sites)

    @property
    def coincidents(self):
        """
        return a list of coincident sites.
        """
        coincident_sites = []
        for i, tag in enumerate(self.site_properties['grain_label']):
            if 'incident' in tag:
                coincident_sites.append(self.sites[i])
        return coincident_sites

    def __str__(self):
        """Return a human-readable multi-line summary of the GB."""
        comp = self.composition
        outs = [
            "Gb Summary (%s)" % comp.formula,
            "Reduced Formula: %s" % comp.reduced_formula,
            "Rotation axis: %s" % (self.rotation_axis,),
            "Rotation angle: %s" % (self.rotation_angle,),
            "GB plane: %s" % (self.gb_plane,),
            "Join plane: %s" % (self.join_plane,),
            "vacuum thickness: %s" % (self.vacuum_thickness,),
            "ab_shift: %s" % (self.ab_shift,), ]
        def to_s(x, rjust=10):
            # fixed-width float formatting for aligned columns
            return ("%0.6f" % x).rjust(rjust)
        outs.append("abc : " + " ".join([to_s(i) for i in self.lattice.abc]))
        outs.append("angles: " + " ".join([to_s(i) for i in self.lattice.angles]))
        outs.append("Sites ({i})".format(i=len(self)))
        for i, site in enumerate(self):
            outs.append(" ".join([str(i + 1), site.species_string, " ".join([to_s(j, 12) for j in site.frac_coords])]))
        return "\n".join(outs)

    def as_dict(self):
        """
        Returns:
            Dictionary representation of the GrainBoundary, including the
            GB metadata on top of the base Structure serialization.
        """
        d = super().as_dict()
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        d["init_cell"] = self.init_cell.as_dict()
        d["rotation_axis"] = self.rotation_axis
        d["rotation_angle"] = self.rotation_angle
        d["gb_plane"] = self.gb_plane
        d["join_plane"] = self.join_plane
        d["vacuum_thickness"] = self.vacuum_thickness
        d["ab_shift"] = self.ab_shift
        d["oriented_unit_cell"] = self.oriented_unit_cell.as_dict()
        return d

    @classmethod
    def from_dict(cls, d):
        """
        Reconstruct a GrainBoundary from the dict produced by as_dict().

        Args:
            d: dict representation.

        Returns:
            GrainBoundary object.
        """
        lattice = Lattice.from_dict(d["lattice"])
        sites = [PeriodicSite.from_dict(sd, lattice) for sd in d["sites"]]
        s = Structure.from_sites(sites)
        return GrainBoundary(
            lattice=lattice,
            species=s.species_and_occu, coords=s.frac_coords,
            rotation_axis=d["rotation_axis"],
            rotation_angle=d["rotation_angle"],
            gb_plane=d["gb_plane"],
            join_plane=d["join_plane"],
            init_cell=Structure.from_dict(d["init_cell"]),
            vacuum_thickness=d["vacuum_thickness"],
            ab_shift=d["ab_shift"],
            oriented_unit_cell=Structure.from_dict(d["oriented_unit_cell"]),
            site_properties=s.site_properties)
class GrainBoundaryGenerator:
"""
This class is to generate grain boundaries (GBs) from bulk
conventional cell (fcc, bcc can from the primitive cell), and works for Cubic,
Tetragonal, Orthorhombic, Rhombohedral, and Hexagonal systems.
It generate GBs from given parameters, which includes
GB plane, rotation axis, rotation angle.
This class works for any general GB, including twist, tilt and mixed GBs.
The three parameters, rotation axis, GB plane and rotation angle, are
sufficient to identify one unique GB. While sometimes, users may not be able
to tell what exactly rotation angle is but prefer to use sigma as an parameter,
this class also provides the function that is able to return all possible
rotation angles for a specific sigma value.
The same sigma value (with rotation axis fixed) can correspond to
multiple rotation angles.
Users can use structure matcher in pymatgen to get rid of the redundant structures.
"""
def __init__(self, initial_structure, symprec=0.1, angle_tolerance=1):
    """
    Args:
        initial_structure (Structure): Initial input structure. It can
            be conventional or primitive cell (primitive cell works for bcc and fcc).
            For fcc and bcc, using conventional cell can lead to a non-primitive
            grain boundary structure.
            This code supplies Cubic, Tetragonal, Orthorhombic, Rhombohedral, and
            Hexagonal systems.
        symprec (float): Tolerance for symmetry finding. Defaults to 0.1 (the value used
            in Materials Project), which is for structures with slight deviations
            from their proper atomic positions (e.g., structures relaxed with
            electronic structure codes).
            A smaller value of 0.01 is often used for properly refined
            structures with atoms in the proper symmetry coordinates.
            User should make sure the symmetry is what you want.
        angle_tolerance (float): Angle tolerance for symmetry finding.
    """
    analyzer = SpacegroupAnalyzer(initial_structure, symprec, angle_tolerance)
    # First letter of the lattice type, e.g. 'c' (cubic), 't' (tetragonal),
    # 'o' (orthorhombic), 'r' (rhombohedral), 'h' (hexagonal).
    self.lat_type = analyzer.get_lattice_type()[0]
    if (self.lat_type == 't'):
        # need to use the conventional cell for tetragonal
        initial_structure = analyzer.get_conventional_standard_structure()
        a, b, c = initial_structure.lattice.abc
        # c axis of tetragonal structure not in the third direction
        if abs(a - b) > symprec:
            # a == c, rotate b to the third direction
            if abs(a - c) < symprec:
                initial_structure.make_supercell([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
            # b == c, rotate a to the third direction
            else:
                initial_structure.make_supercell([[0, 1, 0], [0, 0, 1], [1, 0, 0]])
    elif (self.lat_type == 'h'):
        alpha, beta, gamma = initial_structure.lattice.angles
        # c axis is not in the third direction
        if (abs(gamma - 90) < angle_tolerance):
            # alpha = 120 or 60, rotate b, c to a, b vectors
            if (abs(alpha - 90) > angle_tolerance):
                initial_structure.make_supercell([[0, 1, 0], [0, 0, 1], [1, 0, 0]])
            # beta = 120 or 60, rotate c, a to a, b vectors
            elif (abs(beta - 90) > angle_tolerance):
                initial_structure.make_supercell([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
    elif (self.lat_type == 'r'):
        # need to use primitive cell for rhombohedra
        initial_structure = analyzer.get_primitive_standard_structure()
    elif (self.lat_type == 'o'):
        # need to use the conventional cell for orthorombic
        initial_structure = analyzer.get_conventional_standard_structure()
    # Possibly re-standardized structure used by the generator methods.
    self.initial_structure = initial_structure
def gb_from_parameters(self, rotation_axis, rotation_angle, expand_times=4, vacuum_thickness=0.0,
ab_shift=[0, 0], normal=False, ratio=None, plane=None, max_search=20,
tol_coi=1.e-8, rm_ratio=0.7, quick_gen=False):
"""
Args:
rotation_axis (list): Rotation axis of GB in the form of a list of integer
e.g.: [1, 1, 0]
rotation_angle (float, in unit of degree): rotation angle used to generate GB.
Make sure the angle is accurate enough. You can use the enum* functions
in this class to extract the accurate angle.
e.g.: The rotation angle of sigma 3 twist GB with the rotation axis
[1, 1, 1] and GB plane (1, 1, 1) can be 60.000000000 degree.
If you do not know the rotation angle, but know the sigma value, we have
provide the function get_rotation_angle_from_sigma which is able to return
all the rotation angles of sigma value you provided.
expand_times (int): The multiple times used to expand one unit grain to larger grain.
This is used to tune the grain length of GB to warrant that the two GBs in one
cell do not interact with each other. Default set to 4.
vacuum_thickness (float, in angstrom): The thickness of vacuum that you want to insert
between two grains of the GB. Default to 0.
ab_shift (list of float, in unit of a, b vectors of Gb): in plane shift of two grains
normal (logic):
determine if need to require the c axis of top grain (first transformation matrix)
perperdicular to the surface or not.
default to false.
ratio (list of integers):
lattice axial ratio.
For cubic system, ratio is not needed.
For tetragonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to none.
For orthorhombic system, ratio = [mu, lam, mv], list of three integers,
that is, mu:lam:mv = c2:b2:a2. If irrational for one axis, set it to None.
e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
For rhombohedral system, ratio = [mu, mv], list of two integers,
that is, mu/mv is the ratio of (1+2*cos(alpha))/cos(alpha).
If irrational, set it to None.
For hexagonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to none.
This code also supplies a class method to generate the ratio from the
structure (get_ratio). User can also make their own approximation and
input the ratio directly.
plane (list): Grain boundary plane in the form of a list of integers
e.g.: [1, 2, 3]. If none, we set it as twist GB. The plane will be perpendicular
to the rotation axis.
max_search (int): max search for the GB lattice vectors that give the smallest GB
lattice. If normal is true, also max search the GB c vector that perpendicular
to the plane. For complex GB, if you want to speed up, you can reduce this value.
But too small of this value may lead to error.
tol_coi (float): tolerance to find the coincidence sites. When making approximations to
the ratio needed to generate the GB, you probably need to increase this tolerance to
obtain the correct number of coincidence sites. To check the number of coincidence
sites are correct or not, you can compare the generated Gb object's sigma_from_site_prop
with enum* sigma values (what user expected by input).
rm_ratio (float): the criteria to remove the atoms which are too close with each other.
rm_ratio*bond_length of bulk system is the criteria of bond length, below which the atom
will be removed. Default to 0.7.
quick_gen (bool): whether to quickly generate a supercell, if set to true, no need to
find the smallest cell.
Returns:
Grain boundary structure (gb object).
"""
lat_type = self.lat_type
# if the initial structure is primitive cell in cubic system,
# calculate the transformation matrix from its conventional cell
# to primitive cell, basically for bcc and fcc systems.
trans_cry = np.eye(3)
if lat_type == 'c':
analyzer = SpacegroupAnalyzer(self.initial_structure)
convention_cell = analyzer.get_conventional_standard_structure()
vol_ratio = self.initial_structure.volume / convention_cell.volume
# bcc primitive cell, belong to cubic system
if abs(vol_ratio - 0.5) < 1.e-3:
trans_cry = np.array([[0.5, 0.5, -0.5], [-0.5, 0.5, 0.5], [0.5, -0.5, 0.5]])
logger.info("Make sure this is for cubic with bcc primitive cell")
# fcc primitive cell, belong to cubic system
elif abs(vol_ratio - 0.25) < 1.e-3:
trans_cry = np.array([[0.5, 0.5, 0], [0, 0.5, 0.5], [0.5, 0, 0.5]])
logger.info("Make sure this is for cubic with fcc primitive cell")
else:
logger.info("Make sure this is for cubic with conventional cell")
elif lat_type == 't':
logger.info("Make sure this is for tetragonal system")
if ratio is None:
logger.info('Make sure this is for irrational c2/a2')
elif len(ratio) != 2:
raise RuntimeError('Tetragonal system needs correct c2/a2 ratio')
elif lat_type == 'o':
logger.info('Make sure this is for orthorhombic system')
if ratio is None:
raise RuntimeError('CSL does not exist if all axial ratios are irrational '
'for an orthorhombic system')
elif len(ratio) != 3:
raise RuntimeError('Orthorhombic system needs correct c2:b2:a2 ratio')
elif lat_type == 'h':
logger.info('Make sure this is for hexagonal system')
if ratio is None:
logger.info('Make sure this is for irrational c2/a2')
elif len(ratio) != 2:
raise RuntimeError('Hexagonal system needs correct c2/a2 ratio')
elif lat_type == 'r':
logger.info('Make sure this is for rhombohedral system')
if ratio is None:
logger.info('Make sure this is for irrational (1+2*cos(alpha)/cos(alpha) ratio')
elif len(ratio) != 2:
raise RuntimeError('Rhombohedral system needs correct '
'(1+2*cos(alpha)/cos(alpha) ratio')
else:
raise RuntimeError('Lattice type not implemented. This code works for cubic, '
'tetragonal, orthorhombic, rhombehedral, hexagonal systems')
# transform four index notation to three index notation for hexagonal and rhombohedral
if len(rotation_axis) == 4:
u1 = rotation_axis[0]
v1 = rotation_axis[1]
w1 = rotation_axis[3]
if lat_type.lower() == 'h':
u = 2 * u1 + v1
v = 2 * v1 + u1
w = w1
rotation_axis = [u, v, w]
elif lat_type.lower() == 'r':
u = 2 * u1 + v1 + w1
v = v1 + w1 - u1
w = w1 - 2 * v1 - u1
rotation_axis = [u, v, w]
# make sure gcd(rotation_axis)==1
if reduce(gcd, rotation_axis) != 1:
rotation_axis = [int(round(x / reduce(gcd, rotation_axis))) for x in rotation_axis]
# transform four index notation to three index notation for plane
if plane is not None:
if len(plane) == 4:
u1 = plane[0]
v1 = plane[1]
w1 = plane[3]
plane = [u1, v1, w1]
# set the plane for grain boundary when plane is None.
if plane is None:
if lat_type.lower() == 'c':
plane = rotation_axis
else:
if lat_type.lower() == 'h':
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = ratio[0] / ratio[1]
metric = np.array([[1, -0.5, 0], [-0.5, 1, 0], [0, 0, c2_a2_ratio]])
elif lat_type.lower() == 'r':
if ratio is None:
cos_alpha = 0.5
else:
cos_alpha = 1.0 / (ratio[0] / ratio[1] - 2)
metric = np.array([[1, cos_alpha, cos_alpha], [cos_alpha, 1, cos_alpha],
[cos_alpha, cos_alpha, 1]])
elif lat_type.lower() == 't':
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = ratio[0] / ratio[1]
metric = np.array([[1, 0, 0], [0, 1, 0], [0, 0, c2_a2_ratio]])
elif lat_type.lower() == 'o':
for i in range(3):
if ratio[i] is None:
ratio[i] = 1
metric = np.array([[1, 0, 0], [0, ratio[1] / ratio[2], 0],
[0, 0, ratio[0] / ratio[2]]])
else:
raise RuntimeError('Lattice type has not implemented.')
plane = np.matmul(rotation_axis, metric)
fractions = [Fraction(x).limit_denominator() for x in plane]
least_mul = reduce(lcm, [f.denominator for f in fractions])
plane = [int(round(x * least_mul)) for x in plane]
if reduce(gcd, plane) != 1:
index = reduce(gcd, plane)
plane = [int(round(x / index)) for x in plane]
t1, t2 = self.get_trans_mat(r_axis=rotation_axis, angle=rotation_angle, normal=normal,
trans_cry=trans_cry, lat_type=lat_type, ratio=ratio,
surface=plane, max_search=max_search, quick_gen=quick_gen)
# find the join_plane
if lat_type.lower() != 'c':
if lat_type.lower() == 'h':
if ratio is None:
mu, mv = [1, 1]
else:
mu, mv = ratio
trans_cry1 = np.array([[1, 0, 0], [-0.5, np.sqrt(3.0) / 2.0, 0],
[0, 0, np.sqrt(mu / mv)]])
elif lat_type.lower() == 'r':
if ratio is None:
c2_a2_ratio = 1
else:
mu, mv = ratio
c2_a2_ratio = 3.0 / (2 - 6 * mv / mu)
trans_cry1 = np.array([[0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
[-0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
[0, -1 * np.sqrt(3.0) / 3.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)]])
else:
if lat_type.lower() == 't':
if ratio is None:
mu, mv = [1, 1]
else:
mu, mv = ratio
lam = mv
elif lat_type.lower() == 'o':
new_ratio = [1 if v is None else v for v in ratio]
mu, lam, mv = new_ratio
trans_cry1 = np.array([[1, 0, 0], [0, np.sqrt(lam / mv), 0], [0, 0, np.sqrt(mu / mv)]])
else:
trans_cry1 = trans_cry
grain_matrix = np.dot(t2, trans_cry1)
plane_init = np.cross(grain_matrix[0], grain_matrix[1])
if lat_type.lower() != 'c':
plane_init = np.dot(plane_init, trans_cry1.T)
join_plane = self.vec_to_surface(plane_init)
parent_structure = self.initial_structure.copy()
# calculate the bond_length in bulk system.
if len(parent_structure) == 1:
temp_str = parent_structure.copy()
temp_str.make_supercell([1, 1, 2])
distance = temp_str.distance_matrix
else:
distance = parent_structure.distance_matrix
bond_length = np.min(distance[np.nonzero(distance)])
# top grain
top_grain = fix_pbc(parent_structure * t1)
# obtain the smallest oriended cell
if normal and not quick_gen:
t_temp = self.get_trans_mat(r_axis=rotation_axis, angle=rotation_angle, normal=False,
trans_cry=trans_cry, lat_type=lat_type, ratio=ratio,
surface=plane, max_search=max_search)
oriended_unit_cell = fix_pbc(parent_structure * t_temp[0])
t_matrix = oriended_unit_cell.lattice.matrix
normal_v_plane = np.cross(t_matrix[0], t_matrix[1])
unit_normal_v = normal_v_plane / np.linalg.norm(normal_v_plane)
unit_ab_adjust = (t_matrix[2] - np.dot(unit_normal_v, t_matrix[2]) * unit_normal_v) \
/ np.dot(unit_normal_v, t_matrix[2])
else:
oriended_unit_cell = top_grain.copy()
unit_ab_adjust = 0.0
# bottom grain, using top grain's lattice matrix
bottom_grain = fix_pbc(parent_structure * t2, top_grain.lattice.matrix)
# label both grains with 'top','bottom','top_incident','bottom_incident'
n_sites = top_grain.num_sites
t_and_b = Structure(top_grain.lattice, top_grain.species + bottom_grain.species,
list(top_grain.frac_coords) + list(bottom_grain.frac_coords))
t_and_b_dis = t_and_b.lattice.get_all_distances(t_and_b.frac_coords[0:n_sites],
t_and_b.frac_coords[n_sites:n_sites * 2])
index_incident = np.nonzero(t_and_b_dis < np.min(t_and_b_dis) + tol_coi)
top_labels = []
for i in range(n_sites):
if i in index_incident[0]:
top_labels.append('top_incident')
else:
top_labels.append('top')
bottom_labels = []
for i in range(n_sites):
if i in index_incident[1]:
bottom_labels.append('bottom_incident')
else:
bottom_labels.append('bottom')
top_grain = Structure(Lattice(top_grain.lattice.matrix), top_grain.species,
top_grain.frac_coords, site_properties={'grain_label': top_labels})
bottom_grain = Structure(Lattice(bottom_grain.lattice.matrix), bottom_grain.species,
bottom_grain.frac_coords, site_properties={'grain_label': bottom_labels})
# expand both grains
top_grain.make_supercell([1, 1, expand_times])
bottom_grain.make_supercell([1, 1, expand_times])
top_grain = fix_pbc(top_grain)
bottom_grain = fix_pbc(bottom_grain)
# determine the top-grain location.
edge_b = 1.0 - max(bottom_grain.frac_coords[:, 2])
edge_t = 1.0 - max(top_grain.frac_coords[:, 2])
c_adjust = (edge_t - edge_b) / 2.0
# construct all species
all_species = []
all_species.extend([site.specie for site in bottom_grain])
all_species.extend([site.specie for site in top_grain])
half_lattice = top_grain.lattice
# calculate translation vector, perpendicular to the plane
normal_v_plane = np.cross(half_lattice.matrix[0], half_lattice.matrix[1])
unit_normal_v = normal_v_plane / np.linalg.norm(normal_v_plane)
translation_v = unit_normal_v * vacuum_thickness
# construct the final lattice
whole_matrix_no_vac = np.array(half_lattice.matrix)
whole_matrix_no_vac[2] = half_lattice.matrix[2] * 2
whole_matrix_with_vac = whole_matrix_no_vac.copy()
whole_matrix_with_vac[2] = whole_matrix_no_vac[2] + translation_v * 2
whole_lat = Lattice(whole_matrix_with_vac)
# construct the coords, move top grain with translation_v
all_coords = []
grain_labels = bottom_grain.site_properties['grain_label'] + top_grain.site_properties['grain_label']
for site in bottom_grain:
all_coords.append(site.coords)
for site in top_grain:
all_coords.append(site.coords + half_lattice.matrix[2] * (1 + c_adjust) +
unit_ab_adjust * np.linalg.norm(half_lattice.matrix[2] * (1 + c_adjust)) +
translation_v + ab_shift[0] * whole_matrix_with_vac[0] +
ab_shift[1] * whole_matrix_with_vac[1])
gb_with_vac = Structure(whole_lat, all_species, all_coords,
coords_are_cartesian=True,
site_properties={'grain_label': grain_labels})
# merge closer atoms. extract near gb atoms.
cos_c_norm_plane = np.dot(unit_normal_v, whole_matrix_with_vac[2]) / whole_lat.c
range_c_len = abs(bond_length / cos_c_norm_plane / whole_lat.c)
sites_near_gb = []
sites_away_gb = []
for site in gb_with_vac.sites:
if site.frac_coords[2] < range_c_len or site.frac_coords[2] > 1 - range_c_len \
or (site.frac_coords[2] > 0.5 - range_c_len and site.frac_coords[2] < 0.5 + range_c_len):
sites_near_gb.append(site)
else:
sites_away_gb.append(site)
if len(sites_near_gb) >= 1:
s_near_gb = Structure.from_sites(sites_near_gb)
s_near_gb.merge_sites(tol=bond_length * rm_ratio, mode='d')
all_sites = sites_away_gb + s_near_gb.sites
gb_with_vac = Structure.from_sites(all_sites)
return GrainBoundary(whole_lat, gb_with_vac.species, gb_with_vac.cart_coords, rotation_axis,
rotation_angle, plane, join_plane, self.initial_structure,
vacuum_thickness, ab_shift, site_properties=gb_with_vac.site_properties,
oriented_unit_cell=oriended_unit_cell,
coords_are_cartesian=True)
def get_ratio(self, max_denominator=5, index_none=None):
"""
find the axial ratio needed for GB generator input.
Args:
max_denominator (int): the maximum denominator for
the computed ratio, default to be 5.
index_none (int): specify the irrational axis.
0-a, 1-b, 2-c. Only may be needed for orthorombic system.
Returns:
axial ratio needed for GB generator (list of integers).
"""
structure = self.initial_structure
lat_type = self.lat_type
if lat_type == 't' or lat_type == 'h':
# For tetragonal and hexagonal system, ratio = c2 / a2.
a, c = (structure.lattice.a, structure.lattice.c)
if c > a:
frac = Fraction(c ** 2 / a ** 2).limit_denominator(max_denominator)
ratio = [frac.numerator, frac.denominator]
else:
frac = Fraction(a ** 2 / c ** 2).limit_denominator(max_denominator)
ratio = [frac.denominator, frac.numerator]
elif lat_type == 'r':
# For rhombohedral system, ratio = (1 + 2 * cos(alpha)) / cos(alpha).
cos_alpha = cos(structure.lattice.alpha / 180 * np.pi)
frac = Fraction((1 + 2 * cos_alpha) / cos_alpha).limit_denominator(max_denominator)
ratio = [frac.numerator, frac.denominator]
elif lat_type == 'o':
# For orthorhombic system, ratio = c2:b2:a2.If irrational for one axis, set it to None.
ratio = [None] * 3
lat = (structure.lattice.c, structure.lattice.b, structure.lattice.a)
index = [0, 1, 2]
if index_none is None:
min_index = np.argmin(lat)
index.pop(min_index)
frac1 = Fraction(lat[index[0]] ** 2 / lat[min_index] ** 2).limit_denominator(max_denominator)
frac2 = Fraction(lat[index[1]] ** 2 / lat[min_index] ** 2).limit_denominator(max_denominator)
com_lcm = lcm(frac1.denominator, frac2.denominator)
ratio[min_index] = com_lcm
ratio[index[0]] = frac1.numerator * int(round((com_lcm / frac1.denominator)))
ratio[index[1]] = frac2.numerator * int(round((com_lcm / frac2.denominator)))
else:
index.pop(index_none)
if (lat[index[0]] > lat[index[1]]):
frac = Fraction(lat[index[0]] ** 2 / lat[index[1]] ** 2).limit_denominator(max_denominator)
ratio[index[0]] = frac.numerator
ratio[index[1]] = frac.denominator
else:
frac = Fraction(lat[index[1]] ** 2 / lat[index[0]] ** 2).limit_denominator(max_denominator)
ratio[index[1]] = frac.numerator
ratio[index[0]] = frac.denominator
elif lat_type == 'c':
raise RuntimeError('Cubic system does not need axial ratio.')
else:
raise RuntimeError('Lattice type not implemented.')
return ratio
@staticmethod
def get_trans_mat(r_axis, angle, normal=False, trans_cry=np.eye(3), lat_type='c',
ratio=None, surface=None, max_search=20, quick_gen=False):
"""
Find the two transformation matrix for each grain from given rotation axis,
GB plane, rotation angle and corresponding ratio (see explanation for ratio
below).
The structure of each grain can be obtained by applying the corresponding
transformation matrix to the conventional cell.
The algorithm for this code is from reference, Acta Cryst, A32,783(1976).
Args:
r_axis (list of three integers, e.g. u, v, w
or four integers, e.g. u, v, t, w for hex/rho system only):
the rotation axis of the grain boundary.
angle (float, in unit of degree) :
the rotation angle of the grain boundary
normal (logic):
determine if need to require the c axis of one grain associated with
the first transformation matrix perperdicular to the surface or not.
default to false.
trans_cry (3 by 3 array):
if the structure given are primitive cell in cubic system, e.g.
bcc or fcc system, trans_cry is the transformation matrix from its
conventional cell to the primitive cell.
lat_type ( one character):
'c' or 'C': cubic system
't' or 'T': tetragonal system
'o' or 'O': orthorhombic system
'h' or 'H': hexagonal system
'r' or 'R': rhombohedral system
default to cubic system
ratio (list of integers):
lattice axial ratio.
For cubic system, ratio is not needed.
For tetragonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to none.
For orthorhombic system, ratio = [mu, lam, mv], list of three integers,
that is, mu:lam:mv = c2:b2:a2. If irrational for one axis, set it to None.
e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
For rhombohedral system, ratio = [mu, mv], list of two integers,
that is, mu/mv is the ratio of (1+2*cos(alpha)/cos(alpha).
If irrational, set it to None.
For hexagonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to none.
surface (list of three integers, e.g. h, k, l
or four integers, e.g. h, k, i, l for hex/rho system only):
the miller index of grain boundary plane, with the format of [h,k,l]
if surface is not given, the default is perpendicular to r_axis, which is
a twist grain boundary.
max_search (int): max search for the GB lattice vectors that give the smallest GB
lattice. If normal is true, also max search the GB c vector that perpendicular
to the plane.
quick_gen (bool): whether to quickly generate a supercell, if set to true, no need to
find the smallest cell.
Returns:
t1 (3 by 3 integer array):
The transformation array for one grain.
t2 (3 by 3 integer array):
The transformation array for the other grain
"""
# transform four index notation to three index notation
if len(r_axis) == 4:
u1 = r_axis[0]
v1 = r_axis[1]
w1 = r_axis[3]
if lat_type.lower() == 'h':
u = 2 * u1 + v1
v = 2 * v1 + u1
w = w1
r_axis = [u, v, w]
elif lat_type.lower() == 'r':
u = 2 * u1 + v1 + w1
v = v1 + w1 - u1
w = w1 - 2 * v1 - u1
r_axis = [u, v, w]
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
if surface is not None:
if len(surface) == 4:
u1 = surface[0]
v1 = surface[1]
w1 = surface[3]
surface = [u1, v1, w1]
# set the surface for grain boundary.
if surface is None:
if lat_type.lower() == 'c':
surface = r_axis
else:
if lat_type.lower() == 'h':
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = ratio[0] / ratio[1]
metric = np.array([[1, -0.5, 0], [-0.5, 1, 0], [0, 0, c2_a2_ratio]])
elif lat_type.lower() == 'r':
if ratio is None:
cos_alpha = 0.5
else:
cos_alpha = 1.0 / (ratio[0] / ratio[1] - 2)
metric = np.array([[1, cos_alpha, cos_alpha], [cos_alpha, 1, cos_alpha],
[cos_alpha, cos_alpha, 1]])
elif lat_type.lower() == 't':
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = ratio[0] / ratio[1]
metric = np.array([[1, 0, 0], [0, 1, 0], [0, 0, c2_a2_ratio]])
elif lat_type.lower() == 'o':
for i in range(3):
if ratio[i] is None:
ratio[i] = 1
metric = np.array([[1, 0, 0], [0, ratio[1] / ratio[2], 0],
[0, 0, ratio[0] / ratio[2]]])
else:
raise RuntimeError('Lattice type has not implemented.')
surface = np.matmul(r_axis, metric)
fractions = [Fraction(x).limit_denominator() for x in surface]
least_mul = reduce(lcm, [f.denominator for f in fractions])
surface = [int(round(x * least_mul)) for x in surface]
if reduce(gcd, surface) != 1:
index = reduce(gcd, surface)
surface = [int(round(x / index)) for x in surface]
if lat_type.lower() == 'h':
# set the value for u,v,w,mu,mv,m,n,d,x
# check the reference for the meaning of these parameters
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if ratio is None:
mu, mv = [1, 1]
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError('For irrational c2/a2, CSL only exist for [0,0,1] '
'or [u,v,0] and m = 0')
else:
mu, mv = ratio
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
d = (u ** 2 + v ** 2 - u * v) * mv + w ** 2 * mu
if abs(angle - 180.0) < 1.e0:
m = 0
n = 1
else:
fraction = Fraction(np.tan(angle / 2 / 180.0 * np.pi) /
np.sqrt(float(d) / 3.0 / mu)).limit_denominator()
m = fraction.denominator
n = fraction.numerator
# construct the rotation matrix, check reference for details
r_list = [(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 +
2 * w * mu * m * n + 3 * mu * m ** 2,
(2 * v - u) * u * mv * n ** 2 - 4 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * (2 * v - u) * mu * m * n,
(2 * u - v) * v * mv * n ** 2 + 4 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 -
2 * w * mu * m * n + 3 * mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * (2 * u - v) * mu * m * n,
(2 * u - v) * w * mv * n ** 2 - 3 * v * mv * m * n,
(2 * v - u) * w * mv * n ** 2 + 3 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv + u * v * mv) *
n ** 2 + 3 * mu * m ** 2]
m = -1 * m
r_list_inv = [(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 +
2 * w * mu * m * n + 3 * mu * m ** 2,
(2 * v - u) * u * mv * n ** 2 - 4 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * (2 * v - u) * mu * m * n,
(2 * u - v) * v * mv * n ** 2 + 4 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 -
2 * w * mu * m * n + 3 * mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * (2 * u - v) * mu * m * n,
(2 * u - v) * w * mv * n ** 2 - 3 * v * mv * m * n,
(2 * v - u) * w * mv * n ** 2 + 3 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv + u * v * mv) *
n ** 2 + 3 * mu * m ** 2]
m = -1 * m
F = 3 * mu * m ** 2 + d * n ** 2
all_list = r_list + r_list_inv + [F]
com_fac = reduce(gcd, all_list)
sigma = F / com_fac
r_matrix = (np.array(r_list) / com_fac / sigma).reshape(3, 3)
elif lat_type.lower() == 'r':
# set the value for u,v,w,mu,mv,m,n,d
# check the reference for the meaning of these parameters
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if ratio is None:
mu, mv = [1, 1]
if u + v + w != 0:
if u != v or u != w:
raise RuntimeError('For irrational ratio_alpha, CSL only exist for [1,1,1]'
'or [u, v, -(u+v)] and m =0')
else:
mu, mv = ratio
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
d = (u ** 2 + v ** 2 + w ** 2) * (mu - 2 * mv) + \
2 * mv * (v * w + w * u + u * v)
if abs(angle - 180.0) < 1.e0:
m = 0
n = 1
else:
fraction = Fraction(np.tan(angle / 2 / 180.0 * np.pi) /
np.sqrt(float(d) / mu)).limit_denominator()
m = fraction.denominator
n = fraction.numerator
# construct the rotation matrix, check reference for details
r_list = [(mu - 2 * mv) * (u ** 2 - v ** 2 - w ** 2) * n ** 2 +
2 * mv * (v - w) * m * n - 2 * mv * v * w * n ** 2 +
mu * m ** 2,
2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) *
m * w * n + (mu - 2 * mv) * u * v * n ** 2),
2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) *
m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) *
m * w * n + (mu - 2 * mv) * u * v * n ** 2),
(mu - 2 * mv) * (v ** 2 - w ** 2 - u ** 2) * n ** 2 +
2 * mv * (w - u) * m * n - 2 * mv * u * w * n ** 2 +
mu * m ** 2,
2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) *
m * u * n + (mu - 2 * mv) * w * v * n ** 2),
2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) *
m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) *
m * u * n + (mu - 2 * mv) * w * v * n ** 2),
(mu - 2 * mv) * (w ** 2 - u ** 2 - v ** 2) * n ** 2 +
2 * mv * (u - v) * m * n - 2 * mv * u * v * n ** 2 +
mu * m ** 2]
m = -1 * m
r_list_inv = [(mu - 2 * mv) * (u ** 2 - v ** 2 - w ** 2) * n ** 2 +
2 * mv * (v - w) * m * n - 2 * mv * v * w * n ** 2 +
mu * m ** 2,
2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) *
m * w * n + (mu - 2 * mv) * u * v * n ** 2),
2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) *
m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) *
m * w * n + (mu - 2 * mv) * u * v * n ** 2),
(mu - 2 * mv) * (v ** 2 - w ** 2 - u ** 2) * n ** 2 +
2 * mv * (w - u) * m * n - 2 * mv * u * w * n ** 2 +
mu * m ** 2,
2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) *
m * u * n + (mu - 2 * mv) * w * v * n ** 2),
2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) *
m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) *
m * u * n + (mu - 2 * mv) * w * v * n ** 2),
(mu - 2 * mv) * (w ** 2 - u ** 2 - v ** 2) * n ** 2 +
2 * mv * (u - v) * m * n - 2 * mv * u * v * n ** 2 +
mu * m ** 2]
m = -1 * m
F = mu * m ** 2 + d * n ** 2
all_list = r_list_inv + r_list + [F]
com_fac = reduce(gcd, all_list)
sigma = F / com_fac
r_matrix = (np.array(r_list) / com_fac / sigma).reshape(3, 3)
else:
u, v, w = r_axis
if lat_type.lower() == 'c':
mu = 1
lam = 1
mv = 1
elif lat_type.lower() == 't':
if ratio is None:
mu, mv = [1, 1]
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError('For irrational c2/a2, CSL only exist for [0,0,1] '
'or [u,v,0] and m = 0')
else:
mu, mv = ratio
lam = mv
elif lat_type.lower() == 'o':
if None in ratio:
mu, lam, mv = ratio
non_none = [i for i in ratio if i is not None]
if len(non_none) < 2:
raise RuntimeError('No CSL exist for two irrational numbers')
non1, non2 = non_none
if mu is None:
lam = non1
mv = non2
mu = 1
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError('For irrational c2, CSL only exist for [0,0,1] '
'or [u,v,0] and m = 0')
elif lam is None:
mu = non1
mv = non2
lam = 1
if v != 0:
if u != 0 or (w != 0):
raise RuntimeError('For irrational b2, CSL only exist for [0,1,0] '
'or [u,0,w] and m = 0')
elif mv is None:
mu = non1
lam = non2
mv = 1
if u != 0:
if w != 0 or (v != 0):
raise RuntimeError('For irrational a2, CSL only exist for [1,0,0] '
'or [0,v,w] and m = 0')
else:
mu, lam, mv = ratio
if u == 0 and v == 0:
mu = 1
if u == 0 and w == 0:
lam = 1
if v == 0 and w == 0:
mv = 1
# make sure mu, lambda, mv are coprime integers.
if reduce(gcd, [mu, lam, mv]) != 1:
temp = reduce(gcd, [mu, lam, mv])
mu = int(round(mu / temp))
mv = int(round(mv / temp))
lam = int(round(lam / temp))
d = (mv * u ** 2 + lam * v ** 2) * mv + w ** 2 * mu * mv
if abs(angle - 180.0) < 1.e0:
m = 0
n = 1
else:
fraction = Fraction(np.tan(angle / 2 / 180.0 * np.pi) /
np.sqrt(d / mu / lam)).limit_denominator()
m = fraction.denominator
n = fraction.numerator
r_list = [(u ** 2 * mv * mv - lam * v ** 2 * mv -
w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * lam * (v * u * mv * n ** 2 - w * mu * m * n),
2 * mu * (u * w * mv * n ** 2 + v * lam * m * n),
2 * mv * (u * v * mv * n ** 2 + w * mu * m * n),
(v ** 2 * mv * lam - u ** 2 * mv * mv -
w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * mv * mu * (v * w * n ** 2 - u * m * n),
2 * mv * (u * w * mv * n ** 2 - v * lam * m * n),
2 * lam * mv * (v * w * n ** 2 + u * m * n),
(w ** 2 * mu * mv - u ** 2 * mv * mv -
v ** 2 * mv * lam) * n ** 2 + lam * mu * m ** 2]
m = -1 * m
r_list_inv = [(u ** 2 * mv * mv - lam * v ** 2 * mv -
w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * lam * (v * u * mv * n ** 2 - w * mu * m * n),
2 * mu * (u * w * mv * n ** 2 + v * lam * m * n),
2 * mv * (u * v * mv * n ** 2 + w * mu * m * n),
(v ** 2 * mv * lam - u ** 2 * mv * mv -
w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * mv * mu * (v * w * n ** 2 - u * m * n),
2 * mv * (u * w * mv * n ** 2 - v * lam * m * n),
2 * lam * mv * (v * w * n ** 2 + u * m * n),
(w ** 2 * mu * mv - u ** 2 * mv * mv -
v ** 2 * mv * lam) * n ** 2 + lam * mu * m ** 2]
m = -1 * m
F = mu * lam * m ** 2 + d * n ** 2
all_list = r_list + r_list_inv + [F]
com_fac = reduce(gcd, all_list)
sigma = F / com_fac
r_matrix = (np.array(r_list) / com_fac / sigma).reshape(3, 3)
if (sigma > 1000):
raise RuntimeError('Sigma >1000 too large. Are you sure what you are doing, '
'Please check the GB if exist')
# transform surface, r_axis, r_matrix in terms of primitive lattice
surface = np.matmul(surface, np.transpose(trans_cry))
fractions = [Fraction(x).limit_denominator() for x in surface]
least_mul = reduce(lcm, [f.denominator for f in fractions])
surface = [int(round(x * least_mul)) for x in surface]
if reduce(gcd, surface) != 1:
index = reduce(gcd, surface)
surface = [int(round(x / index)) for x in surface]
r_axis = np.rint(np.matmul(r_axis, np.linalg.inv(trans_cry))).astype(int)
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
r_matrix = np.dot(np.dot(np.linalg.inv(trans_cry.T), r_matrix), trans_cry.T)
# set one vector of the basis to the rotation axis direction, and
# obtain the corresponding transform matrix
eye = np.eye(3, dtype=np.int)
for h in range(3):
if abs(r_axis[h]) != 0:
eye[h] = np.array(r_axis)
k = h + 1 if h + 1 < 3 else abs(2 - h)
l = h + 2 if h + 2 < 3 else abs(1 - h)
break
trans = eye.T
new_rot = np.array(r_matrix)
# with the rotation matrix to construct the CSL lattice, check reference for details
fractions = [Fraction(x).limit_denominator() for x in new_rot[:, k]]
least_mul = reduce(lcm, [f.denominator for f in fractions])
scale = np.zeros((3, 3))
scale[h, h] = 1
scale[k, k] = least_mul
scale[l, l] = sigma / least_mul
for i in range(least_mul):
check_int = i * new_rot[:, k] + (sigma / least_mul) * new_rot[:, l]
if all([np.round(x, 5).is_integer() for x in list(check_int)]):
n_final = i
break
try:
n_final
except NameError:
raise RuntimeError('Something is wrong. Check if this GB exists or not')
scale[k, l] = n_final
# each row of mat_csl is the CSL lattice vector
csl_init = np.rint(np.dot(np.dot(r_matrix, trans), scale)).astype(int).T
if abs(r_axis[h]) > 1:
csl_init = GrainBoundaryGenerator.reduce_mat(np.array(csl_init), r_axis[h], r_matrix)
csl = np.rint(Lattice(csl_init).get_niggli_reduced_lattice().matrix).astype(int)
# find the best slab supercell in terms of the conventional cell from the csl lattice,
# which is the transformation matrix
# now trans_cry is the transformation matrix from crystal to cartesian coordinates.
# for cubic, do not need to change.
if lat_type.lower() != 'c':
if lat_type.lower() == 'h':
trans_cry = np.array([[1, 0, 0], [-0.5, np.sqrt(3.0) / 2.0, 0],
[0, 0, np.sqrt(mu / mv)]])
elif lat_type.lower() == 'r':
if ratio is None:
c2_a2_ratio = 1
else:
c2_a2_ratio = 3.0 / (2 - 6 * mv / mu)
trans_cry = np.array([[0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
[-0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
[0, -1 * np.sqrt(3.0) / 3.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)]])
else:
trans_cry = np.array([[1, 0, 0], [0, np.sqrt(lam / mv), 0], [0, 0, np.sqrt(mu / mv)]])
t1_final = GrainBoundaryGenerator.slab_from_csl(csl, surface, normal, trans_cry, max_search=max_search,
quick_gen=quick_gen)
t2_final = np.array(np.rint(np.dot(t1_final, np.linalg.inv(r_matrix.T)))).astype(int)
return t1_final, t2_final
@staticmethod
def enum_sigma_cubic(cutoff, r_axis):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in cubic system.
The algorithm for this code is from reference, Acta Cryst, A40,108(1984)
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angles of one grain respect to
the other grain.
When generate the microstructures of the grain boundary using these angles,
you need to analyze the symmetry of the structure. Different angles may
result in equivalent microstructures.
"""
sigmas = {}
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
# count the number of odds in r_axis
odd_r = len(list(filter(lambda x: x % 2 == 1, r_axis)))
# Compute the max n we need to enumerate.
if odd_r == 3:
a_max = 4
elif odd_r == 0:
a_max = 1
else:
a_max = 2
n_max = int(np.sqrt(cutoff * a_max / sum(np.array(r_axis) ** 2)))
# enumerate all possible n, m to give possible sigmas within the cutoff.
for n_loop in range(1, n_max + 1):
n = n_loop
m_max = int(np.sqrt(cutoff * a_max - n ** 2 * sum(np.array(r_axis) ** 2)))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
if m == 0:
n = 1
else:
n = n_loop
# construct the quadruple [m, U,V,W], count the number of odds in
# quadruple to determine the parameter a, refer to the reference
quadruple = [m] + [x * n for x in r_axis]
odd_qua = len(list(filter(lambda x: x % 2 == 1, quadruple)))
if odd_qua == 4:
a = 4
elif odd_qua == 2:
a = 2
else:
a = 1
sigma = int(round((m ** 2 + n ** 2 * sum(np.array(r_axis) ** 2)) / a))
if (sigma <= cutoff) and (sigma > 1):
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n * np.sqrt(sum(np.array(r_axis) ** 2)) / m) \
/ np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n * np.sqrt(sum(np.array(r_axis) ** 2)) / m) \
/ np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
return sigmas
@staticmethod
def enum_sigma_hex(cutoff, r_axis, c2_a2_ratio):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in hexagonal system.
The algorithm for this code is from reference, Acta Cryst, A38,550(1982)
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w
or four integers, e.g. u, v, t, w):
the rotation axis of the grain boundary.
c2_a2_ratio (list of two integers, e.g. mu, mv):
mu/mv is the square of the hexagonal axial ratio, which is rational
number. If irrational, set c2_a2_ratio = None
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angle of one grain respect to the
other grain.
When generate the microstructure of the grain boundary using these
angles, you need to analyze the symmetry of the structure. Different
angles may result in equivalent microstructures.
"""
sigmas = {}
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
# transform four index notation to three index notation
if len(r_axis) == 4:
u1 = r_axis[0]
v1 = r_axis[1]
w1 = r_axis[3]
u = 2 * u1 + v1
v = 2 * v1 + u1
w = w1
else:
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if c2_a2_ratio is None:
mu, mv = [1, 1]
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError('For irrational c2/a2, CSL only exist for [0,0,1] '
'or [u,v,0] and m = 0')
else:
mu, mv = c2_a2_ratio
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
# refer to the meaning of d in reference
d = (u ** 2 + v ** 2 - u * v) * mv + w ** 2 * mu
# Compute the max n we need to enumerate.
n_max = int(np.sqrt((cutoff * 12 * mu * mv) / abs(d)))
# Enumerate all possible n, m to give possible sigmas within the cutoff.
for n in range(1, n_max + 1):
if (c2_a2_ratio is None) and w == 0:
m_max = 0
else:
m_max = int(np.sqrt((cutoff * 12 * mu * mv - n ** 2 * d) / (3 * mu)))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
# construct the rotation matrix, refer to the reference
R_list = [(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 +
2 * w * mu * m * n + 3 * mu * m ** 2,
(2 * v - u) * u * mv * n ** 2 - 4 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * (2 * v - u) * mu * m * n,
(2 * u - v) * v * mv * n ** 2 + 4 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 -
2 * w * mu * m * n + 3 * mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * (2 * u - v) * mu * m * n,
(2 * u - v) * w * mv * n ** 2 - 3 * v * mv * m * n,
(2 * v - u) * w * mv * n ** 2 + 3 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv + u * v * mv) *
n ** 2 + 3 * mu * m ** 2]
m = -1 * m
# inverse of the rotation matrix
R_list_inv = [(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 +
2 * w * mu * m * n + 3 * mu * m ** 2,
(2 * v - u) * u * mv * n ** 2 - 4 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * (2 * v - u) * mu * m * n,
(2 * u - v) * v * mv * n ** 2 + 4 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 -
2 * w * mu * m * n + 3 * mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * (2 * u - v) * mu * m * n,
(2 * u - v) * w * mv * n ** 2 - 3 * v * mv * m * n,
(2 * v - u) * w * mv * n ** 2 + 3 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv + u * v * mv) *
n ** 2 + 3 * mu * m ** 2]
m = -1 * m
F = 3 * mu * m ** 2 + d * n ** 2
all_list = R_list_inv + R_list + [F]
# Compute the max common factors for the elements of the rotation matrix
# and its inverse.
com_fac = reduce(gcd, all_list)
sigma = int(round((3 * mu * m ** 2 + d * n ** 2) / com_fac))
if (sigma <= cutoff) and (sigma > 1):
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / 3.0 / mu)) \
/ np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / 3.0 / mu)) \
/ np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
if m_max == 0:
break
return sigmas
@staticmethod
def enum_sigma_rho(cutoff, r_axis, ratio_alpha):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in rhombohedral system.
The algorithm for this code is from reference, Acta Cryst, A45,505(1989).
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w
or four integers, e.g. u, v, t, w):
the rotation axis of the grain boundary, with the format of [u,v,w]
or Weber indices [u, v, t, w].
ratio_alpha (list of two integers, e.g. mu, mv):
mu/mv is the ratio of (1+2*cos(alpha))/cos(alpha) with rational number.
If irrational, set ratio_alpha = None.
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angle of one grain respect to the
other grain.
When generate the microstructure of the grain boundary using these
angles, you need to analyze the symmetry of the structure. Different
angles may result in equivalent microstructures.
"""
sigmas = {}
# transform four index notation to three index notation
if len(r_axis) == 4:
u1 = r_axis[0]
v1 = r_axis[1]
w1 = r_axis[3]
u = 2 * u1 + v1 + w1
v = v1 + w1 - u1
w = w1 - 2 * v1 - u1
r_axis = [u, v, w]
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if ratio_alpha is None:
mu, mv = [1, 1]
if u + v + w != 0:
if u != v or u != w:
raise RuntimeError('For irrational ratio_alpha, CSL only exist for [1,1,1]'
'or [u, v, -(u+v)] and m =0')
else:
mu, mv = ratio_alpha
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
# refer to the meaning of d in reference
d = (u ** 2 + v ** 2 + w ** 2) * (mu - 2 * mv) + \
2 * mv * (v * w + w * u + u * v)
# Compute the max n we need to enumerate.
n_max = int(np.sqrt((cutoff * abs(4 * mu * (mu - 3 * mv))) / abs(d)))
# Enumerate all possible n, m to give possible sigmas within the cutoff.
for n in range(1, n_max + 1):
if ratio_alpha is None and u + v + w == 0:
m_max = 0
else:
m_max = int(np.sqrt((cutoff * abs(4 * mu * (mu - 3 * mv)) - n ** 2 * d) / (mu)))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
# construct the rotation matrix, refer to the reference
R_list = [(mu - 2 * mv) * (u ** 2 - v ** 2 - w ** 2) * n ** 2 +
2 * mv * (v - w) * m * n - 2 * mv * v * w * n ** 2 +
mu * m ** 2,
2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) *
m * w * n + (mu - 2 * mv) * u * v * n ** 2),
2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) *
m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) *
m * w * n + (mu - 2 * mv) * u * v * n ** 2),
(mu - 2 * mv) * (v ** 2 - w ** 2 - u ** 2) * n ** 2 +
2 * mv * (w - u) * m * n - 2 * mv * u * w * n ** 2 +
mu * m ** 2,
2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) *
m * u * n + (mu - 2 * mv) * w * v * n ** 2),
2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) *
m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) *
m * u * n + (mu - 2 * mv) * w * v * n ** 2),
(mu - 2 * mv) * (w ** 2 - u ** 2 - v ** 2) * n ** 2 +
2 * mv * (u - v) * m * n - 2 * mv * u * v * n ** 2 +
mu * m ** 2]
m = -1 * m
# inverse of the rotation matrix
R_list_inv = [(mu - 2 * mv) * (u ** 2 - v ** 2 - w ** 2) * n ** 2 +
2 * mv * (v - w) * m * n - 2 * mv * v * w * n ** 2 +
mu * m ** 2,
2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) *
m * w * n + (mu - 2 * mv) * u * v * n ** 2),
2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) *
m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) *
m * w * n + (mu - 2 * mv) * u * v * n ** 2),
(mu - 2 * mv) * (v ** 2 - w ** 2 - u ** 2) * n ** 2 +
2 * mv * (w - u) * m * n - 2 * mv * u * w * n ** 2 +
mu * m ** 2,
2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) *
m * u * n + (mu - 2 * mv) * w * v * n ** 2),
2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) *
m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) *
m * u * n + (mu - 2 * mv) * w * v * n ** 2),
(mu - 2 * mv) * (w ** 2 - u ** 2 - v ** 2) * n ** 2 +
2 * mv * (u - v) * m * n - 2 * mv * u * v * n ** 2 +
mu * m ** 2]
m = -1 * m
F = mu * m ** 2 + d * n ** 2
all_list = R_list_inv + R_list + [F]
# Compute the max common factors for the elements of the rotation matrix
# and its inverse.
com_fac = reduce(gcd, all_list)
sigma = int(round(abs(F / com_fac)))
if (sigma <= cutoff) and (sigma > 1):
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) \
/ np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) \
/ np.pi * 180.0
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
if m_max == 0:
break
return sigmas
@staticmethod
def enum_sigma_tet(cutoff, r_axis, c2_a2_ratio):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in tetragonal system.
The algorithm for this code is from reference, Acta Cryst, B46,117(1990)
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
c2_a2_ratio (list of two integers, e.g. mu, mv):
mu/mv is the square of the tetragonal axial ratio with rational number.
if irrational, set c2_a2_ratio = None
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angle of one grain respect to the
other grain.
When generate the microstructure of the grain boundary using these
angles, you need to analyze the symmetry of the structure. Different
angles may result in equivalent microstructures.
"""
sigmas = {}
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if c2_a2_ratio is None:
mu, mv = [1, 1]
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError('For irrational c2/a2, CSL only exist for [0,0,1] '
'or [u,v,0] and m = 0')
else:
mu, mv = c2_a2_ratio
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
# refer to the meaning of d in reference
d = (u ** 2 + v ** 2) * mv + w ** 2 * mu
# Compute the max n we need to enumerate.
n_max = int(np.sqrt((cutoff * 4 * mu * mv) / d))
# Enumerate all possible n, m to give possible sigmas within the cutoff.
for n in range(1, n_max + 1):
if c2_a2_ratio is None and w == 0:
m_max = 0
else:
m_max = int(np.sqrt((cutoff * 4 * mu * mv - n ** 2 * d) / mu))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
# construct the rotation matrix, refer to the reference
R_list = [(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 +
mu * m ** 2,
2 * v * u * mv * n ** 2 - 2 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * v * mu * m * n,
2 * u * v * mv * n ** 2 + 2 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 +
mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * u * mu * m * n,
2 * u * w * mv * n ** 2 - 2 * v * mv * m * n,
2 * v * w * mv * n ** 2 + 2 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv) * n ** 2 +
mu * m ** 2]
m = -1 * m
# inverse of rotation matrix
R_list_inv = [(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 +
mu * m ** 2,
2 * v * u * mv * n ** 2 - 2 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * v * mu * m * n,
2 * u * v * mv * n ** 2 + 2 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 +
mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * u * mu * m * n,
2 * u * w * mv * n ** 2 - 2 * v * mv * m * n,
2 * v * w * mv * n ** 2 + 2 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv) * n ** 2 +
mu * m ** 2]
m = -1 * m
F = mu * m ** 2 + d * n ** 2
all_list = R_list + R_list_inv + [F]
# Compute the max common factors for the elements of the rotation matrix
# and its inverse.
com_fac = reduce(gcd, all_list)
sigma = int(round((mu * m ** 2 + d * n ** 2) / com_fac))
if (sigma <= cutoff) and (sigma > 1):
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) \
/ np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) \
/ np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
if m_max == 0:
break
return sigmas
    @staticmethod
    def enum_sigma_ort(cutoff, r_axis, c2_b2_a2_ratio):
        """
        Find all possible sigma values and corresponding rotation angles
        within a sigma value cutoff with known rotation axis in orthorhombic system.
        The algorithm for this code is from reference, Scipta Metallurgica 27, 291(1992)
        Args:
            cutoff (integer): the cutoff of sigma values.
            r_axis (list of three integers, e.g. u, v, w):
                the rotation axis of the grain boundary, with the format of [u,v,w].
            c2_b2_a2_ratio (list of three integers, e.g. mu,lamda, mv):
                mu:lam:mv is the square of the orthorhombic axial ratio with rational
                numbers. If irrational for one axis, set it to None.
                e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
        Returns:
            sigmas (dict):
                dictionary with keys as the possible integer sigma values
                and values as list of the possible rotation angles to the
                corresponding sigma values.
                e.g. the format as
                {sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
                Note: the angles are the rotation angle of one grain respect to the
                other grain.
                When generate the microstructure of the grain boundary using these
                angles, you need to analyze the symmetry of the structure. Different
                angles may result in equivalent microstructures.
        Raises:
            RuntimeError: if more than one axial ratio is irrational, or if the
                rotation axis is incompatible with the irrational axis.
        """
        sigmas = {}
        # make sure gcd(r_axis)==1
        if reduce(gcd, r_axis) != 1:
            r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
        u, v, w = r_axis
        # make sure mu, lambda, mv are coprime integers.
        if None in c2_b2_a2_ratio:
            # One axial ratio is irrational: substitute 1 for it, and restrict
            # the allowed rotation axes (see the RuntimeErrors below).
            mu, lam, mv = c2_b2_a2_ratio
            non_none = [i for i in c2_b2_a2_ratio if i is not None]
            if len(non_none) < 2:
                raise RuntimeError('No CSL exist for two irrational numbers')
            non1, non2 = non_none
            if reduce(gcd, non_none) != 1:
                temp = reduce(gcd, non_none)
                non1 = int(round(non1 / temp))
                non2 = int(round(non2 / temp))
            if mu is None:
                lam = non1
                mv = non2
                mu = 1
                if w != 0:
                    if u != 0 or (v != 0):
                        raise RuntimeError('For irrational c2, CSL only exist for [0,0,1] '
                                           'or [u,v,0] and m = 0')
            elif lam is None:
                mu = non1
                mv = non2
                lam = 1
                if v != 0:
                    if u != 0 or (w != 0):
                        raise RuntimeError('For irrational b2, CSL only exist for [0,1,0] '
                                           'or [u,0,w] and m = 0')
            elif mv is None:
                mu = non1
                lam = non2
                mv = 1
                if u != 0:
                    if w != 0 or (v != 0):
                        raise RuntimeError('For irrational a2, CSL only exist for [1,0,0] '
                                           'or [0,v,w] and m = 0')
        else:
            mu, lam, mv = c2_b2_a2_ratio
            if reduce(gcd, c2_b2_a2_ratio) != 1:
                temp = reduce(gcd, c2_b2_a2_ratio)
                mu = int(round(mu / temp))
                mv = int(round(mv / temp))
                lam = int(round(lam / temp))
            # when the axis is aligned with a lattice direction, the ratio along
            # that direction does not matter and is normalized to 1
            if u == 0 and v == 0:
                mu = 1
            if u == 0 and w == 0:
                lam = 1
            if v == 0 and w == 0:
                mv = 1
        # refer to the meaning of d in reference
        d = (mv * u ** 2 + lam * v ** 2) * mv + w ** 2 * mu * mv
        # Compute the max n we need to enumerate.
        n_max = int(np.sqrt((cutoff * 4 * mu * mv * mv * lam) / d))
        # Enumerate all possible n, m to give possible sigmas within the cutoff.
        for n in range(1, n_max + 1):
            mu_temp, lam_temp, mv_temp = c2_b2_a2_ratio
            # for an irrational ratio along the rotation axis only m = 0 is allowed
            if (mu_temp is None and w == 0) or (lam_temp is None and v == 0) \
                    or (mv_temp is None and u == 0):
                m_max = 0
            else:
                m_max = int(np.sqrt((cutoff * 4 * mu * mv * lam * mv -
                                     n ** 2 * d) / mu / lam))
            for m in range(0, m_max + 1):
                if gcd(m, n) == 1 or m == 0:
                    # construct the rotation matrix, refer to the reference
                    R_list = [(u ** 2 * mv * mv - lam * v ** 2 * mv -
                               w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
                              2 * lam * (v * u * mv * n ** 2 - w * mu * m * n),
                              2 * mu * (u * w * mv * n ** 2 + v * lam * m * n),
                              2 * mv * (u * v * mv * n ** 2 + w * mu * m * n),
                              (v ** 2 * mv * lam - u ** 2 * mv * mv -
                               w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
                              2 * mv * mu * (v * w * n ** 2 - u * m * n),
                              2 * mv * (u * w * mv * n ** 2 - v * lam * m * n),
                              2 * lam * mv * (v * w * n ** 2 + u * m * n),
                              (w ** 2 * mu * mv - u ** 2 * mv * mv -
                               v ** 2 * mv * lam) * n ** 2 + lam * mu * m ** 2]
                    # flip the sign of m so the same formulas yield the inverse rotation
                    m = -1 * m
                    # inverse of rotation matrix
                    R_list_inv = [(u ** 2 * mv * mv - lam * v ** 2 * mv -
                                   w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
                                  2 * lam * (v * u * mv * n ** 2 - w * mu * m * n),
                                  2 * mu * (u * w * mv * n ** 2 + v * lam * m * n),
                                  2 * mv * (u * v * mv * n ** 2 + w * mu * m * n),
                                  (v ** 2 * mv * lam - u ** 2 * mv * mv -
                                   w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
                                  2 * mv * mu * (v * w * n ** 2 - u * m * n),
                                  2 * mv * (u * w * mv * n ** 2 - v * lam * m * n),
                                  2 * lam * mv * (v * w * n ** 2 + u * m * n),
                                  (w ** 2 * mu * mv - u ** 2 * mv * mv -
                                   v ** 2 * mv * lam) * n ** 2 + lam * mu * m ** 2]
                    # restore the original sign of m
                    m = -1 * m
                    F = mu * lam * m ** 2 + d * n ** 2
                    all_list = R_list + R_list_inv + [F]
                    # Compute the max common factors for the elements of the rotation matrix
                    # and its inverse.
                    com_fac = reduce(gcd, all_list)
                    sigma = int(round((mu * lam * m ** 2 + d * n ** 2) / com_fac))
                    if (sigma <= cutoff) and (sigma > 1):
                        if sigma not in list(sigmas.keys()):
                            if m == 0:
                                angle = 180.0
                            else:
                                angle = 2 * np.arctan(n / m * np.sqrt(d / mu / lam)) \
                                    / np.pi * 180
                            sigmas[sigma] = [angle]
                        else:
                            if m == 0:
                                angle = 180.0
                            else:
                                angle = 2 * np.arctan(n / m * np.sqrt(d / mu / lam)) \
                                    / np.pi * 180
                            if angle not in sigmas[sigma]:
                                sigmas[sigma].append(angle)
            if m_max == 0:
                break
        return sigmas
    @staticmethod
    def enum_possible_plane_cubic(plane_cutoff, r_axis, r_angle):
        """
        Find all possible plane combinations for GBs given a rotaion axis and angle for
        cubic system, and classify them to different categories, including 'Twist',
        'Symmetric tilt', 'Normal tilt', 'Mixed' GBs.
        Args:
            plane_cutoff (integer): the cutoff of plane miller index.
            r_axis (list of three integers, e.g. u, v, w):
                the rotation axis of the grain boundary, with the format of [u,v,w].
            r_angle (float): rotation angle of the GBs.
        Returns:
            all_combinations (dict):
                dictionary with keys as GB type, e.g. 'Twist','Symmetric tilt',etc.
                and values as the combination of the two plane miller index
                (GB plane and joining plane).
        """
        all_combinations = {}
        all_combinations['Symmetric tilt'] = []
        all_combinations['Twist'] = []
        all_combinations['Normal tilt'] = []
        all_combinations['Mixed'] = []
        # cubic symmetry equivalents of the {100} and {110} directions, used
        # below to recognize symmetric tilt boundaries
        sym_plane = symm_group_cubic([[1, 0, 0], [1, 1, 0]])
        j = np.arange(0, plane_cutoff + 1)
        combination = []
        # enumerate all plane indices within the cutoff, adding one sign flip
        # per candidate (full sign enumeration is redundant for plane normals)
        for i in itertools.product(j, repeat=3):
            if sum(abs(np.array(i))) != 0:
                combination.append(list(i))
            if len(np.nonzero(i)[0]) == 3:
                for i1 in range(3):
                    new_i = list(i).copy()
                    new_i[i1] = -1 * new_i[i1]
                    combination.append(new_i)
            elif len(np.nonzero(i)[0]) == 2:
                new_i = list(i).copy()
                new_i[np.nonzero(i)[0][0]] = -1 * new_i[np.nonzero(i)[0][0]]
                combination.append(new_i)
        miller = np.array(combination)
        # visit low-index (short-normal) planes first
        miller = miller[np.argsort(np.linalg.norm(miller, axis=1))]
        for i, val in enumerate(miller):
            # consider only planes whose indices are coprime
            if reduce(gcd, val) == 1:
                matrix = GrainBoundaryGenerator.get_trans_mat(r_axis, r_angle, surface=val, quick_gen=True)
                # joining plane: normal of the second grain's in-plane lattice vectors
                vec = np.cross(matrix[1][0], matrix[1][1])
                miller2 = GrainBoundaryGenerator.vec_to_surface(vec)
                if np.all(np.abs(np.array(miller2)) <= plane_cutoff):
                    # cosine of the angle between GB plane normal and rotation axis
                    cos_1 = abs(np.dot(val, r_axis) / np.linalg.norm(val) / np.linalg.norm(r_axis))
                    if 1 - cos_1 < 1.e-5:
                        # plane normal parallel to the rotation axis -> twist GB
                        all_combinations['Twist'].append([list(val), miller2])
                    elif cos_1 < 1.e-8:
                        # plane normal perpendicular to the rotation axis -> tilt GB
                        sym_tilt = False
                        if np.sum(np.abs(val)) == np.sum(np.abs(miller2)):
                            # symmetric tilt if the bisector (sum or difference) of
                            # the two plane normals lies on a symmetry plane
                            ave = (np.array(val) + np.array(miller2)) / 2
                            ave1 = (np.array(val) - np.array(miller2)) / 2
                            for plane in sym_plane:
                                cos_2 = abs(np.dot(ave, plane) / np.linalg.norm(ave) / np.linalg.norm(plane))
                                cos_3 = abs(np.dot(ave1, plane) / np.linalg.norm(ave1) / np.linalg.norm(plane))
                                if 1 - cos_2 < 1.e-5 or 1 - cos_3 < 1.e-5:
                                    all_combinations['Symmetric tilt'].append([list(val), miller2])
                                    sym_tilt = True
                                    break
                        if not sym_tilt:
                            all_combinations['Normal tilt'].append([list(val), miller2])
                    else:
                        # neither parallel nor perpendicular -> mixed GB
                        all_combinations['Mixed'].append([list(val), miller2])
        return all_combinations
@staticmethod
def get_rotation_angle_from_sigma(sigma, r_axis, lat_type='C', ratio=None):
"""
Find all possible rotation angle for the given sigma value.
Args:
sigma (integer):
sigma value provided
r_axis (list of three integers, e.g. u, v, w
or four integers, e.g. u, v, t, w for hex/rho system only):
the rotation axis of the grain boundary.
lat_type ( one character):
'c' or 'C': cubic system
't' or 'T': tetragonal system
'o' or 'O': orthorhombic system
'h' or 'H': hexagonal system
'r' or 'R': rhombohedral system
default to cubic system
ratio (list of integers):
lattice axial ratio.
For cubic system, ratio is not needed.
For tetragonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to none.
For orthorhombic system, ratio = [mu, lam, mv], list of three integers,
that is, mu:lam:mv = c2:b2:a2. If irrational for one axis, set it to None.
e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
For rhombohedral system, ratio = [mu, mv], list of two integers,
that is, mu/mv is the ratio of (1+2*cos(alpha)/cos(alpha).
If irrational, set it to None.
For hexagonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to none.
Returns:
rotation_angles corresponding to the provided sigma value.
If the sigma value is not correct, return the rotation angle corresponding
to the correct possible sigma value right smaller than the wrong sigma value provided.
"""
if lat_type.lower() == 'c':
logger.info('Make sure this is for cubic system')
sigma_dict = GrainBoundaryGenerator.enum_sigma_cubic(cutoff=sigma, r_axis=r_axis)
elif lat_type.lower() == 't':
logger.info('Make sure this is for tetragonal system')
if ratio is None:
logger.info('Make sure this is for irrational c2/a2 ratio')
elif len(ratio) != 2:
raise RuntimeError('Tetragonal system needs correct c2/a2 ratio')
sigma_dict = GrainBoundaryGenerator.enum_sigma_tet(cutoff=sigma, r_axis=r_axis, c2_a2_ratio=ratio)
elif lat_type.lower() == 'o':
logger.info('Make sure this is for orthorhombic system')
if len(ratio) != 3:
raise RuntimeError('Orthorhombic system needs correct c2:b2:a2 ratio')
sigma_dict = GrainBoundaryGenerator.enum_sigma_ort(cutoff=sigma, r_axis=r_axis, c2_b2_a2_ratio=ratio)
elif lat_type.lower() == 'h':
logger.info('Make sure this is for hexagonal system')
if ratio is None:
logger.info('Make sure this is for irrational c2/a2 ratio')
elif len(ratio) != 2:
raise RuntimeError('Hexagonal system needs correct c2/a2 ratio')
sigma_dict = GrainBoundaryGenerator.enum_sigma_hex(cutoff=sigma, r_axis=r_axis, c2_a2_ratio=ratio)
elif lat_type.lower() == 'r':
logger.info('Make sure this is for rhombohedral system')
if ratio is None:
logger.info('Make sure this is for irrational (1+2*cos(alpha)/cos(alpha) ratio')
elif len(ratio) != 2:
raise RuntimeError('Rhombohedral system needs correct '
'(1+2*cos(alpha)/cos(alpha) ratio')
sigma_dict = GrainBoundaryGenerator.enum_sigma_rho(cutoff=sigma, r_axis=r_axis, ratio_alpha=ratio)
else:
raise RuntimeError('Lattice type not implemented')
sigmas = list(sigma_dict.keys())
if not sigmas:
raise RuntimeError('This is a wriong sigma value, and no sigma exists smaller than this value.')
if sigma in sigmas:
rotation_angles = sigma_dict[sigma]
else:
sigmas.sort()
warnings.warn("This is not the possible sigma value according to the rotation axis!"
"The nearest neighbor sigma and its corresponding angle are returned")
rotation_angles = sigma_dict[sigmas[-1]]
rotation_angles.sort()
return rotation_angles
@staticmethod
def slab_from_csl(csl, surface, normal, trans_cry, max_search=20, quick_gen=False):
"""
By linear operation of csl lattice vectors to get the best corresponding
slab lattice. That is the area of a,b vectors (within the surface plane)
is the smallest, the c vector first, has shortest length perpendicular
to surface [h,k,l], second, has shortest length itself.
Args:
csl (3 by 3 integer array):
input csl lattice.
surface (list of three integers, e.g. h, k, l):
the miller index of the surface, with the format of [h,k,l]
normal (logic):
determine if the c vector needs to perpendicular to surface
trans_cry (3 by 3 array):
transform matrix from crystal system to orthogonal system
max_search (int): max search for the GB lattice vectors that give the smallest GB
lattice. If normal is true, also max search the GB c vector that perpendicular
to the plane.
quick_gen (bool): whether to quickly generate a supercell, no need to find the smallest
cell if set to true.
Returns:
t_matrix: a slab lattice ( 3 by 3 integer array):
"""
# set the transform matrix in real space
trans = trans_cry
# transform matrix in reciprocal space
ctrans = np.linalg.inv(trans.T)
t_matrix = csl.copy()
# vectors constructed from csl that perpendicular to surface
ab_vector = []
# obtain the miller index of surface in terms of csl.
miller = np.matmul(surface, csl.T)
if reduce(gcd, miller) != 1:
miller = [int(round(x / reduce(gcd, miller))) for x in miller]
miller_nonzero = []
# quickly generate a supercell, normal is not work in this way
if quick_gen:
scale_factor = []
eye = np.eye(3, dtype=np.int)
for i, j in enumerate(miller):
if j == 0:
scale_factor.append(eye[i])
else:
miller_nonzero.append(i)
if len(scale_factor) < 2:
index_len = len(miller_nonzero)
for i in range(index_len):
for j in range(i + 1, index_len):
lcm_miller = lcm(miller[miller_nonzero[i]], miller[miller_nonzero[j]])
l = [0, 0, 0]
l[miller_nonzero[i]] = -int(round(lcm_miller / miller[miller_nonzero[i]]))
l[miller_nonzero[j]] = int(round(lcm_miller / miller[miller_nonzero[j]]))
scale_factor.append(l)
if len(scale_factor) == 2:
break
t_matrix[0] = np.array(np.dot(scale_factor[0], csl))
t_matrix[1] = np.array(np.dot(scale_factor[1], csl))
t_matrix[2] = csl[miller_nonzero[0]]
if abs(np.linalg.det(t_matrix)) > 1000:
warnings.warn('Too large matrix. Suggest to use quick_gen=False')
return t_matrix
for i, j in enumerate(miller):
if j == 0:
ab_vector.append(csl[i])
else:
c_index = i
miller_nonzero.append(j)
if len(miller_nonzero) > 1:
t_matrix[2] = csl[c_index]
index_len = len(miller_nonzero)
lcm_miller = []
for i in range(index_len):
for j in range(i + 1, index_len):
com_gcd = gcd(miller_nonzero[i], miller_nonzero[j])
mil1 = int(round(miller_nonzero[i] / com_gcd))
mil2 = int(round(miller_nonzero[j] / com_gcd))
lcm_miller.append(max(abs(mil1), abs(mil2)))
lcm_sorted = sorted(lcm_miller)
if index_len == 2:
max_j = lcm_sorted[0]
else:
max_j = lcm_sorted[1]
else:
if not normal:
t_matrix[0] = ab_vector[0]
t_matrix[1] = ab_vector[1]
t_matrix[2] = csl[c_index]
return t_matrix
else:
max_j = abs(miller_nonzero[0])
if max_j > max_search:
max_j = max_search
# area of a, b vectors
area = None
# length of c vector
c_norm = np.linalg.norm(np.matmul(t_matrix[2], trans))
# c vector length along the direction perpendicular to surface
c_length = np.abs(np.dot(t_matrix[2], surface))
# check if the init c vector perpendicular to the surface
if normal:
c_cross = np.cross(np.matmul(t_matrix[2], trans), np.matmul(surface, ctrans))
if np.linalg.norm(c_cross) < 1.e-8:
normal_init = True
else:
normal_init = False
j = np.arange(0, max_j + 1)
combination = []
for i in itertools.product(j, repeat=3):
if sum(abs(np.array(i))) != 0:
combination.append(list(i))
if len(np.nonzero(i)[0]) == 3:
for i1 in range(3):
new_i = list(i).copy()
new_i[i1] = -1 * new_i[i1]
combination.append(new_i)
elif len(np.nonzero(i)[0]) == 2:
new_i = list(i).copy()
new_i[np.nonzero(i)[0][0]] = -1 * new_i[np.nonzero(i)[0][0]]
combination.append(new_i)
for i in combination:
if reduce(gcd, i) == 1:
temp = np.dot(np.array(i), csl)
if abs(np.dot(temp, surface) - 0) < 1.e-8:
ab_vector.append(temp)
else:
# c vector length along the direction perpendicular to surface
c_len_temp = np.abs(np.dot(temp, surface))
# c vector length itself
c_norm_temp = np.linalg.norm(np.matmul(temp, trans))
if normal:
c_cross = np.cross(np.matmul(temp, trans), np.matmul(surface, ctrans))
if np.linalg.norm(c_cross) < 1.e-8:
if normal_init:
if c_norm_temp < c_norm:
t_matrix[2] = temp
c_norm = c_norm_temp
else:
c_norm = c_norm_temp
normal_init = True
t_matrix[2] = temp
else:
if c_len_temp < c_length or \
(abs(c_len_temp - c_length) < 1.e-8 and c_norm_temp < c_norm):
t_matrix[2] = temp
c_norm = c_norm_temp
c_length = c_len_temp
if normal and (not normal_init):
logger.info('Did not find the perpendicular c vector, increase max_j')
while (not normal_init):
if max_j == max_search:
warnings.warn('Cannot find the perpendicular c vector, please increase max_search')
break
max_j = 3 * max_j
if max_j > max_search:
max_j = max_search
j = np.arange(0, max_j + 1)
combination = []
for i in itertools.product(j, repeat=3):
if sum(abs(np.array(i))) != 0:
combination.append(list(i))
if len(np.nonzero(i)[0]) == 3:
for i1 in range(3):
new_i = list(i).copy()
new_i[i1] = -1 * new_i[i1]
combination.append(new_i)
elif len(np.nonzero(i)[0]) == 2:
new_i = list(i).copy()
new_i[np.nonzero(i)[0][0]] = -1 * new_i[np.nonzero(i)[0][0]]
combination.append(new_i)
for i in combination:
if reduce(gcd, i) == 1:
temp = np.dot(np.array(i), csl)
if abs(np.dot(temp, surface) - 0) > 1.e-8:
c_cross = np.cross(np.matmul(temp, trans), np.matmul(surface, ctrans))
if np.linalg.norm(c_cross) < 1.e-8:
# c vetor length itself
c_norm_temp = np.linalg.norm(np.matmul(temp, trans))
if normal_init:
if c_norm_temp < c_norm:
t_matrix[2] = temp
c_norm = c_norm_temp
else:
c_norm = c_norm_temp
normal_init = True
t_matrix[2] = temp
if normal_init:
logger.info('Found perpendicular c vector')
# find the best a, b vectors with their formed area smallest and average norm of a,b smallest.
for i in itertools.combinations(ab_vector, 2):
area_temp = np.linalg.norm(np.cross(np.matmul(i[0], trans),
np.matmul(i[1], trans)))
if abs(area_temp - 0) > 1.e-8:
ab_norm_temp = np.linalg.norm(np.matmul(i[0], trans)) + \
np.linalg.norm(np.matmul(i[1], trans))
if area is None:
area = area_temp
ab_norm = ab_norm_temp
t_matrix[0] = i[0]
t_matrix[1] = i[1]
elif area_temp < area:
t_matrix[0] = i[0]
t_matrix[1] = i[1]
area = area_temp
ab_norm = ab_norm_temp
elif abs(area - area_temp) < 1.e-8 and ab_norm_temp < ab_norm:
t_matrix[0] = i[0]
t_matrix[1] = i[1]
area = area_temp
ab_norm = ab_norm_temp
# make sure we have a left-handed crystallographic system
if np.linalg.det(np.matmul(t_matrix, trans)) < 0:
t_matrix *= -1
if normal and abs(np.linalg.det(t_matrix)) > 1000:
warnings.warn('Too large matrix. Suggest to use Normal=False')
return t_matrix
@staticmethod
def reduce_mat(mat, mag, r_matrix):
"""
Reduce integer array mat's determinant mag times by linear combination
of its row vectors, so that the new array after rotation (r_matrix) is
still an integer array
Args:
mat (3 by 3 array): input matrix
mag (integer): reduce times for the determinant
r_matrix (3 by 3 array): rotation matrix
Return:
the reduced integer array
"""
max_j = abs(int(round(np.linalg.det(mat) / mag)))
reduced = False
for h in range(3):
k = h + 1 if h + 1 < 3 else abs(2 - h)
l = h + 2 if h + 2 < 3 else abs(1 - h)
j = np.arange(-max_j, max_j + 1)
for j1, j2 in itertools.product(j, repeat=2):
temp = mat[h] + j1 * mat[k] + j2 * mat[l]
if all([np.round(x, 5).is_integer() for x in list(temp / mag)]):
mat_copy = mat.copy()
mat_copy[h] = np.array([int(round(ele / mag)) for ele in temp])
new_mat = np.dot(mat_copy, np.linalg.inv(r_matrix.T))
if all([np.round(x, 5).is_integer() for x in list(np.ravel(new_mat))]):
reduced = True
mat[h] = np.array([int(round(ele / mag)) for ele in temp])
break
if reduced:
break
if not reduced:
warnings.warn("Matrix reduction not performed, may lead to non-primitive gb cell.")
return mat
@staticmethod
def vec_to_surface(vec):
"""
Transform a float vector to a surface miller index with integers.
Args:
vec (1 by 3 array float vector): input float vector
Return:
the surface miller index of the input vector.
"""
miller = [None] * 3
index = []
for i, value in enumerate(vec):
if abs(value) < 1.e-8:
miller[i] = 0
else:
index.append(i)
if len(index) == 1:
miller[index[0]] = 1
else:
min_index = np.argmin([i for i in vec if i != 0])
true_index = index[min_index]
index.pop(min_index)
frac = []
for i, value in enumerate(index):
frac.append(Fraction(vec[value] / vec[true_index]).limit_denominator(100))
if len(index) == 1:
miller[true_index] = frac[0].denominator
miller[index[0]] = frac[0].numerator
else:
com_lcm = lcm(frac[0].denominator, frac[1].denominator)
miller[true_index] = com_lcm
miller[index[0]] = frac[0].numerator * int(round((com_lcm / frac[0].denominator)))
miller[index[1]] = frac[1].numerator * int(round((com_lcm / frac[1].denominator)))
return miller
def factors(n):
    """
    Compute the factors of an integer.
    Args:
        n: the input integer
    Returns:
        a set of integers that are the factors of the input integer.
    """
    # collect both members of every divisor pair (i, n // i) up to sqrt(n)
    return {d for i in range(1, int(np.sqrt(n)) + 1) if n % i == 0 for d in (i, n // i)}
def fix_pbc(structure, matrix=None):
    """
    Wrap all frac_coords of the input structure into [0, 1).
    Args:
        structure (pymatgen structure object):
            input structure
        matrix (lattice matrix, 3 by 3 array/matrix):
            new structure's lattice matrix; if None, use
            input structure's matrix
    Return:
        new structure with wrapped frac_coords and the requested lattice matrix
    """
    if matrix is None:
        lattice = Lattice(structure.lattice.matrix)
    else:
        lattice = Lattice(matrix)
    species = []
    wrapped_coords = []
    for site in structure:
        species.append(site.specie)
        frac = np.array(site.frac_coords)
        for axis in range(3):
            # shift into [0, 1), then snap values numerically equal to 0 or 1
            frac[axis] -= floor(frac[axis])
            if np.allclose(frac[axis], 1) or np.allclose(frac[axis], 0):
                frac[axis] = 0
            else:
                frac[axis] = round(frac[axis], 7)
        wrapped_coords.append(frac)
    return Structure(lattice, species, wrapped_coords, site_properties=structure.site_properties)
def symm_group_cubic(mat):
    """
    Obtain the cubic symmetric equivalents of the given list of vectors.
    Args:
        mat (lattice matrix, n by 3 array/matrix): input vectors
    Return:
        (m by 3 np.array) unique cubic-symmetry images of the input vectors,
        row-sorted by np.unique.
    """
    # the 24 cubic rotation matrices applied to every input vector
    rotations = np.array([
        [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
        [[1, 0, 0], [0, -1, 0], [0, 0, -1]],
        [[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
        [[-1, 0, 0], [0, -1, 0], [0, 0, 1]],
        [[0, -1, 0], [-1, 0, 0], [0, 0, -1]],
        [[0, -1, 0], [1, 0, 0], [0, 0, 1]],
        [[0, 1, 0], [-1, 0, 0], [0, 0, 1]],
        [[0, 1, 0], [1, 0, 0], [0, 0, -1]],
        [[-1, 0, 0], [0, 0, -1], [0, -1, 0]],
        [[-1, 0, 0], [0, 0, 1], [0, 1, 0]],
        [[1, 0, 0], [0, 0, -1], [0, 1, 0]],
        [[1, 0, 0], [0, 0, 1], [0, -1, 0]],
        [[0, 1, 0], [0, 0, 1], [1, 0, 0]],
        [[0, 1, 0], [0, 0, -1], [-1, 0, 0]],
        [[0, -1, 0], [0, 0, 1], [-1, 0, 0]],
        [[0, -1, 0], [0, 0, -1], [1, 0, 0]],
        [[0, 0, 1], [1, 0, 0], [0, 1, 0]],
        [[0, 0, 1], [-1, 0, 0], [0, -1, 0]],
        [[0, 0, -1], [1, 0, 0], [0, -1, 0]],
        [[0, 0, -1], [-1, 0, 0], [0, 1, 0]],
        [[0, 0, -1], [0, -1, 0], [-1, 0, 0]],
        [[0, 0, -1], [0, 1, 0], [1, 0, 0]],
        [[0, 0, 1], [0, -1, 0], [1, 0, 0]],
        [[0, 0, 1], [0, 1, 0], [-1, 0, 0]],
    ], dtype=float)
    vectors = np.atleast_2d(mat)
    images = [np.dot(rot, vec) for rot in rotations for vec in vectors]
    return np.unique(np.array(images), axis=0)
|
tschaume/pymatgen
|
pymatgen/analysis/gb/grain.py
|
Python
|
mit
| 117,508
|
[
"CRYSTAL",
"pymatgen"
] |
e0b0c3e0d2dec6b3328e98f925e5cebc0b7367baa5ace1e72e34b0ce47795b99
|
# Optional GUI dependencies: fall back to a stub so the package can still be
# imported (with GUI features disabled) when vtk / PyQt4 / pyqtgraph are
# not installed.
try:
    from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
    from PyQt4 import QtGui, QtCore
    import pyqtgraph as QtGraph
except ImportError:
    # Stand-in class whose *name* doubles as the error message; it exposes
    # QWidget/QMainWindow attributes so subclassing elsewhere still imports.
    QtGui = type("vtk, PyQt, or pyqtgraph missing. functionality unavailable!",
                 (), {"QWidget": object, "QMainWindow": object})
from .floodview import floodview
from .profileview import profileview
|
RodericDay/MiniPNM
|
minipnm/gui/__init__.py
|
Python
|
mit
| 387
|
[
"VTK"
] |
887c6b518f22477fa4a5041208f54dd822da14740c0e7fcc1a0ebc1066807fba
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGenefilter(RPackage):
    """Some basic functions for filtering genes"""
    # Bioconductor landing page; list_url reuses it for Spack's version scan.
    homepage = "https://bioconductor.org/packages/genefilter/"
    url = "https://git.bioconductor.org/packages/genefilter"
    list_url = homepage
    # Pinned to an exact Bioconductor git commit for reproducible builds.
    version('1.58.1', git='https://git.bioconductor.org/packages/genefilter', commit='ace2556049677f60882adfe91f8cc96791556fc2')
    # genefilter 1.58.1 targets the R 3.4 series only.
    depends_on('r@3.4.0:3.4.9', when='@1.58.1')
    # R package dependencies, needed both at build and at run time.
    depends_on('r-s4vectors', type=('build', 'run'))
    depends_on('r-annotationdbi', type=('build', 'run'))
    depends_on('r-annotate', type=('build', 'run'))
    depends_on('r-biobase', type=('build', 'run'))
|
skosukhin/spack
|
var/spack/repos/builtin/packages/r-genefilter/package.py
|
Python
|
lgpl-2.1
| 1,882
|
[
"Bioconductor"
] |
03a291ff9db69782a908e5a69adabf6a37f6cb9739a428f9953ab6a8630299f8
|
import numpy
import sklearn.cluster
import time
import scipy
import os
from . import audioFeatureExtraction as aF
from . import audioTrainTest as aT
from . import audioBasicIO
import matplotlib.pyplot as plt
from scipy.spatial import distance
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import sklearn.discriminant_analysis
import csv
import os.path
import sklearn
import sklearn.cluster
import hmmlearn.hmm
import pickle
import glob
""" General utility functions """
def smoothMovingAvg(inputSignal, windowLen=11):
    """Smooth a 1-D signal with a moving-average (boxcar) filter.

    The signal is extended at both ends by mirrored, sign-flipped copies to
    reduce boundary transients before convolving with a flat window.

    ARGUMENTS:
        - inputSignal: 1-D numpy array to be smoothed
        - windowLen:   length (in samples) of the averaging window

    RETURNS:
        - the smoothed signal, same length as inputSignal

    RAISES:
        - ValueError if the input is not 1-D or is shorter than the window
    """
    windowLen = int(windowLen)
    if inputSignal.ndim != 1:
        # bug fix: the original raised ValueError("") with no message
        raise ValueError("smoothMovingAvg only accepts 1-dimension arrays.")
    if inputSignal.size < windowLen:
        raise ValueError("Input vector needs to be bigger than window size.")
    if windowLen < 3:
        # window too small to have any smoothing effect
        return inputSignal
    # mirror-extend the signal at both ends (reflected about the endpoints)
    s = numpy.r_[2*inputSignal[0] - inputSignal[windowLen-1::-1],
                 inputSignal,
                 2*inputSignal[-1] - inputSignal[-1:-windowLen:-1]]
    w = numpy.ones(windowLen, 'd')
    y = numpy.convolve(w/w.sum(), s, mode='same')
    # strip the padded borders so the output matches the input length
    return y[windowLen:-windowLen+1]
def selfSimilarityMatrix(featureVectors):
    '''
    Compute the self-similarity matrix of a sequence of feature vectors.
    ARGUMENTS:
        - featureVectors: numpy matrix (nDims x nVectors); column i is the
                          i-th feature vector
    RETURNS:
        - S: the (nVectors x nVectors) self-similarity matrix
    '''
    nDims, nVectors = featureVectors.shape  # validates 2-D input
    normalized, _, _ = aT.normalizeFeatures([featureVectors.T])
    normalized = normalized[0].T
    # cosine distance -> similarity: S = 1 - d_cos
    pairwise = distance.squareform(distance.pdist(normalized.T, 'cosine'))
    return 1.0 - pairwise
def flags2segs(Flags, window):
    '''
    Convert a per-window sequence of class flags into contiguous segments.
    ARGUMENTS:
        - Flags:  a sequence of class flags (one per time window)
        - window: window duration (in seconds)
    RETURNS:
        - segs:    numpy array where segs[i, 0] / segs[i, 1] are the start /
                   end times (in seconds) of segment i
        - classes: classes[i] is the class flag of the i-th segment
    '''
    boundaries = []
    classes = []
    n = len(Flags)
    idx = 0
    cur = Flags[idx]
    # scan for positions where the flag value changes (or the end is reached)
    while idx < n - 1:
        segment_class = cur
        idx += 1
        while (Flags[idx] == cur) and (idx < n - 1):
            idx += 1
        cur = Flags[idx]
        boundaries.append(idx * window)
        classes.append(segment_class)
    # turn the boundary list into (start, end) rows; the first segment starts
    # at time 0, each later segment starts at the previous boundary
    segs = numpy.zeros((len(boundaries), 2))
    for i, b in enumerate(boundaries):
        if i > 0:
            segs[i, 0] = boundaries[i - 1]
        segs[i, 1] = b
    return (segs, classes)
def segs2flags(segStart, segEnd, segLabel, winSize):
    '''
    Convert segment endpoints and labels to a fixed-size sequence of class flags.
    ARGUMENTS:
        - segStart: segment start points (in seconds)
        - segEnd:   segment endpoints (in seconds)
        - segLabel: segment labels
        - winSize:  fixed-size window (in seconds)
    RETURNS:
        - flags:      numpy array of class indices (one per window)
        - classNames: list of class names (strings), sorted alphabetically
    '''
    flags = []
    # bug fix: list(set(...)) yields a nondeterministic class ordering across
    # interpreter runs (string hashing is randomized); sort for determinism
    classNames = sorted(set(segLabel))
    curPos = winSize / 2.0
    while curPos < segEnd[-1]:
        for i in range(len(segStart)):
            if curPos > segStart[i] and curPos <= segEnd[i]:
                break
        # NOTE: if curPos falls in a gap between segments, the label of the
        # last inspected segment is used (legacy best-effort behavior)
        flags.append(classNames.index(segLabel[i]))
        curPos += winSize
    return numpy.array(flags), classNames
def computePreRec(CM, classNames):
    '''
    Compute per-class Precision, Recall and F1 from a confusion matrix.
    ARGUMENTS:
        - CM:         (numOfClasses x numOfClasses) confusion matrix where
                      CM[i, j] counts samples of true class i predicted as j
        - classNames: list of class names (len must equal numOfClasses)
    RETURNS:
        - Recall, Precision, F1: lists of per-class values; 0.0 is returned
          for classes whose measure is undefined (empty row/column)
    '''
    numOfClasses = CM.shape[0]
    if len(classNames) != numOfClasses:
        print("Error in computePreRec! Confusion matrix and classNames list must be of the same size!")
        return
    Precision = []
    Recall = []
    F1 = []
    for i, c in enumerate(classNames):
        # guard against zero denominators (class never predicted / never
        # present); the original produced NaNs here
        colSum = numpy.sum(CM[:, i])
        rowSum = numpy.sum(CM[i, :])
        Precision.append(CM[i, i] / colSum if colSum > 0 else 0.0)
        Recall.append(CM[i, i] / rowSum if rowSum > 0 else 0.0)
        prSum = Precision[-1] + Recall[-1]
        F1.append(2 * Precision[-1] * Recall[-1] / prSum if prSum > 0 else 0.0)
    return Recall, Precision, F1
def readSegmentGT(gtFile):
    '''
    Read a segmentation ground-truth file in simple CSV format:
        <segment start>,<segment end>,<class label>
    (rows that do not have exactly three columns are ignored)
    ARGUMENTS:
        - gtFile:   the path of the CSV segment file
    RETURNS:
        - segStart: a numpy array of segments' start positions
        - segEnd:   a numpy array of segments' ending positions
        - segLabel: a list of respective class labels (strings)
    '''
    segStart = []
    segEnd = []
    segLabel = []
    # bug fix: the file was opened in binary mode ("rb"), which makes
    # csv.reader fail on Python 3; it was also never closed
    with open(gtFile, 'rt', newline='') as f:
        reader = csv.reader(f, delimiter=',')
        for row in reader:
            if len(row) == 3:
                segStart.append(float(row[0]))
                segEnd.append(float(row[1]))
                segLabel.append(row[2])
    return numpy.array(segStart), numpy.array(segEnd), segLabel
def plotSegmentationResults(flagsInd, flagsIndGT, classNames, mtStep, ONLY_EVALUATE=False):
    '''
    Plot statistics of classification-segmentation results (produced either by
    the fix-sized supervised method or by the HMM method) and compute the
    overall accuracy when ground truth is available.
    ARGUMENTS:
        - flagsInd:      numpy array of predicted class indices (per window)
        - flagsIndGT:    numpy array of ground-truth class indices (may be empty)
        - classNames:    list of class names
        - mtStep:        mid-term window step (seconds)
        - ONLY_EVALUATE: if True, skip all plotting and only return the accuracy
    RETURNS:
        - frame-level accuracy in [0, 1], or -1 when no ground truth exists
    '''
    flags = [classNames[int(f)] for f in flagsInd]
    (segs, classes) = flags2segs(flags, mtStep)
    # accuracy is measured over the overlapping prefix of prediction and GT
    minLength = min(flagsInd.shape[0], flagsIndGT.shape[0])
    if minLength > 0:
        accuracy = numpy.sum(flagsInd[0:minLength] == flagsIndGT[0:minLength]) / float(minLength)
    else:
        accuracy = -1
    if not ONLY_EVALUATE:
        Duration = segs[-1, 1]
        # per-class: total duration, percentage of total, average segment length
        SPercentages = numpy.zeros((len(classNames), 1))
        Percentages = numpy.zeros((len(classNames), 1))
        AvDurations = numpy.zeros((len(classNames), 1))
        for iSeg in range(segs.shape[0]):
            SPercentages[classNames.index(classes[iSeg])] += (segs[iSeg, 1]-segs[iSeg, 0])
        for i in range(SPercentages.shape[0]):
            Percentages[i] = 100.0 * SPercentages[i] / Duration
            S = sum(1 for c in classes if c == classNames[i])
            if S > 0:
                AvDurations[i] = SPercentages[i] / S
            else:
                AvDurations[i] = 0.0
        for i in range(Percentages.shape[0]):
            print(classNames[i], Percentages[i], AvDurations[i])
        font = {'size': 10}
        plt.rc('font', **font)
        # top subplot: predicted flags (solid) vs ground truth (dashed red,
        # offset by 0.05 so both lines stay visible) over time
        fig = plt.figure()
        ax1 = fig.add_subplot(211)
        ax1.set_yticks(numpy.array(list(range(len(classNames)))))
        ax1.axis((0, Duration, -1, len(classNames)))
        ax1.set_yticklabels(classNames)
        ax1.plot(numpy.array(list(range(len(flagsInd)))) * mtStep + mtStep / 2.0, flagsInd)
        if flagsIndGT.shape[0] > 0:
            ax1.plot(numpy.array(list(range(len(flagsIndGT)))) * mtStep + mtStep / 2.0, flagsIndGT + 0.05, '--r')
        plt.xlabel("time (seconds)")
        if accuracy >= 0:
            plt.title('Accuracy = {0:.1f}%'.format(100.0 * accuracy))
        # bottom-left subplot: percentage of the total duration per class
        ax2 = fig.add_subplot(223)
        plt.title("Classes percentage durations")
        ax2.axis((0, len(classNames) + 1, 0, 100))
        ax2.set_xticks(numpy.array(list(range(len(classNames) + 1))))
        ax2.set_xticklabels([" "] + classNames)
        ax2.bar(numpy.array(list(range(len(classNames)))) + 0.5, Percentages)
        # bottom-right subplot: average segment duration per class
        ax3 = fig.add_subplot(224)
        plt.title("Segment average duration per class")
        ax3.axis((0, len(classNames)+1, 0, AvDurations.max()))
        ax3.set_xticks(numpy.array(list(range(len(classNames) + 1))))
        ax3.set_xticklabels([" "] + classNames)
        ax3.bar(numpy.array(list(range(len(classNames)))) + 0.5, AvDurations)
        fig.tight_layout()
        plt.show()
    return accuracy
def evaluateSpeakerDiarization(flags, flagsGT):
    '''
    Evaluate a speaker-diarization result against ground truth.
    ARGUMENTS:
        - flags:   numpy array of predicted cluster labels (per window)
        - flagsGT: numpy array of ground-truth speaker labels (per window)
    RETURNS:
        - (purityClusterMean, puritySpeakerMean): average cluster purity and
          average speaker purity, each weighted by cluster / speaker size
    '''
    nWins = min(flags.shape[0], flagsGT.shape[0])
    predicted = flags[0:nWins]
    reference = flagsGT[0:nWins]
    uniqPred = numpy.unique(predicted)
    uniqRef = numpy.unique(reference)
    # contingency table: rows = predicted clusters, cols = reference speakers
    table = numpy.zeros((uniqPred.shape[0], uniqRef.shape[0]))
    for p, r in zip(predicted, reference):
        row = int(numpy.nonzero(uniqPred == p)[0])
        col = int(numpy.nonzero(uniqRef == r)[0])
        table[row, col] += 1.0
    rowTotals = numpy.sum(table, axis=1)   # samples per predicted cluster
    colTotals = numpy.sum(table, axis=0)   # samples per reference speaker
    total = numpy.sum(table)
    # purity of a cluster (speaker): fraction of its samples that belong to
    # its dominant speaker (cluster)
    purityCluster = numpy.max(table, axis=1) / rowTotals
    puritySpeaker = numpy.max(table, axis=0) / colTotals
    purityClusterMean = numpy.sum(purityCluster * rowTotals) / total
    puritySpeakerMean = numpy.sum(puritySpeaker * colTotals) / total
    return purityClusterMean, puritySpeakerMean
def trainHMM_computeStatistics(features, labels):
    '''
    Compute the statistics used to train an HMM joint segmentation-classification
    model from a sequence of sequential features and respective labels.
    ARGUMENTS:
        - features: numpy matrix of feature vectors (numOfDimensions x numOfWindows)
        - labels:   numpy array of class indices (numOfWindows x 1)
    RETURNS:
        - startprob: prior class probabilities (numOfClasses x 1)
        - transmat:  transition matrix (numOfClasses x numOfClasses)
        - means:     per-class mean matrix (numOfClasses x numOfDimensions)
        - cov:       per-class std matrix (numOfClasses x numOfDimensions),
                     i.e. diagonal gaussians are assumed
    '''
    uLabels = numpy.unique(labels)
    nComps = len(uLabels)
    nFeatures = features.shape[0]
    if features.shape[1] < labels.shape[0]:
        print("trainHMM warning: number of short-term feature vectors must be greater or equal to the labels length!")
        labels = labels[0:features.shape[1]]
    # compute prior probabilities:
    startprob = numpy.zeros((nComps,))
    for i, u in enumerate(uLabels):
        startprob[i] = numpy.count_nonzero(labels == u)
    startprob = startprob / startprob.sum()  # normalize prior probabilities
    # compute transition matrix:
    transmat = numpy.zeros((nComps, nComps))
    for i in range(labels.shape[0] - 1):
        transmat[int(labels[i]), int(labels[i + 1])] += 1
    for i in range(nComps):  # normalize rows of transition matrix:
        rowSum = transmat[i, :].sum()
        # guard: a class with no outgoing transitions previously produced a
        # NaN row (division by zero)
        if rowSum > 0:
            transmat[i, :] /= rowSum
    means = numpy.zeros((nComps, nFeatures))
    for i in range(nComps):
        # direct array assignment (numpy.matrix is deprecated)
        means[i, :] = features[:, numpy.nonzero(labels == uLabels[i])[0]].mean(axis=1)
    cov = numpy.zeros((nComps, nFeatures))
    for i in range(nComps):
        # diagonal covariance: per-dimension std
        # (use numpy.cov here instead if full gaussian HMMs are to be used)
        cov[i, :] = numpy.std(features[:, numpy.nonzero(labels == uLabels[i])[0]], axis=1)
    return startprob, transmat, means, cov
def trainHMM_fromFile(wavFile, gtFile, hmmModelName, mtWin, mtStep):
    '''
    Train an HMM model for segmentation-classification from a single annotated
    audio file.
    ARGUMENTS:
        - wavFile:      the path of the audio file
        - gtFile:       the path of the ground-truth file (a csv of
                        <segment start in seconds>,<segment end in seconds>,<segment label> rows)
        - hmmModelName: the file name under which the HMM model is stored
        - mtWin:        mid-term window size
        - mtStep:       mid-term window step
    RETURNS:
        - hmm:        the trained hmmlearn GaussianHMM object
        - classNames: a list of class names
    After training, hmm, classNames, mtWin and mtStep are pickled into
    hmmModelName (in that order).
    '''
    [segStart, segEnd, segLabels] = readSegmentGT(gtFile)                 # read ground-truth data
    flags, classNames = segs2flags(segStart, segEnd, segLabels, mtStep)   # fixed-size flag sequence
    [Fs, x] = audioBasicIO.readAudioFile(wavFile)                         # read audio data
    [F, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * 0.050), round(Fs * 0.050))
    # HMM statistics (priors, transition matrix, per-class means/stds):
    startprob, transmat, means, cov = trainHMM_computeStatistics(F, flags)
    hmm = hmmlearn.hmm.GaussianHMM(startprob.shape[0], "diag")
    hmm.startprob_ = startprob
    hmm.transmat_ = transmat
    hmm.means_ = means
    hmm.covars_ = cov
    # persist the model; "with" guarantees the file handle is closed even if
    # pickling fails (the original leaked the handle on error)
    with open(hmmModelName, "wb") as fo:
        pickle.dump(hmm, fo, protocol=pickle.HIGHEST_PROTOCOL)
        pickle.dump(classNames, fo, protocol=pickle.HIGHEST_PROTOCOL)
        pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
        pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
    return hmm, classNames
def trainHMM_fromDir(dirPath, hmmModelName, mtWin, mtStep):
    '''
    Train an HMM model for segmentation-classification from a directory where
    WAV files and matching .segments (ground-truth) files are stored.
    ARGUMENTS:
        - dirPath:      the path of the data directory
        - hmmModelName: the file name under which the HMM model is stored
        - mtWin:        mid-term window size
        - mtStep:       mid-term window step
    RETURNS:
        - hmm:        the trained hmmlearn GaussianHMM object
        - classesAll: a list of class names
    After training, hmm, classesAll, mtWin and mtStep are pickled into
    hmmModelName (in that order).
    '''
    flagsAll = numpy.array([])
    classesAll = []
    Fall = None  # accumulated feature matrix; None until the first annotated file
    for i, f in enumerate(glob.glob(dirPath + os.sep + '*.wav')):         # for each WAV file
        wavFile = f
        gtFile = f.replace('.wav', '.segments')                           # annotation file
        if not os.path.isfile(gtFile):                                    # skip WAVs without annotation
            continue
        [segStart, segEnd, segLabels] = readSegmentGT(gtFile)             # read GT data
        flags, classNames = segs2flags(segStart, segEnd, segLabels, mtStep)
        for c in classNames:                                              # update global class list
            if c not in classesAll:
                classesAll.append(c)
        [Fs, x] = audioBasicIO.readAudioFile(wavFile)
        [F, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * 0.050), round(Fs * 0.050))
        # truncate features/flags to a common length:
        lenF = F.shape[1]
        lenL = len(flags)
        MIN = min(lenF, lenL)
        F = F[:, 0:MIN]
        flags = flags[0:MIN]
        # remap per-file class indices to the global class list:
        flagsNew = [classesAll.index(classNames[int(fl)]) for fl in flags]
        flagsAll = numpy.append(flagsAll, numpy.array(flagsNew))
        # bug fix: the original keyed initialization on i == 0, which raised
        # a NameError when the FIRST wav file had no annotation (continue)
        if Fall is None:
            Fall = F
        else:
            Fall = numpy.concatenate((Fall, F), axis=1)
    # compute HMM statistics over all accumulated data:
    startprob, transmat, means, cov = trainHMM_computeStatistics(Fall, flagsAll)
    hmm = hmmlearn.hmm.GaussianHMM(startprob.shape[0], "diag")
    hmm.startprob_ = startprob
    hmm.transmat_ = transmat
    hmm.means_ = means
    hmm.covars_ = cov
    with open(hmmModelName, "wb") as fo:                                  # save HMM model
        pickle.dump(hmm, fo, protocol=pickle.HIGHEST_PROTOCOL)
        pickle.dump(classesAll, fo, protocol=pickle.HIGHEST_PROTOCOL)
        pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
        pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
    return hmm, classesAll
def hmmSegmentation(wavFileName, hmmModelName, PLOT=False, gtFileName=""):
    '''
    Apply a pre-trained HMM segmentation-classification model to an audio file.
    ARGUMENTS:
        - wavFileName:  path of the WAV file to segment
        - hmmModelName: path of the pickled HMM model (see trainHMM_fromFile/Dir)
        - PLOT:         if True, plot the segmentation result
        - gtFileName:   optional ground-truth .segments file for evaluation
    RETURNS:
        - (flagsInd, classNames, accuracy, CM), where accuracy and CM are -1
          when no ground truth is available; None if the model file is missing
          or cannot be loaded
    '''
    [Fs, x] = audioBasicIO.readAudioFile(wavFileName)      # read audio data
    try:
        fo = open(hmmModelName, "rb")
    except IOError:
        print("didn't find file")
        return
    # bug fix: the original bare "except" swallowed any unpickling error and
    # then crashed later on the undefined hmm; also the handle was closed
    # twice on failure. Load under "with" and bail out early on failure.
    try:
        with fo:
            hmm = pickle.load(fo)
            classesAll = pickle.load(fo)
            mtWin = pickle.load(fo)
            mtStep = pickle.load(fo)
    except Exception as e:
        print("couldn't load hmm model from " + hmmModelName + ": " + str(e))
        return
    [Features, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * 0.050), round(Fs * 0.050))
    flagsInd = hmm.predict(Features.T)                     # apply the model
    if os.path.isfile(gtFileName):
        # ground truth available: align GT labels with the model's class list
        [segStart, segEnd, segLabels] = readSegmentGT(gtFileName)
        flagsGT, classNamesGT = segs2flags(segStart, segEnd, segLabels, mtStep)
        flagsGTNew = []
        for j, fl in enumerate(flagsGT):
            if classNamesGT[flagsGT[j]] in classesAll:
                flagsGTNew.append(classesAll.index(classNamesGT[flagsGT[j]]))
            else:
                flagsGTNew.append(-1)                      # class unknown to the model
        CM = numpy.zeros((len(classNamesGT), len(classNamesGT)))
        flagsIndGT = numpy.array(flagsGTNew)
        for i in range(min(flagsInd.shape[0], flagsIndGT.shape[0])):
            CM[int(flagsIndGT[i]), int(flagsInd[i])] += 1
    else:
        flagsIndGT = numpy.array([])
    acc = plotSegmentationResults(flagsInd, flagsIndGT, classesAll, mtStep, not PLOT)
    if acc >= 0:
        print("Overall Accuracy: {0:.2f}".format(acc))
        return (flagsInd, classNamesGT, acc, CM)
    else:
        return (flagsInd, classesAll, -1, -1)
def mtFileClassification(inputFile, modelName, modelType, plotResults=False, gtFile=""):
    '''
    Perform mid-term classification of an audio stream using a pre-trained
    supervised classifier (fix-sized segmentation).
    ARGUMENTS:
        - inputFile:   path of the input WAV file
        - modelName:   path of the pre-trained classification model
        - modelType:   'svm', 'knn', 'randomforest', 'gradientboosting' or 'extratrees'
        - plotResults: if True, plot results (and statistics) with matplotlib
        - gtFile:      optional ground-truth .segments file for evaluation
    RETURNS:
        - (flagsInd, classNames, accuracy, CM) on success, (-1, -1, -1) on error;
          accuracy is -1 and CM is empty when no ground truth is available
    '''
    if not os.path.isfile(modelName):
        print("mtFileClassificationError: input modelType not found!")
        return (-1, -1, -1)
    # Load classifier according to its type:
    if modelType == 'svm':
        [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadSVModel(modelName)
    elif modelType == 'knn':
        [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadKNNModel(modelName)
    elif modelType == 'randomforest':
        [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadRandomForestModel(modelName)
    elif modelType == 'gradientboosting':
        [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadGradientBoostingModel(modelName)
    elif modelType == 'extratrees':
        [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadExtraTreesModel(modelName)
    else:
        # bug fix: an unknown model type previously fell through and crashed
        # later with a NameError on Classifier
        print("mtFileClassificationError: unknown modelType " + str(modelType))
        return (-1, -1, -1)
    if computeBEAT:
        print("Model " + modelName + " contains long-term music features (beat etc) and cannot be used in segmentation")
        return (-1, -1, -1)
    [Fs, x] = audioBasicIO.readAudioFile(inputFile)        # load input file
    if Fs == -1:                                           # could not read file
        return (-1, -1, -1)
    x = audioBasicIO.stereo2mono(x)                        # convert stereo (if) to mono
    Duration = len(x) / Fs
    # mid-term feature extraction:
    [MidTermFeatures, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * stWin), round(Fs * stStep))
    flags = []
    Ps = []
    flagsInd = []
    for i in range(MidTermFeatures.shape[1]):              # for each fix-sized segment:
        curFV = (MidTermFeatures[:, i] - MEAN) / STD       # normalize current feature vector
        [Result, P] = aT.classifierWrapper(Classifier, modelType, curFV)   # classify vector
        flagsInd.append(Result)
        flags.append(classNames[int(Result)])              # update class label matrix
        Ps.append(numpy.max(P))                            # update probability matrix
    flagsInd = numpy.array(flagsInd)
    # 1-window smoothing: fix single-window outliers whose neighbors agree
    # NOTE(review): only flagsInd is smoothed, flags (used below for the
    # segment conversion) is not — confirm this asymmetry is intentional
    for i in range(1, len(flagsInd) - 1):
        if flagsInd[i-1] == flagsInd[i + 1]:
            flagsInd[i] = flagsInd[i + 1]
    (segs, classes) = flags2segs(flags, mtStep)            # convert fix-sized flags to segments
    segs[-1] = len(x) / float(Fs)                          # snap last segment to the file end
    # Load ground-truth (if available) and build the confusion matrix:
    if os.path.isfile(gtFile):
        [segStartGT, segEndGT, segLabelsGT] = readSegmentGT(gtFile)
        flagsGT, classNamesGT = segs2flags(segStartGT, segEndGT, segLabelsGT, mtStep)
        flagsIndGT = []
        for j, fl in enumerate(flagsGT):                   # "align" GT labels with the model's classes
            if classNamesGT[flagsGT[j]] in classNames:
                flagsIndGT.append(classNames.index(classNamesGT[flagsGT[j]]))
            else:
                flagsIndGT.append(-1)
        flagsIndGT = numpy.array(flagsIndGT)
        CM = numpy.zeros((len(classNamesGT), len(classNamesGT)))
        for i in range(min(flagsInd.shape[0], flagsIndGT.shape[0])):
            CM[int(flagsIndGT[i]),int(flagsInd[i])] += 1
    else:
        CM = []
        flagsIndGT = numpy.array([])
    acc = plotSegmentationResults(flagsInd, flagsIndGT, classNames, mtStep, not plotResults)
    if acc >= 0:
        print("Overall Accuracy: {0:.3f}".format(acc))
        return (flagsInd, classNamesGT, acc, CM)
    else:
        return (flagsInd, classNames, acc, CM)
def evaluateSegmentationClassificationDir(dirName, modelName, methodName):
    '''
    Evaluate a segmentation-classification method over a directory of WAV files
    with matching .segments ground-truth files, printing aggregate
    confusion-matrix statistics.
    ARGUMENTS:
        - dirName:    directory containing the WAV + .segments files
        - modelName:  path of the pre-trained model
        - methodName: 'svm', 'knn', 'randomforest', 'gradientboosting' or
                      'extratrees' for fix-sized classification; anything else
                      selects HMM segmentation
    '''
    flagsAll = numpy.array([])   # NOTE(review): accumulator is never used below
    classesAll = []              # NOTE(review): accumulator is never used below
    accuracys = []
    for i, f in enumerate(glob.glob(dirName + os.sep + '*.wav')):    # for each WAV file
        wavFile = f
        print(wavFile)
        gtFile = f.replace('.wav', '.segments')            # matching annotation file
        if methodName.lower() in ["svm", "knn","randomforest","gradientboosting","extratrees"]:
            flagsInd, classNames, acc, CMt = mtFileClassification(wavFile, modelName, methodName, False, gtFile)
        else:
            flagsInd, classNames, acc, CMt = hmmSegmentation(wavFile, modelName, False, gtFile)
        if acc > -1:
            # NOTE(review): if acc == -1 for the first file(s), CM below is
            # referenced before assignment — confirm inputs always include GT
            if i==0:
                CM = numpy.copy(CMt)
            else:
                CM = CM + CMt
            accuracys.append(acc)
            print(CMt, classNames)
            print(CM)
            [Rec, Pre, F1] = computePreRec(CMt, classNames)
    CM = CM / numpy.sum(CM)      # normalize the aggregated confusion matrix
    [Rec, Pre, F1] = computePreRec(CM, classNames)
    print(" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ")
    print("Average Accuracy: {0:.1f}".format(100.0*numpy.array(accuracys).mean()))
    print("Average Recall: {0:.1f}".format(100.0*numpy.array(Rec).mean()))
    print("Average Precision: {0:.1f}".format(100.0*numpy.array(Pre).mean()))
    print("Average F1: {0:.1f}".format(100.0*numpy.array(F1).mean()))
    print("Median Accuracy: {0:.1f}".format(100.0*numpy.median(numpy.array(accuracys))))
    print("Min Accuracy: {0:.1f}".format(100.0*numpy.array(accuracys).min()))
    print("Max Accuracy: {0:.1f}".format(100.0*numpy.array(accuracys).max()))
def silenceRemoval(x, Fs, stWin, stStep, smoothWindow=0.5, Weight=0.5, plot=False):
    '''
    Event detection (silence removal).
    ARGUMENTS:
        - x:             the input audio signal
        - Fs:            sampling frequency
        - stWin, stStep: window size and step in seconds
        - smoothWindow:  (optional) smoothing window (in seconds)
        - Weight:        (optional) weight factor in (0, 1); the higher, the stricter
        - plot:          (optional) True if results are to be plotted
    RETURNS:
        - segmentLimits: list of [start, end] segment limits in seconds, e.g.
          [[0.1, 0.9], [1.4, 3.0]] means the detected events are (0.1 - 0.9)
          seconds and (1.4 - 3.0) seconds
    '''
    # clamp the weight into the open interval (0, 1):
    if Weight >= 1:
        Weight = 0.99
    if Weight <= 0:
        Weight = 0.01
    # Step 1: feature extraction
    x = audioBasicIO.stereo2mono(x)                        # convert to mono
    ShortTermFeatures = aF.stFeatureExtraction(x, Fs, stWin * Fs, stStep * Fs)
    # Step 2: train a binary SVM of low- vs high-energy frames
    EnergySt = ShortTermFeatures[1, :]                     # energy is the 2nd short-term feature
    E = numpy.sort(EnergySt)
    L1 = int(len(E) / 20)                                  # 5% of the short-term windows
    T1 = numpy.mean(E[0:L1])                               # "lower" energy threshold
    T2 = numpy.mean(E[-L1:-1])                             # "higher" energy threshold
    Class1 = ShortTermFeatures[:, numpy.where(EnergySt <= T1)[0]]   # low-energy frames
    Class2 = ShortTermFeatures[:, numpy.where(EnergySt >= T2)[0]]   # high-energy frames
    featuresSS = [Class1.T, Class2.T]                      # binary classification task
    [featuresNormSS, MEANSS, STDSS] = aT.normalizeFeatures(featuresSS)
    SVM = aT.trainSVM(featuresNormSS, 1.0)                 # probabilistic ONSET vs SILENCE model
    # Step 3: compute onset probability with the trained SVM
    ProbOnset = []
    for i in range(ShortTermFeatures.shape[1]):            # for each frame
        curFV = (ShortTermFeatures[:, i] - MEANSS) / STDSS
        ProbOnset.append(SVM.predict_proba(curFV.reshape(1,-1))[0][1])
    ProbOnset = numpy.array(ProbOnset)
    ProbOnset = smoothMovingAvg(ProbOnset, smoothWindow / stStep)   # smooth probability
    # Step 4A: detect onset frame indices; the threshold is a weighted average
    # of the top 10% and bottom 10% of the probability values
    ProbOnsetSorted = numpy.sort(ProbOnset)
    # bug fix: Nt was a float (shape[0] / 10), which raises TypeError when
    # used as a slice index on Python 3; also keep it >= 1 so the slice
    # means below are never taken over an empty array
    Nt = max(1, int(ProbOnsetSorted.shape[0] / 10))
    T = (numpy.mean((1 - Weight) * ProbOnsetSorted[0:Nt]) + Weight * numpy.mean(ProbOnsetSorted[-Nt::]))
    MaxIdx = numpy.where(ProbOnset > T)[0]                 # frames above threshold
    i = 0
    timeClusters = []
    segmentLimits = []
    # Step 4B: group frame indices (gaps of <= 2 frames allowed) into segments
    while i < len(MaxIdx):
        curCluster = [MaxIdx[i]]
        if i == len(MaxIdx)-1:
            break
        while MaxIdx[i+1] - curCluster[-1] <= 2:
            curCluster.append(MaxIdx[i+1])
            i += 1
            if i == len(MaxIdx)-1:
                break
        i += 1
        timeClusters.append(curCluster)
        segmentLimits.append([curCluster[0] * stStep, curCluster[-1] * stStep])
    # Step 5: post-process - remove very short segments:
    minDuration = 0.2
    segmentLimits2 = []
    for s in segmentLimits:
        if s[1] - s[0] > minDuration:
            segmentLimits2.append(s)
    segmentLimits = segmentLimits2
    if plot:
        timeX = numpy.arange(0, x.shape[0] / float(Fs), 1.0 / Fs)
        plt.subplot(2, 1, 1)
        plt.plot(timeX, x)
        for s in segmentLimits:
            plt.axvline(x=s[0])
            plt.axvline(x=s[1])
        plt.subplot(2, 1, 2)
        plt.plot(numpy.arange(0, ProbOnset.shape[0] * stStep, stStep), ProbOnset)
        plt.title('Signal')
        for s in segmentLimits:
            plt.axvline(x=s[0])
            plt.axvline(x=s[1])
        plt.title('SVM Probability')
        plt.show()
    return segmentLimits
def speakerDiarization(fileName, numOfSpeakers, mtSize=2.0, mtStep=0.2, stWin=0.05, LDAdim=35, PLOT=False):
'''
ARGUMENTS:
- fileName: the name of the WAV file to be analyzed
- numOfSpeakers the number of speakers (clusters) in the recording (<=0 for unknown)
- mtSize (opt) mid-term window size
- mtStep (opt) mid-term window step
- stWin (opt) short-term window size
- LDAdim (opt) LDA dimension (0 for no LDA)
- PLOT (opt) 0 for not plotting the results 1 for plottingy
'''
[Fs, x] = audioBasicIO.readAudioFile(fileName)
x = audioBasicIO.stereo2mono(x)
Duration = len(x) / Fs
[Classifier1, MEAN1, STD1, classNames1, mtWin1, mtStep1, stWin1, stStep1, computeBEAT1] = aT.loadKNNModel(os.path.join("data","knnSpeakerAll"))
[Classifier2, MEAN2, STD2, classNames2, mtWin2, mtStep2, stWin2, stStep2, computeBEAT2] = aT.loadKNNModel(os.path.join("data","knnSpeakerFemaleMale"))
[MidTermFeatures, ShortTermFeatures] = aF.mtFeatureExtraction(x, Fs, mtSize * Fs, mtStep * Fs, round(Fs * stWin), round(Fs*stWin * 0.5))
MidTermFeatures2 = numpy.zeros((MidTermFeatures.shape[0] + len(classNames1) + len(classNames2), MidTermFeatures.shape[1]))
for i in range(MidTermFeatures.shape[1]):
curF1 = (MidTermFeatures[:, i] - MEAN1) / STD1
curF2 = (MidTermFeatures[:, i] - MEAN2) / STD2
[Result, P1] = aT.classifierWrapper(Classifier1, "knn", curF1)
[Result, P2] = aT.classifierWrapper(Classifier2, "knn", curF2)
MidTermFeatures2[0:MidTermFeatures.shape[0], i] = MidTermFeatures[:, i]
MidTermFeatures2[MidTermFeatures.shape[0]:MidTermFeatures.shape[0]+len(classNames1), i] = P1 + 0.0001
MidTermFeatures2[MidTermFeatures.shape[0] + len(classNames1)::, i] = P2 + 0.0001
MidTermFeatures = MidTermFeatures2 # TODO
# SELECT FEATURES:
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20]; # SET 0A
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20, 99,100]; # SET 0B
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20, 68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,
# 97,98, 99,100]; # SET 0C
iFeaturesSelect = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53] # SET 1A
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20,41,42,43,44,45,46,47,48,49,50,51,52,53, 99,100]; # SET 1B
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20,41,42,43,44,45,46,47,48,49,50,51,52,53, 68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98, 99,100]; # SET 1C
#iFeaturesSelect = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53]; # SET 2A
#iFeaturesSelect = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53, 99,100]; # SET 2B
#iFeaturesSelect = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53, 68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98, 99,100]; # SET 2C
#iFeaturesSelect = range(100); # SET 3
#MidTermFeatures += numpy.random.rand(MidTermFeatures.shape[0], MidTermFeatures.shape[1]) * 0.000000010
MidTermFeatures = MidTermFeatures[iFeaturesSelect, :]
(MidTermFeaturesNorm, MEAN, STD) = aT.normalizeFeatures([MidTermFeatures.T])
MidTermFeaturesNorm = MidTermFeaturesNorm[0].T
numOfWindows = MidTermFeatures.shape[1]
# remove outliers:
DistancesAll = numpy.sum(distance.squareform(distance.pdist(MidTermFeaturesNorm.T)), axis=0)
MDistancesAll = numpy.mean(DistancesAll)
iNonOutLiers = numpy.nonzero(DistancesAll < 1.2 * MDistancesAll)[0]
# TODO: Combine energy threshold for outlier removal:
#EnergyMin = numpy.min(MidTermFeatures[1,:])
#EnergyMean = numpy.mean(MidTermFeatures[1,:])
#Thres = (1.5*EnergyMin + 0.5*EnergyMean) / 2.0
#iNonOutLiers = numpy.nonzero(MidTermFeatures[1,:] > Thres)[0]
#print iNonOutLiers
perOutLier = (100.0 * (numOfWindows - iNonOutLiers.shape[0])) / numOfWindows
MidTermFeaturesNormOr = MidTermFeaturesNorm
MidTermFeaturesNorm = MidTermFeaturesNorm[:, iNonOutLiers]
# LDA dimensionality reduction:
if LDAdim > 0:
#[mtFeaturesToReduce, _] = aF.mtFeatureExtraction(x, Fs, mtSize * Fs, stWin * Fs, round(Fs*stWin), round(Fs*stWin));
# extract mid-term features with minimum step:
mtWinRatio = int(round(mtSize / stWin))
mtStepRatio = int(round(stWin / stWin))
mtFeaturesToReduce = []
numOfFeatures = len(ShortTermFeatures)
numOfStatistics = 2
#for i in range(numOfStatistics * numOfFeatures + 1):
for i in range(numOfStatistics * numOfFeatures):
mtFeaturesToReduce.append([])
for i in range(numOfFeatures): # for each of the short-term features:
curPos = 0
N = len(ShortTermFeatures[i])
while (curPos < N):
N1 = curPos
N2 = curPos + mtWinRatio
if N2 > N:
N2 = N
curStFeatures = ShortTermFeatures[i][N1:N2]
mtFeaturesToReduce[i].append(numpy.mean(curStFeatures))
mtFeaturesToReduce[i+numOfFeatures].append(numpy.std(curStFeatures))
curPos += mtStepRatio
mtFeaturesToReduce = numpy.array(mtFeaturesToReduce)
mtFeaturesToReduce2 = numpy.zeros((mtFeaturesToReduce.shape[0] + len(classNames1) + len(classNames2), mtFeaturesToReduce.shape[1]))
for i in range(mtFeaturesToReduce.shape[1]):
curF1 = (mtFeaturesToReduce[:, i] - MEAN1) / STD1
curF2 = (mtFeaturesToReduce[:, i] - MEAN2) / STD2
[Result, P1] = aT.classifierWrapper(Classifier1, "knn", curF1)
[Result, P2] = aT.classifierWrapper(Classifier2, "knn", curF2)
mtFeaturesToReduce2[0:mtFeaturesToReduce.shape[0], i] = mtFeaturesToReduce[:, i]
mtFeaturesToReduce2[mtFeaturesToReduce.shape[0]:mtFeaturesToReduce.shape[0] + len(classNames1), i] = P1 + 0.0001
mtFeaturesToReduce2[mtFeaturesToReduce.shape[0]+len(classNames1)::, i] = P2 + 0.0001
mtFeaturesToReduce = mtFeaturesToReduce2
mtFeaturesToReduce = mtFeaturesToReduce[iFeaturesSelect, :]
#mtFeaturesToReduce += numpy.random.rand(mtFeaturesToReduce.shape[0], mtFeaturesToReduce.shape[1]) * 0.0000010
(mtFeaturesToReduce, MEAN, STD) = aT.normalizeFeatures([mtFeaturesToReduce.T])
mtFeaturesToReduce = mtFeaturesToReduce[0].T
#DistancesAll = numpy.sum(distance.squareform(distance.pdist(mtFeaturesToReduce.T)), axis=0)
#MDistancesAll = numpy.mean(DistancesAll)
#iNonOutLiers2 = numpy.nonzero(DistancesAll < 3.0*MDistancesAll)[0]
#mtFeaturesToReduce = mtFeaturesToReduce[:, iNonOutLiers2]
Labels = numpy.zeros((mtFeaturesToReduce.shape[1], ));
LDAstep = 1.0
LDAstepRatio = LDAstep / stWin
#print LDAstep, LDAstepRatio
for i in range(Labels.shape[0]):
Labels[i] = int(i*stWin/LDAstepRatio);
clf = sklearn.discriminant_analysis.LinearDiscriminantAnalysis(n_components=LDAdim)
clf.fit(mtFeaturesToReduce.T, Labels)
MidTermFeaturesNorm = (clf.transform(MidTermFeaturesNorm.T)).T
if numOfSpeakers <= 0:
sRange = list(range(2, 10))
else:
sRange = [numOfSpeakers]
clsAll = []
silAll = []
centersAll = []
for iSpeakers in sRange:
k_means = sklearn.cluster.KMeans(n_clusters = iSpeakers)
k_means.fit(MidTermFeaturesNorm.T)
cls = k_means.labels_
means = k_means.cluster_centers_
# Y = distance.squareform(distance.pdist(MidTermFeaturesNorm.T))
clsAll.append(cls)
centersAll.append(means)
silA = []; silB = []
for c in range(iSpeakers): # for each speaker (i.e. for each extracted cluster)
clusterPerCent = numpy.nonzero(cls==c)[0].shape[0] / float(len(cls))
if clusterPerCent < 0.020:
silA.append(0.0)
silB.append(0.0)
else:
MidTermFeaturesNormTemp = MidTermFeaturesNorm[:,cls==c] # get subset of feature vectors
Yt = distance.pdist(MidTermFeaturesNormTemp.T) # compute average distance between samples that belong to the cluster (a values)
silA.append(numpy.mean(Yt)*clusterPerCent)
silBs = []
for c2 in range(iSpeakers): # compute distances from samples of other clusters
if c2!=c:
clusterPerCent2 = numpy.nonzero(cls==c2)[0].shape[0] / float(len(cls))
MidTermFeaturesNormTemp2 = MidTermFeaturesNorm[:,cls==c2]
Yt = distance.cdist(MidTermFeaturesNormTemp.T, MidTermFeaturesNormTemp2.T)
silBs.append(numpy.mean(Yt)*(clusterPerCent+clusterPerCent2)/2.0)
silBs = numpy.array(silBs)
silB.append(min(silBs)) # ... and keep the minimum value (i.e. the distance from the "nearest" cluster)
silA = numpy.array(silA);
silB = numpy.array(silB);
sil = []
for c in range(iSpeakers): # for each cluster (speaker)
sil.append( ( silB[c] - silA[c]) / (max(silB[c], silA[c])+0.00001) ) # compute silhouette
silAll.append(numpy.mean(sil)) # keep the AVERAGE SILLOUETTE
#silAll = silAll * (1.0/(numpy.power(numpy.array(sRange),0.5)))
imax = numpy.argmax(silAll) # position of the maximum sillouette value
nSpeakersFinal = sRange[imax] # optimal number of clusters
# generate the final set of cluster labels
# (important: need to retrieve the outlier windows: this is achieved by giving them the value of their nearest non-outlier window)
cls = numpy.zeros((numOfWindows,))
for i in range(numOfWindows):
j = numpy.argmin(numpy.abs(i-iNonOutLiers))
cls[i] = clsAll[imax][j]
# Post-process method 1: hmm smoothing
for i in range(1):
startprob, transmat, means, cov = trainHMM_computeStatistics(MidTermFeaturesNormOr, cls)
hmm = hmmlearn.hmm.GaussianHMM(startprob.shape[0], "diag") # hmm training
hmm.startprob_ = startprob
hmm.transmat_ = transmat
hmm.means_ = means; hmm.covars_ = cov
cls = hmm.predict(MidTermFeaturesNormOr.T)
# Post-process method 2: median filtering:
cls = scipy.signal.medfilt(cls, 13)
cls = scipy.signal.medfilt(cls, 11)
sil = silAll[imax] # final sillouette
classNames = ["speaker{0:d}".format(c) for c in range(nSpeakersFinal)];
# load ground-truth if available
gtFile = fileName.replace('.wav', '.segments'); # open for annotated file
if os.path.isfile(gtFile): # if groundturh exists
[segStart, segEnd, segLabels] = readSegmentGT(gtFile) # read GT data
flagsGT, classNamesGT = segs2flags(segStart, segEnd, segLabels, mtStep) # convert to flags
if PLOT:
fig = plt.figure()
if numOfSpeakers>0:
ax1 = fig.add_subplot(111)
else:
ax1 = fig.add_subplot(211)
ax1.set_yticks(numpy.array(list(range(len(classNames)))))
ax1.axis((0, Duration, -1, len(classNames)))
ax1.set_yticklabels(classNames)
ax1.plot(numpy.array(list(range(len(cls))))*mtStep+mtStep/2.0, cls)
if os.path.isfile(gtFile):
if PLOT:
ax1.plot(numpy.array(list(range(len(flagsGT))))*mtStep+mtStep/2.0, flagsGT, 'r')
purityClusterMean, puritySpeakerMean = evaluateSpeakerDiarization(cls, flagsGT)
print("{0:.1f}\t{1:.1f}".format(100*purityClusterMean, 100*puritySpeakerMean))
if PLOT:
plt.title("Cluster purity: {0:.1f}% - Speaker purity: {1:.1f}%".format(100*purityClusterMean, 100*puritySpeakerMean) )
if PLOT:
plt.xlabel("time (seconds)")
#print sRange, silAll
if numOfSpeakers<=0:
plt.subplot(212)
plt.plot(sRange, silAll)
plt.xlabel("number of clusters");
plt.ylabel("average clustering's sillouette");
plt.show()
return cls
def speakerDiarizationEvaluateScript(folderName, LDAs):
    '''
    Print the cluster purity and speaker purity for each WAV file stored in a
    provided directory (.segments files are needed as ground-truth).

    ARGUMENTS:
        - folderName:   full path of the folder holding the WAV and .segments
                        (ground-truth) files
        - LDAs:         list of LDA dimensions to evaluate (0 disables LDA)
    '''
    wav_files = sorted(glob.glob(os.path.join(folderName, '*.wav')))

    # Number of unique speakers per file, read from the ground-truth
    # annotation; -1 when no .segments file exists for that WAV.
    speaker_counts = []
    for wav_path in wav_files:
        gt_path = wav_path.replace('.wav', '.segments')
        if not os.path.isfile(gt_path):
            speaker_counts.append(-1)
            continue
        segStart, segEnd, segLabels = readSegmentGT(gt_path)
        speaker_counts.append(len(set(segLabels)))

    # Run diarization once per requested LDA dimension; speakerDiarization
    # itself prints the purity figures per file.
    for lda_dim in LDAs:
        print("LDA = {0:d}".format(lda_dim))
        for idx, wav_path in enumerate(wav_files):
            speakerDiarization(wav_path, speaker_counts[idx], 2.0, 0.2, 0.05,
                               lda_dim, PLOT=False)
        print()
def musicThumbnailing(x, Fs, shortTermSize=1.0, shortTermStep=0.5, thumbnailSize=10.0, Limit1 = 0, Limit2 = 1):
    '''
    This function detects instances of the most representative part of a
    music recording, also called "music thumbnails".
    A technique similar to the one proposed in [1], however a wider set of
    audio features is used instead of chroma features.
    In particular the following steps are followed:
     - Extract short-term audio features. Typical short-term window size: 1 second
     - Compute the self-similarity matrix, i.e. all pairwise similarities
       between feature vectors
     - Apply a diagonal mask as a moving average filter on the values of the
       self-similarity matrix. The size of the mask is equal to the desirable
       thumbnail length.
     - Find the position of the maximum value of the new (filtered)
       self-similarity matrix. The audio segments that correspond to the
       diagonal around that position are the selected thumbnails.

    ARGUMENTS:
     - x:                input signal
     - Fs:               sampling frequency
     - shortTermSize:    window size (in seconds)
     - shortTermStep:    window step (in seconds)
     - thumbnailSize:    desired thumbnail size (in seconds)
     - Limit1, Limit2:   fractions (in [0, 1]) of the recording that bound the
                         search area; positions outside [Limit1, Limit2] are
                         masked out before the maximum is located

    RETURNS:
     - A1:   beginning of 1st thumbnail (in seconds)
     - A2:   ending of 1st thumbnail (in seconds)
     - B1:   beginning of 2nd thumbnail (in seconds)
     - B2:   ending of 2nd thumbnail (in seconds)
     - S:    the filtered and masked self-similarity matrix

    USAGE EXAMPLE:
        import audioFeatureExtraction as aF
        [Fs, x] = basicIO.readAudioFile(inputFile)
        [A1, A2, B1, B2, S] = musicThumbnailing(x, Fs)

    [1] Bartsch, M. A., & Wakefield, G. H. (2005). Audio thumbnailing of
        popular music using chroma-based representations.
        Multimedia, IEEE Transactions on, 7(1), 96-104.
    '''
    x = audioBasicIO.stereo2mono(x)

    # short-term feature extraction:
    stFeatures = aF.stFeatureExtraction(x, Fs, Fs * shortTermSize, Fs * shortTermStep)

    # self-similarity matrix:
    S = selfSimilarityMatrix(stFeatures)

    # moving diagonal filter (mask length == thumbnail length in frames):
    M = int(round(thumbnailSize / shortTermStep))
    B = numpy.eye(M, M)
    S = scipy.signal.convolve2d(S, B, 'valid')

    # post-processing: suppress near-diagonal (trivial self-matches) and the
    # lower triangle (each pair is only considered once, i <= j):
    MIN = numpy.min(S)
    for i in range(S.shape[0]):
        for j in range(S.shape[1]):
            if abs(i - j) < 5.0 / shortTermStep or i > j:
                S[i, j] = MIN

    # restrict the search area to the [Limit1, Limit2] fraction of the matrix:
    S[0:int(Limit1 * S.shape[0]), :] = MIN
    S[:, 0:int(Limit1 * S.shape[0])] = MIN
    S[int(Limit2 * S.shape[0])::, :] = MIN
    S[:, int(Limit2 * S.shape[0])::] = MIN

    # find the position of the maximum similarity:
    [I, J] = numpy.unravel_index(S.argmax(), S.shape)

    # expand the single maximum position to a diagonal segment of length M,
    # greedily growing towards the more similar neighbour:
    i1 = I
    i2 = I
    j1 = J
    j2 = J
    while i2 - i1 < M:
        if i1 <= 0 or j1 <= 0 or i2 >= S.shape[0] - 2 or j2 >= S.shape[1] - 2:
            break
        if S[i1 - 1, j1 - 1] > S[i2 + 1, j2 + 1]:
            i1 -= 1
            j1 -= 1
        else:
            i2 += 1
            j2 += 1

    return (shortTermStep * i1, shortTermStep * i2,
            shortTermStep * j1, shortTermStep * j2, S)
|
belkinsky/SFXbot
|
src/pyAudioAnalysis/audioSegmentation.py
|
Python
|
mit
| 46,836
|
[
"Gaussian"
] |
0f72788c6e9b717043dd389b7c3b8c8bf215fe35afca91721581ee2ab372ec44
|
#! /usr/bin/env python
# Copyright 2014 Miguel Martinez de Aguirre
# See LICENSE for details
from StringIO import StringIO
import sys
from PIL import Image, ImageTk
import Tkinter
from tkSimpleDialog import Dialog
from twisted.cred import credentials
from twisted.internet import reactor, tksupport
from twisted.protocols import basic
from twisted.python import log
from twisted.spread import pb
import error
from server import GameType
import util
VERSION = 2
class GameClient(pb.Referenceable):
    """Twisted Perspective Broker client for the path game.

    Connects to the game server over PB, installs Tk support on the reactor,
    and acts as either a full game client or a chat-only client depending on
    what the server allows.  Methods prefixed ``remote_`` are callable by the
    server (pb.Referenceable convention).
    """

    # Coordinates this client has expanded/visited so far.
    # NOTE(review): class-level mutable attribute — shared by all instances;
    # fine for the single-client use here, but worth confirming intent.
    visited = set()
    # Vanity display-name replacements for selected usernames.
    repl = {'xander': 'Xandy Pandy',
            'alargeasteroid': 'A Large Asteroid',
            'moon': 'MooN'}

    def __init__(self, address="localhost", port=8181):
        # Set up the PB factory, the Tk root window (closing it stops the
        # reactor via shutdown) and kick off the TCP connection.
        log.msg(["GameClient.__init__", self, address, port])
        self.factory = pb.PBClientFactory()
        self.root = Tkinter.Tk()
        self.root.protocol("WM_DELETE_WINDOW", self.shutdown)
        tksupport.install(self.root)
        reactor.connectTCP(address, port, self.factory)

    def sendMessage(self, message):
        # Forward a chat message to the server.
        d = self.perspective.callRemote("message", message)
        d.addErrback(self._errored)

    def remote_print(self, message, colour):
        # Server-pushed chat line; displayed in the chat UI with its colour.
        log.msg(["print", self, message, colour])
        self.chatui.printMessage(message, colour)

    def remote_win(self, scores):
        # Server notification: this client won; deactivate the board.
        print "Congrats!"
        print scores
        self.gameui.setActive(False)

    def remote_gameOver(self, scores):
        # Server notification: game ended without this client winning.
        print "Better luck next time!"
        print scores
        self.gameui.setActive(False)

    def remote_startGame(self, start, end, players, costdelta):
        """
        start: (x, y) start coordinate
        end: (x, y) aim coordinate
        players: [(name, colour), ...]
        costdelta: [(coordinate, colour), ...]
        """
        log.msg(["startGame", start, end, players, costdelta])
        self.start = start
        self.end = end
        self.players = players
        self.visited.add(start)
        # TODO: notify user of start
        self.remote_startNextTurn(costdelta)

    def remote_startNextTurn(self, costdelta):
        # Begin a turn: arm the per-turn timeout (server timeout is in ms),
        # apply the server's cost changes and unlock the board.
        log.msg(["startNextTurn", self, costdelta])
        t = self.gameType.timeout / 1000.0
        self.later = reactor.callLater(t, self._finishTurn)
        self.gameui.applyCostDelta(costdelta)
        self.gameui.setActive(True)

    def remote_updateCosts(self, costdelta):
        # Mid-turn cost update pushed by the server.
        log.msg(["updateCosts", costdelta])
        self.gameui.applyCostDelta(costdelta)

    def _finishTurn(self):
        # End of turn: lock the board and submit the chosen node (if any)
        # together with a visited parent; otherwise submit an empty expansion.
        log.msg(["_finishTurn", self])
        self.gameui.setActive(False)
        chosen = self.gameui.chosen
        log.msg({'chosen': chosen, 'visited': self.visited,
                 'parents': self.childMaker.getChildren(chosen) if chosen != () else None})
        if chosen == ():
            d = self.perspective.callRemote("expandNode", (), ())
        else:
            for parent in self.childMaker.getChildren(chosen):
                if parent in self.visited:
                    self.visited.add(chosen)
                    d = self.perspective.callRemote("expandNode", chosen,
                                                    parent)
                    break
            else:
                # No visited parent found: chosen node is unreachable,
                # submit an empty expansion instead.
                d = self.perspective.callRemote("expandNode", (), ())
        d.addErrback(self._errored)

    def finishTurnEarly(self):
        # User made a choice before the timeout: cancel the timer and finish.
        log.msg(["finishTurnEarly", self])
        self.later.cancel()
        self._finishTurn()

    def connect(self, name=None):
        # Prompt for a username (if not given), apply vanity replacements,
        # log in over PB and start the reactor (blocks until shutdown).
        log.msg(["connect", self, name])
        while name is None:
            dialog = NameDialog(self.root)
            name = dialog.result
        if name.lower() in self.repl:
            name = self.repl[name.lower()]
        self.name = name
        d = self.factory.login(credentials.UsernamePassword(self.name, ''),
                               client=self)
        d.addCallback(self._connected)
        d.addErrback(self._nameTaken)
        d.addErrback(self._errored)
        reactor.run()

    def _connected(self, perspective):
        # Login succeeded: keep the server-side perspective and ask whether
        # this client may actually play (vs. chat only).
        log.msg(["Connected with name: " + self.name, self, perspective])
        self.perspective = perspective
        d = perspective.callRemote("canPlay")
        d.addCallback(self._setCanPlay)
        d.addErrback(self._errored)

    def _setCanPlay(self, canPlay):
        # Build the game UI and fetch the game type when playing is allowed.
        log.msg(["setCanPlay", self, canPlay])
        self.canPlay = canPlay
        if canPlay:
            log.msg("Acting as game client.")
            self.gameui = GameUI(self, self.root)
            d = self.perspective.callRemote("getGameType")
            d.addCallback(self._setGameType)
            d.addErrback(self._errored)
        else:
            log.msg("Acting as chat-only client.")
        # Have chat functionality for both
        self.chatui = ChatUI(self, self.root)

    def _setGameType(self, gameType):
        # Rehydrate the GameType from its dict form, display the board image
        # and prepare the child-coordinate helper, then ask for our colour.
        log.msg(["_setGameType", self, repr(gameType)[:100]])
        self.gameType = GameType(None)
        self.gameType.fromDictionary(gameType)
        image = Image.open(StringIO(self.gameType.image))
        self.gameui.setImage(image)
        self.childMaker = util.ChildMaker(image.size, self.gameType.diagonals)
        d = self.perspective.callRemote("getColour")
        d.addCallback(self._setColour)
        d.addErrback(self._errored)

    def _setColour(self, colour):
        # Store the colour the server assigned to this player.
        log.msg(["_setColour", self, colour])
        self.gameui.colour = colour
        # Do things.

    def _nameTaken(self, failure):
        # Username collision: re-prompt (flagging the retry) and reconnect.
        failure.trap(error.NameTaken)
        log.msg("Name '{0}' already taken. Retrying with new name.".format(self.name))
        name = None
        while name is None:
            dialog = NameDialog(self.root, True)
            name = dialog.result
        self.connect(name)

    def _errored(self, reason):
        # Generic errback: log and swallow.
        log.msg("Logging error:")
        log.err(reason)

    def shutdown(self):
        # Window-close handler: stop the Twisted reactor.
        log.msg("Stopping reactor from " + repr(self))
        reactor.stop()
class GameUI(object):
    """Tk canvas-based board view.

    Shows the game image scaled up by an integer factor and lets the user
    click a cell to choose the node to expand.  Python 2 code: integer
    division (``a/f``) and ``xrange`` are relied upon throughout.
    """

    # Whether clicks are currently accepted (set per turn).
    active = False
    # Stack of (x, y, original_colour) pixels temporarily overwritten by the
    # user's provisional selection, so it can be undone.
    # NOTE(review): class-level mutable attribute — shared by all instances.
    edited = []

    def __init__(self, conduit, root):
        """conduit: a GameClient with a connection to a server."""
        super(GameUI, self).__init__()
        log.msg(["GameUI.__init__", self, conduit, root])
        self.conduit = conduit
        self.window = Tkinter.Toplevel(root)
        self.window.title("Most exciting game you've ever played.")
        self.canvas = Tkinter.Canvas(self.window, offset="10,10")
        self.item = self.canvas.create_image(0, 0, anchor=Tkinter.NW)
        self.canvas.pack()
        self.canvas.bind("<Button 1>", self._onClick)

    def _onClick(self, event):
        # Record the clicked cell as the chosen node, paint its corners in
        # the player's colour, and end the turn immediately.
        log.msg(["_onClick", self, event, self.active])
        if not self.active:
            return
        self._unedit()
        # floor x,y to multiples of self.factor
        x, y = map(lambda a, f=self.factor: a/f*f, (event.x, event.y))
        self.chosen = (x/self.factor, y/self.factor)
        # remember the original pixel so the mark can be undone later
        self.edited.append((x, y, self.pa[x, y]))
        for xx in (x, x+self.factor-1):
            for yy in (y, y+self.factor-1):
                self.pa[xx, yy] = self.colour
        self._updateImage()
        self.conduit.finishTurnEarly()

    def setImage(self, image):
        """image: a PIL.Image"""
        log.msg(["setImage", self, image])
        # Upscale by a fixed factor of 9 so each game cell is clickable.
        self.image = self._resize(image.convert("RGB"), 9)
        self.pa = self.image.load()
        self._updateImage()

    def _updateImage(self):
        # Push self.image onto the canvas and resize the canvas to fit.
        log.msg(["_updateImage", self])
        imagetk = ImageTk.PhotoImage(self.image)
        self.canvas.itemconfig(self.item, image=imagetk)
        self.canvas.config(width=imagetk.width(), height=imagetk.height())
        # keep reference so that old imagetk isn't
        # GCed until new one is in place
        self.imagetk = imagetk

    def _resize(self, image, factor):
        # Nearest-neighbour integer upscale; also records the factor used
        # for later coordinate conversion.
        log.msg(["_resize", image, factor])
        self.factor = factor
        opa = image.load()
        size = (image.size[0]*factor, image.size[1]*factor)
        im = Image.new("RGB", size, None)
        npa = im.load()
        for x in xrange(size[0]):
            for y in xrange(size[1]):
                npa[x, y] = opa[x/factor, y/factor]
        return im

    def setActive(self, active):
        # Enable/disable click handling; on activation clear any provisional
        # selection from the previous turn.
        log.msg(["setActive", self, active])
        self.active = active
        if active:
            self._unedit()
            self.chosen = ()
        # TODO: visually show whether active or inactive

    def _unedit(self):
        # Restore every pixel block overwritten by the provisional selection.
        log.msg(["_unedit", self, self.edited])
        for e in self.edited:
            for x in xrange(e[0], e[0]+self.factor):
                for y in xrange(e[1], e[1]+self.factor):
                    self.pa[x, y] = e[2]
        self.edited = []
        self._updateImage()

    def applyCostDelta(self, costdelta):
        # Paint server-announced cell colour changes onto the scaled image.
        # costdelta: [((cell_x, cell_y), colour), ...] in game coordinates.
        log.msg(["applyCostDelta", self, costdelta])
        self._unedit()
        for pixel, colour in costdelta:
            for x in xrange(pixel[0]*self.factor, (pixel[0]+1)*self.factor):
                for y in xrange(pixel[1]*self.factor, (pixel[1]+1)*self.factor):
                    self.pa[x, y] = tuple(colour)
        self._updateImage()
class ChatUI(object):
    """Simple Tk chat panel: a read-only log plus a single-line entry box."""

    # Cache of colour tuple -> Tk tag name, so each colour is configured once.
    # NOTE(review): class-level mutable attribute — shared by all instances.
    tags = {}

    def __init__(self, conduit, root):
        # conduit: a GameClient used to send outgoing messages.
        log.msg(["ChatUI.__init__", self, conduit])
        self.conduit = conduit
        self.window = root
        # 'disabled' keeps the log read-only; it is briefly enabled to insert.
        self.chatlog = Tkinter.Text(self.window, state='disabled', wrap='word',
                                    width=80, height=24)
        self.typebox = Tkinter.Entry(self.window, width=80)
        self.typebox.bind('<Return>', self.returnPressed)
        self.typebox.pack(fill='both', expand='yes')
        self.chatlog.pack(fill='both', expand='yes')

    def printMessage(self, message, colour):
        # Append a message to the chat log, styled with a per-colour tag.
        # TODO: use colour
        log.msg(["printMessage", self, message, colour])
        tag = self.getTag(colour)
        self.chatlog['state'] = 'normal'
        self.chatlog.insert('end', message, (tag, ))
        self.chatlog.insert('end', '\n')
        self.chatlog['state'] = 'disabled'
        print message

    def returnPressed(self, event):
        # <Return> in the entry box: send its contents and clear it.
        log.msg(["returnPressed", self, event])
        self.conduit.sendMessage(self.typebox.get())
        self.typebox.delete('0', 'end')

    def getTag(self, colour):
        # Return (creating on first use) a Text tag whose background is the
        # given (r, g, b) colour, with a black/white foreground chosen by
        # perceived luminance (YIQ brightness formula).
        try:
            return self.tags[colour]
        except KeyError:
            r, g, b = colour
            yiq = (r * 299 + g * 587 + b * 114) / 1000
            if yiq > 128:
                fg = 'black'
            else:
                fg = 'white'
            hexcolour = '#' + ''.join('%02x' % c for c in colour)
            name = ''.join(('tag', hexcolour))
            self.chatlog.tag_configure(name, background=hexcolour,
                                       foreground=fg)
            self.tags[colour] = name
            return name
class NameDialog(Dialog):
    """Requests name from user."""

    def __init__(self, master, wasTaken=False):
        # wasTaken: when True, the prompt notes the previous name collided.
        self.wasTaken = wasTaken
        Dialog.__init__(self, master)

    def body(self, master):
        # Build the dialog body; returning the entry gives it initial focus.
        if self.wasTaken:
            text = "Username taken. Try again."
        else:
            text = "Enter username:"
        Tkinter.Label(master, text=text).pack()
        self.entry = Tkinter.Entry(master)
        self.entry.pack()
        return self.entry

    def apply(self):
        # Called on OK: expose the typed name via self.result.
        self.result = self.entry.get()
if __name__ == '__main__':
    # CLI: optional [address [port]] positional arguments; --help prints usage.
    if len(sys.argv) >= 2 and sys.argv[1] == "--help":
        print "usage: {0} [address [port]]".format(sys.argv[0])
        exit(0)
    # Log to stdout without redirecting print statements through the logger.
    log.startLogging(sys.stdout, setStdout=False)
    GameClient(*sys.argv[1:]).connect()
|
mmdeas/path-game
|
client.py
|
Python
|
mit
| 11,523
|
[
"exciting"
] |
c871c3a0dfd255e2208391e4e9b96254d57e2af5f584d97e23a8d109f839eedf
|
import json
import platform
import unittest
import xapian
from gi.repository import GLib
from mock import patch
from piston_mini_client import PistonResponseObject
from tests.utils import (
get_test_pkg_info,
setup_test_env,
ObjectWithSignals,
)
setup_test_env()
from softwarecenter.enums import (
AppInfoFields,
AVAILABLE_FOR_PURCHASE_MAGIC_CHANNEL_NAME,
XapianValues,
)
from softwarecenter.db.database import get_reinstall_previous_purchases_query
from softwarecenter.db.update import (
SCAPurchasedApplicationParser,
SCAApplicationParser,
update_from_software_center_agent,
)
# Example taken from running:
# PYTHONPATH=. utils/piston-helpers/piston_generic_helper.py --output=pickle \
# --debug --needs-auth SoftwareCenterAgentAPI subscriptions_for_me
# then:
# f = open('my_subscriptions.pickle')
# subscriptions = pickle.load(f)
# completed_subs = [subs for subs in subscriptions if subs.state=='Complete']
# completed_subs[0].__dict__
SUBSCRIPTIONS_FOR_ME_JSON = """
[
{
"deb_line": "deb https://username:random3atoken@private-ppa.launchpad.net/commercial-ppa-uploaders/photobomb/ubuntu natty main",
"purchase_price": "2.99",
"purchase_date": "2011-09-16 06:37:52",
"state": "Complete",
"failures": [],
"open_id": "https://login.ubuntu.com/+id/ABCDEF",
"application": {
"archive_id": "commercial-ppa-uploaders/photobomb",
"signing_key_id": "1024R/75254D99",
"name": "Photobomb",
"package_name": "photobomb",
"description": "Easy and Social Image Editor\\nPhotobomb give you easy access to images in your social networking feeds, pictures on your computer and peripherals, and pictures on the web, and let\'s you draw, write, crop, combine, and generally have a blast mashing \'em all up. Then you can save off your photobomb, or tweet your creation right back to your social network.",
"version": "1.2.1"
},
"distro_series": {"code_name": "natty", "version": "11.04"}
}
]
"""
# Taken directly from:
# https://software-center.ubuntu.com/api/2.0/applications/en/ubuntu/oneiric/i386/
AVAILABLE_APPS_JSON = """
[
{
"archive_id": "commercial-ppa-uploaders/fluendo-dvd",
"signing_key_id": "1024R/75254D99",
"license": "Proprietary",
"name": "Fluendo DVD Player",
"package_name": "fluendo-dvd",
"support_url": "",
"series": {
"maverick": [
"i386",
"amd64"
],
"natty": [
"i386",
"amd64"
],
"oneiric": [
"i386",
"amd64"
]
},
"price": "24.95",
"demo": null,
"date_published": "2011-12-05 18:43:21.653868",
"status": "Published",
"channel": "For Purchase",
"icon_data": "...",
"department": [
"Sound & Video"
],
"archive_root": "https://private-ppa.launchpad.net/",
"screenshot_url": "http://software-center.ubuntu.com/site_media/screenshots/2011/05/fluendo-dvd-maverick_.png",
"tos_url": "https://software-center.ubuntu.com/licenses/3/",
"icon_url": "http://software-center.ubuntu.com/site_media/icons/2011/05/fluendo-dvd.png",
"categories": "AudioVideo",
"description": "Play DVD-Videos\\r\\n\\r\\nFluendo DVD Player is a software application specially designed to\\r\\nreproduce DVD on Linux/Unix platforms, which provides end users with\\r\\nhigh quality standards.\\r\\n\\r\\nThe following features are provided:\\r\\n* Full DVD Playback\\r\\n* DVD Menu support\\r\\n* Fullscreen support\\r\\n* Dolby Digital pass-through\\r\\n* Dolby Digital 5.1 output and stereo downmixing support\\r\\n* Resume from last position support\\r\\n* Subtitle support\\r\\n* Audio selection support\\r\\n* Multiple Angles support\\r\\n* Support for encrypted discs\\r\\n* Multiregion, works in all regions\\r\\n* Multiple video deinterlacing algorithms",
"website": null,
"version": "1.2.1",
"binary_filesize": 12345
},
{
"website": "",
"package_name": "photobomb",
"video_embedded_html_urls": [ ],
"demo": null,
"keywords": "photos, pictures, editing, gwibber, twitter, facebook, drawing",
"video_urls": [ ],
"screenshot_url": "http://software-center.ubuntu.com/site_media/screenshots/2011/08/Screenshot-45.png",
"id": 83,
"archive_id": "commercial-ppa-uploaders/photobomb",
"support_url": "http://launchpad.net/photobomb",
"icon_url": "http://software-center.ubuntu.com/site_media/icons/2011/08/logo_64.png",
"binary_filesize": null,
"version": "",
"company_name": "",
"department": [
"Graphics"
],
"tos_url": "",
"channel": "For Purchase",
"status": "Published",
"signing_key_id": "1024R/75254D99",
"description": "Easy and Social Image Editor\\nPhotobomb give you easy access to images in your social networking feeds, pictures on your computer and peripherals, and pictures on the web, and let's you draw, write, crop, combine, and generally have a blast mashing 'em all up. Then you can save off your photobomb, or tweet your creation right back to your social network.",
"price": "2.99",
"debtags": [ ],
"date_published": "2011-12-05 18:43:20.794802",
"categories": "Graphics",
"name": "Photobomb",
"license": "GNU GPL v3",
"screenshot_urls": [
"http://software-center.ubuntu.com/site_media/screenshots/2011/08/Screenshot-45.png"
],
"archive_root": "https://private-ppa.launchpad.net/"
}
]
"""
class SCAApplicationParserTestCase(unittest.TestCase):
    """Tests for SCAApplicationParser against the AVAILABLE_APPS_JSON fixture
    (the fluendo-dvd entry unless a parser is built explicitly)."""

    def _make_application_parser(self, piston_application=None):
        # Build a parser from the first fixture entry by default.
        if piston_application is None:
            piston_application = PistonResponseObject.from_dict(
                json.loads(AVAILABLE_APPS_JSON)[0])
        return SCAApplicationParser(piston_application)

    def test_parses_application_from_available_apps(self):
        # Every mapped attribute on the piston object round-trips through
        # parser.get_value via the inverted MAPPING.
        parser = self._make_application_parser()
        inverse_map = dict(
            (val, key) for key, val in SCAApplicationParser.MAPPING.items())
        # Delete the keys which are not yet provided via the API:
        del(inverse_map['video_embedded_html_url'])
        for key in inverse_map:
            self.assertEqual(
                getattr(parser.sca_application, key),
                parser.get_value(inverse_map[key]))

    def test_name_not_updated_for_non_purchased_apps(self):
        # No "(already purchased)" suffix for plain available apps.
        parser = self._make_application_parser()
        self.assertEqual('Fluendo DVD Player',
            parser.get_value(AppInfoFields.NAME))

    def test_binary_filesize(self):
        parser = self._make_application_parser()
        self.assertEqual(12345,
            parser.get_value(AppInfoFields.DOWNLOAD_SIZE))

    def test_keys_not_provided_by_api(self):
        # Fields absent from the API: VIDEO_URL is None, TYPE has a default.
        parser = self._make_application_parser()
        self.assertIsNone(parser.get_value(AppInfoFields.VIDEO_URL))
        self.assertEqual('Application', parser.get_value(AppInfoFields.TYPE))

    def test_thumbnail_is_screenshot(self):
        # The screenshot URL doubles as the thumbnail URL.
        parser = self._make_application_parser()
        self.assertEqual(
            "http://software-center.ubuntu.com/site_media/screenshots/"
            "2011/05/fluendo-dvd-maverick_.png",
            parser.get_value(AppInfoFields.THUMBNAIL_URL))

    def test_extracts_description(self):
        # The first description line becomes SUMMARY; the rest DESCRIPTION.
        parser = self._make_application_parser()
        self.assertEqual("Play DVD-Videos",
            parser.get_value(AppInfoFields.SUMMARY))
        self.assertEqual(
            "Fluendo DVD Player is a software application specially designed "
            "to\r\nreproduce DVD on Linux/Unix platforms, which provides end "
            "users with\r\nhigh quality standards.\r\n\r\nThe following "
            "features are provided:\r\n* Full DVD Playback\r\n* DVD Menu "
            "support\r\n* Fullscreen support\r\n* Dolby Digital pass-through"
            "\r\n* Dolby Digital 5.1 output and stereo downmixing support\r\n"
            "* Resume from last position support\r\n* Subtitle support\r\n"
            "* Audio selection support\r\n* Multiple Angles support\r\n"
            "* Support for encrypted discs\r\n"
            "* Multiregion, works in all regions\r\n"
            "* Multiple video deinterlacing algorithms",
            parser.get_value(AppInfoFields.DESCRIPTION))

    def test_desktop_categories_uses_department(self):
        # Departments are prefixed with DEPARTMENT: and prepended.
        parser = self._make_application_parser()
        self.assertEqual([u'DEPARTMENT:Sound & Video', "AudioVideo"],
            parser.get_categories())

    def test_desktop_categories_no_department(self):
        # Without a department attribute only the raw categories remain.
        piston_app = PistonResponseObject.from_dict(
            json.loads(AVAILABLE_APPS_JSON)[0])
        del(piston_app.department)
        parser = self._make_application_parser(piston_app)
        self.assertEqual(["AudioVideo"], parser.get_categories())

    def test_magic_channel(self):
        # Purchasable apps are mapped to the magic for-purchase channel.
        parser = self._make_application_parser()
        self.assertEqual(
            AVAILABLE_FOR_PURCHASE_MAGIC_CHANNEL_NAME,
            parser.get_value(AppInfoFields.CHANNEL))
class SCAPurchasedApplicationParserTestCase(unittest.TestCase):
    """Tests for SCAPurchasedApplicationParser against the
    SUBSCRIPTIONS_FOR_ME_JSON fixture (the photobomb subscription)."""

    def _make_application_parser(self, piston_subscription=None):
        # Build a parser from the first subscription fixture by default.
        if piston_subscription is None:
            piston_subscription = PistonResponseObject.from_dict(
                json.loads(SUBSCRIPTIONS_FOR_ME_JSON)[0])
        return SCAPurchasedApplicationParser(piston_subscription)

    def setUp(self):
        # Pin the distro codename so deb-line rewriting is deterministic.
        get_distro_patcher = patch('softwarecenter.db.update.get_distro')
        self.addCleanup(get_distro_patcher.stop)
        mock_get_distro = get_distro_patcher.start()
        mock_get_distro.return_value.get_codename.return_value = 'quintessential'

    def test_get_desktop_subscription(self):
        # The deb line is rewritten to the current (mocked) series; the
        # original line and purchase date are preserved verbatim.
        parser = self._make_application_parser()
        expected_results = {
            AppInfoFields.DEB_LINE: "deb https://username:random3atoken@"
                "private-ppa.launchpad.net/commercial-ppa-uploaders"
                "/photobomb/ubuntu quintessential main",
            AppInfoFields.DEB_LINE_ORIG:
                "deb https://username:random3atoken@"
                "private-ppa.launchpad.net/commercial-ppa-uploaders"
                "/photobomb/ubuntu natty main",
            AppInfoFields.PURCHASED_DATE: "2011-09-16 06:37:52",
        }
        for key in expected_results:
            result = parser.get_value(key)
            self.assertEqual(expected_results[key], result)

    def test_get_desktop_application(self):
        # The parser passes application attributes through to
        # an application parser for handling.
        parser = self._make_application_parser()
        # We're testing here also that the name is updated automatically.
        expected_results = {
            AppInfoFields.NAME: "Photobomb (already purchased)",
            AppInfoFields.PACKAGE: "photobomb",
            AppInfoFields.SIGNING_KEY_ID: "1024R/75254D99",
            AppInfoFields.PPA: "commercial-ppa-uploaders/photobomb",
        }
        for key in expected_results.keys():
            result = parser.get_value(key)
            self.assertEqual(expected_results[key], result)

    def test_has_option_desktop_includes_app_keys(self):
        # The SCAPurchasedApplicationParser handles application keys also
        # (passing them through to the composited application parser).
        parser = self._make_application_parser()
        for key in (AppInfoFields.NAME, AppInfoFields.PACKAGE,
                    AppInfoFields.SIGNING_KEY_ID, AppInfoFields.PPA):
            self.assertIsNotNone(parser.get_value(key))
        for key in (AppInfoFields.DEB_LINE, AppInfoFields.PURCHASED_DATE):
            self.assertIsNotNone(parser.get_value(key),
                'Key: {0} was not an option.'.format(key))

    def test_license_key_present(self):
        # License key fields are surfaced when the subscription carries them.
        piston_subscription = PistonResponseObject.from_dict(
            json.loads(SUBSCRIPTIONS_FOR_ME_JSON)[0])
        piston_subscription.license_key = 'abcd'
        piston_subscription.license_key_path = '/foo'
        parser = self._make_application_parser(piston_subscription)
        self.assertEqual('abcd', parser.get_value(AppInfoFields.LICENSE_KEY))
        self.assertEqual(
            '/foo', parser.get_value(AppInfoFields.LICENSE_KEY_PATH))

    def test_license_key_not_present(self):
        # ...and are None when the subscription lacks them.
        parser = self._make_application_parser()
        for key in (AppInfoFields.LICENSE_KEY, AppInfoFields.LICENSE_KEY_PATH):
            self.assertIsNone(parser.get_value(key))

    def test_purchase_date(self):
        parser = self._make_application_parser()
        self.assertEqual(
            "2011-09-16 06:37:52",
            parser.get_value(AppInfoFields.PURCHASED_DATE))

    def test_will_handle_supported_distros_when_available(self):
        # When the fix for bug 917109 reaches production, we will be
        # able to use the supported series.
        parser = self._make_application_parser()
        supported_distros = {
            "maverick": [
                "i386",
                "amd64"
            ],
            "natty": [
                "i386",
                "amd64"
            ],
        }
        parser.sca_application.series = supported_distros
        self.assertEqual(
            supported_distros,
            parser.get_value(AppInfoFields.SUPPORTED_DISTROS))

    def test_update_debline_other_series(self):
        # update_debline swaps any series name for the current codename.
        orig_debline = (
            "deb https://username:random3atoken@"
            "private-ppa.launchpad.net/commercial-ppa-uploaders"
            "/photobomb/ubuntu karmic main")
        expected_debline = (
            "deb https://username:random3atoken@"
            "private-ppa.launchpad.net/commercial-ppa-uploaders"
            "/photobomb/ubuntu quintessential main")
        self.assertEqual(expected_debline,
            SCAPurchasedApplicationParser.update_debline(orig_debline))

    def test_update_debline_with_pocket(self):
        # Pocket suffixes (e.g. -security) are preserved across the rewrite.
        orig_debline = (
            "deb https://username:random3atoken@"
            "private-ppa.launchpad.net/commercial-ppa-uploaders"
            "/photobomb/ubuntu karmic-security main")
        expected_debline = (
            "deb https://username:random3atoken@"
            "private-ppa.launchpad.net/commercial-ppa-uploaders"
            "/photobomb/ubuntu quintessential-security main")
        self.assertEqual(expected_debline,
            SCAPurchasedApplicationParser.update_debline(orig_debline))
class TestAvailableForMeMerging(unittest.TestCase):
    """End-to-end test: merging available and purchased app lists into a
    xapian database via update_from_software_center_agent, using a fake
    SoftwareCenterAgent that emits the fixtures asynchronously."""

    def setUp(self):
        self.available_for_me = self._make_available_for_me_list()
        self.available = self._make_available_list()

    def _make_available_for_me_list(self):
        # Purchased-subscription fixtures as piston response objects.
        my_subscriptions = json.loads(SUBSCRIPTIONS_FOR_ME_JSON)
        return list(
            PistonResponseObject.from_dict(subs) for subs in my_subscriptions)

    def _make_available_list(self):
        # Available-app fixtures as piston response objects.
        available_apps = json.loads(AVAILABLE_APPS_JSON)
        return list(
            PistonResponseObject.from_dict(subs) for subs in available_apps)

    def _make_fake_scagent(self, available_data, available_for_me_data):
        # Fake agent whose query methods emit their signals from a GLib
        # timeout, mimicking the real asynchronous agent.
        sca = ObjectWithSignals()
        sca.query_available = lambda **kwargs: GLib.timeout_add(
            100, lambda: sca.emit('available', sca, available_data))
        sca.query_available_for_me = lambda **kwargs: GLib.timeout_add(
            100, lambda: sca.emit('available-for-me',
                sca, available_for_me_data))
        return sca

    def test_reinstall_purchased_mock(self):
        # test if the mocks are ok
        self.assertEqual(len(self.available_for_me), 1)
        self.assertEqual(
            self.available_for_me[0].application['package_name'], "photobomb")

    @patch("softwarecenter.db.update.SoftwareCenterAgent")
    @patch("softwarecenter.db.update.UbuntuSSO")
    def test_reinstall_purchased_xapian(self, mock_helper, mock_agent):
        small_available = [ self.available[0] ]
        mock_agent.return_value = self._make_fake_scagent(
            small_available, self.available_for_me)
        db = xapian.inmemory_open()
        cache = get_test_pkg_info()
        # now create purchased debs xapian index (in memory because
        # we store the repository passwords in here)
        old_db_len = db.get_doccount()
        update_from_software_center_agent(db, cache)
        # ensure we have the new item
        self.assertEqual(db.get_doccount(), old_db_len+2)
        # query
        query = get_reinstall_previous_purchases_query()
        enquire = xapian.Enquire(db)
        enquire.set_query(query)
        matches = enquire.get_mset(0, db.get_doccount())
        self.assertEqual(len(matches), 1)
        distroseries = platform.dist()[2]
        for m in matches:
            doc = db.get_document(m.docid)
            self.assertEqual(doc.get_value(XapianValues.PKGNAME), "photobomb")
            self.assertEqual(
                doc.get_value(XapianValues.ARCHIVE_SIGNING_KEY_ID),
                "1024R/75254D99")
            self.assertEqual(doc.get_value(XapianValues.ARCHIVE_DEB_LINE),
                "deb https://username:random3atoken@"
                "private-ppa.launchpad.net/commercial-ppa-uploaders"
                "/photobomb/ubuntu %s main" % distroseries)
if __name__ == "__main__":
    # Enable verbose logging when the test module is run directly.
    import logging
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
|
mortenpi/ubuntu-software-center
|
tests/test_reinstall_purchased.py
|
Python
|
gpl-3.0
| 17,776
|
[
"BLAST"
] |
3676901c86b799337d53ba070cb62387ea2dca6b995583ae26317147e6d46eef
|
#!/usr/bin/env python
""" The dirac-pilot.py script is a steering script to execute a series of
pilot commands. The commands may be provided in the pilot input sandbox, and are coded in
the pilotCommands.py module or in any <EXTENSION>Commands.py module.
The pilot script defines two switches in order to choose a set of commands for the pilot:
-E, --commandExtensions value
where the value is a comma separated list of extension names. Modules
with names <EXTENSION>Commands.py will be searched for the commands in
the order defined in the value. By default no extensions are given
-X, --commands value
where value is a comma separated list of pilot commands. By default
the list is InstallDIRAC,ConfigureDIRAC,LaunchAgent
The pilot script by default performs initial sanity checks on WN, installs and configures
DIRAC and runs the Job Agent to execute pending workloads in the DIRAC WMS.
But, as said, all the actions are actually configurable.
"""
__RCSID__ = "$Id$"
import os
import getopt
import sys
from types import ListType
from pilotTools import Logger, pythonPathCheck, PilotParams, getCommand
if __name__ == "__main__":
    # Sanity-check the python environment before doing anything else.
    pythonPathCheck()
    log = Logger( 'Pilot' )
    # PilotParams parses the command-line switches (-E / -X etc.) described
    # in the module docstring.
    pilotParams = PilotParams()
    if pilotParams.debugFlag:
        log.setDebug()
    # Record where the pilot is running from; commands may rely on these.
    pilotParams.pilotRootPath = os.getcwd()
    pilotParams.pilotScript = os.path.realpath( sys.argv[0] )
    pilotParams.pilotScriptName = os.path.basename( pilotParams.pilotScript )
    log.debug( 'PARAMETER [%s]' % ', '.join( map( str, pilotParams.optList ) ) )
    log.info( "Executing commands: %s" % str( pilotParams.commands ) )
    if pilotParams.commandExtensions:
    	log.info( "Requested command extensions: %s" % str( pilotParams.commandExtensions ) )
    # Instantiate and run each configured pilot command in order; abort the
    # whole pilot on the first command that cannot be instantiated.
    for commandName in pilotParams.commands:
        command, module = getCommand( pilotParams, commandName, log )
        if command is not None:
            log.info( "Command %s instantiated from %s" % ( commandName, module ) )
            command.execute()
        else:
            log.error( "Command %s could not be instantiated" % commandName )
            sys.exit( -1 )
|
Sbalbp/DIRAC
|
WorkloadManagementSystem/PilotAgent/dirac-pilot.py
|
Python
|
gpl-3.0
| 2,136
|
[
"DIRAC"
] |
5999c75a8ce1aa7e0b4197ffddabead73facf160b18b5f406e213f0cb50b8d07
|
# Copyright (c) 2012, 2013, 2014 James Hensman
# Licensed under the GPL v3 (see LICENSE.txt)
import numpy as np
from .collapsed_mixture import CollapsedMixture
import GPy
from GPy.util.linalg import mdot, pdinv, backsub_both_sides, dpotrs, jitchol, dtrtrs
from GPy.util.linalg import tdot_numpy as tdot
class OMGP(CollapsedMixture):
    """
    Overlapping mixtures of Gaussian processes

    Each of the K components is a GP over the shared inputs X; the
    variational responsibilities self.phi (N x K, inherited from
    CollapsedMixture) softly assign each data point to a component.
    A single Gaussian noise variance is shared by all components.
    """
    def __init__(self, X, Y, K=2, kernels=None, variance=1., alpha=1., prior_Z='symmetric', name='OMGP'):
        # X: inputs, Y: outputs (N x D).
        N, self.D = Y.shape
        self.Y = Y
        # Cache Y Y^T (N x N); reused by every bound/gradient evaluation.
        self.YYT = tdot(self.Y)
        self.X = X
        # NOTE(review): `== None` should be `is None` (PEP 8); harmless here
        # unless a kernel container overrides __eq__.
        if kernels == None:
            # Default: one fresh 1-d RBF kernel per component.
            self.kern = []
            for i in range(K):
                self.kern.append(GPy.kern.RBF(input_dim=1))
        else:
            self.kern = kernels
        CollapsedMixture.__init__(self, N, K, prior_Z, alpha, name)
        # The noise variance is kept positive via the Logexp transform.
        self.link_parameter(GPy.core.parameterization.param.Param('variance', variance, GPy.core.parameterization.transformations.Logexp()))
        self.link_parameters(*self.kern)

    def parameters_changed(self):
        """ Set the kernel parameters
        """
        # GPy callback: refresh all gradients after any parameter update.
        self.update_kern_grads()

    def do_computations(self):
        """
        Here we do all the computations that are required whenever the kernels
        or the variational parameters are changed.
        """
        # Keep the kernel list in sync with K: clone the last kernel to grow,
        # unlink surplus kernels to shrink.
        # NOTE(review): only one kernel is appended per call — presumably K
        # never grows by more than one between calls; confirm with callers.
        if len(self.kern) < self.K:
            self.kern.append(self.kern[-1].copy())
            self.link_parameter(self.kern[-1])
        if len(self.kern) > self.K:
            for kern in self.kern[self.K:]:
                self.unlink_parameter(kern)
            self.kern = self.kern[:self.K]

    def update_kern_grads(self):
        """
        Set the derivative of the lower bound wrt the (kernel) parameters
        """
        grad_Lm_variance = 0.0
        for i, kern in enumerate(self.kern):
            K = kern.K(self.X)
            # B_inv: per-point noise scaled by inverse responsibility — points
            # barely assigned to this component get a huge effective noise.
            B_inv = np.diag(1. / (self.phi[:, i] / self.variance))
            # Numerically more stable version using cholesky decomposition
            #alpha = linalg.cho_solve(linalg.cho_factor(K + B_inv), self.Y)
            #K_B_inv = pdinv(K + B_inv)[0]
            #dL_dK = .5*(tdot(alpha) - K_B_inv)
            # Make more stable using cholesky factorization:
            Bi, LB, LBi, Blogdet = pdinv(K+B_inv)
            tmp = dpotrs(LB, self.YYT)[0]
            GPy.util.diag.subtract(tmp, 1)
            dL_dB = dpotrs(LB, tmp.T)[0]
            kern.update_gradients_full(dL_dK=.5*dL_dB, X=self.X)
            # variance gradient
            #for i, kern in enumerate(self.kern):
            K = kern.K(self.X)
            #I = np.eye(self.N)
            # 1e-6 jitter guards against division by zero responsibilities.
            B_inv = np.diag(1. / ((self.phi[:, i] + 1e-6) / self.variance))
            #alpha = np.linalg.solve(K + B_inv, self.Y)
            #K_B_inv = pdinv(K + B_inv)[0]
            #dL_dB = tdot(alpha) - K_B_inv
            grad_B_inv = np.diag(1. / (self.phi[:, i] + 1e-6))
            grad_Lm_variance += 0.5 * np.trace(np.dot(dL_dB, grad_B_inv))
            grad_Lm_variance -= .5*self.D * np.einsum('j,j->',self.phi[:, i], 1./self.variance)
        self.variance.gradient = grad_Lm_variance

    def bound(self):
        """
        Compute the lower bound on the marginal likelihood (conditioned on the
        GP hyper parameters).
        """
        GP_bound = 0.0
        for i, kern in enumerate(self.kern):
            K = kern.K(self.X)
            B_inv = np.diag(1. / ((self.phi[:, i] + 1e-6) / self.variance))
            # Make more stable using cholesky factorization:
            Bi, LB, LBi, Blogdet = pdinv(K+B_inv)
            # Data fit
            # alpha = linalg.cho_solve(linalg.cho_factor(K + B_inv), self.Y)
            # GP_bound += -0.5 * np.dot(self.Y.T, alpha).trace()
            GP_bound -= .5 * dpotrs(LB, self.YYT)[0].trace()
            # Penalty
            # GP_bound += -0.5 * np.linalg.slogdet(K + B_inv)[1]
            GP_bound -= 0.5 * Blogdet
            # Constant, weighted by model assignment per point
            #GP_bound += -0.5 * (self.phi[:, i] * np.log(2 * np.pi * self.variance)).sum()
            GP_bound -= .5*self.D * np.einsum('j,j->',self.phi[:, i], np.log(2 * np.pi * self.variance))
        return GP_bound + self.mixing_prop_bound() + self.H

    def vb_grad_natgrad(self):
        """
        Natural Gradients of the bound with respect to phi, the variational
        parameters controlling assignment of the data to GPs
        """
        grad_Lm = np.zeros_like(self.phi)
        for i, kern in enumerate(self.kern):
            K = kern.K(self.X)
            I = np.eye(self.N)
            B_inv = np.diag(1. / ((self.phi[:, i] + 1e-6) / self.variance))
            K_B_inv = pdinv(K + B_inv)[0]
            alpha = np.dot(K_B_inv, self.Y)
            dL_dB = tdot(alpha) - K_B_inv
            # Only the diagonal of dL_dB contributes: B_inv is diagonal in phi.
            for n in range(self.phi.shape[0]):
                grad_B_inv_nonzero = -self.variance / (self.phi[n, i] ** 2 + 1e-6)
                grad_Lm[n, i] = 0.5 * dL_dB[n, n] * grad_B_inv_nonzero
        grad_phi = grad_Lm + self.mixing_prop_bound_grad() + self.Hgrad
        # Project onto the simplex tangent space to get the natural gradient.
        natgrad = grad_phi - np.sum(self.phi * grad_phi, 1)[:, None]
        grad = natgrad * self.phi
        return grad.flatten(), natgrad.flatten()

    def predict(self, Xnew, i):
        """ Predictive mean for a given component
        """
        kern = self.kern[i]
        K = kern.K(self.X)
        kx = kern.K(self.X, Xnew)
        # Predict mean
        # This works but should Cholesky for stability
        # NOTE(review): unlike bound()/vb_grad_natgrad(), no 1e-6 jitter is
        # added to phi here, so a zero responsibility divides by zero.
        B_inv = np.diag(1. / (self.phi[:, i] / self.variance))
        K_B_inv = pdinv(K + B_inv)[0]
        mu = kx.T.dot(np.dot(K_B_inv, self.Y))
        # Predict variance
        kxx = kern.K(Xnew, Xnew)
        va = self.variance + kxx - kx.T.dot(np.dot(K_B_inv, kx))
        return mu, va

    def predict_components(self, Xnew):
        """The predictive density under each component"""
        mus = []
        vas = []
        for i in range(len(self.kern)):
            mu, va = self.predict(Xnew, i)
            mus.append(mu)
            vas.append(va)
        # Stack per-component predictions into (num_points, K) arrays.
        return np.array(mus)[:, :, 0].T, np.array(vas)[:, :, 0].T

    def plot(self, gp_num=0):
        """
        Plot the mixture of Gaussian Processes.
        Supports plotting 1d and 2d regression.
        """
        from matplotlib import pylab as plt
        from matplotlib import cm
        # 50 evenly spaced test inputs across the observed range.
        XX = np.linspace(self.X.min(), self.X.max())[:, None]
        if self.Y.shape[1] == 1:
            # 1d outputs: scatter colored by responsibility, plus a mean
            # curve and +/- 2 sd band per component.
            plt.scatter(self.X, self.Y, c=self.phi[:, gp_num], cmap=cm.RdBu, vmin=0., vmax=1., lw=0.5)
            plt.colorbar(label='GP {} assignment probability'.format(gp_num))
            GPy.plotting.Tango.reset()
            for i in range(self.phi.shape[1]):
                YY_mu, YY_var = self.predict(XX, i)
                col = GPy.plotting.Tango.nextMedium()
                plt.fill_between(XX[:, 0],
                                 YY_mu[:, 0] - 2 * np.sqrt(YY_var[:, 0]),
                                 YY_mu[:, 0] + 2 * np.sqrt(YY_var[:, 0]),
                                 alpha=0.1,
                                 facecolor=col)
                plt.plot(XX, YY_mu[:, 0], c=col, lw=2);
        elif self.Y.shape[1] == 2:
            # 2d outputs: plot the predicted trajectory in output space.
            plt.scatter(self.Y[:, 0], self.Y[:, 1], c=self.phi[:, gp_num], cmap=cm.RdBu, vmin=0., vmax=1., lw=0.5)
            plt.colorbar(label='GP {} assignment probability'.format(gp_num))
            GPy.plotting.Tango.reset()
            for i in range(self.phi.shape[1]):
                YY_mu, YY_var = self.predict(XX, i)
                col = GPy.plotting.Tango.nextMedium()
                plt.plot(YY_mu[:, 0], YY_mu[:, 1], c=col, lw=2);
        else:
            raise NotImplementedError('Only 1d and 2d regression can be plotted')

    def plot_probs(self, gp_num=0):
        """
        Plot assignment probabilities for each data point of the OMGP model
        """
        from matplotlib import pylab as plt
        plt.scatter(self.X, self.phi[:, gp_num])
        plt.ylim(-0.1, 1.1)
        plt.ylabel('GP {} assignment probability'.format(gp_num))
|
jameshensman/GPclust
|
GPclust/OMGP.py
|
Python
|
gpl-3.0
| 8,106
|
[
"Gaussian"
] |
18f6398c0b805d686f929c50edb7b74c14274ab04fb7abde25a063cf36b34266
|
# GenCumulativeSkyMtx
#
# Ladybug: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Ladybug.
#
# Copyright (c) 2013-2015, Mostapha Sadeghipour Roudsari <Sadeghipour@gmail.com>
# Ladybug is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Ladybug is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ladybug; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
This component uses Radiance's gendaymtx function to calculate the sky's radiation for each hour of the year. This is a necessary pre-step before doing radiation analysis with Rhino geometry or generating a radiation rose.
The first time you use this component, you will need to be connected to the internet so that the component can download the "gendaymtx.exe" function to your system.
Gendaymtx is written by Ian Ashdown and Greg Ward. For more information, check the Radiance manual at:
http://www.radiance-online.org/learning/documentation/manual-pages/pdfs/gendaymtx.pdf
-
Provided by Ladybug 0.0.60
Args:
_epwFile: The output of the Ladybug Open EPW component or the file path location of the epw weather file on your system.
_skyDensity_: Set to 0 to generate a Tregenza sky, which will divide up the sky dome with a coarse density of 145 sky patches. Set to 1 to generate a Reinhart sky, which will divide up the sky dome using a very fine density of 580 sky patches. Note that, while the Reinhart sky is more accurate, it will result in considerably longer calculation times. Accordingly, the default is set to 0 for a Tregenza sky.
workingDir_: An optional working directory in your system where the sky will be generated. Default is set to C:\Ladybug or C:\Users\yourUserName\AppData\Roaming\Ladybug. The latter is used if you cannot write to the C:\ drive of your computer. Any valid file path location can be connected.
useOldRes_: Set this to "True" if you have already run this component previously and you want to use the already-generated data for this weather file.
_runIt: Set to "True" to run the component and generate a sky matrix.
Returns:
readMe!: ...
cumulativeSkyMtx: The result of the gendaymtx function. Use the selectSkyMtx component to select a desired sky matrix from this output for use in a radiation study, radition rose, or sky dome visualization.
"""
ghenv.Component.Name = "Ladybug_GenCumulativeSkyMtx"
ghenv.Component.NickName = 'genCumulativeSkyMtx'
ghenv.Component.Message = 'VER 0.0.60\nJUL_06_2015'
ghenv.Component.Category = "Ladybug"
ghenv.Component.SubCategory = "2 | VisualizeWeatherData"
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "2"
except: pass
import os
import scriptcontext as sc
from clr import AddReference
AddReference('Grasshopper')
import Grasshopper.Kernel as gh
from itertools import izip
import shutil
def date2Hour(month, day, hour):
    """Convert a (month, day, hour) date to an hour-of-year index.

    Uses a non-leap-year calendar; Jan 1 at hour 0 maps to index 0.
    """
    # Cumulative day count at the start of each month (non-leap year).
    daysBeforeMonth = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334)
    dayOfYear = daysBeforeMonth[int(month) - 1] + int(day)
    return (dayOfYear - 1) * 24 + hour
def hour2Date(hour):
monthList = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
numOfDays = [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365]
numOfHours = [24 * numOfDay for numOfDay in numOfDays]
for h in range(len(numOfHours)-1):
if hour <= numOfHours[h+1]: month = h + 1; break
if hour == 0: day = 1
elif (hour)%24 == 0: day = int((hour - numOfHours[h]) / 24)
else: day = int((hour - numOfHours[h]) / 24) + 1
time = hour%24 + 0.5
return str(day), str(month), str(time)
def getRadiationValues(epw_file, analysisPeriod, weaFile):
    """Append one line per EPW hour ("month day time dirRad difRad") to weaFile.

    `analysisPeriod` is currently ignored: the whole year (hours 0-8760) is
    always written. Returns the still-open weaFile handle so the caller can
    close it.
    """
    # start hour and end hour
    stHour = 0
    endHour = 8760
    epwfile = open(epw_file,"r")
    for lineCount, line in enumerate(epwfile):
        # The first 8 lines of an EPW file are header records.
        hour = lineCount - 8
        if int(stHour) <= hour <= int(endHour):
            # EPW columns 14/15: direct-normal and diffuse-horizontal radiation.
            dirRad = (line.split(',')[14])
            difRad = (line.split(',')[15])
            day, month, time = hour2Date(hour)
            weaFile.write(month + " " + day + " " + time + " " + dirRad + " " + difRad + "\n")
    epwfile.close()
    return weaFile
def weaHeader(epwFileAddress, lb_preparation):
    """Return the .wea header block for the given EPW file.

    gendaymtx expects west-positive longitude and the time zone expressed in
    degrees, hence the sign flips and the *15 factor below.
    NOTE: the backtick repr() syntax is Python 2 (IronPython) only.
    """
    locName, lat, long, timeZone, elev, dataStr = lb_preparation.epwLocation(epwFileAddress)
    #print locName, lat, long, timeZone, elev
    return "place " + locName + "\n" + \
           "latitude " + lat + "\n" + \
           "longitude " + `-float(long)` + "\n" + \
           "time_zone " + `-float(timeZone) * 15` + "\n" + \
           "site_elevation " + elev + "\n" + \
           "weather_data_file_units 1\n"
def epw2wea(weatherFile, analysisPeriod, lb_preparation):
    """Translate an .epw weather file into a .wea file alongside it.

    Writes the gendaymtx header followed by the hourly radiation records
    and returns the path of the new .wea file.
    """
    weaPath = weatherFile.replace(".epw", ".wea")
    out = open(weaPath, 'w')
    out.write(weaHeader(weatherFile, lb_preparation))
    # getRadiationValues writes the hourly records and hands the handle back.
    out = getRadiationValues(weatherFile, analysisPeriod, out)
    out.close()
    return weaPath
def main(epwFile, skyType, workingDir, useOldRes):
    """Generate diffuse/direct sky matrix files for an EPW file via gendaymtx.

    Returns (difMtxPath, dirMtxPath, locationName, lat, lngt, timeZone) on
    success, or an error code: -1 (bad input or incompatible Ladybug),
    -2 (working directory could not be created), -3 (gendaymtx.exe missing).
    NOTE(review): the gendaymtx "-m" argument below reads the global `n`
    rather than the `skyType` parameter; they hold the same value at the
    single call site, but this should be unified.
    """
    # import the classes
    if sc.sticky.has_key('ladybug_release'):
        try:
            if not sc.sticky['ladybug_release'].isCompatible(ghenv.Component): return -1
        except:
            warning = "You need a newer version of Ladybug to use this compoent." + \
            "Use updateLadybug component to update userObjects.\n" + \
            "If you have already updated userObjects drag Ladybug_Ladybug component " + \
            "into canvas and try again."
            w = gh.GH_RuntimeMessageLevel.Warning
            ghenv.Component.AddRuntimeMessage(w, warning)
            return -1
        lb_preparation = sc.sticky["ladybug_Preparation"]()
        # make working directory
        if workingDir: workingDir = lb_preparation.removeBlankLight(workingDir)
        workingDir = lb_preparation.makeWorkingDir(workingDir)
        # make sure the directory has been created
        if workingDir == -1: return -2
        workingDrive = workingDir[0:1]
        # GenCumulativeSky: make sure gendaymtx.exe is available locally.
        gendaymtxFile = os.path.join(workingDir, 'gendaymtx.exe')
        if not os.path.isfile(gendaymtxFile):
            # let's see if we can grab it from radiance folder
            if os.path.isfile("c:/radiance/bin/gendaymtx.exe"):
                # just copy this file
                shutil.copyfile("c:/radiance/bin/gendaymtx.exe", gendaymtxFile)
            else:
                # download the file
                lb_preparation.downloadGendaymtx(workingDir)
        #check if the file is there (size guard against a failed download)
        if not os.path.isfile(gendaymtxFile) or os.path.getsize(gendaymtxFile)< 15000 : return -3
        ## check for epw file to be connected
        if epwFile != None and epwFile[-3:] == 'epw':
            if not os.path.isfile(epwFile):
                print "Can't find epw file at " + epwFile
                w = gh.GH_RuntimeMessageLevel.Warning
                ghenv.Component.AddRuntimeMessage(w, "Can't find epw file at " + epwFile)
                return -1
            # import data from epw file
            locName, lat, lngt, timeZone, elev, locationStr = lb_preparation.epwLocation(epwFile)
            newLocName = lb_preparation.removeBlank(locName)
            # make new folder for each city
            subWorkingDir = lb_preparation.makeWorkingDir(workingDir + "\\" + newLocName)
            print 'Current working directory is set to: ', subWorkingDir
            # copy .epw file to sub-directory
            weatherFileAddress = lb_preparation.copyFile(epwFile, subWorkingDir + "\\" + newLocName + '.epw')
            # create weaFile
            weaFile = epw2wea(weatherFileAddress, [], lb_preparation)
            outputFile = weaFile.replace(".wea", ".mtx")
            # Output names embed the sky density so both densities can coexist.
            outputFileDif = weaFile.replace(".wea", "_dif_" + `skyType` + ".mtx")
            outputFileDir = weaFile.replace(".wea", "_dir_" + `skyType` + ".mtx")
            # check if the study is already ran for this weather file
            if useOldRes and os.path.isfile(outputFileDif) and os.path.isfile(outputFileDir):
                # ask the user if he wants to re-run the study
                print "Sky matrix files for this epw file are already existed on your system.\n" + \
                      "The component won't recalculate the sky and imports the available result.\n" + \
                      "In case you don't want to use these files, set useOldRes input to False and re-run the study.\n" + \
                      "If you found the lines above confusing just ignore it! It's all fine. =)\n"
            else:
                # Run gendaymtx twice through a batch file: once for the
                # diffuse (-s) and once for the direct (-d) component.
                batchFile = weaFile.replace(".wea", ".bat")
                command = "@echo off \necho.\n echo HELLO " + os.getenv("USERNAME").upper()+ "! " + \
                          "DO NOT CLOSE THIS WINDOW. \necho.\necho IT WILL BE CLOSED AUTOMATICALLY WHEN THE CALCULATION IS OVER!\n" + \
                          "echo.\necho AND MAY TAKE FEW MINUTES...\n" + \
                          "echo.\n" + \
                          "echo CALCULATING DIFFUSE COMPONENT OF THE SKY...\n" + \
                          workingDir + "\\gendaymtx -m " + str(n) + " -s -O1 " + weaFile + "> " + outputFileDif + "\n" + \
                          "echo.\necho CALCULATING DIRECT COMPONENT OF THE SKY...\n" + \
                          workingDir + "\\gendaymtx -m " + str(n) + " -d -O1 " + weaFile + "> " + outputFileDir
                file = open(batchFile, 'w')
                file.write(command)
                file.close()
                os.system(batchFile)
            return outputFileDif, outputFileDir, newLocName, lat, lngt, timeZone
        else:
            print "epwWeatherFile address is not a valid .epw file"
            w = gh.GH_RuntimeMessageLevel.Warning
            ghenv.Component.AddRuntimeMessage(w, "epwWeatherFile address is not a valid .epw file")
            return -1
    else:
        print "You should first let the Ladybug fly..."
        w = gh.GH_RuntimeMessageLevel.Warning
        ghenv.Component.AddRuntimeMessage(w, "You should first let the Ladybug fly...")
        return -1
def readMTXFile(daylightMtxDif, daylightMtxDir, n, newLocName, lat, lngt, timeZone):
    """Parse the diffuse and direct gendaymtx outputs in lockstep.

    Builds {patch: {hour: [difValue, dirValue]}} and wraps it, together
    with the location data, in a SkyResultsCollection.
    n selects the sky density: 1 = Tregenza (145 patches), 2 = Reinhart.
    """
    # All the patches on top high get the same values so maybe
    # I should re-create the geometry 577 instead of 580
    # and keep in mind that first patch is ground!
    # I create the dictionary only for sky patches and don't collect the data
    # for the first patch
    # this line could have saved me 5 hours
    skyPatchesDict = {1 : 145,
                      2 : 580 - 3}
    numOfPatchesInEachRow = {1: [30, 30, 24, 24, 18, 12, 6, 1],
                             2: [60, 60, 60, 60, 48, 48, 48, 48, 36, 36, 24, 24, 12, 12, 1]}
    # first row is horizon and last row is the one
    # Steradian conversion factor per altitude row (patch solid angles).
    strConv = {1 : [0.0435449227, 0.0416418006, 0.0473984151, 0.0406730411, 0.0428934136, 0.0445221864, 0.0455168385, 0.0344199465],
               2: [0.0113221971, 0.0111894547, 0.0109255262, 0.0105335058, 0.0125224872, 0.0117312774, 0.0108025291, 0.00974713106, 0.011436609, 0.00974295956, 0.0119026242, 0.00905126163, 0.0121875626, 0.00612971396, 0.00921483254]}
    numOfSkyPatches = skyPatchesDict[n]
    # create an empty dictionary
    radValuesDict = {}
    for skyPatch in range(numOfSkyPatches):
        radValuesDict[skyPatch] = {}
    resFileDif = open(daylightMtxDif, "r")
    resFileDir = open(daylightMtxDir, "r")
    def getValue(line, rowNumber):
        # RGB radiometric triple -> irradiance, scaled by patch solid angle.
        R, G, B = line.split(' ')
        value = (.265074126 * float(R) + .670114631 * float(G) + .064811243 * float(B)) * strConv[n][rowNumber]
        return value
    lineCount = 0
    extraHeadingLines = 0 # no heading
    warnOff = False
    failedHours = {}
    for difLine, dirLine in izip(resFileDif, resFileDir):
        # each line is the data for each hour for a single patch
        # new version of gendaymtx genrates a header
        # this is a check to make sure the component will work for both versions
        if lineCount == 0 and difLine.startswith("#?RADIANCE"):
            # the file has a header
            extraHeadingLines = -8
        if lineCount + extraHeadingLines < 0:
            # pass heading line
            lineCount += 1
            continue
        # these lines is an empty line to separate patches do let's pass them
        # Each patch block is 8760 hour lines + 1 separator = 8761 lines.
        hour = (lineCount + 1 + extraHeadingLines)% 8761
        #print lineCount, hour
        if hour != 0:
            patchNumber = int((lineCount + 1 + extraHeadingLines) /8761)
            # first patch is ground!
            if patchNumber != 0: #and patchNumber < numOfSkyPatches:
                # Find which altitude row this patch belongs to.
                for rowCount, patchCountInRow in enumerate(numOfPatchesInEachRow[n]):
                    if patchNumber - 1 < sum(numOfPatchesInEachRow[n][:rowCount+1]):
                        rowNumber = rowCount
                        # print rowNumber
                        break
                try:
                    difValue = getValue(difLine, rowNumber)
                    dirValue = getValue(dirLine, rowNumber)
                except Exception, e:
                    # Malformed line: report once globally, then per hour.
                    value = 0
                    if not warnOff:
                        print "genDayMtx returns null Values for few hours. The study will run anyways." + \
                              "\nMake sure that you are using an standard epw file." + \
                              "\nThe failed hours are listed below in [Month/Day @Hour] format."
                        warnOff = True
                    day, month, time = hour2Date(hour - 1)
                    if hour-1 not in failedHours.keys():
                        failedHours[hour-1] = [day, month, time]
                        print "Failed to read the results > " + month + "/" + day + " @" + time
                try: radValuesDict[patchNumber-1][hour] = [difValue, dirValue]
                except:print patchNumber-1, hour, value
        lineCount += 1
    resFileDif.close()
    resFileDir.close()
    class SkyResultsCollection(object):
        # Lightweight container for the parsed sky matrix plus location data.
        def __init__(self, valuesDict, locationName, lat, lngt, timeZone):
            self.d = valuesDict
            self.location = locationName
            self.lat = lat
            self.lngt = lngt
            self.timeZone = timeZone
    return SkyResultsCollection(radValuesDict, newLocName, lat, lngt, timeZone)
if _runIt and _epwFile!=None:
    # Map the component input to gendaymtx sky density:
    # 0 (or unset) -> Tregenza (n=1, 145 patches); 1 -> Reinhart (n=2).
    if _skyDensity_ == None: n = 1 #Tregenza Sky
    else: n = _skyDensity_%2 + 1 #
    result = main(_epwFile, n, workingDir_, useOldRes_)
    w = gh.GH_RuntimeMessageLevel.Warning
    if result== -3:
        warning = 'Download failed!!! You need GenDayMtx.exe to use this component.' + \
                  '\nPlease check your internet connection, and try again!'
        print warning
        ghenv.Component.AddRuntimeMessage(w, warning)
    elif result == -2:
        warning = 'Working directory cannot be created! Please set workingDir to a new path'
        print warning
        ghenv.Component.AddRuntimeMessage(w, warning)
    elif result == -1:
        # main() has already reported the specific problem.
        pass
    else:
        daylightMtxDiffueFile, daylightMtxDirectFile, newLocName, lat, lngt, timeZone = result
        cumulativeSkyMtx = readMTXFile(daylightMtxDiffueFile, daylightMtxDirectFile, n, newLocName, lat, lngt, timeZone)
else:
    warn = "Set runIt to True and connect a valid epw file address"
    print warn
    #ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warn)
|
samuto/ladybug
|
src/Ladybug_GenCumulativeSkyMtx.py
|
Python
|
gpl-3.0
| 16,430
|
[
"EPW"
] |
8371a25172952e773e18dfddd80815433c64a1be7d35112640549897d6873845
|
# macs2 python wrapper
# based on http://toolshed.g2.bx.psu.edu/view/modencode-dcc/macs2
import sys, subprocess, tempfile, shutil, glob, os, os.path, gzip
from galaxy import eggs
import json
CHUNK_SIZE = 1024
#==========================================================================================
#functions
#==========================================================================================
def gunzip_cat_glob_path( glob_path, target_filename, delete = False ):
    """Decompress every gzip file matching glob_path and concatenate the
    contents into target_filename.

    Files are processed in sorted order so the output is deterministic
    (glob.glob order is filesystem-dependent). If delete is True, each
    source file is removed after it has been appended.
    """
    # Context managers guarantee both handles are closed even on error
    # (the original leaked them if a read/write raised).
    with open( target_filename, 'wb' ) as out:
        for filename in sorted( glob.glob( glob_path ) ):
            with gzip.open( filename, 'rb' ) as fh:
                # Stream in chunks so large archives never load into memory.
                shutil.copyfileobj( fh, out )
            if delete:
                os.unlink( filename )
def xls_to_interval( xls_file, interval_file, header = None ):
    """Convert a MACS peaks .xls file into a 0-based interval file.

    Comment lines and the blank separator lines MACS2 emits are copied
    through unchanged; the first data row becomes a '#'-prefixed header;
    for every remaining row the start coordinate (column 2) is shifted
    from 1-based (XLS) to 0-based (interval/BED convention).
    NOTE(review): neither file handle is explicitly closed on error.
    """
    out = open( interval_file, 'wb' )
    if header:
        out.write( '#%s\n' % header )
    wrote_header = False
    #From macs readme: Coordinates in XLS is 1-based which is different with BED format.
    for line in open( xls_file ):
        #keep all existing comment lines
        if line.startswith( '#' ):
            out.write( line )
        #added for macs2 since there is an extra newline
        elif line.startswith( '\n' ):
            out.write( line )
        elif not wrote_header:
            # First non-comment row is the column header: comment it out.
            out.write( '#%s' % line )
            print line
            wrote_header = True
        else:
            fields = line.split( '\t' )
            if len( fields ) > 1:
                # Shift start coordinate down by one (1-based -> 0-based).
                fields[1] = str( int( fields[1] ) - 1 )
            out.write( '\t'.join( fields ) )
    out.close()
#==========================================================================================
#main
#==========================================================================================
def main():
    """Galaxy wrapper entry point for MACS2.

    sys.argv[1] is a JSON file of tool options, sys.argv[2] a JSON file of
    output dataset paths. Builds a macs2 command line, runs it in a temp
    directory, then moves/converts the produced files to the Galaxy paths.
    """
    #take in options file and output file names
    options = json.load( open( sys.argv[1] ) )
    outputs = json.load( open( sys.argv[2] ) )
    #=================================================================================
    #parse options and execute macs2
    #=================================================================================
    #default inputs that are in every major command
    experiment_name = '_'.join( options['experiment_name'].split() ) #save experiment name here, it will be used by macs for some file names
    cmdline = "macs2 %s -t %s" % ( options['command'], ",".join( options['input_chipseq'] ) )
    if options['input_control']:
        cmdline = "%s -c %s" % ( cmdline, ",".join( options['input_control'] ) )
    #=================================================================================
    if (options['command'] == "callpeak"):
        output_bed = outputs['output_bed_file']
        output_extra_html = outputs['output_extra_file']
        output_extra_path = outputs['output_extra_file_path']
        output_peaks = outputs['output_peaks_file']
        output_narrowpeaks = outputs['output_narrowpeaks_file']
        output_xls_to_interval_peaks_file = outputs['output_xls_to_interval_peaks_file']
        output_xls_to_interval_negative_peaks_file = outputs['output_xls_to_interval_negative_peaks_file']
        # pvalue and qvalue are mutually exclusive significance cutoffs.
        if 'pvalue' in options:
            cmdline = "%s --format='%s' --name='%s' --gsize='%s' --bw='%s' --pvalue='%s' --mfold %s %s %s %s" % ( cmdline, options['format'], experiment_name, options['gsize'], options['bw'], options['pvalue'], options['mfoldlo'], options['mfoldhi'], options['nolambda'], options['bdg'] )
        elif 'qvalue' in options:
            cmdline = "%s --format='%s' --name='%s' --gsize='%s' --bw='%s' --qvalue='%s' --mfold %s %s %s %s" % ( cmdline, options['format'], experiment_name, options['gsize'], options['bw'], options['qvalue'], options['mfoldlo'], options['mfoldhi'], options['nolambda'], options['bdg'] )
        if 'nomodel' in options:
            cmdline = "%s --nomodel --shiftsize='%s'" % ( cmdline, options['nomodel'] )
    #=================================================================================
    if (options['command'] == "bdgcmp"):
        output_bdgcmp = outputs['output_bdgcmp_file']
        cmdline = "%s -m %s -p %s -o bdgcmp_out.bdg" % ( cmdline, options['m'], options['pseudocount'] )
    #=================================================================================
    tmp_dir = tempfile.mkdtemp() #macs makes very messy output, need to contain it into a temp dir, then provide to user
    stderr_name = tempfile.NamedTemporaryFile().name # redirect stderr here, macs provides lots of info via stderr, make it into a report
    proc = subprocess.Popen( args=cmdline, shell=True, cwd=tmp_dir, stderr=open( stderr_name, 'wb' ) )
    proc.wait()
    #We don't want to set tool run to error state if only warnings or info, e.g. mfold could be decreased to improve model, but let user view macs log
    #Do not terminate if error code, allow dataset (e.g. log) creation and cleanup
    if proc.returncode:
        stderr_f = open( stderr_name )
        while True:
            chunk = stderr_f.read( CHUNK_SIZE )
            if not chunk:
                stderr_f.close()
                break
            sys.stderr.write( chunk )
    #=================================================================================
    #copy files created by macs2 to appripriate directory with the provided names
    #=================================================================================
    #=================================================================================
    #move files generated by callpeak command
    if (options['command'] == "callpeak"):
        #run R to create pdf from model script
        if os.path.exists( os.path.join( tmp_dir, "%s_model.r" % experiment_name ) ):
            cmdline = 'R --vanilla --slave < "%s_model.r" > "%s_model.r.log"' % ( experiment_name, experiment_name )
            proc = subprocess.Popen( args=cmdline, shell=True, cwd=tmp_dir )
            proc.wait()
        #move bed out to proper output file
        created_bed_name = os.path.join( tmp_dir, "%s_peaks.bed" % experiment_name )
        if os.path.exists( created_bed_name ):
            shutil.move( created_bed_name, output_bed )
        #OICR peak_xls file
        created_peak_xls_file = os.path.join( tmp_dir, "%s_peaks.xls" % experiment_name )
        if os.path.exists( created_peak_xls_file ):
            # shutil.copy( created_peak_xls_file, os.path.join ( "/mnt/galaxyData/tmp/", "%s_peaks.xls" % ( os.path.basename(output_extra_path) )))
            # copy (not move) so the xls also lands in the extra-files dir below
            shutil.copyfile( created_peak_xls_file, output_peaks )
        #peaks.encodepeaks (narrowpeaks) file
        created_narrowpeak_file = os.path.join (tmp_dir, "%s_peaks.encodePeak" % experiment_name )
        if os.path.exists( created_narrowpeak_file ):
            shutil.move (created_narrowpeak_file, output_narrowpeaks )
        #parse xls files to interval files as needed
        #if 'xls_to_interval' in options:
        if (options['xls_to_interval'] == "True"):
            create_peak_xls_file = os.path.join( tmp_dir, '%s_peaks.xls' % experiment_name )
            if os.path.exists( create_peak_xls_file ):
                xls_to_interval( create_peak_xls_file, output_xls_to_interval_peaks_file, header = 'peaks file' )
            create_peak_xls_file = os.path.join( tmp_dir, '%s_negative_peaks.xls' % experiment_name )
            if os.path.exists( create_peak_xls_file ):
                print "negative file exists"
                xls_to_interval( create_peak_xls_file, output_xls_to_interval_negative_peaks_file, header = 'negative peaks file' )
        #move all remaining files to extra files path of html file output to allow user download
        out_html = open( output_extra_html, 'wb' )
        out_html.write( '<html><head><title>Additional output created by MACS (%s)</title></head><body><h3>Additional Files:</h3><p><ul>\n' % experiment_name )
        os.mkdir( output_extra_path )
        for filename in sorted( os.listdir( tmp_dir ) ):
            shutil.move( os.path.join( tmp_dir, filename ), os.path.join( output_extra_path, filename ) )
            out_html.write( '<li><a href="%s">%s</a></li>\n' % ( filename, filename ) )
            #out_html.write( '<li><a href="%s">%s</a>peakxls %s SomethingDifferent tmp_dir %s path %s exp_name %s</li>\n' % ( created_peak_xls_file, filename, filename, tmp_dir, output_extra_path, experiment_name ) )
        out_html.write( '</ul></p>\n' )
        out_html.write( '<h3>Messages from MACS:</h3>\n<p><pre>%s</pre></p>\n' % open( stderr_name, 'rb' ).read() )
        out_html.write( '</body></html>\n' )
        out_html.close()
    #=================================================================================
    #move files generated by bdgcmp command
    if (options['command'] == "bdgcmp"):
        created_bdgcmp_file = os.path.join (tmp_dir, "bdgcmp_out.bdg" )
        if os.path.exists( created_bdgcmp_file ):
            shutil.move (created_bdgcmp_file, output_bdgcmp )
    #=================================================================================
    #cleanup
    #=================================================================================
    os.unlink( stderr_name )
    os.rmdir( tmp_dir )
if __name__ == "__main__": main()
|
stemcellcommons/macs2
|
macs2_wrapper.py
|
Python
|
mit
| 9,144
|
[
"Galaxy"
] |
5b6289db9f182c49c5c3d662679723b04c75134f8b36525cd9131934e7aa2e39
|
#ryangc ATSYMBOL mail.med.upenn.edu ryan.g.coleman ATSYMBOL gmail.com
#Ryan G Coleman, Kim Sharp http://crystal.med.upenn.edu
#contains lots of grid primitives, there is not a grid class
import math
import geometry
def getIndices(mins, gridSize, pt):
    """Return the (x, y, z) grid-cell indices containing the point `pt`.

    Cells are gridSize-wide intervals anchored at `mins`; floor() means
    coordinates below `mins` produce negative indices.
    """
    indices = []
    for axis in (0, 1, 2):
        indices.append(int(math.floor((pt[axis] - mins[axis]) / gridSize)))
    return indices[0], indices[1], indices[2]
def assignAtomDepths(gridD, gridSize, mins, maxs, pdbD, minVal=0.):
    """Look up a depth value for each atom coordinate in pdbD.

    For every atom, find its grid cell and read the cell's stored depth
    (first element of the box tuple), clamped from below at minVal.
    `maxs` is accepted for interface symmetry but not used.
    """
    depths = []
    for atomCoord in pdbD.coords:
        xI, yI, zI = getIndices(mins, gridSize, atomCoord)
        #print (xI, yI, zI), atomCoord, len(gridD), len(gridD[0]), len(gridD[0][0])
        depths.append(max(minVal, gridD[xI][yI][zI][0]))
    return depths
def fillBoxesSets(grid):
    """Partition grid cells into (outsideBoxes, insideBoxes) index sets.

    Box code semantics (first tuple element): -1 = outside, 0 = inside but
    not yet assigned a distance, -2 = inside and already final. Code-0
    boxes are re-encoded in place with a sentinel value larger than any
    possible grid distance so later passes can distinguish placeholders
    from real values. The grid is modified in place.
    """
    outsideBoxes, insideBoxes = set(), set()
    #useful variables to set
    lenX = len(grid)
    lenY = len(grid[0])
    lenZ = len(grid[0][0])
    for x in xrange(0, lenX):
        for y in xrange(0, lenY):
            for z in xrange(0, lenZ):
                thisBox = grid[x][y][z]
                if thisBox[0] == -1:
                    outsideBoxes.update([(x, y, z)])
                elif thisBox[0] == 0:
                    insideBoxes.update([(x, y, z)])
                    #re-encode box to be large so can tell when 'real' value is present
                    newBox = lenX + lenY + lenZ, thisBox[1], thisBox[2], thisBox[3]
                    grid[x][y][z] = newBox
                elif thisBox[0] == -2:
                    insideBoxes.update([(x, y, z)])
    return outsideBoxes, insideBoxes
#grid modified in place as well
def getMaximaOnly(grid, maxGreater=0):
  '''returns a copy of the grid, with everything but the maxima removed
  should be run after finalizing grid distances so all non-negative

  A cell survives only if it has all 26 neighbors (i.e. it is not on a
  grid boundary) and at most `maxGreater` of those neighbors hold a value
  greater than or equal to it; all other cells keep their coordinates but
  have their value zeroed. Input grid is not modified.
  '''
  lens = [len(grid), len(grid[0]), len(grid[0][0])]
  newGrid = []
  for indexX, rowX in enumerate(grid):
    newX = []
    for indexY, rowY in enumerate(rowX):
      newY = []
      for indexZ, entryZ in enumerate(rowY):
        thisBox = grid[indexX][indexY][indexZ][0]
        maxima = maxGreater  # budget of neighbors allowed to be >= this cell
        requiredBoxes = 26  # counts down; hits 0 only for interior cells
        for adjBox in getAllAdjacentBoxes(
            (indexX, indexY, indexZ), lens[0], lens[1], lens[2]):
          requiredBoxes -= 1
          if grid[adjBox[0]][adjBox[1]][adjBox[2]][0] >= thisBox:
            maxima -= 1
        if maxima >= 0 and requiredBoxes == 0:
          newY.append(entryZ)  # a local maximum: keep the cell unchanged
        else:
          newY.append((0, entryZ[1], entryZ[2], entryZ[3]))  # zero it out
      newX.append(newY)
    newGrid.append(newX)
  return newGrid
def getMaximaRanking(grid):
  '''returns a copy of the grid, with the value being the number of neighbors
  not greater than this one
  should be run after finalizing grid distances so all non-negative

  Interior cells with a positive value get (count, cx, cy, cz) where count
  is how many of the 26 neighbors are <= this cell (ties count); boundary
  cells and non-positive cells get a 0 value. Input grid is not modified.
  '''
  lens = [len(grid), len(grid[0]), len(grid[0][0])]
  newGrid = []
  for indexX, rowX in enumerate(grid):
    newX = []
    for indexY, rowY in enumerate(rowX):
      newY = []
      for indexZ, entryZ in enumerate(rowY):
        thisBox = grid[indexX][indexY][indexZ][0]
        maxima = 0
        requiredBoxes = 26  # counts down; hits 0 only for interior cells
        for adjBox in getAllAdjacentBoxes(
            (indexX, indexY, indexZ), lens[0], lens[1], lens[2]):
          requiredBoxes -= 1
          if grid[adjBox[0]][adjBox[1]][adjBox[2]][0] <= thisBox: # ties ties
            maxima += 1
        # NOTE(review): maxima >= 0 is always true (it starts at 0 and is
        # only incremented), so the effective conditions are interior
        # position and a positive value
        if maxima >= 0 and requiredBoxes == 0 and thisBox > 0:
          newY.append((maxima, entryZ[1], entryZ[2], entryZ[3]))
        else:
          newY.append((0, entryZ[1], entryZ[2], entryZ[3]))
      newX.append(newY)
    newGrid.append(newX)
  return newGrid
def calculateOffsets(grid1, grid2, gridSize):
  '''figures out the difference in offsets between two grids
  (with the same spacing)

  Compares the center coordinates stored in each grid's first cell and
  converts the coordinate differences into whole numbers of grid cells.
  Returns the [xOff, yOff, zOff] list.
  '''
  origin1 = grid1[0][0][0][1:]
  origin2 = grid2[0][0][0][1:]
  differences = [a - b for a, b in zip(origin1, origin2)]
  return [int(d / gridSize) for d in differences]
#helper function that calculates L1 distance from end-point to end-point
def calcEdgeGridDist(pt1, pt2, mins, maxs, gridSize, metric='L1'):
  '''Distance between the grid cells containing two points, measured with
  the requested metric (L1 by default) on the cell indices; maxs is
  unused but kept for interface compatibility.'''
  cell1 = getIndices(mins, gridSize, pt1)
  cell2 = getIndices(mins, gridSize, pt2)
  return geometry.dist(cell1, cell2, metric=metric)
def getAllAdjacentBoxes(curBox, lenX, lenY, lenZ):
  '''Return every in-bounds neighbor of curBox: the 6 face-adjacent,
  12 edge-adjacent and 8 corner-adjacent cells (up to 26 total).'''
  return (getAdjacentBoxes(curBox, lenX, lenY, lenZ)
          + getSideAdjacentBoxes(curBox, lenX, lenY, lenZ)
          + getCornerAdjacentBoxes(curBox, lenX, lenY, lenZ))
def getAllAdjacentBoxesOnce(curBox, lenX, lenY, lenZ, extraEdges=False):
  '''Return (neighborBox, distance) pairs for curBox, filtered so each
  undirected pair is (mostly) emitted from only one of its endpoints.

  extraEdges, when given, maps a box to extra (box, dist, gridDist)
  connections (e.g. long surface edges) appended before filtering.
  '''
  #when called on all curBoxes, only returns each pair once
  returnVec = getAllAdjacentBoxes(curBox, lenX, lenY, lenZ)
  #add distance info
  newReturnVec = []
  for box in returnVec:
    newReturnVec.append((box, geometry.distL2(curBox, box)))
  if extraEdges and curBox in extraEdges:
    for adjBox, adjDist, adjGridDist in extraEdges[curBox]: # tuple unpack
      newReturnVec.append((adjBox, adjDist))
  # NOTE(review): the filter compares only [0:2] (x and y), so a pair that
  # differs only in z compares equal from both endpoints and is returned
  # twice; confirm whether [0:3] was intended.
  newVec = [box for box in newReturnVec if curBox[0:2] <= box[0][0:2]]
  return newVec
#helper function, gets up to 6 adjacent boxes
def getAdjacentBoxes(curBox, lenX, lenY, lenZ):
  '''Return the up-to-6 face-adjacent grid cells of curBox, clipped to
  the grid bounds given by lenX, lenY, lenZ.'''
  cx, cy, cz = curBox[0], curBox[1], curBox[2]
  adjVec = []
  for nx in (cx - 1, cx + 1):
    if 0 <= nx < lenX:
      adjVec.append((nx, cy, cz))
  for ny in (cy - 1, cy + 1):
    if 0 <= ny < lenY:
      adjVec.append((cx, ny, cz))
  for nz in (cz - 1, cz + 1):
    if 0 <= nz < lenZ:
      adjVec.append((cx, cy, nz))
  return adjVec
#helper function, gets side adjacent boxes (not directly connected to center)
def getSideAdjacentBoxes(curBox, lenX, lenY, lenZ):
  '''Return the up-to-12 edge-adjacent cells of curBox (sharing an edge
  but not a face with it), clipped to the grid bounds.'''
  cx, cy, cz = curBox[0], curBox[1], curBox[2]
  inX = [v for v in (cx - 1, cx + 1) if 0 <= v < lenX]
  inY = [v for v in (cy - 1, cy + 1) if 0 <= v < lenY]
  inZ = [v for v in (cz - 1, cz + 1) if 0 <= v < lenZ]
  adjVec = []
  for nx in inX:
    for ny in inY:
      adjVec.append((nx, ny, cz))
    for nz in inZ:
      adjVec.append((nx, cy, nz))
  for ny in inY:
    for nz in inZ:
      adjVec.append((cx, ny, nz))
  return adjVec
#helper function, gets 'corner' boxes
def getCornerAdjacentBoxes(curBox, lenX, lenY, lenZ):
  '''Return the up-to-8 corner-adjacent cells of curBox (differing by one
  in every coordinate), clipped to the grid bounds.'''
  cx, cy, cz = curBox[0], curBox[1], curBox[2]
  inX = [v for v in (cx - 1, cx + 1) if 0 <= v < lenX]
  inY = [v for v in (cy - 1, cy + 1) if 0 <= v < lenY]
  inZ = [v for v in (cz - 1, cz + 1) if 0 <= v < lenZ]
  return [(nx, ny, nz) for nx in inX for ny in inY for nz in inZ]
def makeGridFromPhi(phiData, threshold=6.0, inside=-2.0, outside=-1.0):
  '''Convert a phimap into a nested [x][y][z] grid of
  (code, centerX, centerY, centerZ) tuples.

  Cells whose phi value is >= threshold are coded `inside`, others
  `outside`; with threshold=False the raw phi value is stored instead.
  '''
  newGrid = []
  mins, maxs = phiData.getMinsMaxs()
  gap = 1./phiData.scale  # grid spacing is the inverse of the phimap scale
  for x in xrange(phiData.gridDimension):
    newX = []
    for y in xrange(phiData.gridDimension):
      newY = []
      for z in xrange(phiData.gridDimension):
        # phiArray is a flat array indexed in z-major order
        value = phiData.phiArray[
            z * (phiData.gridDimension**2) + y * phiData.gridDimension + x]
        where = 0.0
        # NOTE(review): `False == threshold` is also True for threshold=0
        # (False == 0 in Python), so a zero threshold takes the raw-value
        # branch; presumably only threshold=False is meant -- confirm.
        if False == threshold:
          where = value
        else:
          if value < threshold:
            where = outside
          else:
            where = inside
        newTuple = where, mins[0] + (x * gap), mins[1] + (y * gap), \
            mins[2] + (z * gap)
        newY.append(newTuple) # start inside ch, easier to check outsideness
      newX.append(newY)
    newGrid.append(newX)
  return newGrid
#helper routine, makes tuples of encoding, centerX, centerY, centerZ
def makeNewEmptyGrid(mins, maxs, gap, value=0):
  '''Build an empty 3D grid covering the box [mins, maxs] with cubic
  cells of edge length `gap`.

  Each cell is a (value, centerX, centerY, centerZ) tuple whose center
  sits half a gap inside the minimum corner; cell counts are rounded up
  so the whole box is covered.
  Returns the nested [x][y][z] list-of-lists grid.
  '''
  newGrid = []
  # range (not Python-2-only xrange) keeps this working on Python 2 and 3
  for x in range(int(math.ceil((maxs[0] - mins[0]) / gap))):
    newX = []
    for y in range(int(math.ceil((maxs[1] - mins[1]) / gap))):
      newY = []
      for z in range(int(math.ceil((maxs[2] - mins[2]) / gap))):
        newTuple = value, mins[0] + 0.5 * gap + (x * gap), \
            mins[1] + 0.5 * gap + (y * gap), mins[2] + 0.5 * gap + (z * gap)
        newY.append(newTuple) # start inside ch, easier to check outsideness
      newX.append(newY)
    newGrid.append(newX)
  return newGrid
def findPointMinsMaxs(phiData, pointXYZ, pointList):
  '''Find the grid-index bounding box of the listed surface points.

  pointXYZ -- per-point records of [index, x, y, z]; the slices taken
    here are assigned into, so the records must be mutable lists.
  pointList -- 1-based indices of the points to bound.
  Returns (newMins, newMaxs) as [x, y, z] index lists in phiData's grid.
  '''
  # NOTE(review): the bounds are seeded from pointXYZ[0] (the first point
  # overall), not from the first entry of pointList, so point #1 is always
  # included in the returned box -- confirm this is intended.
  minsPts = pointXYZ[0][1:]
  maxsPts = pointXYZ[0][1:]
  for point in pointList:
    xyz = pointXYZ[point-1][1:]  # pointList indices are 1-based
    for coord in range(3):
      minsPts[coord] = min(minsPts[coord], xyz[coord])
      maxsPts[coord] = max(maxsPts[coord], xyz[coord])
  mins, maxs = phiData.getMinsMaxs()
  gap = 1./phiData.scale
  newMins = list(getIndices(mins, gap, minsPts))
  newMaxs = list(getIndices(mins, gap, maxsPts)) # they initialize to the pts
  return newMins, newMaxs
def makeTrimmedGridFromPhi(
    phiData, pointXYZ, pointList,
    threshold=6.0, inside=-2.0, outside=-1.0, border=2):
  '''makes a trimmed grid from the phi data, hopefully faster than
  makeGridFromPhi then trimGrid, does not support threshold=False

  Returns (newGrid, newMinVals, newMaxVals): the trimmed [x][y][z] grid
  of (code, cx, cy, cz) tuples plus the outer coordinate bounds of the
  kept cells (cell center +/- half a cell).
  '''
  mins, maxs = phiData.getMinsMaxs()
  gap = 1. / phiData.scale
  # start from the bounding box of the listed points...
  newMins, newMaxs = findPointMinsMaxs(phiData, pointXYZ, pointList)
  # ...then widen it to cover every cell at or above the threshold
  for x in xrange(phiData.gridDimension):
    for y in xrange(phiData.gridDimension):
      for z in xrange(phiData.gridDimension):
        if x < newMins[0] or x > newMaxs[0] or \
            y < newMins[1] or y > newMaxs[1] or \
            z < newMins[2] or z > newMaxs[2]:
          # phiArray is a flat array indexed in z-major order
          value = phiData.phiArray[
              z * (phiData.gridDimension**2) + y * phiData.gridDimension + x]
          if value >= threshold: # inside the surface
            newMins[0] = min(x, newMins[0])
            newMins[1] = min(y, newMins[1])
            newMins[2] = min(z, newMins[2])
            newMaxs[0] = max(x, newMaxs[0])
            newMaxs[1] = max(y, newMaxs[1])
            newMaxs[2] = max(z, newMaxs[2])
  #add border, careful about current border
  newMins = [max(0, xCount - border) for xCount in newMins]
  newMaxs = [min(phiData.gridDimension, xCount + border) for xCount in newMaxs]
  newGrid = []
  for x in xrange(phiData.gridDimension):
    newX = []
    for y in xrange(phiData.gridDimension):
      newY = []
      for z in xrange(phiData.gridDimension):
        indices = [x, y, z]
        good = True
        # keep cells in the half-open index box [newMins, newMaxs)
        for coord in xrange(3):
          if indices[coord] < newMins[coord] or \
              indices[coord] >= newMaxs[coord]:
            good = False
        if good:
          value = phiData.phiArray[
              z * (phiData.gridDimension**2) + y * phiData.gridDimension + x]
          where = 0.0
          if value < threshold:
            where = outside
          else:
            where = inside
          newTuple = where, mins[0] + (x * gap), mins[1] + (y * gap), \
              mins[2] + (z * gap)
          newY.append(newTuple) # start inside ch, easier to check outsideness
      if len(newY) > 0:
        newX.append(newY)
    if len(newX) > 0:
      newGrid.append(newX)
  lens = [len(newGrid), len(newGrid[0]), len(newGrid[0][0])]  # unused below
  newMinVals = [xCount - gap/2. for xCount in newGrid[0][0][0][1:]]
  newMaxVals = [xCount + gap/2. for xCount in newGrid[-1][-1][-1][1:]]
  return newGrid, newMinVals, newMaxVals
def trimGrid(grid, gridSize, pointXYZ, pointList, inside=-2.0, border=1):
  '''trims grid so boundary on each coordinate is only 1 grid cube
  returns grid, mins, maxs of new grid

  Keeps every cell coded `inside` plus every cell overlapping the
  bounding box of the listed points, padded by `border` cells; returns
  (newGrid, newMinVals, newMaxVals) where the vals are the outer
  coordinate bounds (cell center +/- half a cell) of the kept region.
  pointXYZ rows are [index, x, y, z] mutable lists (slices are assigned
  into below); pointList holds 1-based point indices.
  '''
  minsPts = pointXYZ[0][1:]
  maxsPts = pointXYZ[0][1:]
  for point in pointList:
    xyz = pointXYZ[point-1][1:]  # pointList indices are 1-based
    for coord in range(3):
      minsPts[coord] = min(minsPts[coord], xyz[coord])
      maxsPts[coord] = max(maxsPts[coord], xyz[coord])
  lens = [len(grid), len(grid[0]), len(grid[0][0])]
  # index-range sentinels (the extra digit in 100000000.0 is harmless --
  # all are simply "bigger/smaller than any real index")
  newMins, newMaxs = [10000000.0, 10000000.0, 100000000.0], \
      [-1000000., -1000000., -1000000]
  for indexX, rowX in enumerate(grid):
    for indexY, rowY in enumerate(rowX):
      for indexZ, entryZ in enumerate(rowY):
        indices = [indexX, indexY, indexZ]
        if inside == entryZ[0]:
          for coord in xrange(3):
            #this makes sure the interior points are inside new grid
            if indices[coord] < newMins[coord]:
              newMins[coord] = indices[coord]
            if indices[coord] > newMaxs[coord]:
              newMaxs[coord] = indices[coord]
        for coord in xrange(3):
          #this makes sure the points are inside new grid
          if entryZ[coord+1] > minsPts[coord] - gridSize/2.:
            if indices[coord] < newMins[coord]:
              newMins[coord] = indices[coord]
          if entryZ[coord+1] < maxsPts[coord] + gridSize/2.:
            if indices[coord] > newMaxs[coord]:
              newMaxs[coord] = indices[coord]
  #add border, careful about current border
  newMins = [max(0, xCount - border) for xCount in newMins]
  newMaxs = [min(lens[coord], newMaxs[coord] + border) for coord in xrange(3)]
  newGrid = []
  for indexX, rowX in enumerate(grid):
    newX = []
    for indexY, rowY in enumerate(rowX):
      newY = []
      for indexZ, entryZ in enumerate(rowY):
        indices = [indexX, indexY, indexZ]
        good = True
        # keep cells in the half-open index box [newMins, newMaxs)
        for coord in xrange(3):
          if indices[coord] < newMins[coord] or \
              indices[coord] >= newMaxs[coord]:
            good = False
        if good:
          newY.append(entryZ)
      if len(newY) > 0:
        newX.append(newY)
    if len(newX) > 0:
      newGrid.append(newX)
  newMinVals = [
      xCount - gridSize/2. for xCount in
      grid[newMins[0]][newMins[1]][newMins[2]][1:]]
  newMaxVals = [
      xCount + gridSize/2. for xCount in
      grid[newMaxs[0] - 1][newMaxs[1] - 1][newMaxs[2] - 1][1:]]
  return newGrid, newMinVals, newMaxVals
def copyGrid(grid):
  '''Return a structural copy of the grid: new x/y/z nesting lists, but
  the cell tuples themselves are shared with the original.'''
  return [[list(rowY) for rowY in rowX] for rowX in grid]
def resetGrid(grid, value=0.):
  '''Overwrite every cell's encoded value with `value`, keeping the
  stored center coordinates; the grid is modified in place and None is
  returned.'''
  for rowX in grid:
    for rowY in rowX:
      for k, entry in enumerate(rowY):
        rowY[k] = (value, entry[1], entry[2], entry[3])
#change -2-travel dist to travel dist
def finalizeGridTravelDist(grid, gridSize):
  '''Convert encoded cell values into real travel distances in the units
  of gridSize, modifying the grid in place.

  Encoding: -1 (outside) becomes 0; values below -2 store a distance of
  -2-value cell counts; positive values store a distance in cell counts;
  codes 0 and -2 are left untouched.
  Returns the maximum travel distance encountered.
  '''
  maxTD = 0.0
  for rowX in grid:
    for rowY in rowX:
      for k, cell in enumerate(rowY):
        code = cell[0]
        if code == -1:
          rowY[k] = (0, cell[1], cell[2], cell[3])
        elif code < -2:
          dist = (-2 - code) * gridSize
          rowY[k] = (dist, cell[1], cell[2], cell[3])
          maxTD = max(maxTD, dist)
        elif code > 0:
          dist = code * gridSize
          rowY[k] = (dist, cell[1], cell[2], cell[3])
          maxTD = max(maxTD, dist)
  return maxTD
def findPointsInCube(index, mins, maxs, gridSize, allPoints, points):
  '''returns a vector of all the indices of points in cube

  index -- (x, y, z) cell to test; maxs is unused but kept for
  compatibility; points rows are [index, x, y, z] with 1-based indices.
  '''
  matching = []
  for pointIndex in allPoints:
    xyz = points[pointIndex - 1][1:]
    if getIndices(mins, gridSize, xyz) == (index[0], index[1], index[2]):
      matching.append(pointIndex)
  return matching
def findLongSurfEdges(pointList, pointNeighborList, gridSize, mins, maxs):
  '''returns the extra edges, i.e. a dictionary of all surface edges between
  grid cubes with their euclidean distance between grid cube centers

  pointNeighborList rows are [pointIndex, neighborCount, n1, n2, ...]
  with 1-based indices into pointList. Distances are in units of grid
  cells (they are computed on cell indices, not raw coordinates).
  Returns (extraEdges, surfaceEdgeBoxes): extraEdges maps a start cell to
  a list of (endCell, L2 length, LINF length); surfaceEdgeBoxes is a dict
  used as a set of cells touched by surface edges.
  '''
  extraEdges = {} # empty dictionary
  surfaceEdgeBoxes = {}
  for pointNeighbors in pointNeighborList:
    pointStart = pointList[pointNeighbors[0] - 1]
    startIndex = getIndices(mins, gridSize, pointStart[1:])
    surfaceEdgeBoxes[startIndex] = True
    #set them all to true, has_key is check
    endList = []
    for neighbors in pointNeighbors[2:]: # pN[1] is # of neighbors
      pointEnd = pointList[neighbors-1]
      endIndex = getIndices(mins, gridSize, pointEnd[1:])
      # LINF > 0 means the edge crosses into a different grid cube
      gridLength = calcEdgeGridDist(
          pointStart[1:], pointEnd[1:], mins, maxs, gridSize, metric='LINF')
      realLength = calcEdgeGridDist(
          pointStart[1:], pointEnd[1:], mins, maxs, gridSize, metric='L2')
      if gridLength > 0: # no reason to add the ones within a grid cube
        if (endIndex, realLength, gridLength) not in endList: # no duplicates
          endList.append((endIndex, realLength, gridLength))
      #also want to add boxes connecting to surfaceEdgeBoxes dict??
    if len(endList) > 0:
      if startIndex not in extraEdges:
        extraEdges[startIndex] = endList
      else: # append
        newList = extraEdges[startIndex] + endList
        extraEdges[startIndex] = newList
  return extraEdges, surfaceEdgeBoxes
def assignPointsValues(pointList, gridD, gridSize, mins, maxs, allPoints=False):
  '''helper function, finds which grid cube each surface point is in, determines
  depth value to assign
  record each points value based on which grid box it is in
  encoded value is -(val)-3 if < -2, 0 and -1 both map to 0, pos map to 1+num

  pointList rows are [index, x, y, z]; allPoints, when given, restricts
  which point indices receive a real value (others are flagged -1).
  Returns a list of [pointIndex, travelDistance] pairs, where -1/-2 flag
  filtered-out and unresolved points respectively.
  '''
  pointTravelDist = []
  for point in pointList:
    pointXYZ = point[1:]
    #compute x, y, z indices into grid (use mins, maxs, gridSize)
    xIndex, yIndex, zIndex = getIndices(mins, gridSize, pointXYZ)
    if allPoints and point[0] not in allPoints:
      #print point[0], " not added, set to -1" #debugging fix loop
      pointTravelDist.append([point[0], -1])  # point filtered out entirely
    elif gridD[xIndex][yIndex][zIndex][0] > 0:
      # positive code: distance in cell counts, scaled to real units
      pointTravelDist.append(
          [point[0], (gridD[xIndex][yIndex][zIndex][0])*gridSize])
    elif gridD[xIndex][yIndex][zIndex][0] >= -1:
      pointTravelDist.append([point[0], 0])  # codes 0 and -1 both map to 0
    elif gridD[xIndex][yIndex][zIndex][0] == -2: # problem
      #print point[0], " not added, set to -2" # debugging fix loop
      pointTravelDist.append([point[0], -2])  # unresolved cell, flagged
    else:
      # codes below -2 encode a distance as -2-code cell counts
      pointTravelDist.append(
          [point[0], (-2-gridD[xIndex][yIndex][zIndex][0])*gridSize])
  return pointTravelDist
|
ryancoleman/traveldistance
|
src/grid.py
|
Python
|
gpl-2.0
| 18,159
|
[
"CRYSTAL"
] |
eba491d005b770f6652781c7ccc243fadf84a227a344d9e37fc08636d5c91bec
|
# Compute the audio novelty features of a song
import sys
import numpy as np
import scipy
from ..utils import RMS_energy
def novelty(song, k=64, wlen_ms=100, start=0, duration=None, nchangepoints=5, feature="rms"):
    """Return points of high "novelty" in a song
    (e.g., significant musical transitions)
    :param song: Song to analyze
    :type song: :py:class:`radiotool.composer.Song`
    :param k: Width of comparison kernel (larger kernel finds coarser differences in music)
    :type k: int
    :param wlen_ms: Analysis window length in milliseconds
    :type wlen_ms: int
    :param start: Where to start analysis within the song (in seconds)
    :type start: float
    :param duration: How long of a chunk of the song to analyze (None analyzes the entire song after start)
    :type duration: float
    :param nchangepoints: How many novel change points to return
    :type nchangepoints: int
    :param feature: Music feature to use for novelty analysis
    :type feature: "rms" or "mfcc" (will support "chroma" eventually)
    :returns: List of change points (in seconds)
    :rtype: list of floats
    """
    # NOTE(review): Python 2 only as written (`raise E, "msg"` syntax,
    # xrange, and integer '/' in the slice arithmetic below).
    if feature != "rms" and feature != "mfcc":
        raise ValueError, "novelty currently only supports 'rms' and 'mfcc' features"
    if feature == "rms":
        frames = song.all_as_mono()
        wlen_samples = int(wlen_ms * song.samplerate / 1000)
        if duration is None:
            frames = frames[start * song.samplerate:]
        else:
            frames = frames[start * song.samplerate:(start + duration) *
                            song.samplerate]
        # Compute energies: hamming-windowed RMS over half-overlapping windows
        hamming = np.hamming(wlen_samples)
        nwindows = int(2 * song.duration / wlen_samples - 1)
        energies = np.empty(nwindows)
        for i in range(nwindows):
            energies[i] = RMS_energy(
                hamming *
                frames[i * wlen_samples / 2:
                       i * wlen_samples / 2 + wlen_samples]
            )
        energies_list = [[x] for x in energies]
    elif feature == "mfcc":
        analysis = song.analysis
        energies_list = np.array(analysis["timbres"])
    # Compute similarities: 1 - pairwise euclidean distance matrix
    S_matrix = 1 - scipy.spatial.distance.squareform(
        scipy.spatial.distance.pdist(energies_list, 'euclidean'))
    # smooth the C matrix with a gaussian taper
    # (C is a 2k x 2k checkerboard: +1 on-diagonal blocks, -1 off-diagonal)
    C_matrix = np.kron(np.eye(2), np.ones((k,k))) -\
        np.kron([[0, 1], [1, 0]], np.ones((k,k)))
    g = scipy.signal.gaussian(2*k, k)
    C_matrix = np.multiply(C_matrix, np.multiply.outer(g.T, g))
    # Created checkerboard kernel
    # Correlate the kernel along the diagonal of the similarity matrix
    N_vec = np.zeros(np.shape(S_matrix)[0])
    for i in xrange(k, len(N_vec) - k):
        S_part = S_matrix[i - k:i + k, i - k:i + k]
        N_vec[i] = np.sum(np.multiply(S_part, C_matrix))
    # Computed checkerboard response
    peaks = naive_peaks(N_vec, k=k / 2 + 1)
    out_peaks = []
    if feature == "rms":
        # ensure that the points we return are more exciting
        # after the change point than before the change point
        for p in peaks:
            frame = p[0]
            if frame > k:
                left_frames = frames[int((frame - k) * wlen_samples / 2):
                                     int(frame * wlen_samples / 2)]
                right_frames = frames[int(frame * wlen_samples / 2):
                                      int((frame + k) * wlen_samples / 2)]
                if RMS_energy(left_frames) <\
                        RMS_energy(right_frames):
                    out_peaks.append(p)
        # window index -> seconds (each hop is wlen_ms / 2 milliseconds)
        out_peaks = [(x[0] * wlen_ms / 2000.0, x[1]) for x in out_peaks]
        # NOTE(review): this loop has no effect (it only breaks); the slice
        # below already limits the result count.
        for i, p in enumerate(out_peaks):
            if i == nchangepoints:
                break
        return [x[0] for x in out_peaks[:nchangepoints]]
    elif feature == "mfcc":
        beats = analysis["beats"]
        return [beats[int(b[0])] for b in peaks[:nchangepoints]]
def smooth_hanning(x, size=11):
    """smooth a 1D array using a hanning window with requested size.

    :param x: 1-D numpy array to smooth
    :param size: window length; sizes < 3 return x unchanged
    :returns: smoothed array of length len(x) + size - 1
    :raises ValueError: if x is not 1-D or is shorter than the window
    """
    # raise-as-call works on both Python 2 and 3; the original
    # `raise ValueError, "..."` form is a SyntaxError on Python 3
    if x.ndim != 1:
        raise ValueError("smooth_hanning only accepts 1-D arrays.")
    if x.size < size:
        raise ValueError("Input vector needs to be bigger than window size.")
    if size < 3:
        return x
    # reflect both ends so the smoothed signal does not fade at the borders
    s = np.r_[x[size - 1:0:-1], x, x[-1:-size:-1]]
    w = np.hanning(size)
    # normalize the window so a constant input stays constant
    y = np.convolve(w / w.sum(), s, mode='valid')
    return y
def naive_peaks(vec, k=33):
    """A naive method for finding peaks of a signal.
    1. Smooth vector
    2. Find peaks (local maxima)
    3. Find local max from original signal, pre-smoothing
    4. Return (sorted, descending) peaks

    :param vec: 1-D signal
    :param k: smoothing window length (assumed odd so (k - 1) // 2 is exact)
    :returns: N x 2 array of (index, value) rows, sorted by value descending
    """
    a = smooth_hanning(vec, k)
    # integer division: (k - 1) / 2 yields a float on Python 3, which
    # would crash the slice arithmetic below; // matches Py2 behavior
    k2 = (k - 1) // 2
    # local maxima: greater than the left neighbor, greater than the right
    peaks = np.r_[True, a[1:] > a[:-1]] & np.r_[a[:-1] > a[1:], True]
    p = np.array(np.where(peaks)[0])
    maxidx = np.zeros(np.shape(p))
    maxvals = np.zeros(np.shape(p))
    for i, pk in enumerate(p):
        # refine each smoothed peak against the raw (unsmoothed) signal
        maxidx[i] = np.argmax(vec[pk - k2:pk + k2]) + pk - k2
        maxvals[i] = np.max(vec[pk - k2:pk + k2])
    out = np.array([maxidx, maxvals]).T
    return out[(-out[:, 1]).argsort()]
if __name__ == '__main__':
    # CLI usage: python novelty.py <song> <kernel_width>
    # NOTE(review): novelty() expects a Song object but receives the raw
    # sys.argv[1] path string here; presumably this entry point predates
    # the current API -- confirm before relying on it.
    novelty(sys.argv[1], k=int(sys.argv[2]))
|
ucbvislab/radiotool
|
radiotool/algorithms/novelty.py
|
Python
|
isc
| 5,195
|
[
"Gaussian",
"exciting"
] |
5bcb7ce9466e303cddf4d6935d3cc8d6911b3aab69b561996485d23eae2a8e7e
|
# rdesignerProtos.py ---
#
# Filename: rdesignerProtos.py
# Description:
# Author: Subhasis Ray, Upi Bhalla
# Maintainer:
# Created: Tue May 7 12:11:22 2013 (+0530)
# Version:
# Last-Updated: Wed Dec 30 13:01:00 2015 (+0530)
# By: Upi
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import numpy as np
import moose
import math
from moose import utils
EREST_ACT = -70e-3  # resting potential (V) used by the HH channel builders
per_ms = 1e3  # converts per-millisecond rate constants to per-second
PI = 3.14159265359  # local pi constant; kept instead of math.pi to preserve legacy values
FaradayConst = 96485.3365 # Coulomb/mol
def make_HH_Na(name = 'HH_Na', parent='/library', vmin=-110e-3, vmax=50e-3, vdivs=3000):
    """Create a Hodgkin-Huxley Na channel under `parent`.
    vmin, vmax, vdivs: voltage range and number of divisions for gate tables

    Builds the squid-axon Na channel (Ek = +50 mV, m^3 h gating) with
    tabulated rate constants scaled from per-ms to per-second values.
    Returns the new HHChannel.
    """
    na = moose.HHChannel('%s/%s' % (parent, name))
    na.Ek = 50e-3
    na.Xpower = 3  # m^3 activation
    na.Ypower = 1  # h inactivation
    # rates are evaluated relative to the resting potential EREST_ACT;
    # the * 1e3 factors convert volts to mV inside the rate formulas
    v = np.linspace(vmin, vmax, vdivs+1) - EREST_ACT
    m_alpha = per_ms * (25 - v * 1e3) / (10 * (np.exp((25 - v * 1e3) / 10) - 1))
    m_beta = per_ms * 4 * np.exp(- v * 1e3/ 18)
    m_gate = moose.element('%s/gateX' % (na.path))
    m_gate.min = vmin
    m_gate.max = vmax
    m_gate.divs = vdivs
    m_gate.tableA = m_alpha
    m_gate.tableB = m_alpha + m_beta  # tableB holds alpha + beta
    h_alpha = per_ms * 0.07 * np.exp(-v / 20e-3)
    h_beta = per_ms * 1/(np.exp((30e-3 - v) / 10e-3) + 1)
    h_gate = moose.element('%s/gateY' % (na.path))
    h_gate.min = vmin
    h_gate.max = vmax
    h_gate.divs = vdivs
    h_gate.tableA = h_alpha
    h_gate.tableB = h_alpha + h_beta
    na.tick = -1  # tick -1: presumably detaches it from the scheduler -- confirm moose docs
    return na
def make_HH_K(name = 'HH_K', parent='/library', vmin=-120e-3, vmax=40e-3, vdivs=3000):
    """Create a Hodgkin-Huxley K channel under `parent`.
    vmin, vmax, vdivs: voltage range and number of divisions for gate tables

    Builds the squid-axon delayed-rectifier K channel (Ek = -77 mV, n^4
    gating) with tabulated per-second rate constants.
    Returns the new HHChannel.
    """
    k = moose.HHChannel('%s/%s' % (parent, name))
    k.Ek = -77e-3
    k.Xpower = 4  # n^4 activation
    # rates are evaluated relative to EREST_ACT; * 1e3 converts volts to mV
    v = np.linspace(vmin, vmax, vdivs+1) - EREST_ACT
    n_alpha = per_ms * (10 - v * 1e3)/(100 * (np.exp((10 - v * 1e3)/10) - 1))
    n_beta = per_ms * 0.125 * np.exp(- v * 1e3 / 80)
    n_gate = moose.element('%s/gateX' % (k.path))
    n_gate.min = vmin
    n_gate.max = vmax
    n_gate.divs = vdivs
    n_gate.tableA = n_alpha
    n_gate.tableB = n_alpha + n_beta  # tableB holds alpha + beta
    k.tick = -1  # tick -1: presumably detaches it from the scheduler -- confirm moose docs
    return k
#========================================================================
# SynChan: Glu receptor
#========================================================================
def make_glu( name ):
    """Create a dual-exponential glutamate-receptor SynChan named `name`
    under /library, with a SimpleSynHandler child driving its activation.

    Returns None (without side effects) if the channel already exists;
    otherwise returns the new SynChan (Ek = 0 mV, tau1 = 2 ms,
    tau2 = 9 ms; Gbar is left for the caller to set).
    """
    if moose.exists( '/library/' + name ):
        return
    glu = moose.SynChan( '/library/' + name )
    glu.Ek = 0.0
    glu.tau1 = 2.0e-3
    glu.tau2 = 9.0e-3
    sh = moose.SimpleSynHandler( glu.path + '/sh' )
    moose.connect( sh, 'activationOut', glu, 'activation' )
    sh.numSynapses = 1
    sh.synapse[0].weight = 1
    return glu
#========================================================================
# SynChan: GABA receptor
#========================================================================
def make_GABA( name ):
    """Create a dual-exponential GABA-receptor SynChan named `name` under
    /library, with a SimpleSynHandler child driving its activation.

    Returns None (without side effects) if the channel already exists;
    otherwise returns the new SynChan (tau1 = 4 ms, tau2 = 9 ms),
    mirroring make_glu.
    """
    if moose.exists( '/library/' + name ):
        return
    GABA = moose.SynChan( '/library/' + name )
    # NOTE(review): EK is not defined in this module's visible scope --
    # presumably a module-level K+ reversal constant; confirm upstream.
    GABA.Ek = EK + 10.0e-3
    GABA.tau1 = 4.0e-3
    GABA.tau2 = 9.0e-3
    sh = moose.SimpleSynHandler( GABA.path + '/sh' )
    moose.connect( sh, 'activationOut', GABA, 'activation' )
    sh.numSynapses = 1
    sh.synapse[0].weight = 1
    # return the channel for consistency with make_glu (previously the
    # caller had no handle on the newly built channel)
    return GABA
def makeChemOscillator( name = 'osc', parent = '/library' ):
    """
    This function sets up a simple oscillatory chemical system within
    the script. The reaction system is::
        s ---a---> a // s goes to a, catalyzed by a.
        s ---a---> b // s goes to b, catalyzed by a.
        a ---b---> s // a goes to s, catalyzed by b.
        b -------> s // b is degraded irreversibly to s.
    in sum, **a** has a positive feedback onto itself and also forms **b**.
    **b** has a negative feedback onto **a**.
    Finally, the diffusion constant for **a** is 1/10 that of **b**.
    """
    # (docstring moved to the top of the function so it is picked up as
    # the real __doc__; it previously sat after the first two statements)
    # create container for model
    model = moose.Neutral( parent + '/' + name )
    compt = moose.CubeMesh( model.path + '/kinetics' )
    diffConst = 10e-12 # m^2/sec
    motorRate = 1e-6 # m/sec (not used below; kept for reference)
    concA = 1 # millimolar (not used below; kept for reference)
    # create molecules and reactions
    a = moose.Pool( compt.path + '/a' )
    b = moose.Pool( compt.path + '/b' )
    s = moose.Pool( compt.path + '/s' )
    e1 = moose.MMenz( compt.path + '/e1' )
    e2 = moose.MMenz( compt.path + '/e2' )
    e3 = moose.MMenz( compt.path + '/e3' )
    r1 = moose.Reac( compt.path + '/r1' )
    a.concInit = 0.1
    b.concInit = 0.1
    s.concInit = 1
    # e1: s -> a, catalyzed by a (positive feedback on a)
    moose.connect( e1, 'sub', s, 'reac' )
    moose.connect( e1, 'prd', a, 'reac' )
    moose.connect( a, 'nOut', e1, 'enzDest' )
    e1.Km = 1
    e1.kcat = 1
    # e2: s -> b, catalyzed by a
    moose.connect( e2, 'sub', s, 'reac' )
    moose.connect( e2, 'prd', b, 'reac' )
    moose.connect( a, 'nOut', e2, 'enzDest' )
    e2.Km = 1
    e2.kcat = 0.5
    # e3: a -> s, catalyzed by b (negative feedback on a)
    moose.connect( e3, 'sub', a, 'reac' )
    moose.connect( e3, 'prd', s, 'reac' )
    e3.Km = 0.1
    e3.kcat = 1
    moose.connect( b, 'nOut', e3, 'enzDest' )
    # r1: b -> s, irreversible degradation of b
    moose.connect( r1, 'sub', b, 'reac' )
    moose.connect( r1, 'prd', s, 'reac' )
    r1.Kf = 0.3 # 1/sec
    r1.Kb = 0 # 1/sec
    # Assign parameters
    a.diffConst = diffConst/10
    b.diffConst = diffConst
    s.diffConst = 0
    return compt
#################################################################
# Here we have a series of utility functions for building cell
# prototypes.
#################################################################
def transformNMDAR( path ):
    '''Replace every channel whose name contains "NMDA" (and is not
    already an NMDAChan) under `path` with a moose.NMDAChan, copying over
    Ek, tau1, tau2 and Gbar and assigning Mg-block and Ca parameters.

    If a CaConcBase sibling exists, the NMDAChan's Ca current and the
    pool's concentration are cross-connected; otherwise a warning is
    printed.
    '''
    for i in moose.wildcardFind( path + "/##/#NMDA#[ISA!=NMDAChan]" ):
        chanpath = i.path
        pa = i.parent
        i.name = '_temp'  # rename the old channel to free its path
        if ( chanpath[-3:] == "[0]" ):
            chanpath = chanpath[:-3]  # strip the element index suffix
        nmdar = moose.NMDAChan( chanpath )
        sh = moose.SimpleSynHandler( chanpath + '/sh' )
        moose.connect( sh, 'activationOut', nmdar, 'activation' )
        sh.numSynapses = 1
        sh.synapse[0].weight = 1
        # carry over the old channel's kinetic/conductance parameters
        nmdar.Ek = i.Ek
        nmdar.tau1 = i.tau1
        nmdar.tau2 = i.tau2
        nmdar.Gbar = i.Gbar
        nmdar.CMg = 12
        nmdar.KMg_A = 1.0 / 0.28
        nmdar.KMg_B = 1.0 / 62
        nmdar.temperature = 300
        nmdar.extCa = 1.5
        nmdar.intCa = 0.00008
        nmdar.intCaScale = 1
        nmdar.intCaOffset = 0.00008
        nmdar.condFraction = 0.02
        moose.delete( i )  # remove the renamed original
        moose.connect( pa, 'channel', nmdar, 'channel' )
        caconc = moose.wildcardFind( pa.path + '/#[ISA=CaConcBase]' )
        if ( len( caconc ) < 1 ):
            print('no caconcs found on ', pa.path)
        else:
            moose.connect( nmdar, 'ICaOut', caconc[0], 'current' )
            moose.connect( caconc[0], 'concOut', nmdar, 'assignIntCa' )
################################################################
# Utility function for building a compartment, used for spines.
# Builds a compartment object downstream (further away from soma)
# of the specfied previous compartment 'pa'. If 'pa' is not a
# compartment, it builds it on 'pa'. It places the compartment
# on the end of 'prev', and at 0,0,0 otherwise.
def buildCompt( pa, name, RM = 1.0, RA = 1.0, CM = 0.01, dia = 1.0e-6, x = 0.0, y = 0.0, z = 0.0, dx = 10e-6, dy = 0.0, dz = 0.0 ):
    """Create a moose.Compartment under `pa` running from (x, y, z) to
    (x+dx, y+dy, z+dz), with passive parameters scaled from the specific
    values RM (ohm m^2), RA (ohm m) and CM (F/m^2) by the cylinder's
    geometry. Returns the new compartment."""
    length = np.sqrt( dx * dx + dy * dy + dz * dz )
    compt = moose.Compartment( pa.path + '/' + name )
    compt.x0 = x
    compt.y0 = y
    compt.z0 = z
    compt.x = x + dx
    compt.y = y + dy
    compt.z = z + dz
    compt.diameter = dia
    compt.length = length
    crossArea = dia * dia * PI / 4.0
    surfArea = length * dia * PI
    # scale specific passive properties by cross-section / surface area
    compt.Ra = RA * length / crossArea
    compt.Rm = RM / surfArea
    compt.Cm = CM * surfArea
    return compt
def buildComptWrapper( pa, name, length, dia, xoffset, RM, RA, CM ):
    """Convenience wrapper around buildCompt: build a compartment of the
    given length and diameter, starting at xoffset along the x axis."""
    return buildCompt( pa, name, RM, RA, CM,
            dia = dia, x = xoffset, dx = length )
################################################################
# Utility function for building a synapse, used for spines.
def buildSyn( name, compt, Ek, tau1, tau2, Gbar, CM ):
    """Create a dual-exponential SynChan on `compt` together with its
    SimpleSynHandler, scaling the per-area Gbar by the compartment's
    membrane area (recovered as Cm / CM). Returns the new SynChan."""
    syn = moose.SynChan( compt.path + '/' + name )
    syn.Ek = Ek
    syn.tau1 = tau1
    syn.tau2 = tau2
    # Gbar is specified per unit area; compt.Cm / CM gives the area
    syn.Gbar = Gbar * compt.Cm / CM
    moose.connect( compt, 'channel', syn, 'channel' )
    handler = moose.SimpleSynHandler( syn.path + '/sh' )
    moose.connect( handler, 'activationOut', syn, 'activation' )
    handler.numSynapses = 1
    handler.synapse[0].weight = 1
    return syn
######################################################################
# Utility function, borrowed from proto18.py, for making an LCa channel.
# Based on Traub's 91 model, I believe.
def make_LCa( name = 'LCa', parent = '/library' ):
    '''Build an L-type Ca channel (X^2 Y gating) under `parent`, based on
    the Traub 1991 model; returns None if it already exists, otherwise
    the new HHChannel (Gbar left at 0 for the caller to set).'''
    EREST_ACT = -0.060 #/* hippocampal cell resting potl */
    ECA = 0.140 + EREST_ACT #// 0.080
    if moose.exists( parent + '/' + name ):
        return
    Ca = moose.HHChannel( parent + '/' + name )
    Ca.Ek = ECA
    Ca.Gbar = 0
    Ca.Gk = 0
    Ca.Xpower = 2
    Ca.Ypower = 1
    Ca.Zpower = 0
    xgate = moose.element( parent + '/' + name + '/gateX' )
    # alphaParms packs the rate-equation coefficients plus the table
    # size/range -- presumably the setupAlpha parameter order with
    # (xdivs, xmin, xmax) as the last three entries; confirm against
    # the moose HHGate documentation.
    xA = np.array( [ 1.6e3, 0, 1.0, -1.0 * (0.065 + EREST_ACT), -0.01389, -20e3 * (0.0511 + EREST_ACT), 20e3, -1.0, -1.0 * (0.0511 + EREST_ACT), 5.0e-3, 3000, -0.1, 0.05 ] )
    xgate.alphaParms = xA
    ygate = moose.element( parent + '/' + name + '/gateY' )
    ygate.min = -0.1
    ygate.max = 0.05
    ygate.divs = 3000
    yA = np.zeros( (ygate.divs + 1), dtype=float)
    yB = np.zeros( (ygate.divs + 1), dtype=float)
    #Fill the Y_A table with alpha values and the Y_B table with (alpha+beta)
    dx = (ygate.max - ygate.min)/ygate.divs
    x = ygate.min
    for i in range( ygate.divs + 1 ):
        if ( x > EREST_ACT):
            yA[i] = 5.0 * math.exp( -50 * (x - EREST_ACT) )
        else:
            yA[i] = 5.0
        yB[i] = 5.0
        x += dx
    ygate.tableA = yA
    ygate.tableB = yB
    return Ca
################################################################
# API function for building spine prototypes. Here we put in the
# spine dimensions, and options for standard channel types.
# The synList tells it to create dual alpha function synchans:
# [name, Erev, tau1, tau2, conductance_density, connectToCa]
# The chanList tells it to copy over channels defined in /library
# and assign the specified conductance density.
# If caTau <= zero then there is no caConc created, otherwise it
# creates one and assigns the desired tau in seconds.
# With the default arguments here it will create a glu, NMDA and LCa,
# and add a Ca_conc.
def addSpineProto( name = 'spine',
        parent = '/library',
        RM = 1.0, RA = 1.0, CM = 0.01,
        shaftLen = 1.e-6 , shaftDia = 0.2e-6,
        headLen = 0.5e-6, headDia = 0.5e-6,
        synList = (),
        chanList = (),
        caTau = 0.0
        ):
    '''Build a two-compartment (shaft + head) spine prototype under
    `parent`; see the comment block above for the synList/chanList entry
    formats and the caTau semantics. Returns the spine Neutral container.'''
    assert( moose.exists( parent ) ), "%s must exist" % parent
    spine = moose.Neutral( parent + '/' + name )
    shaft = buildComptWrapper( spine, 'shaft', shaftLen, shaftDia, 0.0, RM, RA, CM )
    head = buildComptWrapper( spine, 'head', headLen, headDia, shaftLen, RM, RA, CM )
    moose.connect( shaft, 'axial', head, 'raxial' )
    if caTau > 0.0:
        conc = moose.CaConc( head.path + '/Ca_conc' )
        conc.tau = caTau
        conc.length = head.length
        conc.diameter = head.diameter
        conc.thick = 0.0
        # The 'B' field is deprecated.
        # B = 1/(ion_charge * Faraday * volume)
        #vol = head.length * head.diameter * head.diameter * PI / 4.0
        #conc.B = 1.0 / ( 2.0 * FaradayConst * vol )
        conc.Ca_base = 0.0
    for i in synList:
        # i = [name, Erev, tau1, tau2, Gbar density, connectToCa]
        syn = buildSyn( i[0], head, i[1], i[2], i[3], i[4], CM )
        if i[5] and caTau > 0.0:
            moose.connect( syn, 'IkOut', conc, 'current' )
    for i in chanList:
        # i = [name, Gbar density, connectToCa]; falls back to building an
        # LCa prototype when the named channel is not already in /library
        if ( moose.exists( parent + '/' + i[0] ) ):
            chan = moose.copy( parent + '/' + i[0], head )
        else:
            moose.setCwe( head )
            chan = make_LCa()
            chan.name = i[0]
            moose.setCwe( '/' )
        chan.Gbar = i[1] * head.Cm / CM  # per-area Gbar scaled by head area
        #print "CHAN = ", chan, chan.tick, chan.Gbar
        moose.connect( head, 'channel', chan, 'channel' )
        if i[2] and caTau > 0.0:
            moose.connect( chan, 'IkOut', conc, 'current' )
    transformNMDAR( parent + '/' + name )
    return spine
#######################################################################
# Here are some compartment related prototyping functions
def makePassiveHHsoma(name = 'passiveHHsoma', parent='/library'):
    '''Make a passive soma sized like the HH squid compartment:
    length and diameter 500 microns; RM = 0.333 ohm m^2, RA = 3000 ohm m,
    CM = 0.01 F/m^2. Returns the (possibly pre-existing) Neuron element.
    '''
    elecpath = parent + '/' + name
    if moose.exists( elecpath ):
        return moose.element( elecpath )
    elecid = moose.Neuron( elecpath )
    dia = 500e-6
    soma = buildComptWrapper( elecid, 'soma', dia, dia, 0.0,
            0.33333333, 3000, 0.01 )
    soma.initVm = -65e-3 # Resting of -65, from HH
    soma.Em = -54.4e-3 # 10.6 mV above resting of -65, from HH
    return elecid
# Wrapper function. This is used by the proto builder from rdesigneur
def makeActiveSpine(name = 'active_spine', parent='/library'):
    """Build an active spine prototype: 'glu' and 'NMDA' synapses plus a
    'Ca' channel and a calcium pool (caTau > 0). Used by the rdesigneur
    proto builder."""
    # synapse spec: [name, offset, tau1, tau2, Gbar-scale, feeds Ca pool]
    glu_syn = ['glu', 0.0, 2e-3, 9e-3, 200.0, False]
    nmda_syn = ['NMDA', 0.0, 20e-3, 20e-3, 80.0, True]
    # channel spec: [name, Gbar-scale, feeds Ca pool]
    ca_chan = ['Ca', 10.0, True]
    return addSpineProto(
        name = name,
        parent = parent,
        synList = (glu_syn, nmda_syn),
        chanList = (ca_chan,),
        caTau = 13.333e-3
    )
# Wrapper function. This is used by the proto builder from rdesigneur
def makeExcSpine(name = 'exc_spine', parent='/library'):
    """Build an excitatory spine prototype: 'glu' and 'NMDA' synapses and a
    calcium pool, but no voltage-gated channels. Used by the rdesigneur
    proto builder."""
    # synapse spec: [name, offset, tau1, tau2, Gbar-scale, feeds Ca pool]
    glu_syn = ['glu', 0.0, 2e-3, 9e-3, 200.0, False]
    nmda_syn = ['NMDA', 0.0, 20e-3, 20e-3, 80.0, True]
    return addSpineProto(
        name = name,
        parent = parent,
        synList = (glu_syn, nmda_syn),
        caTau = 13.333e-3
    )
# Wrapper function. This is used by the proto builder from rdesigneur
def makePassiveSpine(name = 'passive_spine', parent='/library'):
    """Build a purely passive spine prototype: no synapses, no channels,
    no calcium pool. Used by the rdesigneur proto builder."""
    return addSpineProto(name = name, parent = parent)
# legacy function. This is used by the proto builder from rdesigneur
def makeSpineProto( name ):
    """Legacy wrapper: build a passive spine prototype called *name*.

    Fix: the created prototype is now returned, consistent with the other
    wrapper functions above. Backward compatible -- existing callers simply
    ignored the previous implicit None return.
    """
    return addSpineProto( name = name, chanList = () )
|
subhacom/moose-core
|
python/rdesigneur/rdesigneurProtos.py
|
Python
|
gpl-3.0
| 15,090
|
[
"MOOSE",
"NEURON"
] |
2e40e3a0d4ed828f61bf4c69fbc80b52735aef11bb8076126f7201349c4731f6
|
# -*- coding: utf-8 -*-
"""
Functions relevant for photometric calibration
Table of contents:
1. Available response functions
2. Adding filters on the fly
- Defining a new filter
- Temporarily modifying an existing filter
3. Adding filters permanently
Section 1. Available response functions
=======================================
Short list of available systems:
>>> responses = list_response()
>>> systems = [response.split('.')[0] for response in responses]
>>> set_responses = sorted(set([response.split('.')[0] for response in systems]))
>>> for i,resp in enumerate(set_responses):
... print '%10s (%3d filters)'%(resp,systems.count(resp))
2MASS ( 3 filters)
ACSHRC ( 17 filters)
ACSSBC ( 6 filters)
ACSWFC ( 12 filters)
AKARI ( 13 filters)
ANS ( 6 filters)
APEX ( 1 filters)
ARGUE ( 3 filters)
BESSEL ( 6 filters)
BESSELL ( 6 filters)
COROT ( 2 filters)
COUSINS ( 3 filters)
DDO ( 7 filters)
DENIS ( 3 filters)
DIRBE ( 10 filters)
EEV4280 ( 1 filters)
ESOIR ( 10 filters)
GAIA ( 4 filters)
GALEX ( 2 filters)
GENEVA ( 7 filters)
HIPPARCOS ( 1 filters)
IPHAS ( 3 filters)
IRAC ( 4 filters)
IRAS ( 4 filters)
ISOCAM ( 21 filters)
JOHNSON ( 25 filters)
KEPLER ( 43 filters)
KRON ( 2 filters)
LANDOLT ( 6 filters)
MIPS ( 3 filters)
MOST ( 1 filters)
MSX ( 6 filters)
NARROW ( 1 filters)
NICMOS ( 6 filters)
OAO2 ( 12 filters)
PACS ( 3 filters)
SAAO ( 13 filters)
SCUBA ( 6 filters)
SDSS ( 10 filters)
SLOAN ( 2 filters)
SPIRE ( 3 filters)
STEBBINS ( 6 filters)
STISCCD ( 2 filters)
STISFUV ( 4 filters)
STISNUV ( 7 filters)
STROMGREN ( 6 filters)
TD1 ( 4 filters)
TYCHO ( 2 filters)
TYCHO2 ( 2 filters)
ULTRACAM ( 5 filters)
USNOB1 ( 2 filters)
UVEX ( 5 filters)
VILNIUS ( 7 filters)
VISIR ( 13 filters)
WALRAVEN ( 5 filters)
WFCAM ( 5 filters)
WFPC2 ( 21 filters)
WISE ( 4 filters)
WOOD ( 12 filters)
Plots of all passbands of all systems:
]include figure]]ivs_sed_filters_2MASS.png]
]include figure]]ivs_sed_filters_ACSHRC.png]
]include figure]]ivs_sed_filters_ACSSBC.png]
]include figure]]ivs_sed_filters_ACSWFC.png]
]include figure]]ivs_sed_filters_AKARI.png]
]include figure]]ivs_sed_filters_ANS.png]
]include figure]]ivs_sed_filters_APEX.png]
]include figure]]ivs_sed_filters_ARGUE.png]
]include figure]]ivs_sed_filters_BESSEL.png]
]include figure]]ivs_sed_filters_BESSELL.png]
]include figure]]ivs_sed_filters_COROT.png]
]include figure]]ivs_sed_filters_COUSINS.png]
]include figure]]ivs_sed_filters_DDO.png]
]include figure]]ivs_sed_filters_DENIS.png]
]include figure]]ivs_sed_filters_DIRBE.png]
]include figure]]ivs_sed_filters_ESOIR.png]
]include figure]]ivs_sed_filters_EEV4280.png]
]include figure]]ivs_sed_filters_GAIA.png]
]include figure]]ivs_sed_filters_GALEX.png]
]include figure]]ivs_sed_filters_GENEVA.png]
]include figure]]ivs_sed_filters_HIPPARCOS.png]
]include figure]]ivs_sed_filters_IPHAS.png]
]include figure]]ivs_sed_filters_IRAC.png]
]include figure]]ivs_sed_filters_IRAS.png]
]include figure]]ivs_sed_filters_ISOCAM.png]
]include figure]]ivs_sed_filters_JOHNSON.png]
]include figure]]ivs_sed_filters_KEPLER.png]
]include figure]]ivs_sed_filters_KRON.png]
]include figure]]ivs_sed_filters_LANDOLT.png]
]include figure]]ivs_sed_filters_MIPS.png]
]include figure]]ivs_sed_filters_MOST.png]
]include figure]]ivs_sed_filters_MSX.png]
]include figure]]ivs_sed_filters_NARROW.png]
]include figure]]ivs_sed_filters_NICMOS.png]
]include figure]]ivs_sed_filters_OAO2.png]
]include figure]]ivs_sed_filters_PACS.png]
]include figure]]ivs_sed_filters_SAAO.png]
]include figure]]ivs_sed_filters_SCUBA.png]
]include figure]]ivs_sed_filters_SDSS.png]
]include figure]]ivs_sed_filters_SLOAN.png]
]include figure]]ivs_sed_filters_SPIRE.png]
]include figure]]ivs_sed_filters_STEBBINS.png]
]include figure]]ivs_sed_filters_STISCCD.png]
]include figure]]ivs_sed_filters_STISFUV.png]
]include figure]]ivs_sed_filters_STISNUV.png]
]include figure]]ivs_sed_filters_STROMGREN.png]
]include figure]]ivs_sed_filters_TD1.png]
]include figure]]ivs_sed_filters_TYCHO.png]
]include figure]]ivs_sed_filters_TYCHO2.png]
]include figure]]ivs_sed_filters_ULTRACAM.png]
]include figure]]ivs_sed_filters_USNOB1.png]
]include figure]]ivs_sed_filters_UVEX.png]
]include figure]]ivs_sed_filters_VILNIUS.png]
]include figure]]ivs_sed_filters_VISIR.png]
]include figure]]ivs_sed_filters_WALRAVEN.png]
]include figure]]ivs_sed_filters_WFPC2.png]
]include figure]]ivs_sed_filters_WISE.png]
]include figure]]ivs_sed_filters_WOOD.png]
Section 2: Adding filters on the fly
====================================
Section 2.1: Defining a new filter
----------------------------------
You can add custom filters on the fly using L{add_custom_filter}. In this
example we add a weird-looking filter and check the definition of Flambda and
Fnu and its relation to the effective wavelength of a passband:
Prerequisites: some modules that come in handy:
>>> from cc.ivs.sigproc import funclib
>>> from cc.ivs.sed import model
>>> from cc.ivs.units import conversions
First, we'll define a double peakd Gaussian profile on the wavelength grid of
the WISE.W3 response curve:
>>> wave = get_response('WISE.W3')[0]
>>> trans = funclib.evaluate('gauss',wave,[1.5,76000.,10000.,0.])
>>> trans+= funclib.evaluate('gauss',wave,[1.0,160000.,25000.,0.])
This is what it looks like:
>>> p = pl.figure()
>>> p = pl.plot(wave/1e4,trans,'k-')
>>> p = pl.xlabel("Wavelength [micron]")
>>> p = pl.ylabel("Transmission [arbitrary units]")
]include figure]]ivs_sed_filters_weird_trans.png]
We can add this filter to the list of predefined filters in the following way
(for the doctests to work, we have to do a little work around and call
filters via that module, this is not needed in a normal workflow):
>>> model.filters.add_custom_filter(wave,trans,photband='LAMBDA.CCD',type='CCD')
>>> model.filters.add_custom_filter(wave,trans,photband='LAMBDA.BOL',type='BOL')
Note that we add the filter twice, once assuming that it is mounted on a
bolometer, and once on a CCD device. We'll call the filter C{LAMBDA.CCD} and
C{LAMBDA.BOL}. From now on, they are available within functions as L{get_info}
and L{get_response}. For example, what is the effective (actually pivot)
wavelength?
>>> effwave_ccd = model.filters.eff_wave('LAMBDA.CCD')
>>> effwave_bol = model.filters.eff_wave('LAMBDA.BOL')
>>> print(effwave_ccd,effwave_bol)
(119263.54911400242, 102544.27931275869)
Let's do some synthetic photometry now. Suppose we have a black body atmosphere:
>>> bb = model.blackbody(wave,5777.)
We now calculate the synthetic flux, assuming the CCD and BOL device. We
compute the synthetic flux both in Flambda and Fnu:
>>> flam_ccd,flam_bol = model.synthetic_flux(wave,bb,['LAMBDA.CCD','LAMBDA.BOL'])
>>> fnu_ccd,fnu_bol = model.synthetic_flux(wave,bb,['LAMBDA.CCD','LAMBDA.BOL'],units=['FNU','FNU'])
You can see that the fluxes can be quite different when you assume photon or
energy counting devices!
>>> flam_ccd,flam_bol
(897.68536911320564, 1495.248213834755)
>>> fnu_ccd,fnu_bol
(4.2591095543803019e-06, 5.2446332430111098e-06)
Can we now readily convert Flambda to Fnu with assuming the pivot wavelength?
>>> fnu_fromflam_ccd = conversions.convert('erg/s/cm2/AA','erg/s/cm2/Hz',flam_ccd,wave=(effwave_ccd,'A'))
>>> fnu_fromflam_bol = conversions.convert('erg/s/cm2/AA','erg/s/cm2/Hz',flam_bol,wave=(effwave_bol,'A'))
Which is equivalent with:
>>> fnu_fromflam_ccd = conversions.convert('erg/s/cm2/AA','erg/s/cm2/Hz',flam_ccd,photband='LAMBDA.CCD')
>>> fnu_fromflam_bol = conversions.convert('erg/s/cm2/AA','erg/s/cm2/Hz',flam_bol,photband='LAMBDA.BOL')
Apparently, with the definition of pivot wavelength, you can safely convert from
Fnu to Flambda:
>>> print(fnu_ccd,fnu_fromflam_ccd)
(4.2591095543803019e-06, 4.259110447428463e-06)
>>> print(fnu_bol,fnu_fromflam_bol)
(5.2446332430111098e-06, 5.2446373530017525e-06)
The slight difference you see is numerical.
Section 2.2: Temporarily modifying an existing filter
-----------------------------------------------------
Under usual conditions, you are prohibited from overwriting an existing predefined
response curve. That is, if you try to L{add_custom_filter} with a C{photband}
that already exists as a file, a C{ValueError} will be raised (this is not the
case for a custom defined filter, which you can overwrite without problems!).
If, for testing purposes, you want to use another definition of a predefined
response curve, you need to set C{force=True} in L{add_custom_filter}, and then
call
>>> set_prefer_file(False)
To reset and use the original definitions again, do
>>> set_prefer_file(True)
Section 3.: Adding filters permanently
--------------------------------------
Add a new response curve file to the ivs/sed/filters directory. The file should
contain two columns, the first column is the wavelength in angstrom, the second
column is the transmission curve. The units of the latter are not important.
Then, call L{update_info}. The contents of C{zeropoints.dat} will automatically
be updated. Make sure to add any additional information on the new filters
manually in that file (e.g. is it a CCD or bolometer, what are the zeropoint
magnitudes etc).
"""
import os
import glob
from astropy.io import fits as pyfits
import logging
import numpy as np
from cc.ivs.aux.decorators import memoized
from cc.ivs.aux import decorators
from cc.ivs.aux import loggers
from cc.ivs.io import ascii
basedir = os.path.dirname(__file__)
logger = logging.getLogger("SED.FILT")
logger.addHandler(loggers.NullHandler())
custom_filters = {'_prefer_file':True}
#{ response curves
@memoized
def get_response(photband):
    """
    Retrieve the response curve of a photometric system 'SYSTEM.FILTER'

    OPEN.BOL represents a bolometric open filter.

    Example usage:

    >>> p = pl.figure()
    >>> for band in ['J','H','KS']:
    ...    p = pl.plot(*get_response('2MASS.%s'%(band)))

    If you defined a custom filter with the same name as an existing one and
    you want to use that one in the future, set C{prefer_file=False} in the
    C{custom_filters} module dictionary.

    @param photband: photometric passband
    @type photband: str ('SYSTEM.FILTER')
    @return: (wavelength [A], response)
    @rtype: (array, array)
    @raise IOError: if the photband exists neither on file nor as a custom filter
    """
    photband = photband.upper()
    prefer_file = custom_filters['_prefer_file']
    #-- OPEN.BOL is a synthetic, flat, effectively infinitely wide passband
    if photband=='OPEN.BOL':
        return np.array([1,1e10]),np.array([1/(1e10-1),1/(1e10-1)])
    #-- either get from file or get from dictionary
    photfile = os.path.join(basedir,'filters',photband)
    photfile_is_file = os.path.isfile(photfile)
    #-- if the file exists and files have preference
    if photfile_is_file and prefer_file:
        wave, response = ascii.read2array(photfile).T[:2]
    #-- if the custom_filter exist
    elif photband in custom_filters:
        wave, response = custom_filters[photband]['response']
    #-- if the file exists but custom filters have preference
    elif photfile_is_file:
        wave, response = ascii.read2array(photfile).T[:2]
    else:
        # fix: use exception-call syntax instead of the Python-2-only
        # "raise IOError, (...)" comma form (identical behaviour on
        # Python 2, valid syntax on Python 3)
        raise IOError('{0} does not exist {1}'.format(photband,custom_filters.keys()))
    #-- make sure the curve is returned sorted by wavelength
    sa = np.argsort(wave)
    return wave[sa],response[sa]
def create_custom_filter(wave,peaks,range=(3000,4000),sigma=3.):
    """
    Create a custom filter as a sum of Gaussians.

    N Gaussian components are centred evenly inside C{range} (endpoints
    excluded); component i has height C{peaks[i]}. All components share a
    single width derived from C{sigma}.

    @param wave: wavelength to evaluate the profile on
    @type wave: ndarray
    @param peaks: heights of the peaks
    @type peaks: ndarray of length N, with N peaks
    @param range: wavelength range of the peaks
    @type range: tuple
    @param sigma: width of the peaks in units of (range/N)
    @type sigma: float
    @return: filter profile
    @rtype: ndarray
    """
    # NOTE: parameter "range" shadows the builtin; name kept so existing
    # keyword callers keep working.
    low, high = range[0], range[1]
    # interior points of an even subdivision: drop the two endpoints
    centers = np.linspace(low, high, len(peaks) + 2)[1:-1]
    width = (high - low) / (sigma * len(peaks))
    components = [height * np.exp(-(wave - mu)**2 / (2.0 * width**2))
                  for height, mu in zip(peaks, centers)]
    return np.sum(components, axis=0)
def add_custom_filter(wave,response,**kwargs):
    """
    Add a custom filter to the set of predefined filters.

    Extra keywords are:
        'eff_wave', 'type',
        'vegamag', 'vegamag_lit',
        'ABmag', 'ABmag_lit',
        'STmag', 'STmag_lit',
        'Flam0', 'Flam0_units', 'Flam0_lit',
        'Fnu0', 'Fnu0_units', 'Fnu0_lit',
        'source'

    default C{type} is 'CCD'.
    default C{photband} is 'CUSTOM.FILTER'

    @param wave: wavelength (angstrom)
    @type wave: ndarray
    @param response: response
    @type response: ndarray
    @param photband: photometric passband
    @type photband: str ('SYSTEM.FILTER')
    @raise ValueError: if the photband already exists as a file and
    C{force=True} is not given
    """
    kwargs.setdefault('photband','CUSTOM.FILTER')
    kwargs.setdefault('copy_from','JOHNSON.V')
    kwargs.setdefault('force',False)
    photband = kwargs['photband']
    #-- check if the filter already exists:
    photfile = os.path.join(basedir,'filters',photband)
    if os.path.isfile(photfile) and not kwargs['force']:
        # fix: exception-call syntax instead of the Python-2-only
        # "raise ValueError, '...'" comma form
        raise ValueError('bandpass {0} already exists'.format(photfile))
    elif photband in custom_filters:
        logger.debug('Overwriting previous definition of {0}'.format(photband))
    custom_filters[photband] = dict(response=(wave,response))
    #-- set effective wavelength
    kwargs.setdefault('type','CCD')
    kwargs.setdefault('eff_wave',eff_wave(photband,det_type=kwargs['type']))
    #-- add info for zeropoints.dat file: make sure wherever "lit" is part of
    #   the name, we replace it with "0". Then, we overwrite any existing
    #   information with info given
    myrow = get_info([kwargs['copy_from']])
    for name in myrow.dtype.names:
        if 'lit' in name:
            myrow[name] = 0
        myrow[name] = kwargs.pop(name,myrow[name])
    #-- clear the memoized get_info cache so the new filter is picked up
    del decorators.memory[__name__]
    #-- add info:
    custom_filters[photband]['zp'] = myrow
    logger.debug('Added photband {0} to the predefined set'.format(photband))
def set_prefer_file(prefer_file=True):
    """
    Choose which definition wins when a filter exists both on disk and as a
    custom (in-memory) filter.

    @param prefer_file: if True, on-disk response curves take precedence
    @type prefer_file: bool
    """
    custom_filters['_prefer_file'] = prefer_file
    preference = 'files' if prefer_file else 'custom filters'
    logger.info("Prefering {}".format(preference))
def add_spectrophotometric_filters(R=200.,lambda0=950.,lambdan=3350.):
    """
    Add a ladder of rectangular (boxcar) filters at constant resolution R.

    Bin edges are spaced logarithmically between C{lambda0} and C{lambdan}
    so each bin has width lambda/R. Filters that were added previously are
    skipped (the ValueError from add_custom_filter is caught and logged).

    @param R: spectral resolution of the bins
    @param lambda0: first bin edge (angstrom)
    @param lambdan: last bin edge (angstrom)
    @return: names of the (existing or newly added) photbands
    @rtype: list of str
    """
    #-- STEP 1: logarithmically spaced bin edges at resolution R
    log_step = np.log10(1. + 1. / R)
    edges = 10**np.arange(np.log10(lambda0), np.log10(lambdan) + log_step, log_step)
    photbands = []
    for left, right in zip(edges[:-1], edges[1:]):
        #-- flat response inside the bin, forced to zero one sample outside it
        wave = np.linspace(left, right, 100)
        step = wave[1] - wave[0]
        wave = np.hstack([wave[0] - step, wave, wave[-1] + step])
        resp = np.hstack([0, np.ones(100), 0])
        photband = 'BOXCAR_R{0:d}.{1:d}'.format(int(R), int(left))
        try:
            add_custom_filter(wave, resp, photband=photband)
        except ValueError:
            logger.info('{0} already exists, skipping'.format(photband))
        photbands.append(photband)
    logger.info('Added spectrophotometric filters')
    return photbands
def list_response(name='*',wave_range=(-np.inf,+np.inf)):
    """
    List available response curves.

    Specify a glob string C{name} and/or a wavelength range to make a selection
    of all available curves. If nothing is supplied, all curves will be returned.

    @param name: list all curves containing this string
    @type name: str
    @param wave_range: list all curves within this wavelength range (A)
    @type wave_range: (float, float)
    @return: list of curve files
    @rtype: list of str
    """
    #-- wrap a bare substring in wildcards so glob treats it as "contains"
    if '*' in name:
        pattern = name
    else:
        pattern = '*' + name + '*'
    #-- on-disk response curves matching the pattern
    on_disk = sorted(glob.glob(os.path.join(basedir,'filters',pattern.upper())))
    #-- merge in matching custom filters (the '_prefer_file' flag is not one)
    customs = [key for key in custom_filters.keys()
               if ((name in key) and not (key=='_prefer_file'))]
    candidates = sorted(on_disk + customs)
    #-- human eye responses are not photometric passbands
    candidates = [cf for cf in candidates if not ('HUMAN' in cf or 'EYE' in cf)]
    #-- keep only curves whose effective wavelength lies in the requested range
    selected = [os.path.basename(cf) for cf in candidates
                if (wave_range[0]<=eff_wave(os.path.basename(cf))<=wave_range[1])]
    #-- log to the screen and return
    for curve_file in selected:
        logger.info(curve_file)
    return selected
def is_color(photband):
    """
    Return True if the photometric passband is actually a color.

    A passband counts as a color when its filter part contains a dash
    (e.g. 'JOHNSON.B-V') or is one of the Stromgren indices M1/C1.

    @param photband: name of the photometric passband
    @type photband: string
    @return: True or False
    @rtype: bool
    """
    band = photband.split('.')[1]
    return ('-' in band) or (band.upper() in ('M1', 'C1'))
def get_color_photband(photband):
    """
    Retrieve the individual photometric bands that make up a color.

    @param photband: name of the photometric passband
    @type photband: string
    @return: tuple of strings
    @rtype: tuple
    @raise ValueError: if the passband is not a recognized color
    """
    system,band = photband.split('.')
    band = band.strip() # remove extra spaces
    if '-' in band:
        parts = band.split('-')
    elif band.upper()=='M1':
        # Stromgren M1 index is built from v, b, y
        parts = ['V','B','Y']
    elif band.upper()=='C1':
        # Stromgren C1 index is built from v, b, u
        parts = ['V','B','U']
    else:
        raise ValueError('cannot recognize color {}'.format(photband))
    return tuple('%s.%s'%(system,iband) for iband in parts)
def make_color(photband):
    """
    Make a color from a color name and fluxes.

    You get two things: the photbands needed to construct the color, and a
    function to which you pass the corresponding fluxes to compute it.
    For example, C{make_color('JOHNSON.B-V')} returns
    C{('JOHNSON.B','JOHNSON.V')} plus a function computing C{fB/fV}.

    Note: the M1/C1 dispatch here is case sensitive, unlike
    L{get_color_photband}.

    @return: photbands, function to construct color
    @rtype: tuple,callable
    """
    system,band = photband.split('.')
    band = band.strip() # remove extra spaces
    photbands = get_color_photband(photband)
    if len(band.split('-'))==2:
        def combine(f0, f1):
            return f0/f1
    elif band=='M1':
        def combine(fv, fb, fy):
            return fv*fy/fb**2
    elif band=='C1':
        def combine(fv, fb, fu):
            return fu*fb/fv**2
    else:
        raise ValueError('cannot recognize color {}'.format(photband))
    return photbands,combine
def eff_wave(photband,model=None,det_type=None):
    """
    Return the effective wavelength of a photometric passband.

    The effective wavelength is defined as the average wavelength weighed with
    the response curve.

    >>> eff_wave('2MASS.J')
    12393.093155655277

    If you give model fluxes as an extra argument, the wavelengths will take
    these into account to calculate the `true' effective wavelength (e.g.,
    Van Der Bliek, 1996), eq 2.

    @param photband: photometric passband
    @type photband: str ('SYSTEM.FILTER') or array/list of str
    @param model: model wavelength and fluxes
    @type model: tuple of 1D arrays (wave,flux)
    @param det_type: detector type ('CCD' or 'BOL'); looked up per band when None
    @type det_type: str or None
    @return: effective wavelength [A]
    @rtype: float or numpy array
    """
    #-- if photband is a string, it's the name of a photband: put it in a container
    #   but unwrap afterwards
    # NOTE(review): `unicode` exists only on Python 2; this line breaks on
    # Python 3.
    if isinstance(photband,unicode):
        photband = str(photband)
    if isinstance(photband,str):
        single_band = True
        photband = [photband]
    #-- else, it is a container
    else:
        single_band = False
    my_eff_wave = []
    for iphotband in photband:
        try:
            wave,response = get_response(iphotband)
            #-- bolometric or ccd?
            # NOTE(review): once det_type is resolved for the first band it is
            # reused for every subsequent band in the list -- confirm intended.
            if det_type is None and len(get_info([iphotband])):
                det_type = get_info([iphotband])['type'][0]
            elif det_type is None:
                det_type = 'CCD'
            if model is None:
                #this_eff_wave = np.average(wave,weights=response)
                #-- pivot wavelength: energy counting (BOL) vs photon counting
                if det_type=='BOL':
                    this_eff_wave = np.sqrt(np.trapz(response,x=wave)/np.trapz(response/wave**2,x=wave))
                else:
                    this_eff_wave = np.sqrt(np.trapz(wave*response,x=wave)/np.trapz(response/wave,x=wave))
            else:
                #-- interpolate response curve onto higher resolution model and
                #   take weighted average
                is_response = response>1e-10
                # NOTE(review): start_response/end_response are computed but
                # never used below.
                start_response,end_response = wave[is_response].min(),wave[is_response].max()
                fluxm = np.sqrt(10**np.interp(np.log10(wave),np.log10(model[0]),np.log10(model[1])))
                if det_type=='CCD':
                    this_eff_wave = np.sqrt(np.trapz(wave*fluxm*response,x=wave) / np.trapz(fluxm*response/wave,x=wave))
                elif det_type=='BOL':
                    this_eff_wave = np.sqrt(np.trapz(fluxm*response,x=wave) / np.trapz(fluxm*response/wave**2,x=wave))
        #-- if the photband is not defined:
        except IOError:
            this_eff_wave = np.nan
        my_eff_wave.append(this_eff_wave)
    if single_band:
        my_eff_wave = my_eff_wave[0]
    else:
        my_eff_wave = np.array(my_eff_wave,float)
    return my_eff_wave
@memoized
def get_info(photbands=None):
    """
    Return a record array containing all filter information.

    The record arrays contains following columns:
        - photband
        - eff_wave
        - type
        - vegamag, vegamag_lit
        - ABmag, ABmag_lit
        - STmag, STmag_lit
        - Flam0, Flam0_units, Flam0_lit
        - Fnu0, Fnu0_units, Fnu0_lit,
        - source

    @param photbands: list of photbands to get the information from. The input
    order is equal to the output order. If C{None}, all filters are returned.
    @type photbands: iterable container (list, tuple, 1Darray)
    @return: record array containing all information on the requested photbands.
    @rtype: record array
    """
    zp_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),'zeropoints.dat')
    zp = ascii.read2recarray(zp_file)
    #-- append the zeropoint rows of any custom filters (skipping the
    #   '_prefer_file' flag entry, which is not a filter)
    for iph in custom_filters:
        if iph=='_prefer_file': continue
        if 'zp' in custom_filters[iph]:
            zp = np.hstack([zp,custom_filters[iph]['zp']])
    #-- searchsorted below requires the table to be sorted on photband
    zp = zp[np.argsort(zp['photband'])]
    #-- list photbands in order given, and remove those that do not have
    #   zeropoints etc.
    if photbands is not None:
        order = np.searchsorted(zp['photband'],photbands)
        zp = zp[order]
        # NOTE(review): searchsorted yields an insertion point even for
        # unknown photbands; the equality mask below drops those rows.
        keep = (zp['photband']==photbands)
        zp = zp[keep]
    return zp
def update_info(zp=None):
    """
    Update information in zeropoint file, e.g. after calibration.

    Call first L{cc.ivs.sed.model.calibrate} without arguments, and pass the output
    to this function.

    @param zp: updated contents from C{zeropoints.dat}
    @type zp: recarray
    """
    zp_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),'zeropoints.dat')
    zp_,comms = ascii.read2recarray(zp_file,return_comments=True)
    existing = [str(i.strip()) for i in zp_['photband']]
    #-- response-curve files on disk that have no zeropoint entry yet
    resp_files = sorted(glob.glob(os.path.join(os.path.dirname(os.path.abspath(__file__)),'filters/*')))
    resp_files = [os.path.basename(ff) for ff in resp_files if not os.path.basename(ff) in existing]
    #-- human-eye curves are not photometric passbands
    # NOTE(review): list.remove raises ValueError if any of these files is
    # absent from the filters directory -- confirm they always exist.
    resp_files.remove('HUMAN.EYE')
    resp_files.remove('HUMAN.CONES')
    resp_files.remove('CONES.EYE')
    if zp is None:
        zp = zp_
        logger.info('No new calibrations; previous information on existing response curves is copied')
    else:
        logger.info('Received new calibrations contents of zeropoints.dat will be updated')
    #-- update info on previously non existing response curves
    new_zp = np.zeros(len(resp_files),dtype=zp.dtype)
    logger.info('Found {} new response curves, adding them with default information'.format(len(resp_files)))
    for i,respfile in enumerate(resp_files):
        new_zp[i]['photband'] = respfile
        new_zp[i]['eff_wave'] = float(eff_wave(respfile))
        new_zp[i]['type'] = 'CCD'
        new_zp[i]['vegamag'] = np.nan
        new_zp[i]['ABmag'] = np.nan
        new_zp[i]['STmag'] = np.nan
        new_zp[i]['Flam0_units'] = 'erg/s/cm2/AA'
        new_zp[i]['Fnu0_units'] = 'erg/s/cm2/AA'
        new_zp[i]['source'] = 'nan'
    zp = np.hstack([zp,new_zp])
    sa = np.argsort(zp['photband'])
    # NOTE(review): reads from the module directory but writes
    # 'zeropoints.dat' into the current working directory -- confirm this
    # asymmetry is intended.
    ascii.write_array(zp[sa],'zeropoints.dat',header=True,auto_width=True,comments=['#'+line for line in comms[:-2]],use_float='%g')
if __name__=="__main__":
    # No CLI arguments: run the module doctests. With any argument:
    # regenerate the documentation plots, one figure per photometric system.
    import sys
    import pylab as pl
    if not sys.argv[1:]:
        import doctest
        doctest.testmod()
        pl.show()
    else:
        import itertools
        responses = list_response()
        systems = [response.split('.')[0] for response in responses]
        set_responses = sorted(set([response.split('.')[0] for response in systems]))
        this_filter = 0
        for i,resp in enumerate(responses):
            # what system is this, and how many filters are in this system?
            this_system = resp.split('.')[0]
            nr_filters = systems.count(this_system)
            # call the plot containing the filters of the same system. If this is the
            # the first time the plot is called (first filter of system), then set
            # the title and color cycle
            p = pl.figure(set_responses.index(this_system),figsize=(10,4.5))
            if not hasattr(pl.gca(),'color_cycle'):
                color_cycle = itertools.cycle([pl.cm.spectral(j) for j in np.linspace(0, 1.0, nr_filters)])
                p = pl.gca().color_cycle = color_cycle
            # NOTE(review): iterator .next() is Python-2-only (use next(it)
            # on Python 3).
            color = pl.gca().color_cycle.next()
            p = pl.title(resp.split('.')[0])
            # get the response curve and plot it
            wave,trans = get_response(resp)
            p = pl.plot(wave/1e4,trans,label=resp,color=color)
            # and set labels
            p = pl.xlabel('Wavelength [micron]')
            p = pl.ylabel('Transmission')
            # if there are not more filters in this systems, save the plot to a file
            # and close it
            this_filter+=1
            if this_filter==nr_filters:
                this_filter = 0
                p = pl.legend(prop=dict(size='small'))
                # NOTE(review): hard-coded absolute output path -- only valid
                # on the original author's machine.
                p = pl.savefig('/home/pieterd/python/ivs/doc/images/ivs_sed_filters_%s'%(this_system));p = pl.close()
|
MarieVdS/ComboCode
|
cc/ivs/sed/filters.py
|
Python
|
gpl-3.0
| 26,787
|
[
"Gaussian"
] |
6222f44a4ea6e456832a7c04bd33d3f75ae8d9b5da2c4db24410e552025f0b82
|
#!/usr/bin/python
# Python name convention : http://stackoverflow.com/questions/159720/what-is-the-naming-convention-in-python-for-variable-and-function-names
import csv
import os
import math
import numpy
from Bio import PDB, pairwise2
from Bio.SeqUtils import seq1
'''
Created on 17 nov. 2015
@author: 0mician
## Remarks:
## - Hierarchy of the PDB file
## 1- Structure
## 2- Model
## 3- Chains
## 4- Residues
## - Difference between SEQRES and ATOM
## SEQRES record give the whole full sequence
## ATOM records give the structural information each amino acid residue
## if there is any structural information available! Sometimes, there
## is no data available: check the broken regions at
## http://www.rcsb.org/pdb/explore/remediatedSequence.do?structureId=1BO1
'''
# Retrieve SEQRES record : http://www.bioinfopoint.com/index.php/code/41-extracting-the-sequence-from-a-protein-structure-using-biopython
# handle = open("data/pdb"+pdb_id+".ent", "rU")
# for record in SeqIO.parse(handle, "pdb-seqres") :
# # Get first letter : record.seq[0]
# print ">" + record.id + "\n" + record.seq
# handle.close()
'''
Important functions to debug:
- dir(x) : get all attributes and method available for that object
'''
# Delete multiple elements in list : http://stackoverflow.com/questions/497426/deleting-multiple-elements-from-a-list
pdbl=PDB.PDBList()
def findOccurences(s, ch):
    """Return the indices of every occurrence of character *ch* in string *s*."""
    positions = []
    for idx, letter in enumerate(s):
        if letter == ch:
            positions.append(idx)
    return positions
# Function : http://www.tutorialspoint.com/python/python_functions.htm
# Always define functions before you call it
''' Download and get the structure object of the given PDB structure with
the given pdb_id '''
def get_pdb( pdb_id ):
    """Return a Bio.PDB Structure object for *pdb_id*, downloading the
    pdb<id>.ent file into ./data first if it is not cached there yet."""
    pdb_file_path = "data/pdb"+pdb_id+".ent"
    ''' Check if PDB file exists otherwise download from pdb.org ''' # http://stackoverflow.com/questions/2259382/pythonic-way-to-check-if-a-file-exists
    if os.path.isfile(pdb_file_path) is False:
        pdbl.retrieve_pdb_file(pdb_id,pdir="data")
        # NOTE(review): message is printed after the download has completed
        print "Downloading PDB structure "+ pdb_id +" from http://www.pdb.org ..."
    # PERMISSIVE=1 lets the parser tolerate minor PDB-format violations
    parser = PDB.PDBParser(PERMISSIVE=1)
    pdb = parser.get_structure(pdb_id,"data/pdb"+pdb_id+".ent")
    return pdb
# ''' Remove all water molecules residues in the sequence '''
# chain_seq = filter(lambda a: a != "HOH", chain_seq)
''' Return the sequence (both in raw and structured format) of
the given chain in
the given model (PDB model)
with water molecules residues if given with_h2o is true and
in 3 letter code if given _1lc is false otherwise return the sequence in 1 letter code '''
def get_pdb_chain_partial_seq (model, chain, _1lc, with_h2o):
    """Return [residue objects, residue-name strings] for the given chain of
    the given PDB model.

    Water residues are dropped unless *with_h2o* is True; residue names are
    converted to 1-letter code when *_1lc* is True.

    NOTE(review): when *_1lc* is True, residue.resname is overwritten in
    place, i.e. the parsed Bio.PDB structure itself is mutated.
    """
    chain=model[chain]
    chain_seq = []
    chain_seq_raw = []
    for residue in chain.get_residues():
        residue_name = residue.get_resname()
        # Remove all occurences of a value in list : http://stackoverflow.com/questions/1157106/remove-all-occurences-of-a-value-from-a-python-list
        ''' Check if sequence with water molecules has to be returned '''
        if with_h2o is False:
            ''' Remove all water molecules residues in the sequence '''
            if residue_name == "HOH":
                continue
        ''' Check if 1 letter code sequence has to be returned '''
        if _1lc is True:
            ''' Convert 3 letter code protein sequence to 1 letter code protein sequence ''' #http://biopython.org/DIST/docs/api/Bio.SeqUtils-module.html#seq1
            residue.resname = seq1(residue_name) # Change variable resname
        chain_seq.append(residue)
        chain_seq_raw.append(residue.resname)
    return [chain_seq, chain_seq_raw]
'''
Return the enriched Scheeff sequence alignment as a list of residues structured object
containing all structural information (e.g.: coordinates of all atoms for each residue)
given the:
- PDB id
- Scheeff sequence alignment (see Scheeff et al. nexus file)
Meaning of NULL values:
- Gap
- No structural information from the PDB file
'''
def build_enr_seq_aln (pdb_id,chain_id,scheeff_aln_seq):
    """
    Return the enriched Scheeff sequence alignment as a list: one entry per
    alignment column, holding either the structured Bio.PDB residue object
    (with coordinates) or None.

    None means: a gap in the Scheeff alignment, or no structural information
    for that residue in the PDB ATOM records.

    @param pdb_id: PDB identifier (e.g. '1bo1')
    @param chain_id: chain letter within the PDB model
    @param scheeff_aln_seq: aligned 1-letter sequence from Scheeff et al.
    """
    print ("Building the enriched sequence alignment for protein "+pdb_id+" ...")
    ''' Get all indexes of gaps in the Scheeff CE-like-manual structural alignement
    performed in the paper Scheeff et al.'''
    scheeff_struct_aln_gaps_indices = findOccurences(scheeff_aln_seq, "-")
    scheeff_aln_seq = scheeff_aln_seq.replace("-", "")
    model=get_pdb(pdb_id)[0]
    print ("Chain selected: "+chain_id.upper())
    pdb_partial_seq = get_pdb_chain_partial_seq(model, chain_id.upper(), True, False)
    pdb_partial_seq_raw = ''.join(pdb_partial_seq[1])
    ''' Store a list of all residues from the PDB protein sequence found to be present in
    the Scheeff CE (Combinatorial Extension), manual protein sequence alignment '''
    map_sequence = []
    # Align the (ungapped) Scheeff sequence against the partial PDB sequence
    # (residues with structural information only). Scoring: +1 match, -30
    # mismatch, -5 gap open, -0.01 gap extend -- i.e. forbid mismatches and
    # favour few, long gaps.
    print ("Align sequence alignment of protein with its PDB sequence from ATOM records...")
    aln = pairwise2.align.globalms(scheeff_aln_seq, pdb_partial_seq_raw,1,-30,-5,-0.01)
    # NOTE(review): several alignments can share the maximum score (e.g. for
    # 1bo1); the first is taken arbitrarily.
    pdb_partial_seq_aln = aln[0][1]
    ''' Get all indexes of the tail gaps in the Scheeff sequence alignement '''
    scheeff_seq_aln_tail_gaps_indices = findOccurences(aln[0][0], "-")
    ''' Get all indexes of the gaps in the partial PDB sequence alignement '''
    pdb_partial_seq_aln_gaps_indices = findOccurences(aln[0][1], "-")
    # sets for O(1) membership tests inside the loop (the index lists can be
    # long and "i in list" made the loop quadratic)
    tail_gap_columns = set(scheeff_seq_aln_tail_gaps_indices)
    pdb_gap_columns = set(pdb_partial_seq_aln_gaps_indices)
    nb_internal_gaps = 0
    for i in range(len(pdb_partial_seq_aln)):
        if i in tail_gap_columns:
            continue
        if i in pdb_gap_columns:
            # residue without structural information in the ATOM records
            nb_internal_gaps += 1
            map_sequence.append(None)
            continue
        # shift by the number of gaps seen so far to index the ungapped list
        map_sequence.append(pdb_partial_seq[0][i-nb_internal_gaps])
    '''
    Re-insert all the gaps from the initial Scheeff CE,manual structural
    alignment at their original column positions.
    '''
    # fix: use an explicit loop instead of map(lambda ...). Under Python 3,
    # map() is lazy, so the side-effecting lambda would silently never run.
    for gap_index in scheeff_struct_aln_gaps_indices:
        map_sequence.insert(gap_index, None)
    return map_sequence
def get_atom(residue, atom_id):
    """
    Return the atom named `atom_id` from a Biopython residue, or None if
    the residue has no such atom.
    Bug fix / generalization: the original ignored `atom_id` and always
    looked up "CA"; every current caller passes "CA", so behaviour for
    existing callers is unchanged.
    """
    if residue.has_id(atom_id):
        return residue[atom_id]
    # Atom missing from this residue (e.g. incomplete PDB record)
    return None
'''
Return as list
- a pair list of equivalent atoms given the first protein sequence
alignment prot_1_seq_aln and given the second protein sequence alignment
prot_2_seq_aln as an enriched pair list of atom objects
- the number of equivalent atoms between the two given protein sequence
alignments
'''
def build_pair_list_atoms(prot_1_seq_aln, prot_2_seq_aln):
    """
    Build the lists of 'equivalent' C-alpha atoms between two aligned,
    structure-enriched protein sequences.
    :param prot_1_seq_aln: enriched alignment for protein 1 (list of
        Biopython residues, with None at gap/unresolved positions)
    :param prot_2_seq_aln: enriched alignment for protein 2 (same format)
    :return: [ref_atoms, alt_atoms, nb_match_atoms] where the two atom
        lists are index-aligned and nb_match_atoms is their length
    """
    print("Building pair list of 'equivalent' atoms between protein sequences...")
    nb_match_atoms = 0
    ref_atoms = []
    alt_atoms = []
    # Walk both alignments in lockstep (they have equal length by construction)
    for el_1, el_2 in zip(prot_1_seq_aln, prot_2_seq_aln):
        # RMSD is computed only over equivalent residues: skip positions
        # that are a gap or lack structural information in either protein.
        # (idiom fix: `is None` instead of `== None`)
        if el_1 is None or el_2 is None:
            continue
        nb_match_atoms += 1
        ref_atoms.append(get_atom(el_1, "CA"))
        alt_atoms.append(get_atom(el_2, "CA"))
    return [ref_atoms, alt_atoms, nb_match_atoms]
'''
Return the root mean squared distance of pairwise C-alpha atoms of each amino acid
between the first given protein sequence alignment prot_1_seq_aln and the second
given protein sequence alignment prot_2_seq_aln both having been aligned using
any available method.
# Remark:
# - We cannot just compute the RMDS between the pairwise residues by picking
# straight away the coordinates of both residues. Before computing this
# statistic we need to rotate and translating one protein structure such
# that the selected residues are best superposing each other i.e.: minimizing
# the mean square root deviation.
#
'''
def get_rmsd_prots_aln(ref_atoms, alt_atoms):
    """
    Superimpose the `alt_atoms` list onto `ref_atoms` (optimal rotation +
    translation minimizing the mean square deviation) and return the
    resulting RMSD.
    """
    print("Superposing proteins structures to minimize RMSD...")
    # Biopython's Superimposer computes the optimal transform from the
    # two paired atom lists, then applies it to the alternate structure.
    sup = PDB.Superimposer()
    sup.set_atoms(ref_atoms, alt_atoms)
    sup.apply(alt_atoms)  # moves the atoms in place
    return sup.rms
'''
Return the normalize RMSD given the reference value, chosen here as the value of
the fitted RMSD curve at 100 residues, rmsd 100 (see O. Carugo,S. Pongor paper)
'''
def norm_rmsd(rmsd, N):
    """
    Normalize an RMSD over N matched residue pairs to the RMSD100 scale
    (reference value at 100 residues, per the O. Carugo / S. Pongor fit).
    """
    print("Number of matched pairs: %s" % (N))
    # natural-log normalization factor from the fitted RMSD curve
    denominator = 1 + math.log(math.sqrt(float(N) / 100))
    return rmsd / denominator
# http://stackoverflow.com/questions/2572916/numpy-smart-symmetric-matrix
def symmetrize(a):
    """Fill the empty triangle of `a` by symmetry (a[i][j] = a[j][i])."""
    # Adding the transpose doubles the diagonal, so subtract it once.
    diagonal_part = numpy.diag(a.diagonal())
    return (a + a.T) - diagonal_part
''' Distance Similarity Alignment Tool
Compute a RMDS distance matrix between all pairwise pdb protein structure '''
class PyRAT(object):
    """Distance Similarity Alignment Tool.

    Reads the kinase structural alignment table, computes a normalized
    pairwise RMSD matrix over all protein pairs, and dumps the
    symmetrized matrix to Scheeff_rmsd_mat.csv.
    """
    def __init__(self, string):
        # NOTE(review): `string` is accepted but never used — confirm intent.
        with open('data/kinases_struct_alignment.dat', 'rb') as csvfile:
            reader = csv.reader(csvfile, delimiter='\t', quotechar='|')
            pdb_list = []
            for row in reader:
                # col 0 is presumably ">" + 4-char PDB id + chain id — TODO confirm
                protein_name = row[0][1:5]
                protein_chain = row[0][5]
                protein_seq = row[1]
                pdb_list.append([protein_name, protein_chain, protein_seq])
            print(pdb_list)
            nb_proteins = len(pdb_list)
            rmsd_matrix = numpy.zeros((nb_proteins, nb_proteins))
            # Bug fix: was `nb_proteins/2*(nb_proteins-1)`, which floors
            # nb_proteins/2 first and undercounts the number of pairs
            # whenever nb_proteins is odd. n-choose-2 is n*(n-1)//2.
            nb_dist_to_compute = nb_proteins * (nb_proteins - 1) // 2
            completed = 0
            # Compute the distance similarity matrix between all protein pairs
            for i in range(len(pdb_list)):
                for j in range(len(pdb_list)):
                    # Matrix is symmetric: compute only the upper triangle
                    if j <= i:
                        continue
                    # "_" means no explicit chain selection -> default chain A
                    chain_sel_i = chain_sel_j = "A"
                    protein_i = pdb_list[i]
                    protein_j = pdb_list[j]
                    if protein_i[1] != "_":
                        chain_sel_i = protein_i[1]
                    if protein_j[1] != "_":
                        chain_sel_j = protein_j[1]
                    map_seq_i = build_enr_seq_aln(protein_i[0], chain_sel_i, protein_i[2])
                    map_seq_j = build_enr_seq_aln(protein_j[0], chain_sel_j, protein_j[2])
                    paired_atom_lists = build_pair_list_atoms(map_seq_i, map_seq_j)
                    rmsd = get_rmsd_prots_aln(paired_atom_lists[0], paired_atom_lists[1])
                    print("RMSD ["+protein_i[0]+"/"+protein_j[0]+"]: %0.3f" % (rmsd))
                    n_rmsd = norm_rmsd(rmsd, paired_atom_lists[2])
                    print("Normalized RMSD ["+protein_i[0]+"/"+protein_j[0]+"]: %0.3f" % (n_rmsd))
                    rmsd_matrix[i][j] = n_rmsd
                    completed += 1
                    percent_completed = float(completed)/nb_dist_to_compute*100
                    print("Completed: %0.3f %%" % (percent_completed))
            # Fill the lower triangle by symmetry and dump the matrix to CSV
            numpy.savetxt("Scheeff_rmsd_mat.csv", symmetrize(rmsd_matrix), delimiter=",")
''' Run PyRAT '''
# Instantiating PyRAT runs the whole pipeline from __init__;
# the constructor argument is currently unused.
PyRAT("")
#secondary_structure, accessibility=dssp[(chain_id, res_id)]
# ''' Run the DSATool for particular cases '''
# pdb_id = "1f3m"
# scheeff_aln_seq = "YTRF-EKI-GQG-ASGTVYTAMDVA--------TGQEVAIKQMNLQQ---------QP-------KKE--LIINEILVMRENK----------------NPNIVNYLDSYLVG-------------------------------DELWVVMEYLA------GGSL-------------------------TDVVTET-------------------------CMD----------------EGQIAAVCRECLQALEFLHS--------NQ-----------------------------------------------------------------VIHRDI---------KSDNILLGM----------------------------------------------------------------------------DGSVKLTDFGFCAQITPEQ----SKR---STMVGTPYWMAPEVVTR------KA----YG-----------------PKVDIWSLGIMAIEMIEG----------E-PPYLNE-------NPLRALYLIAT-NG---------------------------------------TP--EL--Q----NPEK------------LSAIFRDFLNRCLDMDVEKRGS------AKELLQHQFLKI"
# map_seq_1 = build_enr_seq_aln(pdb_id, "C", scheeff_aln_seq)
# print map_seq_1
#
# pdb_id = "1o6y"
# scheeff_aln_seq = "YELG-EIL-GFG-GMSEVHLARDLR--------LHRDVAVKVLRADL------ARDPS-------FYL--RFRREAQNAAALN----------------HPAIVAVYDTGEAETP---------------------------AGPLPYIVMEYVD------GVTL-------------------------RDIVHTE------------------------GPMT----------------PKRAIEVIADACQALNFSHQ--------NG-----------------------------------------------------------------IIHRDV---------KPANIMISA----------------------------------------------------------------------------TNAVKVMDFGIARAIADSGNS--VTQT--AAVIGTAQYLSPEQARG------DS----VD-----------------ARSDVYSLGCVLYEVLTG----------E-PPFTGD-------SPVSVAYQHVR-ED----------------------------------------P--IP--PS-A-RHEG------------LSADLDAVVLKALAKNPENRYQT-----AAEMRAD-LVRV"
# map_seq_2 = build_enr_seq_aln(pdb_id, "A", scheeff_aln_seq)
# print map_seq_2
#
# paired_atom_lists = build_pair_list_atoms(map_seq_1, map_seq_2)
# rmsd = get_rmsd_prots_aln(paired_atom_lists[0], paired_atom_lists[1])
# # Number to string : http://stackoverflow.com/questions/22617/format-numbers-to-strings-in-python
# print "RMSD: %0.3f" % (rmsd)
# n_rmsd = norm_rmsd(rmsd,paired_atom_lists[2])
# print "Normalized RMSD: %0.3f" % (n_rmsd)
|
maxdy/BioTools
|
PyRAT/main.py
|
Python
|
mit
| 18,402
|
[
"Biopython"
] |
bd029a5f2720a2719020b26c0c2fd33e20eff7a305bec6e3e9cba5d3c1cb84fb
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import numpy as np
from pyspark import SparkContext, since
from pyspark.mllib.common import callMLlibFunc, inherit_doc
from pyspark.mllib.linalg import Vectors, SparseVector, _convert_to_vector
from pyspark.sql import DataFrame
class MLUtils(object):
    """
    Helper methods to load, save and pre-process data used in MLlib.
    .. versionadded:: 1.0.0
    """
    @staticmethod
    def _parse_libsvm_line(line):
        """
        Parses a line in LIBSVM format into (label, indices, values).
        """
        items = line.split(None)
        label = float(items[0])
        nnz = len(items) - 1
        indices = np.zeros(nnz, dtype=np.int32)
        values = np.zeros(nnz)
        for i in range(nnz):
            index, value = items[1 + i].split(":")
            # LIBSVM indices are one-based; they are stored zero-based here
            indices[i] = int(index) - 1
            values[i] = float(value)
        return label, indices, values
    @staticmethod
    def _convert_labeled_point_to_libsvm(p):
        """Converts a LabeledPoint to a string in LIBSVM format."""
        from pyspark.mllib.regression import LabeledPoint
        assert isinstance(p, LabeledPoint)
        items = [str(p.label)]
        v = _convert_to_vector(p.features)
        if isinstance(v, SparseVector):
            nnz = len(v.indices)
            for i in range(nnz):
                # convert back to one-based indices for the LIBSVM text format
                items.append(str(v.indices[i] + 1) + ":" + str(v.values[i]))
        else:
            for i in range(len(v)):
                items.append(str(i + 1) + ":" + str(v[i]))
        return " ".join(items)
    @staticmethod
    @since("1.0.0")
    def loadLibSVMFile(sc, path, numFeatures=-1, minPartitions=None):
        """
        Loads labeled data in the LIBSVM format into an RDD of
        LabeledPoint. The LIBSVM format is a text-based format used by
        LIBSVM and LIBLINEAR. Each line represents a labeled sparse
        feature vector using the following format:
        label index1:value1 index2:value2 ...
        where the indices are one-based and in ascending order. This
        method parses each line into a LabeledPoint, where the feature
        indices are converted to zero-based.
        :param sc: Spark context
        :param path: file or directory path in any Hadoop-supported file
                     system URI
        :param numFeatures: number of features, which will be determined
                            from the input data if a nonpositive value
                            is given. This is useful when the dataset is
                            already split into multiple files and you
                            want to load them separately, because some
                            features may not present in certain files,
                            which leads to inconsistent feature
                            dimensions.
        :param minPartitions: min number of partitions
        :return: labeled data stored as an RDD of LabeledPoint
        >>> from tempfile import NamedTemporaryFile
        >>> from pyspark.mllib.util import MLUtils
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> tempFile = NamedTemporaryFile(delete=True)
        >>> _ = tempFile.write(b"+1 1:1.0 3:2.0 5:3.0\\n-1\\n-1 2:4.0 4:5.0 6:6.0")
        >>> tempFile.flush()
        >>> examples = MLUtils.loadLibSVMFile(sc, tempFile.name).collect()
        >>> tempFile.close()
        >>> examples[0]
        LabeledPoint(1.0, (6,[0,2,4],[1.0,2.0,3.0]))
        >>> examples[1]
        LabeledPoint(-1.0, (6,[],[]))
        >>> examples[2]
        LabeledPoint(-1.0, (6,[1,3,5],[4.0,5.0,6.0]))
        """
        from pyspark.mllib.regression import LabeledPoint
        lines = sc.textFile(path, minPartitions)
        parsed = lines.map(lambda l: MLUtils._parse_libsvm_line(l))
        if numFeatures <= 0:
            # cache: `parsed` is traversed twice when the dimension must be inferred
            parsed.cache()
            numFeatures = parsed.map(lambda x: -1 if x[1].size == 0 else x[1][-1]).reduce(max) + 1
        return parsed.map(lambda x: LabeledPoint(x[0], Vectors.sparse(numFeatures, x[1], x[2])))
    @staticmethod
    @since("1.0.0")
    def saveAsLibSVMFile(data, dir):
        """
        Save labeled data in LIBSVM format.
        :param data: an RDD of LabeledPoint to be saved
        :param dir: directory to save the data
        >>> from tempfile import NamedTemporaryFile
        >>> from fileinput import input
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> from glob import glob
        >>> from pyspark.mllib.util import MLUtils
        >>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, 1.23), (2, 4.56)])),
        ...             LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
        >>> tempFile = NamedTemporaryFile(delete=True)
        >>> tempFile.close()
        >>> MLUtils.saveAsLibSVMFile(sc.parallelize(examples), tempFile.name)
        >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
        '0.0 1:1.01 2:2.02 3:3.03\\n1.1 1:1.23 3:4.56\\n'
        """
        lines = data.map(lambda p: MLUtils._convert_labeled_point_to_libsvm(p))
        lines.saveAsTextFile(dir)
    @staticmethod
    @since("1.1.0")
    def loadLabeledPoints(sc, path, minPartitions=None):
        """
        Load labeled points saved using RDD.saveAsTextFile.
        :param sc: Spark context
        :param path: file or directory path in any Hadoop-supported file
                     system URI
        :param minPartitions: min number of partitions
        :return: labeled data stored as an RDD of LabeledPoint
        >>> from tempfile import NamedTemporaryFile
        >>> from pyspark.mllib.util import MLUtils
        >>> from pyspark.mllib.regression import LabeledPoint
        >>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, -1.23), (2, 4.56e-7)])),
        ...             LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
        >>> tempFile = NamedTemporaryFile(delete=True)
        >>> tempFile.close()
        >>> sc.parallelize(examples, 1).saveAsTextFile(tempFile.name)
        >>> MLUtils.loadLabeledPoints(sc, tempFile.name).collect()
        [LabeledPoint(1.1, (3,[0,2],[-1.23,4.56e-07])), LabeledPoint(0.0, [1.01,2.02,3.03])]
        """
        minPartitions = minPartitions or min(sc.defaultParallelism, 2)
        return callMLlibFunc("loadLabeledPoints", sc, path, minPartitions)
    @staticmethod
    @since("1.5.0")
    def appendBias(data):
        """
        Returns a new vector with `1.0` (bias) appended to
        the end of the input vector.
        """
        vec = _convert_to_vector(data)
        if isinstance(vec, SparseVector):
            # preserve sparsity: the bias becomes one extra stored entry
            newIndices = np.append(vec.indices, len(vec))
            newValues = np.append(vec.values, 1.0)
            return SparseVector(len(vec) + 1, newIndices, newValues)
        else:
            return _convert_to_vector(np.append(vec.toArray(), 1.0))
    @staticmethod
    @since("1.5.0")
    def loadVectors(sc, path):
        """
        Loads vectors saved using `RDD[Vector].saveAsTextFile`
        with the default number of partitions.
        """
        return callMLlibFunc("loadVectors", sc, path)
    @staticmethod
    @since("2.0.0")
    def convertVectorColumnsToML(dataset, *cols):
        """
        Converts vector columns in an input DataFrame from the
        :py:class:`pyspark.mllib.linalg.Vector` type to the new
        :py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml`
        package.
        :param dataset:
          input dataset
        :param cols:
          a list of vector columns to be converted.
          New vector columns will be ignored. If unspecified, all old
          vector columns will be converted excepted nested ones.
        :return:
          the input dataset with old vector columns converted to the
          new vector type
        >>> import pyspark
        >>> from pyspark.mllib.linalg import Vectors
        >>> from pyspark.mllib.util import MLUtils
        >>> df = spark.createDataFrame(
        ...     [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))],
        ...     ["id", "x", "y"])
        >>> r1 = MLUtils.convertVectorColumnsToML(df).first()
        >>> isinstance(r1.x, pyspark.ml.linalg.SparseVector)
        True
        >>> isinstance(r1.y, pyspark.ml.linalg.DenseVector)
        True
        >>> r2 = MLUtils.convertVectorColumnsToML(df, "x").first()
        >>> isinstance(r2.x, pyspark.ml.linalg.SparseVector)
        True
        >>> isinstance(r2.y, pyspark.mllib.linalg.DenseVector)
        True
        """
        if not isinstance(dataset, DataFrame):
            raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
        # actual conversion happens on the JVM side
        return callMLlibFunc("convertVectorColumnsToML", dataset, list(cols))
    @staticmethod
    @since("2.0.0")
    def convertVectorColumnsFromML(dataset, *cols):
        """
        Converts vector columns in an input DataFrame to the
        :py:class:`pyspark.mllib.linalg.Vector` type from the new
        :py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml`
        package.
        :param dataset:
          input dataset
        :param cols:
          a list of vector columns to be converted.
          Old vector columns will be ignored. If unspecified, all new
          vector columns will be converted except nested ones.
        :return:
          the input dataset with new vector columns converted to the
          old vector type
        >>> import pyspark
        >>> from pyspark.ml.linalg import Vectors
        >>> from pyspark.mllib.util import MLUtils
        >>> df = spark.createDataFrame(
        ...     [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))],
        ...     ["id", "x", "y"])
        >>> r1 = MLUtils.convertVectorColumnsFromML(df).first()
        >>> isinstance(r1.x, pyspark.mllib.linalg.SparseVector)
        True
        >>> isinstance(r1.y, pyspark.mllib.linalg.DenseVector)
        True
        >>> r2 = MLUtils.convertVectorColumnsFromML(df, "x").first()
        >>> isinstance(r2.x, pyspark.mllib.linalg.SparseVector)
        True
        >>> isinstance(r2.y, pyspark.ml.linalg.DenseVector)
        True
        """
        if not isinstance(dataset, DataFrame):
            raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
        return callMLlibFunc("convertVectorColumnsFromML", dataset, list(cols))
    @staticmethod
    @since("2.0.0")
    def convertMatrixColumnsToML(dataset, *cols):
        """
        Converts matrix columns in an input DataFrame from the
        :py:class:`pyspark.mllib.linalg.Matrix` type to the new
        :py:class:`pyspark.ml.linalg.Matrix` type under the `spark.ml`
        package.
        :param dataset:
          input dataset
        :param cols:
          a list of matrix columns to be converted.
          New matrix columns will be ignored. If unspecified, all old
          matrix columns will be converted excepted nested ones.
        :return:
          the input dataset with old matrix columns converted to the
          new matrix type
        >>> import pyspark
        >>> from pyspark.mllib.linalg import Matrices
        >>> from pyspark.mllib.util import MLUtils
        >>> df = spark.createDataFrame(
        ...     [(0, Matrices.sparse(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]),
        ...     Matrices.dense(2, 2, range(4)))], ["id", "x", "y"])
        >>> r1 = MLUtils.convertMatrixColumnsToML(df).first()
        >>> isinstance(r1.x, pyspark.ml.linalg.SparseMatrix)
        True
        >>> isinstance(r1.y, pyspark.ml.linalg.DenseMatrix)
        True
        >>> r2 = MLUtils.convertMatrixColumnsToML(df, "x").first()
        >>> isinstance(r2.x, pyspark.ml.linalg.SparseMatrix)
        True
        >>> isinstance(r2.y, pyspark.mllib.linalg.DenseMatrix)
        True
        """
        if not isinstance(dataset, DataFrame):
            raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
        return callMLlibFunc("convertMatrixColumnsToML", dataset, list(cols))
    @staticmethod
    @since("2.0.0")
    def convertMatrixColumnsFromML(dataset, *cols):
        """
        Converts matrix columns in an input DataFrame to the
        :py:class:`pyspark.mllib.linalg.Matrix` type from the new
        :py:class:`pyspark.ml.linalg.Matrix` type under the `spark.ml`
        package.
        :param dataset:
          input dataset
        :param cols:
          a list of matrix columns to be converted.
          Old matrix columns will be ignored. If unspecified, all new
          matrix columns will be converted except nested ones.
        :return:
          the input dataset with new matrix columns converted to the
          old matrix type
        >>> import pyspark
        >>> from pyspark.ml.linalg import Matrices
        >>> from pyspark.mllib.util import MLUtils
        >>> df = spark.createDataFrame(
        ...     [(0, Matrices.sparse(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]),
        ...     Matrices.dense(2, 2, range(4)))], ["id", "x", "y"])
        >>> r1 = MLUtils.convertMatrixColumnsFromML(df).first()
        >>> isinstance(r1.x, pyspark.mllib.linalg.SparseMatrix)
        True
        >>> isinstance(r1.y, pyspark.mllib.linalg.DenseMatrix)
        True
        >>> r2 = MLUtils.convertMatrixColumnsFromML(df, "x").first()
        >>> isinstance(r2.x, pyspark.mllib.linalg.SparseMatrix)
        True
        >>> isinstance(r2.y, pyspark.ml.linalg.DenseMatrix)
        True
        """
        if not isinstance(dataset, DataFrame):
            raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
        return callMLlibFunc("convertMatrixColumnsFromML", dataset, list(cols))
class Saveable(object):
    """
    Mixin for models and transformers which may be saved as files.
    .. versionadded:: 1.3.0
    """
    def save(self, sc, path):
        """
        Persist this model under ``path``.
        A concrete implementation writes:
        * human-readable (JSON) model metadata to path/metadata/
        * Parquet formatted data to path/data/
        A saved model can later be restored with :py:meth:`Loader.load`.
        :param sc: Spark context used to save model data.
        :param path: Path specifying the directory in which to save
                     this model. If the directory already exists,
                     this method throws an exception.
        """
        raise NotImplementedError
@inherit_doc
class JavaSaveable(Saveable):
    """
    Mixin for models that provide save() through their Scala
    implementation.
    .. versionadded:: 1.3.0
    """
    @since("1.3.0")
    def save(self, sc, path):
        """Save this model to the given path."""
        # Validate arguments eagerly so failures surface as Python
        # TypeErrors rather than opaque Py4J errors from the JVM.
        if not isinstance(sc, SparkContext):
            raise TypeError("sc should be a SparkContext, got type %s" % type(sc))
        if not isinstance(path, str):
            raise TypeError("path should be a string, got type %s" % type(path))
        # Delegate the actual save to the wrapped Scala model.
        self._java_model.save(sc._jsc.sc(), path)
class Loader(object):
    """
    Mixin for classes which can load saved models from files.
    .. versionadded:: 1.3.0
    """
    @classmethod
    def load(cls, sc, path):
        """
        Restore a model previously persisted with
        :py:meth:`Saveable.save`.
        :param sc: Spark context used for loading model files.
        :param path: Path specifying the directory to which the model
                     was saved.
        :return: model instance
        """
        raise NotImplementedError
@inherit_doc
class JavaLoader(Loader):
    """
    Mixin for classes which can load saved models using its Scala
    implementation.
    .. versionadded:: 1.3.0
    """
    @classmethod
    def _java_loader_class(cls):
        """
        Returns the full class name of the Java loader. The default
        implementation replaces "pyspark" by "org.apache.spark" in
        the Python full class name.
        """
        package = cls.__module__.replace("pyspark", "org.apache.spark")
        return ".".join([package, cls.__name__])
    @classmethod
    def _load_java(cls, sc, path):
        """
        Load a Java model from the given path.
        """
        # Walk the dotted class name component by component through the
        # JVM gateway to reach the Scala loader object.
        target = sc._jvm
        for component in cls._java_loader_class().split("."):
            target = getattr(target, component)
        return target.load(sc._jsc.sc(), path)
    @classmethod
    @since("1.3.0")
    def load(cls, sc, path):
        """Load a model from the given path."""
        return cls(cls._load_java(sc, path))
class LinearDataGenerator(object):
    """Utils for generating linear data.
    .. versionadded:: 1.5.0
    """
    @staticmethod
    @since("1.5.0")
    def generateLinearInput(intercept, weights, xMean, xVariance,
                            nPoints, seed, eps):
        """
        :param: intercept bias factor, the term c in X'w + c
        :param: weights   feature vector, the term w in X'w + c
        :param: xMean     Point around which the data X is centered.
        :param: xVariance Variance of the given data
        :param: nPoints   Number of points to be generated
        :param: seed      Random Seed
        :param: eps       Used to scale the noise. If eps is set high,
                          the amount of gaussian noise added is more.
        Returns a list of LabeledPoints of length nPoints
        """
        # Coerce every numeric argument to a plain Python float/int
        # before handing them to the JVM wrapper.
        return list(callMLlibFunc(
            "generateLinearInputWrapper", float(intercept),
            [float(w) for w in weights], [float(m) for m in xMean],
            [float(v) for v in xVariance], int(nPoints), int(seed),
            float(eps)))
    @staticmethod
    @since("1.5.0")
    def generateLinearRDD(sc, nexamples, nfeatures, eps,
                          nParts=2, intercept=0.0):
        """
        Generate an RDD of LabeledPoints.
        """
        return callMLlibFunc(
            "generateLinearRDDWrapper", sc, int(nexamples), int(nfeatures),
            float(eps), int(nParts), float(intercept))
def _test():
    """Run this module's doctests against a local two-core SparkSession."""
    import doctest
    from pyspark.sql import SparkSession
    globs = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = (SparkSession.builder
             .master("local[2]")
             .appName("mllib.util tests")
             .getOrCreate())
    globs['spark'] = spark
    globs['sc'] = spark.sparkContext
    failure_count, test_count = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if failure_count:
        sys.exit(-1)
# Run the doctest suite when this module is executed directly.
if __name__ == "__main__":
    _test()
|
dbtsai/spark
|
python/pyspark/mllib/util.py
|
Python
|
apache-2.0
| 19,536
|
[
"Gaussian"
] |
7f88a7dec45f42edd1af4d75d06b41c3c6c69f08d540385833ace2d5ab54be5e
|
from ase.atoms import string2symbols
# Raw DFT total energies keyed "<species>_<site>", where site is "gas"
# or "111" (adsorbed on the (111) slab). Keys with "-" are transition
# states. Units are presumably eV — TODO confirm against the DFT setup.
abinitio_energies = {
    'CO_gas': -626.611970497,
    'H2_gas': -32.9625308725,
    'CH4_gas': -231.60983421,
    'H2O_gas': -496.411394229,
    'CO_111': -115390.445596,
    'C_111': -114926.212205,
    'O_111': -115225.106527,
    'H_111': -114779.038569,
    'CH_111': -114943.455431,
    'OH_111': -115241.861661,
    'CH2_111': -114959.776961,
    'CH3_111': -114976.7397,
    'C-O_111': -115386.76440668429,
    'H-OH_111': -115257.78796158083,
    'H-C_111': -114942.25042955727,
    'slab_111': -114762.254842,
}
# Atomic reference energies derived from the gas-phase molecules
# (H from H2, O from H2O, C from CH4) plus the clean-slab reference.
ref_dict = {}
ref_dict['H'] = 0.5 * abinitio_energies['H2_gas']
ref_dict['O'] = abinitio_energies['H2O_gas'] - 2 * ref_dict['H']
ref_dict['C'] = abinitio_energies['CH4_gas'] - 4 * ref_dict['H']
ref_dict['111'] = abinitio_energies['slab_111']
def get_formation_energies(energy_dict, ref_dict):
    """Convert raw DFT energies into formation energies.

    For every non-slab entry, the slab reference is removed (adsorbed
    species only) and the gas-phase atomic references are subtracted,
    then the result is rounded to 3 decimals (the accuracy of DFT).
    """
    formation_energies = {}
    for key, raw_energy in energy_dict.items():
        name, site = key.split('_')  # key format: "<name>_<site>"
        if 'slab' in name:
            continue  # the empty slab itself gets no entry (energy 0)
        corrected = raw_energy
        if site == '111':
            corrected -= ref_dict[site]  # remove slab energy for adsorbates
        # strip the "-" marking transition states to get a plain formula,
        # then subtract the reference energy of each atomic species
        composition = string2symbols(name.replace('-', ''))
        for atom in composition:
            corrected -= ref_dict[atom]
        formation_energies[key] = round(corrected, 3)
    return formation_energies
# Convert the raw energies to formation energies and echo them.
formation_energies = get_formation_energies(abinitio_energies, ref_dict)
for key in formation_energies:
    print key, formation_energies[key]
# Vibrational frequencies per species (presumably cm^-1 — TODO confirm);
# transition states deliberately carry empty lists.
frequency_dict = {
    'CO_gas': [2170],
    'H2_gas': [4401],
    'CH4_gas': [2917, 1534, 1534, 3019, 3019, 3019, 1306,
                1306, 1306],
    'H2O_gas': [3657, 1595, 3756],
    'CO_111': [60.8, 230.9, 256.0, 302.9, 469.9, 1747.3],
    'C_111': [464.9, 490.0, 535.9],
    'O_111': [359.5, 393.3, 507.0],
    'H_111': [462.8, 715.9, 982.5],
    'CH_111': [413.3, 437.5, 487.6, 709.6, 735.1, 3045.0],
    'OH_111': [55, 340.9, 396.1, 670.3, 718.0, 3681.7],
    'CH2_111': [55, 305.5, 381.3, 468.0, 663.4, 790.2, 1356.1,
                2737.7, 3003.9],
    'CH3_111': [55, 113.5, 167.4, 621.8, 686.0, 702.5, 1381.3,
                1417.5, 1575.8, 3026.6, 3093.2, 3098.9],
    'C-O_111': [],
    'H-OH_111': [],
    'H-C_111': []
}
def make_input_file(file_name, energy_dict, frequency_dict):
    """Write a tab-separated CatMAP-style input table.

    :param file_name: output path
    :param energy_dict: {"<name>_<site>": formation energy}
    :param frequency_dict: {"<name>_<site>": list of frequencies};
        must contain every non-slab key of energy_dict
    """
    # create a header
    header = '\t'.join(['surface_name', 'site_name',
                        'species_name', 'formation_energy',
                        'frequencies', 'reference'])
    lines = []  # list of lines in the output
    for key, E in energy_dict.items():
        name, site = key.split('_')  # split key into name/site
        if 'slab' in name:
            continue  # do not include empty site energy (0)
        frequency = frequency_dict[key]
        # gas-phase species have no surface; everything else is on Rh
        surface = None if site == 'gas' else 'Rh'
        outline = [surface, site, name, E, frequency, 'Input File Tutorial.']
        lines.append('\t'.join([str(w) for w in outline]))
    lines.sort()  # The file is easier to read if sorted (optional)
    content = '\n'.join([header] + lines)  # header on top
    # `with` guarantees the file is closed even on error, and the local
    # no longer shadows the builtin `input` (bug fix: leaked handle)
    with open(file_name, 'w') as outfile:
        outfile.write(content)
    print('Successfully created input file')
# Dump the formation energies + frequencies to a CatMAP input table.
file_name = 'energies.txt'
make_input_file(file_name, formation_energies, frequency_dict)
# Test that input is parsed correctly
from catmap.model import ReactionModel
from catmap.parsers import TableParser
rxm = ReactionModel()
# The following lines are normally assigned by the setup_file
# and are thus not usually necessary.
rxm.surface_names = ['Rh']
rxm.adsorbate_names = ('CO', 'C', 'O', 'H', 'CH', 'OH', 'CH2', 'CH3')
rxm.transition_state_names = ('C-O', 'H-OH', 'H-C')
rxm.gas_names = ('CO_g', 'H2_g', 'CH4_g', 'H2O_g')
rxm.site_names = ('s',)
rxm.species_definitions = {'s': {'site_names': ['111']}}
# Now we initialize a parser instance (also normally done by setup_file)
parser = TableParser(rxm)
parser.input_file = file_name
parser.parse()
# All structured data is stored in species_definitions; thus we can
# check that the parsing was successful by ensuring that all the
# data in the input file was collected in this dictionary.
for key in rxm.species_definitions:
    print key, rxm.species_definitions[key]
|
charlietsai/catmap
|
tutorials/1-generating_input_file/generate_input.py
|
Python
|
gpl-3.0
| 4,903
|
[
"ASE"
] |
aa576f17edb4210b32024be136dc356a782e1e7ae7a32a3c4aa9717ee6c6119c
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 08 16:12:29 2015
@author: winsemi
$Id: wflow_flood_lib.py $
$Date: 2016-04-07 12:05:38 +0200 (Thu, 7 Apr 2016) $
$Author: winsemi $
$Revision: $
$HeadURL: $
$Keywords: $
"""
import sys
import os
import configparser
import logging
import logging.handlers
import numpy as np
from osgeo import osr, gdal, gdalconst
import pcraster as pcr
import netCDF4 as nc
import cftime
def setlogger(logfilename, logReference, verbose=True):
    """
    Set up the logging system (rotating file handler + console handler).
    Exit the process if the log file cannot be created.
    :param logfilename: path of the log file
    :param logReference: logger name passed to logging.getLogger
    :param verbose: when False, the console handler only shows INFO and
        above; the file handler always logs DEBUG. (This parameter was
        previously accepted but ignored; default behaviour is unchanged.)
    :return: (logger, file handler) tuple
    """
    try:
        # create logger
        logger = logging.getLogger(logReference)
        logger.setLevel(logging.DEBUG)
        # rotate at 10 MB, keep up to 5 backups
        ch = logging.handlers.RotatingFileHandler(
            logfilename, maxBytes=10 * 1024 * 1024, backupCount=5
        )
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG if verbose else logging.INFO)
        ch.setLevel(logging.DEBUG)
        # one shared formatter for both handlers
        formatter = logging.Formatter(
            "%(asctime)s - %(name)s - %(module)s - %(levelname)s - %(message)s"
        )
        ch.setFormatter(formatter)
        console.setFormatter(formatter)
        logger.addHandler(ch)
        logger.addHandler(console)
        logger.debug("File logging to " + logfilename)
        return logger, ch
    except IOError:
        print("ERROR: Failed to initialize logger with logfile: " + logfilename)
        sys.exit(1)
def closeLogger(logger, ch):
    """Detach handler `ch` from `logger`, then flush and close it."""
    logger.removeHandler(ch)
    ch.flush()
    ch.close()
    return (logger, ch)
def close_with_error(logger, ch, msg):
    # Log `msg` as an error, tear the logging system down, and exit the
    # whole process with status 1.
    logger.error(msg)
    logger, ch = closeLogger(logger, ch)
    del logger, ch
    sys.exit(1)
def open_conf(fn):
    """Parse the ini file `fn`; exit with status 1 if it does not exist."""
    config = configparser.ConfigParser()
    config.optionxform = str  # keep option names case-sensitive
    if not os.path.exists(fn):
        print("Cannot open config file: " + fn)
        sys.exit(1)
    config.read(fn)
    return config
def configget(config, section, var, default, datatype="str"):
    """
    Gets a value from a config file (.ini) and returns a default value if
    the key is not found or cannot be coerced to the requested type.
    Input:
        - config - python ConfigParser object
        - section - section in the file
        - var - variable (key) to get
        - default - default value
        - datatype='str' - can be set to 'boolean', 'int', 'float' or 'str'
    Returns:
        - value (str, boolean, float or int) - either the value from the
          config file or the default value
    """
    try:
        if datatype == "int":
            return config.getint(section, var)
        elif datatype == "float":
            return config.getfloat(section, var)
        elif datatype == "boolean":
            return config.getboolean(section, var)
        else:
            return config.get(section, var)
    except (configparser.Error, ValueError):
        # Missing section/option or a malformed value -> default.
        # (Bug fix: was a bare `except:` which also swallowed unrelated
        # errors; the dead `Def`/`default = Def` bookkeeping is removed.)
        return default
def get_gdal_extent(filename):
    """Return (xmin, ymin, xmax, ymax) corner coordinates of a dataset."""
    ds = gdal.Open(filename, gdal.GA_ReadOnly)
    # geotransform layout: (top-left x, pixel width, 0,
    #                       top-left y, 0, pixel height [negative])
    gt = ds.GetGeoTransform()
    nx, ny = ds.RasterXSize, ds.RasterYSize
    xmin = np.float64(gt[0])
    ymax = np.float64(gt[3])
    xmax = xmin + np.float64(nx) * np.float64(gt[1])
    ymin = ymax + np.float64(ny) * np.float64(gt[5])
    ds = None  # release the dataset handle
    return xmin, ymin, xmax, ymax
def get_gdal_geotransform(filename):
    """Return the geotransform tuple of a GDAL dataset; exit if unreadable."""
    ds = gdal.Open(filename, gdal.GA_ReadOnly)
    if ds is None:
        # Bug fix: `.format(filename)` was previously called on the return
        # value of logging.warning (None), raising AttributeError.
        logging.warning("Could not open {:s} Shutting down".format(filename))
        sys.exit(1)
    # Retrieve geoTransform info
    gt = ds.GetGeoTransform()
    ds = None  # release the dataset handle
    return gt
def get_gdal_axes(filename, logging=logging):
    """Return the cell-centre x and y axes (1D numpy arrays) of a raster."""
    xul, xres, _, yul, _, yres = get_gdal_geotransform(filename)
    ds = gdal.Open(filename, gdal.GA_ReadOnly)
    cols = ds.RasterXSize
    rows = ds.RasterYSize
    ds = None  # close the dataset
    # axes run through cell centres, hence the half-resolution offset
    x = np.linspace(xul + xres / 2, xul + xres / 2 + xres * (cols - 1), cols)
    y = np.linspace(yul + yres / 2, yul + yres / 2 + yres * (rows - 1), rows)
    return x, y
def get_gdal_fill(filename, logging=logging):
    """Return the nodata (fill) value of band 1 of a GDAL raster.

    Logs a warning and exits with status 1 when the file cannot be opened.
    """
    ds = gdal.Open(filename, gdal.GA_ReadOnly)
    if ds is None:
        # Fix: format the message itself rather than the None return of warning()
        logging.warning("Could not open {:s} Shutting down".format(filename))
        sys.exit(1)
    # Fix: the original returned an undefined name `fill_value` (NameError)
    # and computed an unused geotransform; read the band's nodata instead.
    fill_value = ds.GetRasterBand(1).GetNoDataValue()
    ds = None  # close the dataset
    return fill_value
def get_gdal_projection(filename, logging=logging):
    """Return the spatial reference (osr.SpatialReference) of a GDAL dataset.

    Logs a warning and exits with status 1 when the file cannot be opened.
    """
    ds = gdal.Open(filename, gdal.GA_ReadOnly)
    if ds is None:
        # Fix: the original called .format on the None returned by
        # logging.warning(...), raising AttributeError instead of warning.
        logging.warning("Could not open {:s} Shutting down".format(filename))
        sys.exit(1)
    WktString = ds.GetProjection()
    srs = osr.SpatialReference()
    srs.ImportFromWkt(WktString)
    ds = None  # close the dataset
    return srs
def get_gdal_rasterband(filename, band=1, logging=logging):
    """
    :param filename: GDAL compatible raster file to read from
    :param band: band number (default=1)
    :param logging: logging object
    :return: gdal dataset object, gdal rasterband object
    """
    ds = gdal.Open(filename)
    if ds is None:
        # Fix: the original called .format on the None returned by
        # logging.warning(...), raising AttributeError instead of warning.
        logging.warning("Could not open {:s} Shutting down".format(filename))
        sys.exit(1)
    return ds, ds.GetRasterBand(band)  # band numbering starts at 1
def prepare_nc(
    trg_file,
    times,
    x,
    y,
    metadata={},
    logging=logging,
    units="Days since 1900-01-01 00:00:00",
    calendar="gregorian",
):
    """
    Prepare a NetCDF file with the given metadata and time/lat/lon axes.

    The function assumes a gregorian calendar and a time unit
    'Days since 1900-01-01 00:00:00' unless overridden via *units*/*calendar*.

    :param trg_file: target NetCDF file path
    :param times: sequence of datetime-like objects for the time axis
    :param x: longitude axis values (degrees east)
    :param y: latitude axis values (degrees north)
    :param metadata: dict of global attributes to set on the file
    :param logging: logging object
    :param units: CF time units string
    :param calendar: CF calendar name
    :return: open netCDF4 Dataset (caller is responsible for closing it)
    """
    # Fix: the original used an undefined global `logger`; use the
    # `logging` argument that the signature already provides.
    logging.info('Setting up "' + trg_file + '"')
    times_list = cftime.date2num(times, units=units, calendar=calendar)
    nc_trg = nc.Dataset(trg_file, "w")
    logging.info("Setting up dimensions and attributes")
    nc_trg.createDimension("time", 0)  # unlimited (record) dimension
    nc_trg.createDimension("lat", len(y))
    nc_trg.createDimension("lon", len(x))
    times_nc = nc_trg.createVariable("time", "f8", ("time",))
    times_nc.units = units
    times_nc.calendar = calendar
    times_nc.standard_name = "time"
    times_nc.long_name = "time"
    times_nc[:] = times_list
    y_var = nc_trg.createVariable("lat", "f4", ("lat",))
    y_var.standard_name = "latitude"
    y_var.long_name = "latitude"
    y_var.units = "degrees_north"
    x_var = nc_trg.createVariable("lon", "f4", ("lon",))
    x_var.standard_name = "longitude"
    x_var.long_name = "longitude"
    x_var.units = "degrees_east"
    y_var[:] = y
    x_var[:] = x
    # dummy variable carrying the CF grid-mapping attributes
    projection = nc_trg.createVariable("projection", "c")
    projection.long_name = "wgs84"
    projection.EPSG_code = "EPSG:4326"
    projection.proj4_params = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
    projection.grid_mapping_name = "latitude_longitude"
    # now add all attributes from user-defined metadata
    for attr in metadata:
        nc_trg.setncattr(attr, metadata[attr])
    nc_trg.sync()
    return nc_trg
def prepare_gdal(
    filename,
    x,
    y,
    format="GTiff",
    logging=logging,
    metadata={},
    metadata_var={},
    gdal_type=gdal.GDT_Float32,
    zlib=True,
    srs=None,
):
    """Create a single-band GDAL raster and return its dataset and band.

    :param filename: target file path
    :param x: 1D cell-centre x-axis
    :param y: 1D cell-centre y-axis
    :param format: GDAL driver short name (default 'GTiff')
    :param logging: logging object
    :param metadata: dict of dataset-level metadata
    :param metadata_var: dict of band-level metadata
    :param gdal_type: GDAL data type for the band
    :param zlib: if True, create with DEFLATE compression
    :param srs: optional osr.SpatialReference for the projection
    :return: (gdal dataset, gdal raster band); nodata is preset to -9999.0
    """
    # derive the geotransform from the cell-centre axes (upper-left corners)
    xul = x[0] - (x[1] - x[0]) / 2
    xres = x[1] - x[0]
    yul = y[0] + (y[0] - y[1]) / 2
    yres = y[1] - y[0]
    geotrans = [xul, xres, 0, yul, 0, yres]
    gdal.AllRegister()
    # Fix: honour the `format` argument; the driver was hard-coded to GTiff.
    driver = gdal.GetDriverByName(format)
    # Processing
    logging.info(str("Preparing file {:s}").format(filename))
    if zlib:
        # NOTE(review): COMPRESS=DEFLATE is a GTiff creation option; other
        # drivers may ignore or reject it — confirm for non-GTiff formats.
        ds = driver.Create(filename, len(x), len(y), 1, gdal_type, ["COMPRESS=DEFLATE"])
    else:
        ds = driver.Create(filename, len(x), len(y), 1, gdal_type)
    ds.SetGeoTransform(geotrans)
    if srs:
        ds.SetProjection(srs.ExportToWkt())
    # get rasterband entry
    band = ds.GetRasterBand(1)
    band.SetNoDataValue(-9999.0)
    ds.SetMetadata(metadata)
    band.SetMetadata(metadata_var)
    logging.info("Prepared {:s}".format(filename))
    return ds, band
def write_tile_nc(var, data, x_start, y_start, flipud=False):
    """
    Write a 2D tile into a larger 2D array-like (e.g. a netCDF variable).

    :param var: 2D writable array-like to write into
    :param data: 2D numpy array tile
    :param x_start: column to start writing
    :param y_start: row to start writing (counted from the bottom when
        flipud=True)
    :param flipud: if True, the tile is placed up-side-down (rows indexed
        from the bottom of *var*)
    :return: var
    """
    # determine x end and y end
    x_end = x_start + data.shape[1]
    y_end = y_start + data.shape[0]
    if flipud:
        if y_start == 0:
            # -0 would be an empty slice, so the bottom tile is special-cased
            var[-y_end:, x_start:x_end] = data
        else:
            var[-y_end:-y_start, x_start:x_end] = data
    else:
        # Fix: the original omitted the column slice here and overwrote
        # entire rows instead of just the tile's columns.
        var[y_start:y_end, x_start:x_end] = data
    return var
def gdal_warp(
    src_filename,
    clone_filename,
    dst_filename,
    gdal_type=gdalconst.GDT_Float32,
    gdal_interp=gdalconst.GRA_Bilinear,
    format="GTiff",
    ds_in=None,
    override_src_proj=None,
):
    """
    Equivalent of the gdalwarp executable, commonly used on command line.
    The function prepares from a source file, a new file, that has the same
    extent and projection as a clone file.
    The clone file should contain the correct projection.
    The same projection will then be produced for the target file.
    If the clone does not have a projection, EPSG:4326 (i.e. WGS 1984 lat-lon)
    will be assumed.
    :param src_filename: string - file with data that will be warped
    :param clone_filename: string - containing clone file (with projection information)
    :param dst_filename: string - destination file (will have the same extent/projection as clone)
    :param gdal_type: - data type to use for output file (default=gdalconst.GDT_Float32)
    :param gdal_interp: - interpolation type used (default=gdalconst.GRA_Bilinear)
    :param format: - GDAL data format to return (default='GTiff')
    :param ds_in: - already-opened GDAL dataset used instead of src_filename
    :param override_src_proj: - EPSG code (int) that overrides the source projection
    :return: the in-memory dataset when format='MEM'; otherwise None (the
        result is written to dst_filename)
    """
    if ds_in is None:
        src = gdal.Open(src_filename, gdalconst.GA_ReadOnly)
    else:
        src = ds_in
    src_proj = src.GetProjection()
    if override_src_proj is not None:
        srs = osr.SpatialReference()
        srs.ImportFromEPSG(override_src_proj)
        src_proj = srs.ExportToWkt()
    src_nodata = src.GetRasterBand(1).GetNoDataValue()
    # replace nodata value temporarily for some other value
    # NOTE(review): NaN is only representable for float bands — confirm this
    # is safe when the source band has an integer type.
    src.GetRasterBand(1).SetNoDataValue(np.nan)
    # We want a section of source that matches this:
    clone_ds = gdal.Open(clone_filename, gdalconst.GA_ReadOnly)
    clone_proj = clone_ds.GetProjection()
    if not clone_proj:
        # assume a WGS 1984 projection
        srs = osr.SpatialReference()
        srs.ImportFromEPSG(4326)
        clone_proj = srs.ExportToWkt()
    clone_geotrans = clone_ds.GetGeoTransform()
    wide = clone_ds.RasterXSize
    high = clone_ds.RasterYSize
    # Output / destination: build an in-memory raster on the clone's grid
    dst_mem = gdal.GetDriverByName("MEM").Create("", wide, high, 1, gdal_type)
    dst_mem.SetGeoTransform(clone_geotrans)
    dst_mem.SetProjection(clone_proj)
    if not (src_nodata is None):
        dst_mem.GetRasterBand(1).SetNoDataValue(src_nodata)
    # Do the work, UUUUUUGGGGGHHHH: first make a nearest neighbour interpolation with the nodata values
    # as actual values and determine which indexes have nodata values. This is needed because there is a bug in
    # gdal.ReprojectImage, nodata values are not included and instead replaced by zeros! This is not ideal and if
    # a better solution comes up, it should be replaced.
    gdal.ReprojectImage(
        src, dst_mem, src_proj, clone_proj, gdalconst.GRA_NearestNeighbour
    )
    data = dst_mem.GetRasterBand(1).ReadAsArray(0, 0)
    # remember which destination cells map onto source nodata
    idx = np.where(data == src_nodata)
    # now remove the dataset
    del data
    # now do the real transformation and replace the values that are covered by NaNs by the missing value
    if not (src_nodata is None):
        src.GetRasterBand(1).SetNoDataValue(src_nodata)
    gdal.ReprojectImage(src, dst_mem, src_proj, clone_proj, gdal_interp)
    data = dst_mem.GetRasterBand(1).ReadAsArray(0, 0)
    data[idx] = src_nodata
    dst_mem.GetRasterBand(1).WriteArray(data, 0, 0)
    if format == "MEM":
        return dst_mem
    else:
        # retrieve numpy array of interpolated values
        # write to final file in the chosen file format
        gdal.GetDriverByName(format).CreateCopy(dst_filename, dst_mem, 0)
def derive_HAND(
    dem, ldd, accuThreshold, rivers=None, basin=None, up_area=None, neg_HAND=None
):
    """
    Function derives Height-Above-Nearest-Drain.
    See http://www.sciencedirect.com/science/article/pii/S003442570800120X
    Input:
        dem -- pcraster object float32, elevation data
        ldd -- pcraster object direction, local drain directions
        accuThreshold -- upstream amount of cells as threshold for river
            delineation
        rivers=None -- you can provide a rivers layer here. Pixels that are
            identified as river should have a value > 0, other pixels a value
            of zero.
        basin=None -- set a boolean pcraster map where areas with True are
            estimated using the nearest drain in ldd distance and areas with
            False by means of the nearest friction distance. Friction distance
            estimated using the upstream area as weight (i.e. drains with a
            bigger upstream area have a lower friction), the spreadzone
            operator is used in this case.
        up_area=None -- provide the upstream area (if not assigned a
            guesstimate is prepared, assuming the LDD covers a full catchment
            area)
        neg_HAND=None -- if set to 1, HAND maps can have negative values when
            elevation outside of stream is lower than stream (for example when
            there are natural embankments)
    Output:
        hand -- pcraster object float32, height, normalised to nearest stream
        dist -- distance to nearest stream measured in cell lengths
            according to D8 directions
    """
    if rivers is None:
        # prepare stream from a strahler threshold
        stream = pcr.ifthenelse(
            pcr.accuflux(ldd, 1) >= accuThreshold, pcr.boolean(1), pcr.boolean(0)
        )
    else:
        # convert stream network to boolean
        stream = pcr.boolean(pcr.cover(rivers, 0))
    # determine height in river (in DEM*100 unit as ordinal) — the *100
    # scaling keeps two decimals of elevation through the ordinal conversion
    height_river = pcr.ifthenelse(stream, pcr.ordinal(dem * 100), 0)
    if basin is None:
        # every cell inherits the elevation of the stream cell it drains to
        up_elevation = pcr.scalar(pcr.subcatchment(ldd, height_river))
    else:
        # use basin to allocate areas outside basin to the nearest stream. Nearest is weighted by upstream area
        if up_area is None:
            up_area = pcr.accuflux(ldd, 1)
        up_area = pcr.ifthen(stream, up_area)  # mask areas outside streams
        friction = 1.0 / pcr.scalar(
            pcr.spreadzone(pcr.cover(pcr.ordinal(up_area), 0), 0, 0)
        )
        # if basin, use nearest river within subcatchment, if outside basin, use weighted-nearest river
        up_elevation = pcr.ifthenelse(
            basin,
            pcr.scalar(pcr.subcatchment(ldd, height_river)),
            pcr.scalar(pcr.spreadzone(height_river, 0, friction)),
        )
        # replace areas outside of basin by a spread zone calculation.
    # make negative HANDS also possible
    if neg_HAND == 1:
        hand = (
            pcr.scalar(pcr.ordinal(dem * 100)) - up_elevation
        ) / 100  # convert back to float in DEM units
    else:
        hand = (
            pcr.max(pcr.scalar(pcr.ordinal(dem * 100)) - up_elevation, 0) / 100
        )  # convert back to float in DEM units
    dist = pcr.ldddist(ldd, stream, 1)  # compute horizontal distance estimate
    return hand, dist
def subcatch_stream(
    ldd,
    threshold,
    stream=None,
    min_strahler=-999,
    max_strahler=999,
    assign_edge=False,
    assign_existing=False,
    up_area=None,
    basin=None,
):
    """
    Derive catchments based upon strahler threshold
    Input:
        ldd -- pcraster object direction, local drain directions
        threshold -- integer, strahler threshold, subcatchments ge threshold
            are derived
        stream=None -- pcraster object ordinal, stream order map (made with
            pcr.streamorder), if provided, stream order map is not generated
            on the fly but used from this map. Useful when a subdomain within
            a catchment is provided, which would cause edge effects in the
            stream order map
        min_strahler=-999 -- integer, minimum strahler threshold of river
            catchments to return
        max_strahler=999 -- integer, maximum strahler threshold of river
            catchments to return
        assign_edge=False -- if set to True, unassigned connected areas at
            the edges of the domain are assigned a unique id as well. If set
            to False, edges are not assigned
        assign_existing=False -- if set to True, unassigned edges are assigned
            to existing basins with an upstream weighting. If set to False,
            edges are assigned to unique IDs, or not assigned
        up_area=None -- optional upstream-area map used by assign_existing
        basin=None -- optional boolean map; cells outside it are masked out
    output:
        stream_ge -- pcraster object, streams of strahler order ge threshold
        subcatch -- pcraster object, subcatchments of strahler order ge threshold
    """
    # derive stream order
    if stream is None:
        stream = pcr.streamorder(ldd)
    stream_ge = pcr.ifthen(stream >= threshold, stream)
    stream_up_sum = pcr.ordinal(pcr.upstream(ldd, pcr.cover(pcr.scalar(stream_ge), 0)))
    # detect any transfer of strahler order, to a higher strahler order.
    # A cell is a "transition" when (a) its downstream cell has a different
    # order, (b) it is a pit (ldd == 5), or (c) the summed upstream order
    # downstream exceeds the local order (confluence).
    transition_strahler = pcr.ifthenelse(
        pcr.downstream(ldd, stream_ge) != stream_ge,
        pcr.boolean(1),
        pcr.ifthenelse(
            pcr.nominal(ldd) == 5,
            pcr.boolean(1),
            pcr.ifthenelse(
                pcr.downstream(ldd, pcr.scalar(stream_up_sum)) > pcr.scalar(stream_ge),
                pcr.boolean(1),
                pcr.boolean(0),
            ),
        ),
    )
    # make unique ids (write to file)
    transition_unique = pcr.ordinal(pcr.uniqueid(transition_strahler))
    # derive upstream catchment areas (write to file)
    subcatch = pcr.nominal(pcr.subcatchment(ldd, transition_unique))
    # mask out areas outside basin
    if basin is not None:
        subcatch = pcr.ifthen(basin, subcatch)
    if assign_edge:
        # fill unclassified areas (in pcraster equal to zero) with a unique id, above the maximum id assigned so far
        unique_edge = pcr.clump(pcr.ifthen(subcatch == 0, pcr.ordinal(0)))
        subcatch = pcr.ifthenelse(
            subcatch == 0,
            pcr.nominal(pcr.mapmaximum(pcr.scalar(subcatch)) + pcr.scalar(unique_edge)),
            pcr.nominal(subcatch),
        )
    elif assign_existing:
        # unaccounted areas are added to largest nearest draining basin
        if up_area is None:
            up_area = pcr.ifthen(
                pcr.boolean(pcr.cover(stream_ge, 0)), pcr.accuflux(ldd, 1)
            )
        riverid = pcr.ifthen(pcr.boolean(pcr.cover(stream_ge, 0)), subcatch)
        # bigger upstream area -> lower friction -> wins the spreadzone
        friction = 1.0 / pcr.scalar(
            pcr.spreadzone(pcr.cover(pcr.ordinal(up_area), 0), 0, 0)
        )  # *(pcr.scalar(ldd)*0+1)
        delta = pcr.ifthen(
            pcr.scalar(ldd) >= 0,
            pcr.ifthen(
                pcr.cover(subcatch, 0) == 0,
                pcr.spreadzone(pcr.cover(riverid, 0), 0, friction),
            ),
        )
        subcatch = pcr.ifthenelse(pcr.boolean(pcr.cover(subcatch, 0)), subcatch, delta)
    # finally, only keep basins with minimum and maximum river order flowing through them
    strahler_subcatch = pcr.areamaximum(stream, subcatch)
    subcatch = pcr.ifthen(
        pcr.ordinal(strahler_subcatch) >= min_strahler,
        pcr.ifthen(pcr.ordinal(strahler_subcatch) <= max_strahler, subcatch),
    )
    return stream_ge, pcr.ordinal(subcatch)
def volume_spread(
    ldd,
    hand,
    subcatch,
    volume,
    volume_thres=0.0,
    cell_surface=1.0,
    iterations=15,
    logging=logging,
    order=0,
    neg_HAND=None,
):
    """
    Estimate 2D flooding from a 1D simulation per subcatchment reach
    Input:
        ldd -- pcraster object direction, local drain directions
        hand -- pcraster object float32, elevation data normalised to nearest drain
        subcatch -- pcraster object ordinal, subcatchments with IDs
        volume -- pcraster object float32, scalar flood volume (i.e. m3 volume outside the river bank within subcatchment)
        volume_thres=0. -- scalar threshold, at least this amount of m3 of volume should be present in a catchment
        cell_surface=1. -- cell surface area map/scalar; used to convert the
            subcatchment cell count into an actual area
        iterations=15 -- number of bisection iterations to use
        logging -- logging object
        order -- index used only for (commented-out) debug report file names
        neg_HAND -- if set to 1, HAND maps can have negative values when elevation outside of stream is lower than
            stream (for example when there are natural embankments)
    Output:
        inundation -- pcraster object float32, scalar inundation estimate
    """
    # initial values
    pcr.setglobaloption("unitcell")
    dem_min = pcr.areaminimum(hand, subcatch)  # minimum elevation in subcatchments
    dem_norm = hand - dem_min
    # surface of each subcatchment
    surface = pcr.areaarea(subcatch) * pcr.areaaverage(
        cell_surface, subcatch
    )  # area_multiplier
    error_abs = pcr.scalar(1e10)  # initial error (very high)
    volume_catch = pcr.areatotal(volume, subcatch)
    depth_catch = volume_catch / surface  # meters water disc averaged over subcatchment
    # pcr.report(depth_catch, 'depth_catch_{:02d}.map'.format(order))
    # pcr.report(volume, 'volume_{:02d}.map'.format(order))
    # Initialise the bisection bounds: a deliberately high upper bound (32 m)
    # where there is enough volume, collapsed bounds (no flooding) elsewhere.
    if neg_HAND == 1:
        dem_max = pcr.ifthenelse(
            volume_catch > volume_thres, pcr.scalar(32.0), pcr.scalar(-32.0)
        )  # arbitrarily high initial upper-bound inundation depth
        dem_min = pcr.scalar(-32.0)
    else:
        dem_max = pcr.ifthenelse(
            volume_catch > volume_thres, pcr.scalar(32.0), pcr.scalar(0.0)
        )  # arbitrarily high initial upper-bound inundation depth
        dem_min = pcr.scalar(0.0)
    # bisection: find the water level whose averaged depth matches depth_catch
    for n in range(iterations):
        logging.debug("Iteration: {:02d}".format(n + 1))
        #####while np.logical_and(error_abs > error_thres, dem_min < dem_max):
        dem_av = (dem_min + dem_max) / 2
        # compute value at dem_av
        average_depth_catch = pcr.areaaverage(pcr.max(dem_av - dem_norm, 0), subcatch)
        error = pcr.cover(
            (depth_catch - average_depth_catch) / depth_catch, depth_catch * 0
        )
        dem_min = pcr.ifthenelse(error > 0, dem_av, dem_min)
        dem_max = pcr.ifthenelse(error <= 0, dem_av, dem_max)
    inundation = pcr.max(dem_av - dem_norm, 0)
    pcr.setglobaloption("unittrue")
    return inundation
def gdal_writemap(
    file_name,
    file_format,
    x,
    y,
    data,
    fill_val,
    zlib=False,
    gdal_type=gdal.GDT_Float32,
    resolution=None,
    srs=None,
    logging=logging,
):
    """ Write geographical file from numpy array
    Dependencies are osgeo.gdal and numpy
    Input:
        file_name: -- string: reference path to GDAL-compatible file
        file_format: -- string: file format according to GDAL acronym
            (see http://www.gdal.org/formats_list.html)
        x: -- 1D np-array: x-axis, or (if only one value), top-left x-coordinate
        y: -- 1D np-array: y-axis, or (if only one value), top-left y-coordinate
        data: -- 2D np-array: raster data
        fill_val: -- float: fill value
        --------------------------------
    optional inputs:
        zlib=False: -- boolean: determines if output file should be internally
            zipped or not
        gdal_type=gdal.GDT_Float32: -- gdal data type to write
        resolution=None: -- resolution of dataset, only needed if x and y are
            given as upperleft coordinates
        srs=None: -- projection object (imported by osgeo.osr)

    The data is first written as a temporary GTiff and then converted with
    CreateCopy, because not every GDAL driver supports direct Create().
    """
    # make the geotransform
    # Give georeferences
    if hasattr(x, "__len__"):
        # x is the full axes
        xul = x[0] - (x[1] - x[0]) / 2
        xres = x[1] - x[0]
    else:
        # x is the top-left corner
        xul = x
        xres = resolution
    if hasattr(y, "__len__"):
        # y is the full axes
        yul = y[0] + (y[0] - y[1]) / 2
        yres = y[1] - y[0]
    else:
        # y is the top-left corner
        yul = y
        yres = -resolution
    geotrans = [xul, xres, 0, yul, 0, yres]
    gdal.AllRegister()
    driver1 = gdal.GetDriverByName("GTiff")
    driver2 = gdal.GetDriverByName(file_format)
    # Processing
    temp_file_name = str("{:s}.tif").format(file_name)
    logging.info(str("Writing to temporary file {:s}").format(temp_file_name))
    if zlib:
        TempDataset = driver1.Create(
            temp_file_name,
            data.shape[1],
            data.shape[0],
            1,
            gdal_type,
            ["COMPRESS=DEFLATE"],
        )
    else:
        TempDataset = driver1.Create(
            temp_file_name, data.shape[1], data.shape[0], 1, gdal_type
        )
    TempDataset.SetGeoTransform(geotrans)
    if srs:
        TempDataset.SetProjection(srs.ExportToWkt())
    # get rasterband entry
    TempBand = TempDataset.GetRasterBand(1)
    # fill rasterband with array
    TempBand.WriteArray(data, 0, 0)
    TempBand.FlushCache()
    TempBand.SetNoDataValue(fill_val)
    # Create data to write to correct format (supported by 'CreateCopy')
    logging.info(str("Writing to {:s}").format(file_name))
    if zlib:
        driver2.CreateCopy(file_name, TempDataset, 0, ["COMPRESS=DEFLATE"])
    else:
        driver2.CreateCopy(file_name, TempDataset, 0)
    TempDataset = None
    # the temporary GTiff is no longer needed once the copy is written
    os.remove(temp_file_name)
def gdal_readmap(file_name, file_format, give_geotrans=False, logging=logging):
    """ Read geographical file into memory
    Dependencies are osgeo.gdal and numpy
    Input:
        file_name: -- string: reference path to GDAL-compatible file
        file_format: -- string: file format according to GDAL acronym
            (see http://www.gdal.org/formats_list.html)
        give_geotrans (default=False): -- return the geotrans and amount of
            cols/rows instead of x, y axis
    Output (if give_geotrans=False):
        x: -- 1D np-array: x-axis
        y: -- 1D np-array: y-axis
        data: -- 2D np-array: raster data
        fill_val -- float: fill value
    Output (if give_geotrans=True):
        geotrans: -- 6-digit list with GDAL geotrans vector
        size: -- 2-digit tuple with (cols, rows)
        data: -- 2D np-array: raster data
        fill_val -- float: fill value
    """
    # Open file for binary-reading
    mapFormat = gdal.GetDriverByName(file_format)
    mapFormat.Register()
    ds = gdal.Open(file_name)
    if ds is None:
        # Fix: format the message itself; the original called .format on the
        # None returned by logging.warning, raising AttributeError.
        logging.warning("Could not open {:s} Shutting down".format(file_name))
        sys.exit(1)
    # Retrieve geoTransform info
    geotrans = ds.GetGeoTransform()
    originX = geotrans[0]
    originY = geotrans[3]
    resX = geotrans[1]
    resY = geotrans[5]
    cols = ds.RasterXSize
    rows = ds.RasterYSize
    # cell-centre axes, hence the half-resolution offset
    x = np.linspace(originX + resX / 2, originX + resX / 2 + resX * (cols - 1), cols)
    y = np.linspace(originY + resY / 2, originY + resY / 2 + resY * (rows - 1), rows)
    # Retrieve raster
    RasterBand = ds.GetRasterBand(1)  # there's only 1 band, starting from 1
    data = RasterBand.ReadAsArray(0, 0, cols, rows)
    fill_val = RasterBand.GetNoDataValue()
    RasterBand = None
    ds = None
    if give_geotrans == True:
        # Fix: use the cached cols/rows; the original dereferenced `ds`
        # after it had already been set to None (AttributeError).
        return geotrans, (cols, rows), data, fill_val
    else:
        return x, y, data, fill_val
def define_max_strahler(stream_file, logging=logging):
    """Return the maximum Strahler order present in a stream-order GTiff."""
    _, _, stream_data, _ = gdal_readmap(stream_file, "GTiff", logging=logging)
    return stream_data.max()
|
openstreams/wflow
|
Scripts/wflow_flood_lib.py
|
Python
|
gpl-3.0
| 28,442
|
[
"NetCDF"
] |
6f4137b4593071927c4404d577c3ba5936f11dcca4b297b08243e7e82942700f
|
#!/usr/bin/env python
"""
FCKeditor - The text editor for internet
Copyright (C) 2003-2005 Frederico Caldeira Knabben
Licensed under the terms of the GNU Lesser General Public License:
http://www.opensource.org/licenses/lgpl-license.php
For further information visit:
http://www.fckeditor.net/
"Support Open Source software. What about a donation today?"
File Name: sample01.py
Sample page.
File Authors:
Andrew Liu (andrew@liuholdings.com)
"""
# NOTE(review): this CGI sample uses Python 2 syntax (print statements,
# "except Exception, e") and therefore only runs under Python 2.
import cgi
import os
# Ensure that the fckeditor.py is included in your classpath
import fckeditor
# Tell the browser to render html
print "Content-Type: text/html"
print ""
# Document header
print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<title>FCKeditor - Sample</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="robots" content="noindex, nofollow">
<link href="../sample.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h1>FCKeditor - Python - Sample 1</h1>
This sample displays a normal HTML form with an FCKeditor with full features
enabled.
<hr>
<form action="sampleposteddata.py" method="post" target="_blank">
"""
# This is the real work
try:
    # derive the editor's base path from the CGI script's own URL,
    # i.e. everything before the "_samples" directory
    sBasePath = os.environ.get("SCRIPT_NAME")
    sBasePath = sBasePath[0:sBasePath.find("_samples")]
    oFCKeditor = fckeditor.FCKeditor('FCKeditor1')
    oFCKeditor.BasePath = sBasePath
    oFCKeditor.Value = """This is some <strong>sample text</strong>. You are using <a href="http://www.fckeditor.net/">FCKeditor</a>."""
    # Create() returns the HTML snippet that embeds the editor
    print oFCKeditor.Create()
except Exception, e:
    print e
print """
<br>
<input type="submit" value="Submit">
</form>
"""
# For testing your environments
print "<hr>"
for key in os.environ.keys():
    print "%s: %s<br>" % (key, os.environ.get(key, ""))
print "<hr>"
# Document footer
print """
</body>
</html>
"""
|
dapfru/gladiators
|
parsek/cls/editor/_samples/py/sample01.py
|
Python
|
gpl-2.0
| 1,935
|
[
"VisIt"
] |
28ce32ecd606fbc671d0a8380b7e332469d48a93cf06dc33d0db496036a11101
|
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
import random
from hypothesis import given
from swh.model.hashutil import DEFAULT_ALGORITHMS
from swh.web.api import utils
from swh.web.common.origin_visits import get_origin_visits
from swh.web.common.utils import resolve_branch_alias, reverse
from swh.web.tests.strategies import (
content,
directory,
origin,
release,
revision,
snapshot,
)
# Sample URL-map fixture: routing rules (pattern, allowed HTTP methods,
# endpoint name). Not referenced by the tests visible in this chunk;
# presumably consumed by other tests in this module — verify before removal.
url_map = [
    {
        "rule": "/other/<slug>",
        "methods": set(["GET", "POST", "HEAD"]),
        "endpoint": "foo",
    },
    {
        "rule": "/some/old/url/<slug>",
        "methods": set(["GET", "POST"]),
        "endpoint": "blablafn",
    },
    {
        "rule": "/other/old/url/<int:id>",
        "methods": set(["GET", "HEAD"]),
        "endpoint": "bar",
    },
    {"rule": "/other", "methods": set([]), "endpoint": None},
    {"rule": "/other2", "methods": set([]), "endpoint": None},
]
def test_filter_field_keys_dict_unknown_keys():
    """Requesting only unknown keys yields an empty dict."""
    data = {"directory": 1, "file": 2, "link": 3}
    assert utils.filter_field_keys(data, {"directory1", "file2"}) == {}
def test_filter_field_keys_dict():
    """Only the requested keys survive filtering a dict."""
    data = {"directory": 1, "file": 2, "link": 3}
    filtered = utils.filter_field_keys(data, {"directory", "link"})
    assert filtered == {"directory": 1, "link": 3}
def test_filter_field_keys_list_unknown_keys():
    """A list of dicts filtered on unknown keys becomes a list of empty dicts."""
    rows = [{"directory": 1, "file": 2, "link": 3}, {"1": 1, "2": 2, "link": 3}]
    assert utils.filter_field_keys(rows, {"d"}) == [{}, {}]
def test_filter_field_keys_map():
    """Filtering works lazily on a map object as well."""
    source = [{"i": 1, "j": None}, {"i": 2, "j": None}, {"i": 3, "j": None}]
    incremented = map(lambda item: {"i": item["i"] + 1, "j": item["j"]}, source)
    filtered = utils.filter_field_keys(incremented, {"i"})
    assert list(filtered) == [{"i": 2}, {"i": 3}, {"i": 4}]
def test_filter_field_keys_list():
    """Each dict in a list is filtered independently."""
    rows = [{"directory": 1, "file": 2, "link": 3}, {"dir": 1, "fil": 2, "lin": 3}]
    filtered = utils.filter_field_keys(rows, {"directory", "dir"})
    assert filtered == [{"directory": 1}, {"dir": 1}]
def test_filter_field_keys_other():
    """Inputs that are neither dict nor list are returned unchanged."""
    data = {1, 2}
    assert utils.filter_field_keys(data, {"a", "1"}) == data
def test_person_to_string():
    """A person dict renders as 'Name <email>'."""
    person = {"name": "raboof", "email": "foo@bar"}
    assert utils.person_to_string(person) == "raboof <foo@bar>"
def test_enrich_release_empty():
    """Enriching an empty release dict is a no-op."""
    assert utils.enrich_release({}) == {}
@given(release())
def test_enrich_release_content_target(api_request_factory, archive_data, release):
    """A content-targeting release gains a content-API target_url."""
    data = archive_data.release_get(release)
    data["target_type"] = "content"
    request = api_request_factory.get(
        reverse("api-1-release", url_args={"sha1_git": release})
    )
    enriched = utils.enrich_release(data, request)
    data["target_url"] = reverse(
        "api-1-content",
        url_args={"q": f'sha1_git:{data["target"]}'},
        request=request,
    )
    assert enriched == data
@given(release())
def test_enrich_release_directory_target(api_request_factory, archive_data, release):
    """A directory-targeting release gains a directory-API target_url."""
    data = archive_data.release_get(release)
    data["target_type"] = "directory"
    request = api_request_factory.get(
        reverse("api-1-release", url_args={"sha1_git": release})
    )
    enriched = utils.enrich_release(data, request)
    data["target_url"] = reverse(
        "api-1-directory", url_args={"sha1_git": data["target"]}, request=request
    )
    assert enriched == data
@given(release())
def test_enrich_release_revision_target(api_request_factory, archive_data, release):
    """A revision-targeting release gains a revision-API target_url."""
    data = archive_data.release_get(release)
    data["target_type"] = "revision"
    request = api_request_factory.get(
        reverse("api-1-release", url_args={"sha1_git": release})
    )
    enriched = utils.enrich_release(data, request)
    data["target_url"] = reverse(
        "api-1-revision", url_args={"sha1_git": data["target"]}, request=request
    )
    assert enriched == data
@given(release())
def test_enrich_release_release_target(api_request_factory, archive_data, release):
    """A release-targeting release gains a release-API target_url."""
    data = archive_data.release_get(release)
    data["target_type"] = "release"
    request = api_request_factory.get(
        reverse("api-1-release", url_args={"sha1_git": release})
    )
    enriched = utils.enrich_release(data, request)
    data["target_url"] = reverse(
        "api-1-release", url_args={"sha1_git": data["target"]}, request=request
    )
    assert enriched == data
def test_enrich_directory_entry_no_type():
    """Entries lacking a 'type' field are returned unchanged."""
    entry = {"id": "dir-id"}
    assert utils.enrich_directory_entry(entry) == entry
@given(directory())
def test_enrich_directory_entry_with_type(api_request_factory, archive_data, directory):
    """The target_url endpoint depends on the entry type (file/dir/rev)."""
    entry = random.choice(archive_data.directory_ls(directory))
    request = api_request_factory.get(
        reverse("api-1-directory", url_args={"sha1_git": directory})
    )
    enriched = utils.enrich_directory_entry(entry, request)
    entry_type = entry["type"]
    if entry_type == "file":
        # content entries are looked up by hash query string
        entry["target_url"] = reverse(
            "api-1-content",
            url_args={"q": f'sha1_git:{entry["target"]}'},
            request=request,
        )
    elif entry_type == "dir":
        entry["target_url"] = reverse(
            "api-1-directory",
            url_args={"sha1_git": entry["target"]},
            request=request,
        )
    elif entry_type == "rev":
        entry["target_url"] = reverse(
            "api-1-revision",
            url_args={"sha1_git": entry["target"]},
            request=request,
        )
    assert enriched == entry
def test_enrich_content_without_hashes():
    """Without any known hash there is nothing to enrich."""
    assert utils.enrich_content({"id": "123"}) == {"id": "123"}
@given(content())
def test_enrich_content_with_hashes(api_request_factory, content):
    """Each hash algorithm yields the four content sub-resource URLs."""
    for algo in DEFAULT_ALGORITHMS:
        expected = dict(content)
        query_string = "%s:%s" % (algo, expected[algo])
        request = api_request_factory.get(
            reverse("api-1-content", url_args={"q": query_string})
        )
        enriched = utils.enrich_content(
            expected, query_string=query_string, request=request
        )
        for field, view_name in (
            ("data_url", "api-1-content-raw"),
            ("filetype_url", "api-1-content-filetype"),
            ("language_url", "api-1-content-language"),
            ("license_url", "api-1-content-license"),
        ):
            expected[field] = reverse(
                view_name, url_args={"q": query_string}, request=request
            )
        assert enriched == expected
@given(content())
def test_enrich_content_with_hashes_and_top_level_url(api_request_factory, content):
    """With top_url=True the content's own URL is also included."""
    for algo in DEFAULT_ALGORITHMS:
        expected = dict(content)
        query_string = "%s:%s" % (algo, expected[algo])
        request = api_request_factory.get(
            reverse("api-1-content", url_args={"q": query_string})
        )
        enriched = utils.enrich_content(
            expected, query_string=query_string, top_url=True, request=request
        )
        for field, view_name in (
            ("content_url", "api-1-content"),
            ("data_url", "api-1-content-raw"),
            ("filetype_url", "api-1-content-filetype"),
            ("language_url", "api-1-content-language"),
            ("license_url", "api-1-content-license"),
        ):
            expected[field] = reverse(
                view_name, url_args={"q": query_string}, request=request
            )
        assert enriched == expected
@given(revision())
def test_enrich_revision_without_children_or_parent(
    api_request_factory, archive_data, revision
):
    """Without parents/children only url, history_url and directory_url appear."""
    data = archive_data.revision_get(revision)
    del data["parents"]
    request = api_request_factory.get(
        reverse("api-1-revision", url_args={"sha1_git": revision})
    )
    enriched = utils.enrich_revision(data, request)
    data["url"] = reverse(
        "api-1-revision", url_args={"sha1_git": revision}, request=request
    )
    data["history_url"] = reverse(
        "api-1-revision-log", url_args={"sha1_git": revision}, request=request
    )
    data["directory_url"] = reverse(
        "api-1-directory",
        url_args={"sha1_git": data["directory"]},
        request=request,
    )
    assert enriched == data
@given(revision(), revision(), revision())
def test_enrich_revision_with_children_and_parent_no_dir(
    api_request_factory, archive_data, revision, parent_revision, child_revision
):
    """Parents become id/url dicts and children become URLs; no directory_url
    is added when the 'directory' field is absent."""
    data = archive_data.revision_get(revision)
    del data["directory"]
    data["parents"] = data["parents"] + (parent_revision,)
    data["children"] = child_revision
    request = api_request_factory.get(
        reverse("api-1-revision", url_args={"sha1_git": revision})
    )
    enriched = utils.enrich_revision(data, request)
    data["url"] = reverse(
        "api-1-revision", url_args={"sha1_git": revision}, request=request
    )
    data["history_url"] = reverse(
        "api-1-revision-log", url_args={"sha1_git": revision}, request=request
    )
    data["parents"] = tuple(
        {
            "id": parent["id"],
            "url": reverse(
                "api-1-revision", url_args={"sha1_git": parent["id"]}, request=request
            ),
        }
        for parent in data["parents"]
    )
    data["children_urls"] = [
        reverse(
            "api-1-revision", url_args={"sha1_git": child_revision}, request=request
        )
    ]
    assert enriched == data
@given(revision(), revision(), revision())
def test_enrich_revision_no_context(
    api_request_factory, revision, parent_revision, child_revision
):
    """enrich_revision on a minimal, hand-built revision dict (no archive
    lookup) must add url/history_url and rewrite parents and children into
    their link representations."""
    revision_data = {
        "id": revision,
        "parents": [parent_revision],
        "children": [child_revision],
    }
    url = reverse("api-1-revision", url_args={"sha1_git": revision})
    request = api_request_factory.get(url)
    actual_revision = utils.enrich_revision(revision_data, request)
    revision_data["url"] = reverse(
        "api-1-revision", url_args={"sha1_git": revision}, request=request
    )
    revision_data["history_url"] = reverse(
        "api-1-revision-log", url_args={"sha1_git": revision}, request=request
    )
    # BUG FIX: the previous code wrapped the expected parent entry in
    # ``tuple({...})``, which iterates the dict and yields its *keys*
    # ("id", "url") rather than a tuple containing the dict.  Build an
    # explicit one-element tuple instead, matching the sibling tests.
    revision_data["parents"] = (
        {
            "id": parent_revision,
            "url": reverse(
                "api-1-revision",
                url_args={"sha1_git": parent_revision},
                request=request,
            ),
        },
    )
    revision_data["children_urls"] = [
        reverse(
            "api-1-revision", url_args={"sha1_git": child_revision}, request=request
        )
    ]
    assert actual_revision == revision_data
@given(revision(), revision(), revision())
def test_enrich_revision_with_no_message(
    api_request_factory, archive_data, revision, parent_revision, child_revision
):
    # A revision whose message is None must still get all its URL fields;
    # the expected payload below contains no "message_url", so the final
    # equality also checks that none was added.
    revision_data = archive_data.revision_get(revision)
    revision_data["message"] = None
    revision_data["parents"] = revision_data["parents"] + (parent_revision,)
    revision_data["children"] = child_revision
    url = reverse("api-1-revision", url_args={"sha1_git": revision})
    request = api_request_factory.get(url)
    actual_revision = utils.enrich_revision(revision_data, request)
    # Build the expected payload on the same dict that was passed in.
    revision_data["url"] = reverse(
        "api-1-revision", url_args={"sha1_git": revision}, request=request
    )
    revision_data["directory_url"] = reverse(
        "api-1-directory",
        url_args={"sha1_git": revision_data["directory"]},
        request=request,
    )
    revision_data["history_url"] = reverse(
        "api-1-revision-log", url_args={"sha1_git": revision}, request=request
    )
    # Each parent is expected as an {id, url} mapping.
    revision_data["parents"] = tuple(
        {
            "id": p["id"],
            "url": reverse(
                "api-1-revision", url_args={"sha1_git": p["id"]}, request=request
            ),
        }
        for p in revision_data["parents"]
    )
    revision_data["children_urls"] = [
        reverse(
            "api-1-revision", url_args={"sha1_git": child_revision}, request=request
        )
    ]
    assert actual_revision == revision_data
@given(revision(), revision(), revision())
def test_enrich_revision_with_invalid_message(
    api_request_factory, archive_data, revision, parent_revision, child_revision
):
    # When the message could not be decoded ("message" is listed in
    # decoding_failures), enrich_revision is expected to expose a
    # message_url pointing at the raw-message endpoint.
    revision_data = archive_data.revision_get(revision)
    revision_data["decoding_failures"] = ["message"]
    revision_data["parents"] = revision_data["parents"] + (parent_revision,)
    revision_data["children"] = child_revision
    url = reverse("api-1-revision", url_args={"sha1_git": revision})
    request = api_request_factory.get(url)
    actual_revision = utils.enrich_revision(revision_data, request)
    # Build the expected payload on the same dict that was passed in.
    revision_data["url"] = reverse(
        "api-1-revision", url_args={"sha1_git": revision}, request=request
    )
    revision_data["message_url"] = reverse(
        "api-1-revision-raw-message", url_args={"sha1_git": revision}, request=request
    )
    revision_data["directory_url"] = reverse(
        "api-1-directory",
        url_args={"sha1_git": revision_data["directory"]},
        request=request,
    )
    revision_data["history_url"] = reverse(
        "api-1-revision-log", url_args={"sha1_git": revision}, request=request
    )
    # Each parent is expected as an {id, url} mapping.
    revision_data["parents"] = tuple(
        {
            "id": p["id"],
            "url": reverse(
                "api-1-revision", url_args={"sha1_git": p["id"]}, request=request
            ),
        }
        for p in revision_data["parents"]
    )
    revision_data["children_urls"] = [
        reverse(
            "api-1-revision", url_args={"sha1_git": child_revision}, request=request
        )
    ]
    assert actual_revision == revision_data
@given(snapshot())
def test_enrich_snapshot(api_request_factory, archive_data, snapshot):
    """enrich_snapshot adds a target_url to every branch, with alias branches
    borrowing the URL of the branch they resolve to."""
    snapshot_data = archive_data.snapshot_get(snapshot)
    request = api_request_factory.get(
        reverse("api-1-snapshot", url_args={"snapshot_id": snapshot})
    )

    actual_snapshot = utils.enrich_snapshot(snapshot_data, request)

    branches = snapshot_data["branches"]
    # First pass: branches with a direct target get a URL to the matching
    # API endpoint.
    for branch in branches.values():
        target_type = branch["target_type"]
        if target_type in ("directory", "revision", "release"):
            branch["target_url"] = reverse(
                f"api-1-{target_type}",
                url_args={"sha1_git": branch["target"]},
                request=request,
            )
        elif target_type == "content":
            branch["target_url"] = reverse(
                "api-1-content",
                url_args={"q": f'sha1_git:{branch["target"]}'},
                request=request,
            )
    # Second pass (after every direct URL exists): aliases copy the URL of
    # their resolved target branch.
    for branch in branches.values():
        if branch["target_type"] == "alias":
            resolved = resolve_branch_alias(snapshot_data, branch)
            branch["target_url"] = resolved["target_url"]
    assert actual_snapshot == snapshot_data
@given(origin())
def test_enrich_origin(api_request_factory, origin):
    """enrich_origin adds an origin_visits_url to an origin dict."""
    request = api_request_factory.get(
        reverse("api-1-origin", url_args={"origin_url": origin["url"]})
    )
    origin_data = {"url": origin["url"]}

    actual_origin = utils.enrich_origin(origin_data, request)

    # Expected: the same dict, augmented with the visits listing URL.
    origin_data["origin_visits_url"] = reverse(
        "api-1-origin-visits", url_args={"origin_url": origin["url"]}, request=request
    )
    assert actual_origin == origin_data
@given(origin())
def test_enrich_origin_search_result(api_request_factory, origin):
    """enrich_origin_search_result adds origin_visits_url to each origin of a
    search-result page while leaving the page cursor untouched."""
    request = api_request_factory.get(
        reverse("api-1-origin-search", url_args={"url_pattern": origin["url"]})
    )
    visits_url = reverse(
        "api-1-origin-visits", url_args={"origin_url": origin["url"]}, request=request
    )

    # A one-origin result page with no continuation cursor.
    search_result = ([{"url": origin["url"]}], None)
    expected_result = (
        [{"url": origin["url"], "origin_visits_url": visits_url}],
        None,
    )

    actual_result = utils.enrich_origin_search_result(search_result, request=request)
    assert actual_result == expected_result
@given(origin())
def test_enrich_origin_visit(api_request_factory, origin):
    # Exercise enrich_origin_visit on a randomly chosen visit of the origin.
    origin_visit = random.choice(get_origin_visits(origin))
    url = reverse(
        "api-1-origin-visit",
        url_args={"origin_url": origin["url"], "visit_id": origin_visit["visit"]},
    )
    request = api_request_factory.get(url)
    # Request both optional links so all three URL fields are produced.
    actual_origin_visit = utils.enrich_origin_visit(
        origin_visit,
        with_origin_link=True,
        with_origin_visit_link=True,
        request=request,
    )
    # Expected payload: the visit augmented with origin, visit and snapshot
    # URLs, built on the same dict that was passed in.
    origin_visit["origin_url"] = reverse(
        "api-1-origin", url_args={"origin_url": origin["url"]}, request=request
    )
    origin_visit["origin_visit_url"] = reverse(
        "api-1-origin-visit",
        url_args={"origin_url": origin["url"], "visit_id": origin_visit["visit"]},
        request=request,
    )
    origin_visit["snapshot_url"] = reverse(
        "api-1-snapshot",
        url_args={"snapshot_id": origin_visit["snapshot"]},
        request=request,
    )
    assert actual_origin_visit == origin_visit
|
SoftwareHeritage/swh-web-ui
|
swh/web/tests/api/test_utils.py
|
Python
|
agpl-3.0
| 18,364
|
[
"VisIt"
] |
b069a07f6a04c4ed0c979a7394125c21383f00506ee82d1ca49bc12a48cf86b2
|
from __future__ import division, print_function, absolute_import
import math
import warnings
from collections import namedtuple
import numpy as np
from numpy import (isscalar, r_, log, around, unique, asarray,
zeros, arange, sort, amin, amax, any, atleast_1d,
sqrt, ceil, floor, array, poly1d, compress,
pi, exp, ravel, count_nonzero, sin, cos, arctan2, hypot)
from numpy.testing.decorators import setastest
from scipy._lib.six import string_types
from scipy import optimize
from scipy import special
from . import statlib
from . import stats
from .stats import find_repeats, _contains_nan
from .contingency import chi2_contingency
from . import distributions
from ._distn_infrastructure import rv_generic
# Public API of this module.
__all__ = ['mvsdist',
           'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
           'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
           'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
           'fligner', 'mood', 'wilcoxon', 'median_test',
           'pdf_fromgamma', 'circmean', 'circvar', 'circstd', 'anderson_ksamp'
           ]

# Lightweight result containers returned by `bayes_mvs`: each pairs a point
# estimate (`statistic`) with its confidence interval (`minmax`).
Mean = namedtuple('Mean', ('statistic', 'minmax'))
Variance = namedtuple('Variance', ('statistic', 'minmax'))
Std_dev = namedtuple('Std_dev', ('statistic', 'minmax'))
def bayes_mvs(data, alpha=0.90):
    r"""
    Bayesian confidence intervals for the mean, var, and std.

    Parameters
    ----------
    data : array_like
        Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`.
        Requires 2 or more data points.
    alpha : float, optional
        Probability that the returned confidence interval contains
        the true parameter.  Must satisfy ``0 < alpha < 1``.

    Returns
    -------
    mean_cntr, var_cntr, std_cntr : tuple
        The three results are for the mean, variance and standard deviation,
        respectively.  Each result is a tuple of the form::

            (center, (lower, upper))

        with `center` the mean of the conditional pdf of the value given the
        data, and `(lower, upper)` a confidence interval, centered on the
        median, containing the estimate to a probability ``alpha``.

    Raises
    ------
    ValueError
        If `alpha` is not strictly between 0 and 1, or if `data` has fewer
        than two points (raised by `mvsdist`).

    See Also
    --------
    mvsdist

    Notes
    -----
    Each tuple of mean, variance, and standard deviation estimates represent
    the (center, (lower, upper)) with center the mean of the conditional pdf
    of the value given the data and (lower, upper) is a confidence interval
    centered on the median, containing the estimate to a probability
    ``alpha``.

    Converts data to 1-D and assumes all data has the same mean and variance.
    Uses Jeffrey's prior for variance and std.

    Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))``

    References
    ----------
    T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
    standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278,
    2006.

    Examples
    --------
    >>> from scipy import stats
    >>> data = [6, 9, 12, 7, 8, 8, 13]
    >>> mean, var, std = stats.bayes_mvs(data)
    >>> mean
    Mean(statistic=9.0, minmax=(7.1036502226125329, 10.896349777387467))
    >>> var
    Variance(statistic=10.0, minmax=(3.176724206..., 24.45910382...))
    >>> std
    Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.9456146050146295))
    """
    # Validate `alpha` *before* doing any work: previously this check ran
    # only after `mvsdist` had already fit three distributions, so an
    # invalid probability failed late and wastefully.
    if alpha >= 1 or alpha <= 0:
        raise ValueError("0 < alpha < 1 is required, but alpha=%s was given."
                         % alpha)
    m, v, s = mvsdist(data)
    m_res = Mean(m.mean(), m.interval(alpha))
    v_res = Variance(v.mean(), v.interval(alpha))
    s_res = Std_dev(s.mean(), s.interval(alpha))
    return m_res, v_res, s_res
def mvsdist(data):
    """Return 'frozen' distributions describing the mean, variance and
    standard deviation of `data`.

    Parameters
    ----------
    data : array_like
        Input array; flattened with `ravel`.  At least two data points are
        required.

    Returns
    -------
    mdist : "frozen" distribution object
        Distribution object representing the mean of the data
    vdist : "frozen" distribution object
        Distribution object representing the variance of the data
    sdist : "frozen" distribution object
        Distribution object representing the standard deviation of the data

    See Also
    --------
    bayes_mvs

    Notes
    -----
    ``bayes_mvs(data)`` is equivalent to
    ``tuple((d.mean(), d.interval(0.90)) for d in mvsdist(data))``.

    References
    ----------
    T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
    standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278,
    2006.

    Examples
    --------
    >>> from scipy import stats
    >>> data = [6, 9, 12, 7, 8, 8, 13]
    >>> mean, var, std = stats.mvsdist(data)
    >>> mean.mean()
    9.0
    >>> mean.interval(0.95)
    (6.6120585482655692, 11.387941451734431)
    >>> mean.std()
    1.1952286093343936
    """
    sample = ravel(data)
    num = len(sample)
    if num < 2:
        raise ValueError("Need at least 2 data-points.")
    center = sample.mean()
    spread = sample.var()
    if num <= 1000:
        # Exact posteriors (Jeffreys prior): Student's t for the mean,
        # generalized gamma for the std, inverse gamma for the variance.
        half_dof = (num - 1) / 2.
        scale_fac = num * spread / 2.
        mdist = distributions.t(num - 1, loc=center,
                                scale=math.sqrt(spread / (num - 1)))
        sdist = distributions.gengamma(half_dof, -2, scale=math.sqrt(scale_fac))
        vdist = distributions.invgamma(half_dof, scale=scale_fac)
    else:
        # Large-sample normal approximations.
        mdist = distributions.norm(loc=center, scale=math.sqrt(spread / num))
        sdist = distributions.norm(loc=math.sqrt(spread),
                                   scale=math.sqrt(spread / (2. * num)))
        vdist = distributions.norm(loc=spread,
                                   scale=math.sqrt(2.0 / num) * spread)
    return mdist, vdist, sdist
def kstat(data, n=2):
    r"""Return the nth k-statistic (1<=n<=4 so far).

    The nth k-statistic ``k_n`` is the unique symmetric unbiased estimator of
    the nth cumulant ``kappa_n``.

    Parameters
    ----------
    data : array_like
        Input array.  n-D input gets flattened.
    n : int, {1, 2, 3, 4}, optional
        Order of the k-statistic.  Default is 2.

    Returns
    -------
    kstat : float
        The nth k-statistic.

    Raises
    ------
    ValueError
        If `n` is outside {1, 2, 3, 4} or `data` is empty.

    See Also
    --------
    kstatvar : Unbiased estimator of the variance of the k-statistic.
    moment : n-th central moment about the mean for a sample.

    Notes
    -----
    For a sample of size n:

    .. math::

        k_{1} = \mu
        k_{2} = \frac{n}{n-1} m_{2}
        k_{3} = \frac{ n^{2} } {(n-1) (n-2)} m_{3}
        k_{4} = \frac{ n^{2} [(n + 1)m_{4} - 3(n - 1) m^2_{2}]} {(n-1) (n-2) (n-3)}

    where :math:`\mu` is the sample mean, :math:`m_2` the sample variance and
    :math:`m_i` the i-th sample central moment.

    References
    ----------
    http://mathworld.wolfram.com/k-Statistic.html
    http://mathworld.wolfram.com/Cumulant.html
    """
    if n > 4 or n < 1:
        raise ValueError("k-statistics only supported for 1<=n<=4")
    order = int(n)
    flat = ravel(data)
    N = flat.size
    if N == 0:
        # raise ValueError on empty input
        raise ValueError("Data input must not be empty")
    if np.isnan(np.sum(flat)):
        # on nan input, return nan without warning
        return np.nan
    # Power sums S[k] = sum(x**k); index 0 is unused padding so that S[k]
    # lines up with the textbook formulas below.
    S = np.array([0.0] + [np.sum(flat**k, axis=0) for k in range(1, order + 1)])
    if order == 1:
        return S[1] * 1.0/N
    if order == 2:
        return (N*S[2] - S[1]**2.0) / (N*(N - 1.0))
    if order == 3:
        return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0))
    return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -
             4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) /
            (N*(N-1.0)*(N-2.0)*(N-3.0)))
def kstatvar(data, n=2):
    r"""Return an unbiased estimator of the variance of the k-statistic.

    See `kstat` for more details of the k-statistic.

    Parameters
    ----------
    data : array_like
        Input array.  n-D input gets flattened.
    n : int, {1, 2}, optional
        Order of the k-statistic whose variance is estimated.  Default is 2.

    Returns
    -------
    kstatvar : float
        The nth k-statistic variance.

    Raises
    ------
    ValueError
        If `n` is not 1 or 2.

    See Also
    --------
    kstat : the n-th k-statistic.
    moment : n-th central moment about the mean for a sample.

    Notes
    -----
    ``var(k_1) = kappa_2 / n`` (estimated here by ``k_2 / n``) and
    ``var(k_2) = (2 n k_2^2 + (n - 1) k_4) / (n (n + 1))``.
    """
    flat = ravel(data)
    N = len(flat)
    if n == 1:
        return kstat(flat, n=2) * 1.0/N
    if n == 2:
        k2 = kstat(flat, n=2)
        k4 = kstat(flat, n=4)
        return (2*N*k2**2 + (N-1)*k4) / (N*(N+1))
    raise ValueError("Only n=1 or n=2 supported.")
def _calc_uniform_order_statistic_medians(n):
"""
Approximations of uniform order statistic medians.
Parameters
----------
n : int
Sample size.
Returns
-------
v : 1d float array
Approximations of the order statistic medians.
References
----------
.. [1] James J. Filliben, "The Probability Plot Correlation Coefficient
Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
Examples
--------
Order statistics of the uniform distribution on the unit interval
are marginally distributed according to beta distributions.
The expectations of these order statistic are evenly spaced across
the interval, but the distributions are skewed in a way that
pushes the medians slightly towards the endpoints of the unit interval:
>>> n = 4
>>> k = np.arange(1, n+1)
>>> from scipy.stats import beta
>>> a = k
>>> b = n-k+1
>>> beta.mean(a, b)
array([ 0.2, 0.4, 0.6, 0.8])
>>> beta.median(a, b)
array([ 0.15910358, 0.38572757, 0.61427243, 0.84089642])
The Filliben approximation uses the exact medians of the smallest
and greatest order statistics, and the remaining medians are approximated
by points spread evenly across a sub-interval of the unit interval:
>>> from scipy.morestats import _calc_uniform_order_statistic_medians
>>> _calc_uniform_order_statistic_medians(n)
array([ 0.15910358, 0.38545246, 0.61454754, 0.84089642])
This plot shows the skewed distributions of the order statistics
of a sample of size four from a uniform distribution on the unit interval:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0.0, 1.0, num=50, endpoint=True)
>>> pdfs = [beta.pdf(x, a[i], b[i]) for i in range(n)]
>>> plt.figure()
>>> plt.plot(x, pdfs[0], x, pdfs[1], x, pdfs[2], x, pdfs[3])
"""
v = np.zeros(n, dtype=np.float64)
v[-1] = 0.5**(1.0 / n)
v[0] = 1 - v[-1]
i = np.arange(2, n)
v[1:-1] = (i - 0.3175) / (n + 0.365)
return v
def _parse_dist_kw(dist, enforce_subclass=True):
    """Normalize a `dist` keyword into a distribution object.

    Parameters
    ----------
    dist : str or stats.distributions instance
        Several functions take `dist` as a keyword, hence this utility
        function.  A string is looked up by name on `distributions`.
    enforce_subclass : bool, optional
        If True (default), `dist` needs to be a
        `_distn_infrastructure.rv_generic` instance (or a valid name).
        Set to False to let through objects that merely look like a
        distribution (for example, objects exposing a ``ppf`` method).

    Raises
    ------
    ValueError
        If a string does not name a known distribution, or (when
        `enforce_subclass` is True) `dist` is of an unsupported type.
    """
    if isinstance(dist, rv_generic):
        return dist
    if isinstance(dist, string_types):
        try:
            return getattr(distributions, dist)
        except AttributeError:
            raise ValueError("%s is not a valid distribution name" % dist)
    if enforce_subclass:
        msg = ("`dist` should be a stats.distributions instance or a string "
               "with the name of such a distribution.")
        raise ValueError(msg)
    return dist
def _add_axis_labels_title(plot, xlabel, ylabel, title):
"""Helper function to add axes labels and a title to stats plots"""
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title(title)
plot.set_xlabel(xlabel)
plot.set_ylabel(ylabel)
else:
# matplotlib.pyplot module
plot.title(title)
plot.xlabel(xlabel)
plot.ylabel(ylabel)
except:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
def probplot(x, sparams=(), dist='norm', fit=True, plot=None, rvalue=False):
    """
    Calculate quantiles for a probability plot, and optionally show the plot.

    Generates a probability plot of sample data against the quantiles of a
    specified theoretical distribution (the normal distribution by default).
    `probplot` optionally calculates a best-fit line for the data and plots
    the results using Matplotlib or a given plot function.

    Parameters
    ----------
    x : array_like
        Sample/response data from which `probplot` creates the plot.
    sparams : tuple, optional
        Distribution-specific shape parameters (shape parameters plus
        location and scale).
    dist : str or stats.distributions instance, optional
        Distribution or distribution function name. The default is 'norm'
        for a normal probability plot. Objects that look enough like a
        stats.distributions instance (i.e. they have a ``ppf`` method) are
        also accepted.
    fit : bool, optional
        Fit a least-squares regression (best-fit) line to the sample data if
        True (default).
    plot : object, optional
        If given, plots the quantiles and least squares fit.
        `plot` is an object that has to have methods "plot" and "text".
        The `matplotlib.pyplot` module or a Matplotlib Axes object can be
        used, or a custom object with the same methods.
        Default is None, which means that no plot is created.
    rvalue : bool, optional
        If `plot` is given and `rvalue` is True, the squared correlation
        coefficient of the fit is annotated on the plot.

    Returns
    -------
    (osm, osr) : tuple of ndarrays
        Tuple of theoretical quantiles (osm, or order statistic medians) and
        ordered responses (osr). `osr` is simply sorted input `x`.
        For details on how `osm` is calculated see the Notes section.
    (slope, intercept, r) : tuple of floats, optional
        Tuple containing the result of the least-squares fit, if that is
        performed by `probplot`. `r` is the square root of the coefficient
        of determination. If ``fit=False`` and ``plot=None``, this tuple is
        not returned.

    Notes
    -----
    Even if `plot` is given, the figure is not shown or saved by `probplot`;
    ``plt.show()`` or ``plt.savefig('figname.png')`` should be used after
    calling `probplot`.

    `probplot` generates a probability plot, which should not be confused
    with a Q-Q or a P-P plot. Statsmodels has more extensive functionality
    of this type, see ``statsmodels.api.ProbPlot``.

    The formula used for the theoretical quantiles (horizontal axis of the
    probability plot) is Filliben's estimate::

        quantiles = dist.ppf(val), for

                0.5**(1/n),                  for i = n
          val = (i - 0.3175) / (n + 0.365),  for i = 2, ..., n-1
                1 - 0.5**(1/n),              for i = 1

    where ``i`` indicates the i-th ordered value and ``n`` is the total
    number of values.

    Examples
    --------
    >>> from scipy import stats
    >>> import matplotlib.pyplot as plt
    >>> nsample = 100
    >>> np.random.seed(7654321)

    A t distribution with small degrees of freedom:

    >>> ax1 = plt.subplot(221)
    >>> x = stats.t.rvs(3, size=nsample)
    >>> res = stats.probplot(x, plot=plt)

    Produce a new figure with a loggamma distribution, using the ``dist``
    and ``sparams`` keywords:

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> x = stats.loggamma.rvs(c=2.5, size=500)
    >>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
    >>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5")

    Show the results with Matplotlib:

    >>> plt.show()
    """
    x = np.asarray(x)
    # A fit is needed either because the caller asked for one or because a
    # plot (which draws the fitted line) was requested.
    _perform_fit = fit or (plot is not None)
    if x.size == 0:
        # Degenerate input: return empty arrays (plus a NaN fit when one was
        # requested) instead of failing downstream.
        if _perform_fit:
            return (x, x), (np.nan, np.nan, 0.0)
        else:
            return x, x
    osm_uniform = _calc_uniform_order_statistic_medians(len(x))
    dist = _parse_dist_kw(dist, enforce_subclass=False)
    # Normalize `sparams` to a tuple: None -> (), scalar -> 1-tuple.
    if sparams is None:
        sparams = ()
    if isscalar(sparams):
        sparams = (sparams,)
    if not isinstance(sparams, tuple):
        sparams = tuple(sparams)
    # Theoretical quantiles: push uniform order-statistic medians through
    # the distribution's ppf (Filliben's estimate, see Notes).
    osm = dist.ppf(osm_uniform, *sparams)
    osr = sort(x)
    if _perform_fit:
        # perform a linear least squares fit.
        slope, intercept, r, prob, sterrest = stats.linregress(osm, osr)
    if plot is not None:
        # Draw both the sample points and the fitted line.
        plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-')
        _add_axis_labels_title(plot, xlabel='Theoretical quantiles',
                               ylabel='Ordered Values',
                               title='Probability Plot')
        # Add R^2 value to the plot as text
        if rvalue:
            xmin = amin(osm)
            xmax = amax(osm)
            ymin = amin(x)
            ymax = amax(x)
            posx = xmin + 0.70 * (xmax - xmin)
            posy = ymin + 0.01 * (ymax - ymin)
            plot.text(posx, posy, "$R^2=%1.4f$" % r**2)
    if fit:
        return (osm, osr), (slope, intercept, r)
    else:
        return osm, osr
def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'):
    """Calculate the shape parameter that maximizes the PPCC.

    The probability plot correlation coefficient (PPCC) plot can be used to
    determine the optimal shape parameter for a one-parameter family of
    distributions.  `ppcc_max` returns the shape parameter that would
    maximize the probability plot correlation coefficient for the given
    data to a one-parameter family of distributions.

    Parameters
    ----------
    x : array_like
        Input array.
    brack : tuple, optional
        Triple (a,b,c) where (a<b<c).  If bracket consists of two numbers
        (a, c) then they are assumed to be a starting interval for a
        downhill bracket search (see `scipy.optimize.brent`).
    dist : str or stats.distributions instance, optional
        Distribution or distribution function name.  Objects that look
        enough like a stats.distributions instance (i.e. they have a
        ``ppf`` method) are also accepted.  The default is
        ``'tukeylambda'``.

    Returns
    -------
    shape_value : float
        The shape parameter at which the probability plot correlation
        coefficient reaches its max value.

    See also
    --------
    ppcc_plot, probplot, boxcox

    Notes
    -----
    The brack keyword serves as a starting point which is useful in corner
    cases.  One can use a plot to obtain a rough visual estimate of the
    location of the maximum and start the search near it.

    References
    ----------
    .. [1] J.J. Filliben, "The Probability Plot Correlation Coefficient
           Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
    .. [2] http://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm
    """
    dist = _parse_dist_kw(dist)
    osm_uniform = _calc_uniform_order_statistic_medians(len(x))
    osr = sort(x)

    def _neg_corr(shape, medians, ordered, ppf):
        # Returns 1 - r so that *minimizing* this objective *maximizes* the
        # correlation r of the probability plot.
        quantiles = ppf(medians, shape)
        r, _ = stats.pearsonr(quantiles, ordered)
        return 1 - r

    return optimize.brent(_neg_corr, brack=brack,
                          args=(osm_uniform, osr, dist.ppf))
def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80):
    """
    Calculate and optionally plot probability plot correlation coefficient.

    The probability plot correlation coefficient (PPCC) plot can be used to
    determine the optimal shape parameter for a one-parameter family of
    distributions. It cannot be used for distributions without shape
    parameters (like the normal distribution) or with multiple shape
    parameters.

    By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A
    Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed
    distributions via an approximately normal one, and is therefore
    particularly useful in practice.

    Parameters
    ----------
    x : array_like
        Input array.
    a, b: scalar
        Lower and upper bounds of the shape parameter to use.
    dist : str or stats.distributions instance, optional
        Distribution or distribution function name. Objects that look
        enough like a stats.distributions instance (i.e. they have a
        ``ppf`` method) are also accepted. The default is
        ``'tukeylambda'``.
    plot : object, optional
        If given, plots PPCC against the shape parameter.
        `plot` is an object that has to have methods "plot" and "text".
        The `matplotlib.pyplot` module or a Matplotlib Axes object can be
        used, or a custom object with the same methods.
        Default is None, which means that no plot is created.
    N : int, optional
        Number of points on the horizontal axis (equally distributed from
        `a` to `b`).

    Returns
    -------
    svals : ndarray
        The shape values for which `ppcc` was calculated.
    ppcc : ndarray
        The calculated probability plot correlation coefficient values.

    Raises
    ------
    ValueError
        If ``b <= a``.

    See also
    --------
    ppcc_max, probplot, boxcox_normplot, tukeylambda

    References
    ----------
    J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
    Normality", Technometrics, Vol. 17, pp. 111-117, 1975.

    Examples
    --------
    >>> from scipy import stats
    >>> import matplotlib.pyplot as plt
    >>> np.random.seed(1234567)
    >>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
    >>> fig = plt.figure(figsize=(12, 4))
    >>> ax = fig.add_subplot(111)
    >>> res = stats.ppcc_plot(x, -5, 5, plot=ax)
    >>> ax.vlines(-0.7, 0, 1, colors='r', label='Expected shape value')
    >>> plt.show()
    """
    if b <= a:
        raise ValueError("`b` has to be larger than `a`.")
    # Evaluate the PPCC on an evenly spaced grid of N shape values.
    svals = np.linspace(a, b, num=N)
    ppcc = np.empty_like(svals)
    for k, sval in enumerate(svals):
        # probplot(..., fit=True) returns (osm, osr), (slope, intercept, r);
        # r2[-1] is the correlation coefficient r of the fit.
        _, r2 = probplot(x, sval, dist=dist, fit=True)
        ppcc[k] = r2[-1]
    if plot is not None:
        plot.plot(svals, ppcc, 'x')
        _add_axis_labels_title(plot, xlabel='Shape Values',
                               ylabel='Prob Plot Corr. Coef.',
                               title='(%s) PPCC Plot' % dist)
    return svals, ppcc
def boxcox_llf(lmb, data):
    r"""The boxcox log-likelihood function.

    Parameters
    ----------
    lmb : scalar
        Parameter for Box-Cox transformation.  See `boxcox` for details.
    data : array_like
        Data to calculate Box-Cox log-likelihood for.  If `data` is
        multi-dimensional, the log-likelihood is calculated along the first
        axis.

    Returns
    -------
    llf : float or ndarray
        Box-Cox log-likelihood of `data` given `lmb`.  A float for 1-D
        `data`, an array otherwise.  NaN for empty `data`.

    See Also
    --------
    boxcox, probplot, boxcox_normplot, boxcox_normmax

    Notes
    -----
    The Box-Cox log-likelihood function is defined here as

    .. math::

        llf = (\lambda - 1) \sum_i(\log(x_i)) -
              N/2 \log(\sum_i (y_i - \bar{y})^2 / N),

    where ``y`` is the Box-Cox transformed input data ``x``.

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(1245)
    >>> x = stats.loggamma.rvs(5, loc=10, size=1000)
    >>> stats.boxcox_llf(1, x)  # doctest: +SKIP
    """
    data = np.asarray(data)
    N = data.shape[0]
    if N == 0:
        return np.nan
    transformed = boxcox(data, lmb)
    mean_transformed = np.mean(transformed, axis=0)
    # First term: Jacobian of the transform; second term: Gaussian
    # log-likelihood of the transformed data around its mean.
    jacobian_term = (lmb - 1) * np.sum(np.log(data), axis=0)
    variance_term = np.sum((transformed - mean_transformed)**2. / N, axis=0)
    return jacobian_term - N / 2.0 * np.log(variance_term)
def _boxcox_conf_interval(x, lmax, alpha):
# Need to find the lambda for which
# f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
target = boxcox_llf(lmax, x) - fac
def rootfunc(lmbda, data, target):
return boxcox_llf(lmbda, data) - target
# Find positive endpoint of interval in which answer is to be found
newlm = lmax + 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm += 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))
# Now find negative interval in the same way
newlm = lmax - 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm -= 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))
return lmminus, lmplus
def boxcox(x, lmbda=None, alpha=None):
    r"""
    Return a positive dataset transformed by a Box-Cox power transformation.

    Parameters
    ----------
    x : ndarray
        Input array.  Should be 1-dimensional.
    lmbda : {None, scalar}, optional
        If `lmbda` is not None, do the transformation for that value.
        If `lmbda` is None, find the lambda that maximizes the log-likelihood
        function and return it as the second output argument.
    alpha : {None, float}, optional
        If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence
        interval for `lmbda` as the third output argument.
        Must be between 0.0 and 1.0.

    Returns
    -------
    boxcox : ndarray
        Box-Cox power transformed array.
    maxlog : float, optional
        If the `lmbda` parameter is None, the second returned argument is
        the lambda that maximizes the log-likelihood function.
    (min_ci, max_ci) : tuple of float, optional
        If `lmbda` parameter is None and ``alpha`` is not None, this returned
        tuple of floats represents the minimum and maximum confidence limits
        given ``alpha``.

    Raises
    ------
    ValueError
        If any entry of `x` is not strictly positive.

    See Also
    --------
    probplot, boxcox_normplot, boxcox_normmax, boxcox_llf

    Notes
    -----
    The Box-Cox transform is given by::

        y = (x**lmbda - 1) / lmbda,  for lmbda > 0
            log(x),                  for lmbda = 0

    `boxcox` requires the input data to be positive.  Sometimes a Box-Cox
    transformation provides a shift parameter to achieve this; `boxcox` does
    not.  Such a shift parameter is equivalent to adding a positive constant
    to `x` before calling `boxcox`.

    The confidence limits returned when ``alpha`` is provided give the
    interval where:

    .. math::

        llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1),

    with ``llf`` the log-likelihood function and :math:`\chi^2` the
    chi-squared function.

    References
    ----------
    G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
    Royal Statistical Society B, 26, 211-252 (1964).
    """
    x = np.asarray(x)
    if x.size == 0:
        # Nothing to transform (and no lambda can be estimated).
        return x

    # np.any avoids a Python-level iteration over the array (builtin `any`
    # would also raise on multi-dimensional input).
    if np.any(x <= 0):
        raise ValueError("Data must be positive.")

    if lmbda is not None:  # single transformation
        # Delegate the fixed-lambda transform to the C implementation.
        return special.boxcox(x, lmbda)

    # If lmbda=None, find the lmbda that maximizes the log-likelihood function.
    lmax = boxcox_normmax(x, method='mle')
    y = boxcox(x, lmax)

    if alpha is None:
        return y, lmax
    else:
        # Find confidence interval around the estimated lambda.
        interval = _boxcox_conf_interval(x, lmax, alpha)
        return y, lmax, interval
def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'):
    """Compute the optimal Box-Cox transform parameter for the input data.

    Parameters
    ----------
    x : array_like
        Input array.
    brack : 2-tuple, optional
        Starting interval for the downhill bracket search performed by
        `optimize.brent`.  Its exact value is in most cases not critical;
        the final result is allowed to lie outside this bracket.
    method : str, optional
        How the optimal transform parameter (the `boxcox` ``lmbda``) is
        determined:

        'pearsonr' (default)
            Maximize the Pearson correlation coefficient between
            ``y = boxcox(x)`` and the values expected for ``y`` if `x`
            were normally distributed.
        'mle'
            Maximize the log-likelihood `boxcox_llf`; this is the method
            used by `boxcox`.
        'all'
            Run all of the above and return every result, useful to
            compare the methods.

    Returns
    -------
    maxlog : float or ndarray
        The optimal transform parameter found; an array instead of a
        scalar when ``method='all'``.

    Raises
    ------
    ValueError
        If `method` is not one of the recognized names.

    See Also
    --------
    boxcox, boxcox_llf, boxcox_normplot
    """
    def _estimate_pearsonr(data, bracket):
        # Normal quantiles at the uniform order statistic medians give the
        # theoretical axis of a normal probability plot.
        medians = _calc_uniform_order_statistic_medians(len(data))
        theoretical = distributions.norm.ppf(medians)

        def _one_minus_r(lmbda, xvals, samps):
            # Correlation of the probability plot for the transformed
            # sample; return ``1 - r`` so that minimizing maximizes r.
            transformed = np.sort(boxcox(samps, lmbda))
            r, _ = stats.pearsonr(xvals, transformed)
            return 1 - r

        return optimize.brent(_one_minus_r, brack=bracket,
                              args=(theoretical, data))

    def _estimate_mle(data, bracket):
        def _negated_llf(lmb, samps):
            # brent minimizes, so negate the log-likelihood to maximize it.
            return -boxcox_llf(lmb, samps)

        return optimize.brent(_negated_llf, brack=bracket, args=(data,))

    def _estimate_all(data, bracket):
        return np.array([_estimate_pearsonr(data, bracket),
                         _estimate_mle(data, bracket)])

    dispatch = {'pearsonr': _estimate_pearsonr,
                'mle': _estimate_mle,
                'all': _estimate_all}
    if method not in dispatch:
        raise ValueError("Method %s not recognized." % method)

    return dispatch[method](x, brack)
def boxcox_normplot(x, la, lb, plot=None, N=80):
    """Compute parameters for a Box-Cox normality plot, optionally show it.

    A Box-Cox normality plot shows graphically what the best transformation
    parameter is to use in `boxcox` to obtain a distribution that is close
    to normal.

    Parameters
    ----------
    x : array_like
        Input array.
    la, lb : scalar
        The lower and upper bounds for the ``lmbda`` values to pass to
        `boxcox` for Box-Cox transformations.  These are also the limits of
        the horizontal axis of the plot if that is generated.
    plot : object, optional
        If given, plots the quantiles and least squares fit.
        `plot` is an object that has to have methods "plot" and "text".
        The `matplotlib.pyplot` module or a Matplotlib Axes object can be
        used, or a custom object with the same methods.
        Default is None, which means that no plot is created.
    N : int, optional
        Number of points on the horizontal axis (equally distributed from
        `la` to `lb`).

    Returns
    -------
    lmbdas : ndarray
        The ``lmbda`` values for which a Box-Cox transform was done.
    ppcc : ndarray
        Probability Plot Correlelation Coefficient, as obtained from
        `probplot` when fitting the Box-Cox transformed input `x` against a
        normal distribution.

    Raises
    ------
    ValueError
        If ``lb`` is not strictly greater than ``la``.

    See Also
    --------
    probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max

    Notes
    -----
    Even if `plot` is given, the figure is not shown or saved by
    `boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
    should be used after calling `probplot`.
    """
    x = np.asarray(x)
    if x.size == 0:
        return x

    if lb <= la:
        raise ValueError("`lb` has to be larger than `la`.")

    lmbdas = np.linspace(la, lb, num=N)
    ppcc = lmbdas * 0.0
    for i, val in enumerate(lmbdas):
        # Determine for each lmbda the correlation coefficient of
        # transformed x.
        z = boxcox(x, lmbda=val)
        _, r2 = probplot(z, dist='norm', fit=True)
        ppcc[i] = r2[-1]

    if plot is not None:
        plot.plot(lmbdas, ppcc, 'x')
        # Raw string for the label: '\l' is an invalid escape sequence in a
        # normal string literal (DeprecationWarning since Python 3.6).
        _add_axis_labels_title(plot, xlabel=r'$\lambda$',
                               ylabel='Prob Plot Corr. Coef.',
                               title='Box-Cox Normality Plot')

    return lmbdas, ppcc
def shapiro(x, a=None, reta=False):
    """
    Perform the Shapiro-Wilk test for normality.

    The Shapiro-Wilk test tests the null hypothesis that the data was
    drawn from a normal distribution.

    Parameters
    ----------
    x : array_like
        Array of sample data.
    a : array_like, optional
        Array of internal parameters used in the calculation.  If these
        are not given, they will be computed internally.  If x has length
        n, then a must have length n/2.
    reta : bool, optional
        Whether or not to return the internally computed a values.  The
        default is False.

    Returns
    -------
    W : float
        The test statistic.
    p-value : float
        The p-value for the hypothesis test.
    a : array_like, optional
        If `reta` is True, then these are the internally computed "a"
        values that may be passed into this function on future calls.

    Raises
    ------
    ValueError
        If ``x`` has fewer than 3 entries, or `a` has the wrong length.

    See Also
    --------
    anderson : The Anderson-Darling test for normality
    kstest : The Kolmogorov-Smirnov test for goodness of fit.

    Notes
    -----
    The algorithm used is AS R94 (Royston, 1995); censoring parameters as
    described there are not implemented.  For N > 5000 the W statistic is
    accurate but the p-value may not be.
    """
    if a is not None or reta:
        # Both optional parameters are on a deprecation path.
        warnings.warn("input parameters 'a' and 'reta' are scheduled to be "
                      "removed in version 0.18.0", FutureWarning)

    data = np.ravel(x)
    num_obs = len(data)
    if num_obs < 3:
        raise ValueError("Data must be at least length 3.")

    if a is None:
        a = zeros(num_obs, 'f')
        init = 0
    elif len(a) != num_obs // 2:
        raise ValueError("len(a) must equal len(x)/2")
    else:
        init = 1

    # The Fortran routine expects the sample in ascending order.
    a, w, pw, ifault = statlib.swilk(sort(data), a[:num_obs // 2], init)
    if ifault not in (0, 2):
        warnings.warn("Input data for shapiro has range zero. The results "
                      "may not be accurate.")
    if num_obs > 5000:
        warnings.warn("p-value may not be accurate for N > 5000.")

    return (w, pw, a) if reta else (w, pw)
# Asymptotic Anderson-Darling critical-value tables used by `anderson`;
# each array is ordered to match the significance levels documented there
# (finite-sample corrections are applied inside `anderson`).
# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
# Some Comparisons", Journal of the American Statistical
# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
# on the Empirical Distribution Function.", Biometrika,
# Vol. 66, Issue 3, Dec. 1979, pp 591-595.
_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])

# Named return type of `anderson`.
AndersonResult = namedtuple('AndersonResult', ('statistic',
                                               'critical_values',
                                               'significance_level'))
def anderson(x, dist='norm'):
    """
    Anderson-Darling test for data coming from a particular distribution

    The Anderson-Darling test is a modification of the Kolmogorov-
    Smirnov test `kstest` for the null hypothesis that a sample is
    drawn from a population that follows a particular distribution.
    For the Anderson-Darling test, the critical values depend on
    which distribution is being tested against.  This function works
    for normal, exponential, logistic, or Gumbel (Extreme Value
    Type I) distributions.

    Parameters
    ----------
    x : array_like
        array of sample data
    dist : {'norm','expon','logistic','gumbel','gumbel_l', gumbel_r',
        'extreme1'}, optional
        the type of distribution to test against.  The default is 'norm'
        and 'extreme1', 'gumbel_l' and 'gumbel' are synonyms.

    Returns
    -------
    statistic : float
        The Anderson-Darling test statistic
    critical_values : list
        The critical values for this distribution
    significance_level : list
        The significance levels for the corresponding critical values
        in percents.  The function returns critical values for a
        differing set of significance levels depending on the
        distribution that is being tested against.

    Notes
    -----
    Critical values provided are for the following significance levels:

    normal/exponenential
        15%, 10%, 5%, 2.5%, 1%
    logistic
        25%, 10%, 5%, 2.5%, 1%, 0.5%
    Gumbel
        25%, 10%, 5%, 2.5%, 1%

    If A2 is larger than these critical values then for the corresponding
    significance level, the null hypothesis that the data come from the
    chosen distribution can be rejected.

    References
    ----------
    .. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
    .. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
           Some Comparisons, Journal of the American Statistical Association,
           Vol. 69, pp. 730-737.
    .. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
           Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
           pp. 357-369.
    .. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
           Distribution, Biometrika, Vol. 64, pp. 583-588.
    .. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
           to Tests for Exponentiality , Technical Report No. 262,
           Department of Statistics, Stanford University, Stanford, CA.
    .. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
           Based on the Empirical Distribution Function, Biometrika, Vol. 66,
           pp. 591-595.
    """
    if dist not in ['norm', 'expon', 'gumbel', 'gumbel_l',
                    'gumbel_r', 'extreme1', 'logistic']:
        raise ValueError("Invalid distribution; dist must be 'norm', "
                         "'expon', 'gumbel', 'extreme1' or 'logistic'.")
    y = sort(x)
    xbar = np.mean(x, axis=0)
    N = len(y)
    # For each supported distribution: fit location/scale, standardize the
    # ordered data, and select the matching critical-value table with its
    # finite-sample correction.
    if dist == 'norm':
        # Standardize with the sample mean and ddof=1 standard deviation.
        s = np.std(x, ddof=1, axis=0)
        w = (y - xbar) / s
        logcdf = distributions.norm.logcdf(w)
        logsf = distributions.norm.logsf(w)
        sig = array([15, 10, 5, 2.5, 1])
        critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3)
    elif dist == 'expon':
        # The exponential scale estimate is the sample mean.
        w = y / xbar
        logcdf = distributions.expon.logcdf(w)
        logsf = distributions.expon.logsf(w)
        sig = array([15, 10, 5, 2.5, 1])
        critical = around(_Avals_expon / (1.0 + 0.6/N), 3)
    elif dist == 'logistic':
        def rootfunc(ab, xj, N):
            # Maximum-likelihood equations for the logistic location `a`
            # and scale `b`; solved numerically below.
            a, b = ab
            tmp = (xj - a) / b
            tmp2 = exp(tmp)
            val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N,
                   np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N]
            return array(val)

        # Initial guess: sample mean and ddof=1 standard deviation.
        sol0 = array([xbar, np.std(x, ddof=1, axis=0)])
        sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5)
        w = (y - sol[0]) / sol[1]
        logcdf = distributions.logistic.logcdf(w)
        logsf = distributions.logistic.logsf(w)
        sig = array([25, 10, 5, 2.5, 1, 0.5])
        critical = around(_Avals_logistic / (1.0 + 0.25/N), 3)
    elif dist == 'gumbel_r':
        xbar, s = distributions.gumbel_r.fit(x)
        w = (y - xbar) / s
        logcdf = distributions.gumbel_r.logcdf(w)
        logsf = distributions.gumbel_r.logsf(w)
        sig = array([25, 10, 5, 2.5, 1])
        critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
    else:  # (dist == 'gumbel') or (dist == 'gumbel_l') or (dist == 'extreme1')
        xbar, s = distributions.gumbel_l.fit(x)
        w = (y - xbar) / s
        logcdf = distributions.gumbel_l.logcdf(w)
        logsf = distributions.gumbel_l.logsf(w)
        sig = array([25, 10, 5, 2.5, 1])
        critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)

    # The A^2 statistic from the ordered log-CDF / reversed log-SF values.
    i = arange(1, N + 1)
    A2 = -N - np.sum((2*i - 1.0) / N * (logcdf + logsf[::-1]), axis=0)
    return AndersonResult(A2, critical, sig)
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 7 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2aKN : float
The A2aKN statistics of Scholz and Stephens 1987.
"""
A2akN = 0.
Z_ssorted_left = Z.searchsorted(Zstar, 'left')
if N == Zstar.size:
lj = 1.
else:
lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
Bj = Z_ssorted_left + lj / 2.
for i in arange(0, k):
s = np.sort(samples[i])
s_ssorted_right = s.searchsorted(Zstar, side='right')
Mij = s_ssorted_right.astype(float)
fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
Mij -= fij / 2.
inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.)
A2akN += inner.sum() / n[i]
A2akN *= (N - 1.) / N
return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 6 of Scholz & Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2KN : float
The A2KN statistics of Scholz and Stephens 1987.
"""
A2kN = 0.
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
'left')
Bj = lj.cumsum()
for i in arange(0, k):
s = np.sort(samples[i])
Mij = s.searchsorted(Zstar[:-1], side='right')
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
A2kN += inner.sum() / n[i]
return A2kN
# Named return type of `anderson_ksamp`.
Anderson_ksampResult = namedtuple('Anderson_ksampResult',
                                  ('statistic', 'critical_values',
                                   'significance_level'))
def anderson_ksamp(samples, midrank=True):
    """The Anderson-Darling test for k-samples.

    The k-sample Anderson-Darling test is a modification of the
    one-sample Anderson-Darling test.  It tests the null hypothesis
    that k-samples are drawn from the same population without having
    to specify the distribution function of that population.  The
    critical values depend on the number of samples.

    Parameters
    ----------
    samples : sequence of 1-D array_like
        Array of sample data in arrays.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed.  Default
        (True) is the midrank test applicable to continuous and
        discrete populations.  If False, the right side empirical
        distribution is used.

    Returns
    -------
    statistic : float
        Normalized k-sample Anderson-Darling test statistic.
    critical_values : array
        The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%.
    significance_level : float
        An approximate significance level at which the null hypothesis for
        the provided samples can be rejected.

    Raises
    ------
    ValueError
        If less than 2 samples are provided, a sample is empty, or no
        distinct observations are in the samples.

    See Also
    --------
    ks_2samp : 2 sample Kolmogorov-Smirnov test
    anderson : 1 sample Anderson-Darling test

    Notes
    -----
    [1]_ Defines three versions of the k-sample Anderson-Darling test:
    one for continuous distributions and two for discrete
    distributions, in which ties between samples may occur.  The
    default of this routine is to compute the version based on the
    midrank empirical distribution function.  This test is applicable
    to continuous and discrete data.  If midrank is set to False, the
    right side empirical distribution is used for a test for discrete
    data.  According to [1]_, the two discrete test statistics differ
    only slightly if a few collisions due to round-off errors occur in
    the test not adjusted for ties between samples.

    .. versionadded:: 0.14.0

    References
    ----------
    .. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
           Anderson-Darling Tests, Journal of the American Statistical
           Association, Vol. 82, pp. 918-924.

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(314159)

    The null hypothesis that the two random samples come from the same
    distribution can be rejected at the 5% level because the returned
    test value is greater than the critical value for 5% (1.961) but
    not at the 2.5% level.  The interpolation gives an approximate
    significance level of 3.1%:

    >>> stats.anderson_ksamp([np.random.normal(size=50),
    ... np.random.normal(loc=0.5, size=30)])
    (2.4615796189876105,
      array([ 0.325,  1.226,  1.961,  2.718,  3.752]),
      0.03134990135800783)

    The null hypothesis cannot be rejected for three samples from an
    identical distribution.  The approximate p-value (87%) has to be
    computed by extrapolation and may not be very accurate:

    >>> stats.anderson_ksamp([np.random.normal(size=50),
    ... np.random.normal(size=30), np.random.normal(size=20)])
    (-0.73091722665244196,
      array([ 0.44925884,  1.3052767 ,  1.9434184 ,  2.57696569,  3.41634856]),
      0.8789283903979661)
    """
    k = len(samples)
    if (k < 2):
        raise ValueError("anderson_ksamp needs at least two samples")

    samples = list(map(np.asarray, samples))
    # Pooled, sorted observations and their distinct values.
    Z = np.sort(np.hstack(samples))
    N = Z.size
    Zstar = np.unique(Z)
    if Zstar.size < 2:
        raise ValueError("anderson_ksamp needs more than one distinct "
                         "observation")

    n = np.array([sample.size for sample in samples])
    if any(n == 0):
        raise ValueError("anderson_ksamp encountered sample without "
                         "observations")

    # Raw statistic: tie-adjusted (midrank) or right-continuous version.
    if midrank:
        A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
    else:
        A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)

    # Null-hypothesis variance of A2kN (Scholz & Stephens 1987); the null
    # mean is m = k - 1, used to normalize the statistic below.
    H = (1. / n).sum()
    hs_cs = (1. / arange(N - 1, 1, -1)).cumsum()
    h = hs_cs[-1] + 1
    g = (hs_cs / arange(2, N)).sum()

    a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
    b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
    c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
    d = (2*h + 6)*k**2 - 4*h*k
    sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
    m = k - 1
    A2 = (A2kN - m) / math.sqrt(sigmasq)

    # The b_i values are the interpolation coefficients from Table 2
    # of Scholz and Stephens 1987
    b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326])
    b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822])
    b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396])
    critical = b0 + b1 / math.sqrt(m) + b2 / m

    # Quadratic fit of log(significance) against the critical values gives
    # an interpolated (or, outside the table, extrapolated) p-value.
    pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2)
    if A2 < critical.min() or A2 > critical.max():
        warnings.warn("approximate p-value will be computed by extrapolation")

    p = math.exp(np.polyval(pf, A2))
    return Anderson_ksampResult(A2, critical, p)
# Named return type of `ansari`.
AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue'))
def ansari(x, y):
    """
    Perform the Ansari-Bradley test for equal scale parameters

    The Ansari-Bradley test is a non-parametric test for the equality
    of the scale parameter of the distributions from which two
    samples were drawn.

    Parameters
    ----------
    x, y : array_like
        arrays of sample data

    Returns
    -------
    statistic : float
        The Ansari-Bradley test statistic
    pvalue : float
        The p-value of the hypothesis test

    See Also
    --------
    fligner : A non-parametric test for the equality of k variances
    mood : A non-parametric test for the equality of two scale parameters

    Notes
    -----
    The p-value given is exact when the sample sizes are both less than
    55 and there are no ties, otherwise a normal approximation for the
    p-value is used.

    References
    ----------
    .. [1] Sprent, Peter and N.C. Smeeton.  Applied nonparametric statistical
           methods.  3rd ed.  Chapman and Hall/CRC.  2001.  Section 5.8.2.
    """
    x, y = asarray(x), asarray(y)
    n = len(x)
    m = len(y)
    if m < 1:
        raise ValueError("Not enough other observations.")
    if n < 1:
        raise ValueError("Not enough test observations.")

    N = m + n
    xy = r_[x, y]  # combine
    rank = stats.rankdata(xy)
    # Ansari-Bradley scores: the ranks folded about the middle of the
    # combined ordering, so extreme observations get the smallest scores.
    symrank = amin(array((rank, N - rank + 1)), 0)
    AB = np.sum(symrank[:n], axis=0)
    uxy = unique(xy)
    repeats = (len(uxy) != len(xy))
    # Exact distribution is only tabulated for small, tie-free samples.
    exact = ((m < 55) and (n < 55) and not repeats)
    if repeats and (m < 55 or n < 55):
        warnings.warn("Ties preclude use of exact statistic.")
    if exact:
        # Exact two-sided p-value from the tabulated null frequencies of AB
        # (statlib.gscale); sum the appropriate tail and double it.
        astart, a1, ifault = statlib.gscale(n, m)
        ind = AB - astart
        total = np.sum(a1, axis=0)
        if ind < len(a1)/2.0:
            cind = int(ceil(ind))
            if ind == cind:
                pval = 2.0 * np.sum(a1[:cind+1], axis=0) / total
            else:
                pval = 2.0 * np.sum(a1[:cind], axis=0) / total
        else:
            find = int(floor(ind))
            if ind == floor(ind):
                pval = 2.0 * np.sum(a1[find:], axis=0) / total
            else:
                pval = 2.0 * np.sum(a1[find+1:], axis=0) / total
        return AnsariResult(AB, min(1.0, pval))

    # otherwise compute normal approximation
    if N % 2:  # N odd
        mnAB = n * (N+1.0)**2 / 4.0 / N
        varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2)
    else:
        mnAB = n * (N+2.0) / 4.0
        varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0)
    if repeats:  # adjust variance estimates
        # compute np.sum(tj * rj**2,axis=0)
        fac = np.sum(symrank**2, axis=0)
        if N % 2:  # N odd
            varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1))
        else:  # N even
            varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1))

    # Two-sided p-value from the standard normal approximation.
    z = (AB - mnAB) / sqrt(varAB)
    pval = distributions.norm.sf(abs(z)) * 2.0
    return AnsariResult(AB, pval)
# Named return type of `bartlett`.
BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue'))
def bartlett(*args):
    """
    Perform Bartlett's test for equal variances

    Bartlett's test tests the null hypothesis that all input samples
    are from populations with equal variances.  For samples
    from significantly non-normal populations, Levene's test
    `levene` is more robust.

    Parameters
    ----------
    sample1, sample2,... : array_like
        arrays of sample data.  May be different lengths.

    Returns
    -------
    statistic : float
        The test statistic.
    pvalue : float
        The p-value of the test.

    Raises
    ------
    ValueError
        If fewer than two samples are given.

    See Also
    --------
    fligner : A non-parametric test for the equality of k variances
    levene : A robust parametric test for equality of k variances

    Notes
    -----
    Conover et al. (1981) examine many of the existing parametric and
    nonparametric tests by extensive simulations and they conclude that the
    tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to
    be superior in terms of robustness of departures from normality and
    power.

    References
    ----------
    .. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
    .. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical
              Methods, Eighth Edition, Iowa State University Press.
    .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
           Hypothesis Testing based on Quadratic Inference Function. Technical
           Report #99-03, Center for Likelihood Studies, Pennsylvania State
           University.
    .. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical
           Tests. Proceedings of the Royal Society of London. Series A,
           Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282.
    """
    # Any empty sample makes the statistic undefined; report NaNs.
    for sample in args:
        if np.asanyarray(sample).size == 0:
            return BartlettResult(np.nan, np.nan)

    k = len(args)
    if k < 2:
        raise ValueError("Must enter at least two input sample vectors.")

    # Per-sample sizes and unbiased (ddof=1) variances.
    Ni = asarray([len(sample) for sample in args], dtype=float)
    ssq = asarray([np.var(sample, ddof=1) for sample in args], dtype='d')
    Ntot = Ni.sum()
    # Pooled variance across all samples.
    spsq = ((Ni - 1) * ssq).sum() / (1.0 * (Ntot - k))
    numer = (Ntot * 1.0 - k) * log(spsq) - ((Ni - 1.0) * log(ssq)).sum()
    # Bartlett's correction factor.
    denom = 1.0 + 1.0/(3*(k - 1)) * ((1.0/(Ni - 1.0)).sum() -
                                     1.0/(Ntot - k))
    T = numer / denom
    pval = distributions.chi2.sf(T, k - 1)  # 1 - cdf

    return BartlettResult(T, pval)
# Named return type of `levene`.
LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue'))
def levene(*args, **kwds):
    """
    Perform Levene test for equal variances.

    The Levene test tests the null hypothesis that all input samples
    are from populations with equal variances.  Levene's test is an
    alternative to Bartlett's test `bartlett` in the case where
    there are significant deviations from normality.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The sample data, possibly with different lengths
    center : {'mean', 'median', 'trimmed'}, optional
        Which function of the data to use in the test.  The default
        is 'median'.
    proportiontocut : float, optional
        When `center` is 'trimmed', this gives the proportion of data points
        to cut from each end. (See `scipy.stats.trim_mean`.)
        Default is 0.05.

    Returns
    -------
    statistic : float
        The test statistic.
    pvalue : float
        The p-value for the test.

    Raises
    ------
    TypeError
        If an unexpected keyword argument is given.
    ValueError
        If fewer than two samples are given, or `center` is invalid.

    Notes
    -----
    Three variations of Levene's test are possible.  The possibilities
    and their recommended usages are:

      * 'median' : Recommended for skewed (non-normal) distributions.
      * 'mean' : Recommended for symmetric, moderate-tailed distributions.
      * 'trimmed' : Recommended for heavy-tailed distributions.

    References
    ----------
    .. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
    .. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
           Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
           Stanford University Press, pp. 278-292.
    .. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
           Statistical Association, 69, 364-367
    """
    # Handle keyword arguments; anything besides the two known keywords
    # is an error.
    center = kwds.pop('center', 'median')
    proportiontocut = kwds.pop('proportiontocut', 0.05)
    if kwds:
        raise TypeError("levene() got an unexpected keyword "
                        "argument '%s'" % list(kwds)[0])

    k = len(args)
    if k < 2:
        raise ValueError("Must enter at least two input sample vectors.")
    if center not in ('mean', 'median', 'trimmed'):
        raise ValueError("Keyword argument <center> must be 'mean', 'median'"
                         " or 'trimmed'.")

    if center == 'trimmed':
        # Trimmed variant: trim each sorted sample, then use the mean.
        args = tuple(stats.trimboth(np.sort(sample), proportiontocut)
                     for sample in args)

    def _location(sample):
        # Central location used to form the absolute deviations Z_ij.
        if center == 'median':
            return np.median(sample, axis=0)
        return np.mean(sample, axis=0)

    Ni = asarray([len(sample) for sample in args], dtype=float)
    Yci = asarray([_location(sample) for sample in args], dtype='d')
    Ntot = Ni.sum()

    # Absolute deviations of each observation from its group's center.
    Zij = [abs(asarray(sample) - yc) for sample, yc in zip(args, Yci)]
    # Group means of the deviations and their grand (weighted) mean.
    Zbari = asarray([np.mean(z, axis=0) for z in Zij], dtype='d')
    Zbar = np.sum(Zbari * Ni, axis=0) / Ntot

    numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0)

    # Within-group variability of the deviations.
    dvar = 0.0
    for z, zbar in zip(Zij, Zbari):
        dvar += np.sum((z - zbar)**2, axis=0)
    denom = (k - 1.0) * dvar

    W = numer / denom
    pval = distributions.f.sf(W, k - 1, Ntot - k)  # 1 - cdf
    return LeveneResult(W, pval)
@setastest(False)
def binom_test(x, n=None, p=0.5, alternative='two-sided'):
    """
    Perform a test that the probability of success is p.

    This is an exact, two-sided test of the null hypothesis
    that the probability of success in a Bernoulli experiment
    is `p`.

    Parameters
    ----------
    x : integer or array_like
        the number of successes, or if x has length 2, it is the
        number of successes and the number of failures.
    n : integer
        the number of trials.  This is ignored if x gives both the
        number of successes and failures
    p : float, optional
        The hypothesized probability of success.  0 <= p <= 1. The
        default value is p = 0.5
    alternative : {'two-sided', 'greater', 'less'}, optional
        Indicates the alternative hypothesis. The default value is
        'two-sided'.

    Returns
    -------
    p-value : float
        The p-value of the hypothesis test

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Binomial_test
    """
    # `x` is either the success count or the pair (successes, failures).
    x = atleast_1d(x).astype(np.integer)
    if len(x) == 2:
        # Both counts given: the total number of trials is their sum and
        # any explicit `n` argument is ignored.
        n = x[1] + x[0]
        x = x[0]
    elif len(x) == 1:
        x = x[0]
        if n is None or n < x:
            raise ValueError("n must be >= x")
        n = np.int_(n)
    else:
        raise ValueError("Incorrect length for x.")

    if (p > 1.0) or (p < 0.0):
        raise ValueError("p must be in range [0,1]")

    if alternative not in ('two-sided', 'less', 'greater'):
        raise ValueError("alternative not recognized\n"
                         "should be 'two-sided', 'less' or 'greater'")

    # One-sided alternatives are direct tail probabilities.
    if alternative == 'less':
        pval = distributions.binom.cdf(x, n, p)
        return pval

    if alternative == 'greater':
        pval = distributions.binom.sf(x-1, n, p)
        return pval

    # if alternative was neither 'less' nor 'greater', then it's 'two-sided'
    d = distributions.binom.pmf(x, n, p)
    # Small relative tolerance so outcomes with pmf numerically equal to d
    # are counted as "at least as extreme".
    rerr = 1 + 1e-7
    if x == p * n:
        # special case as shortcut, would also be handled by `else` below
        pval = 1.
    elif x < p * n:
        # Observed count below the mean: add the probability of the
        # opposite-tail outcomes whose pmf does not exceed d.
        i = np.arange(np.ceil(p * n), n+1)
        y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
        pval = (distributions.binom.cdf(x, n, p) +
                distributions.binom.sf(n - y, n, p))
    else:
        # Observed count above the mean: mirror-image of the branch above.
        i = np.arange(np.floor(p*n) + 1)
        y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
        pval = (distributions.binom.cdf(y-1, n, p) +
                distributions.binom.sf(x-1, n, p))

    return min(1.0, pval)
def _apply_func(x, g, func):
# g is list of indices into x
# separating x into different groups
# func should be applied over the groups
g = unique(r_[0, g, len(x)])
output = []
for k in range(len(g) - 1):
output.append(func(x[g[k]:g[k+1]]))
return asarray(output)
FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue'))


def fligner(*args, **kwds):
    """
    Perform Fligner-Killeen test for equality of variance.

    Fligner's test tests the null hypothesis that all input samples
    are from populations with equal variances.  Fligner-Killeen's test is
    distribution free when populations are identical [2]_.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        Arrays of sample data.  Need not be the same length.
    center : {'mean', 'median', 'trimmed'}, optional
        Keyword argument controlling which function of the data is used in
        computing the test statistic.  The default is 'median'.
    proportiontocut : float, optional
        When `center` is 'trimmed', this gives the proportion of data points
        to cut from each end. (See `scipy.stats.trim_mean`.)
        Default is 0.05.

    Returns
    -------
    statistic : float
        The test statistic.
    pvalue : float
        The p-value for the hypothesis test.

    See Also
    --------
    bartlett : A parametric test for equality of k variances in normal samples
    levene : A robust parametric test for equality of k variances

    Notes
    -----
    As with Levene's test there are three variants of Fligner's test that
    differ by the measure of central tendency used in the test.  See `levene`
    for more information.

    Conover et al. (1981) examine many of the existing parametric and
    nonparametric tests by extensive simulations and they conclude that the
    tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be
    superior in terms of robustness of departures from normality and power [3]_.

    References
    ----------
    .. [1] http://www.stat.psu.edu/~bgl/center/tr/TR993.ps
    .. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample
           tests for scale. 'Journal of the American Statistical Association.'
           71(353), 210-213.
    .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
           Hypothesis Testing based on Quadratic Inference Function. Technical
           Report #99-03, Center for Likelihood Studies, Pennsylvania State
           University.
    .. [4] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A
           comparative study of tests for homogeneity of variances, with
           applications to the outer continental shelf biding data.
           Technometrics, 23(4), 351-361.

    """
    # Handle empty input: the statistic is undefined for an empty sample.
    for a in args:
        if np.asanyarray(a).size == 0:
            return FlignerResult(np.nan, np.nan)

    # Handle keyword arguments by hand since the signature is (*args, **kwds).
    center = 'median'
    proportiontocut = 0.05
    for kw, value in kwds.items():
        if kw not in ['center', 'proportiontocut']:
            raise TypeError("fligner() got an unexpected keyword "
                            "argument '%s'" % kw)
        if kw == 'center':
            center = value
        else:
            proportiontocut = value

    k = len(args)
    if k < 2:
        raise ValueError("Must enter at least two input sample vectors.")

    if center not in ['mean', 'median', 'trimmed']:
        raise ValueError("Keyword argument <center> must be 'mean', 'median'"
                         " or 'trimmed'.")

    # Choose the estimator of central tendency used to center each sample.
    # 'trimmed' trims each sample first and then uses the mean.
    if center == 'median':
        func = lambda x: np.median(x, axis=0)
    elif center == 'mean':
        func = lambda x: np.mean(x, axis=0)
    else:  # center == 'trimmed'
        args = tuple(stats.trimboth(arg, proportiontocut) for arg in args)
        func = lambda x: np.mean(x, axis=0)

    Ni = asarray([len(args[j]) for j in range(k)])    # per-sample sizes
    Yci = asarray([func(args[j]) for j in range(k)])  # per-sample centers
    Ntot = np.sum(Ni, axis=0)
    # compute Zij's: absolute deviations of each observation from its
    # sample's center.
    Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)]
    allZij = []
    g = [0]  # boundary indices of each sample within the pooled deviations
    for i in range(k):
        allZij.extend(list(Zij[i]))
        g.append(len(allZij))

    # Fligner-Killeen scores: ranks of the pooled deviations mapped through
    # the standard-normal quantile function.
    ranks = stats.rankdata(allZij)
    a = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5)

    # compute Aibar: the mean score within each sample
    Aibar = _apply_func(a, g, np.sum) / Ni
    anbar = np.mean(a, axis=0)
    varsq = np.var(a, axis=0, ddof=1)
    # Chi-squared statistic with k - 1 degrees of freedom.
    Xsq = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq
    pval = distributions.chi2.sf(Xsq, k - 1)  # 1 - cdf
    return FlignerResult(Xsq, pval)
def mood(x, y, axis=0):
    """
    Perform Mood's test for equal scale parameters.

    Mood's two-sample test for scale parameters is a non-parametric
    test for the null hypothesis that two samples are drawn from the
    same distribution with the same scale parameter.

    Parameters
    ----------
    x, y : array_like
        Arrays of sample data.
    axis : int, optional
        The axis along which the samples are tested.  `x` and `y` can be of
        different length along `axis`.
        If `axis` is None, `x` and `y` are flattened and the test is done on
        all values in the flattened arrays.

    Returns
    -------
    z : scalar or ndarray
        The z-score for the hypothesis test.  For 1-D inputs a scalar is
        returned.
    p-value : scalar ndarray
        The p-value for the hypothesis test.

    See Also
    --------
    fligner : A non-parametric test for the equality of k variances
    ansari : A non-parametric test for the equality of 2 variances
    bartlett : A parametric test for equality of k variances in normal samples
    levene : A parametric test for equality of k variances

    Notes
    -----
    The data are assumed to be drawn from probability distributions ``f(x)``
    and ``f(x/s) / s`` respectively, for some probability density function f.
    The null hypothesis is that ``s == 1``.

    For multi-dimensional arrays, if the inputs are of shapes
    ``(n0, n1, n2, n3)``  and ``(n0, m1, n2, n3)``, then if ``axis=1``, the
    resulting z and p values will have shape ``(n0, n2, n3)``.  Note that
    ``n1`` and ``m1`` don't have to be equal, but the other dimensions do.

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> x2 = np.random.randn(2, 45, 6, 7)
    >>> x1 = np.random.randn(2, 30, 6, 7)
    >>> z, p = stats.mood(x1, x2, axis=1)
    >>> p.shape
    (2, 6, 7)

    Find the number of points where the difference in scale is not significant:

    >>> (p > 0.1).sum()
    74

    Perform the test with different scales:

    >>> x1 = np.random.randn(2, 30)
    >>> x2 = np.random.randn(2, 35) * 10.0
    >>> stats.mood(x1, x2, axis=1)
    (array([-5.7178125 , -5.25342163]), array([  1.07904114e-08,   1.49299218e-07]))

    """
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)

    if axis is None:
        x = x.flatten()
        y = y.flatten()
        axis = 0

    # Determine shape of the result arrays (all axes except `axis`).
    res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])
    if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if
                                ax != axis])):
        raise ValueError("Dimensions of x and y on all axes except `axis` "
                         "should match")

    n = x.shape[axis]
    m = y.shape[axis]
    N = m + n
    if N < 3:
        raise ValueError("Not enough observations.")

    xy = np.concatenate((x, y), axis=axis)
    if axis != 0:
        # Move the tested axis to the front so it can be ranked columnwise.
        xy = np.rollaxis(xy, axis)

    # Collapse all remaining axes into one so each column can be ranked
    # independently.
    xy = xy.reshape(xy.shape[0], -1)

    # Generalized to the n-dimensional case by adding the axis argument, and
    # using for loops, since rankdata is not vectorized.  For improving
    # performance consider vectorizing rankdata function.
    all_ranks = np.zeros_like(xy)
    for j in range(xy.shape[1]):
        all_ranks[:, j] = stats.rankdata(xy[:, j])

    # Mood's statistic: sum of squared deviations of the x-sample ranks
    # from the mid-rank (N + 1) / 2.
    Ri = all_ranks[:n]
    M = np.sum((Ri - (N + 1.0) / 2)**2, axis=0)
    # Approx stat.
    mnM = n * (N * N - 1.0) / 12
    varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
    z = (M - mnM) / sqrt(varM)

    # sf for right tail, cdf for left tail.  Factor 2 for two-sidedness
    z_pos = z > 0
    pval = np.zeros_like(z)
    pval[z_pos] = 2 * distributions.norm.sf(z[z_pos])
    pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos])

    if res_shape == ():
        # Return scalars, not 0-D arrays
        z = z[0]
        pval = pval[0]
    else:
        z.shape = res_shape
        pval.shape = res_shape
    return z, pval
WilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue'))


def wilcoxon(x, y=None, zero_method="wilcox", correction=False):
    """Calculate the Wilcoxon signed-rank test.

    Tests the null hypothesis that two related paired samples come from
    the same distribution, i.e. that the distribution of the differences
    ``x - y`` is symmetric about zero.  This is a non-parametric version
    of the paired T-test.

    Parameters
    ----------
    x : array_like
        First set of measurements, or the differences themselves when `y`
        is omitted.
    y : array_like, optional
        Second set of measurements.
    zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
        How zero differences are handled: "wilcox" discards them,
        "pratt" keeps them in the ranking process (more conservative),
        "zsplit" splits the zero ranks between the positive and negative
        sums.
    correction : bool, optional
        If True, apply a 0.5 continuity correction toward the mean value
        when computing the z-statistic.  Default is False.

    Returns
    -------
    statistic : float
        The smaller of the sums of ranks of the differences above and
        below zero.
    pvalue : float
        The two-sided p-value for the test.

    Notes
    -----
    A normal approximation is used, so samples should be reasonably
    large; a typical rule is to require n > 20.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test

    """
    if zero_method not in ("wilcox", "pratt", "zsplit"):
        raise ValueError("Zero method should be either 'wilcox' "
                         "or 'pratt' or 'zsplit'")

    # Build the vector of paired differences.
    if y is None:
        diffs = asarray(x)
    else:
        x, y = map(asarray, (x, y))
        if len(x) != len(y):
            raise ValueError('Unequal N in wilcoxon. Aborting.')
        diffs = x - y

    if zero_method == "wilcox":
        # Wilcox treatment: drop zero differences before ranking.
        diffs = compress(np.not_equal(diffs, 0), diffs, axis=-1)

    count = len(diffs)
    if count < 10:
        warnings.warn("Warning: sample size too small for normal approximation.")

    ranks = stats.rankdata(abs(diffs))
    r_plus = np.sum(ranks * (diffs > 0), axis=0)
    r_minus = np.sum(ranks * (diffs < 0), axis=0)

    if zero_method == "zsplit":
        # Split the ranks of zero differences evenly between both sums.
        half_zero_rank = np.sum(ranks * (diffs == 0), axis=0) / 2.
        r_plus += half_zero_rank
        r_minus += half_zero_rank

    stat = min(r_plus, r_minus)
    expected = count * (count + 1.) * 0.25
    variance = count * (count + 1.) * (2. * count + 1.)

    if zero_method == "pratt":
        # Zero differences were ranked but are excluded from the tie
        # correction below.
        ranks = ranks[diffs != 0]

    _, repnum = find_repeats(ranks)
    if repnum.size != 0:
        # Correction for repeated elements.
        variance -= 0.5 * (repnum * (repnum * repnum - 1)).sum()

    std = sqrt(variance / 24)
    shift = 0.5 * int(bool(correction)) * np.sign(stat - expected)
    z = (stat - expected - shift) / std
    prob = 2. * distributions.norm.sf(abs(z))
    return WilcoxonResult(stat, prob)
@setastest(False)
def median_test(*args, **kwds):
    """
    Mood's median test.

    Test that two or more samples come from populations with the same median.

    Let ``n = len(args)`` be the number of samples.  The "grand median" of
    all the data is computed, and a contingency table is formed by
    classifying the values in each sample as being above or below the grand
    median.  The contingency table, along with `correction` and `lambda_`,
    are passed to `scipy.stats.chi2_contingency` to compute the test
    statistic and p-value.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The set of samples.  There must be at least two samples.
        Each sample must be a one-dimensional sequence containing at least
        one value.  The samples are not required to have the same length.
    ties : str, optional
        Determines how values equal to the grand median are classified in
        the contingency table.  The string must be one of::

            "below":
                Values equal to the grand median are counted as "below".
            "above":
                Values equal to the grand median are counted as "above".
            "ignore":
                Values equal to the grand median are not counted.

        The default is "below".
    correction : bool, optional
        If True, *and* there are just two samples, apply Yates' correction
        for continuity when computing the test statistic associated with
        the contingency table.  Default is True.
    lambda_ : float or str, optional.
        By default, the statistic computed in this test is Pearson's
        chi-squared statistic.  `lambda_` allows a statistic from the
        Cressie-Read power divergence family to be used instead.  See
        `power_divergence` for details.
        Default is 1 (Pearson's chi-squared statistic).
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    stat : float
        The test statistic.  The statistic that is returned is determined by
        `lambda_`.  The default is Pearson's chi-squared statistic.
    p : float
        The p-value of the test.
    m : float
        The grand median.
    table : ndarray
        The contingency table.  The shape of the table is (2, n), where
        n is the number of samples.  The first row holds the counts of the
        values above the grand median, and the second row holds the counts
        of the values below the grand median.  The table allows further
        analysis with, for example, `scipy.stats.chi2_contingency`, or with
        `scipy.stats.fisher_exact` if there are two samples, without having
        to recompute the table.  If ``nan_policy`` is "propagate" and there
        are nans in the input, the return value for ``table`` is ``None``.

    See Also
    --------
    kruskal : Compute the Kruskal-Wallis H-test for independent samples.
    mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.

    Notes
    -----
    .. versionadded:: 0.15.0

    References
    ----------
    .. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill
        (1950), pp. 394-399.
    .. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).
        See Sections 8.12 and 10.15.

    Examples
    --------
    A biologist runs an experiment in which there are three groups of plants.
    Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.
    Each plant produces a number of seeds.  The seed counts for each group
    are::

        Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49
        Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99
        Group 3:  0  3  9 22 23 25 25 33 34 34 40 45 46 48 62 67 84

    The following code applies Mood's median test to these samples.

    >>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]
    >>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]
    >>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]
    >>> from scipy.stats import median_test
    >>> stat, p, med, tbl = median_test(g1, g2, g3)

    The median is

    >>> med
    34.0

    and the contingency table is

    >>> tbl
    array([[ 5, 10,  7],
           [11,  5, 10]])

    `p` is too large to conclude that the medians are not the same:

    >>> p
    0.12609082774093244

    The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to
    `median_test`.

    >>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood")
    >>> p
    0.12224779737117837

    The median occurs several times in the data, so we'll get a different
    result if, for example, ``ties="above"`` is used:

    >>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above")
    >>> p
    0.063873276069553273

    >>> tbl
    array([[ 5, 11,  9],
           [11,  4,  8]])

    This example demonstrates that if the data set is not large and there
    are values equal to the median, the p-value can be sensitive to the
    choice of `ties`.

    """
    ties = kwds.pop('ties', 'below')
    correction = kwds.pop('correction', True)
    lambda_ = kwds.pop('lambda_', None)
    nan_policy = kwds.pop('nan_policy', 'propagate')

    if len(kwds) > 0:
        # Fetch an arbitrary unexpected keyword to report.  (Fixed for
        # Python 3, where dict views are not indexable: ``kwds.keys()[0]``
        # raised TypeError instead of reporting the bad keyword.)
        bad_kwd = next(iter(kwds))
        raise TypeError("median_test() got an unexpected keyword "
                        "argument %r" % bad_kwd)

    if len(args) < 2:
        raise ValueError('median_test requires two or more samples.')

    ties_options = ['below', 'above', 'ignore']
    if ties not in ties_options:
        raise ValueError("invalid 'ties' option '%s'; 'ties' must be one "
                         "of: %s" % (ties, str(ties_options)[1:-1]))

    data = [np.asarray(arg) for arg in args]

    # Validate the sizes and shapes of the arguments.
    for k, d in enumerate(data):
        if d.size == 0:
            raise ValueError("Sample %d is empty. All samples must "
                             "contain at least one value." % (k + 1))
        if d.ndim != 1:
            raise ValueError("Sample %d has %d dimensions.  All "
                             "samples must be one-dimensional sequences." %
                             (k + 1, d.ndim))

    cdata = np.concatenate(data)
    contains_nan, nan_policy = _contains_nan(cdata, nan_policy)
    if contains_nan and nan_policy == 'propagate':
        return np.nan, np.nan, np.nan, None

    if contains_nan:
        grand_median = np.median(cdata[~np.isnan(cdata)])
    else:
        grand_median = np.median(cdata)
    # When the minimum version of numpy supported by scipy is 1.9.0,
    # the above if/else statement can be replaced by the single line:
    #     grand_median = np.nanmedian(cdata)

    # Create the contingency table: row 0 counts values above the grand
    # median, row 1 counts values below; ties are assigned per `ties`.
    table = np.zeros((2, len(data)), dtype=np.int64)
    for k, sample in enumerate(data):
        sample = sample[~np.isnan(sample)]

        nabove = count_nonzero(sample > grand_median)
        nbelow = count_nonzero(sample < grand_median)
        nequal = sample.size - (nabove + nbelow)
        table[0, k] += nabove
        table[1, k] += nbelow
        if ties == "below":
            table[1, k] += nequal
        elif ties == "above":
            table[0, k] += nequal

    # Check that no row or column of the table is all zero.
    # Such a table can not be given to chi2_contingency, because it would have
    # a zero in the table of expected frequencies.
    rowsums = table.sum(axis=1)
    if rowsums[0] == 0:
        raise ValueError("All values are below the grand median (%r)." %
                         grand_median)
    if rowsums[1] == 0:
        raise ValueError("All values are above the grand median (%r)." %
                         grand_median)
    if ties == "ignore":
        # We already checked that each sample has at least one value, but it
        # is possible that all those values equal the grand median.  If `ties`
        # is "ignore", that would result in a column of zeros in `table`.  We
        # check for that case here.
        zero_cols = np.where((table == 0).all(axis=0))[0]
        if len(zero_cols) > 0:
            msg = ("All values in sample %d are equal to the grand "
                   "median (%r), so they are ignored, resulting in an "
                   "empty sample." % (zero_cols[0] + 1, grand_median))
            raise ValueError(msg)

    stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,
                                              correction=correction)
    return stat, p, grand_median, table
def _hermnorm(N):
# return the negatively normalized hermite polynomials up to order N-1
# (inclusive)
# using the recursive relationship
# p_n+1 = p_n(x)' - x*p_n(x)
# and p_0(x) = 1
plist = [None] * N
plist[0] = poly1d(1)
for n in range(1, N):
plist[n] = plist[n-1].deriv() - poly1d([1, 0]) * plist[n-1]
return plist
# Note: when removing pdf_fromgamma, also remove the _hermnorm support function
@np.deprecate(message="scipy.stats.pdf_fromgamma is deprecated in scipy 0.16.0 "
                      "in favour of statsmodels.distributions.ExpandedNormal.")
def pdf_fromgamma(g1, g2, g3=0.0, g4=None):
    # Build an approximate probability density as a Hermite-polynomial
    # expansion from the coefficients g1..g4.
    # NOTE(review): exact meaning of g1..g4 (skew/kurtosis-like moments) is
    # inferred from the expansion below -- confirm against the statsmodels
    # replacement named in the deprecation message.
    if g4 is None:
        g4 = 3 * g2**2  # default consistent with a normal-based relation

    sigsq = 1.0 / g2
    sig = sqrt(sigsq)
    mu = g1 * sig**3.0
    p12 = _hermnorm(13)
    # Rescale each polynomial by sigma**k.
    for k in range(13):
        p12[k] /= sig**k

    # Add all of the terms to polynomial
    totp = (p12[0] - g1/6.0*p12[3] +
            g2/24.0*p12[4] + g1**2/72.0 * p12[6] -
            g3/120.0*p12[5] - g1*g2/144.0*p12[7] - g1**3.0/1296.0*p12[9] +
            g4/720*p12[6] + (g2**2/1152.0 + g1*g3/720)*p12[8] +
            g1**2 * g2/1728.0*p12[10] + g1**4.0 / 31104.0*p12[12])
    # Final normalization
    totp = totp / sqrt(2*pi) / sig

    def thefunc(x):
        # Evaluate the expansion at the standardized coordinate, weighted
        # by the Gaussian kernel.
        xn = (x - mu) / sig
        return totp(xn) * exp(-xn**2 / 2.)
    return thefunc
def _circfuncs_common(samples, high, low):
samples = np.asarray(samples)
if samples.size == 0:
return np.nan, np.nan
ang = (samples - low)*2*pi / (high - low)
return samples, ang
def circmean(samples, high=2*pi, low=0, axis=None):
    """
    Compute the circular mean for samples in a range.

    Parameters
    ----------
    samples : array_like
        Input array.
    high : float or int, optional
        High boundary for circular mean range.  Default is ``2*pi``.
    low : float or int, optional
        Low boundary for circular mean range.  Default is 0.
    axis : int, optional
        Axis along which means are computed.  The default is to compute
        the mean of the flattened array.

    Returns
    -------
    circmean : float
        Circular mean.

    """
    samples, ang = _circfuncs_common(samples, high, low)
    # Mean direction of the unit vectors on the circle.
    sin_sum = sin(ang).sum(axis=axis)
    cos_sum = cos(ang).sum(axis=axis)
    mean_ang = arctan2(sin_sum, cos_sum)

    # Wrap negative angles into [0, 2*pi) before mapping back.
    negative = mean_ang < 0
    if negative.ndim > 0:
        mean_ang[negative] += 2*pi
    elif negative:
        mean_ang += 2*pi

    return mean_ang*(high - low)/2.0/pi + low
def circvar(samples, high=2*pi, low=0, axis=None):
    """
    Compute the circular variance for samples assumed to be in a range.

    Parameters
    ----------
    samples : array_like
        Input array.
    low : float or int, optional
        Low boundary for circular variance range.  Default is 0.
    high : float or int, optional
        High boundary for circular variance range.  Default is ``2*pi``.
    axis : int, optional
        Axis along which variances are computed.  The default is to compute
        the variance of the flattened array.

    Returns
    -------
    circvar : float
        Circular variance.

    Notes
    -----
    This uses a definition of circular variance that in the limit of small
    angles returns a number close to the 'linear' variance.

    """
    samples, ang = _circfuncs_common(samples, high, low)
    # Length of the mean resultant vector; shorter means more spread.
    sin_mean = sin(ang).mean(axis=axis)
    cos_mean = cos(ang).mean(axis=axis)
    resultant = hypot(sin_mean, cos_mean)
    return ((high - low)/2.0/pi)**2 * 2 * log(1/resultant)
def circstd(samples, high=2*pi, low=0, axis=None):
    """
    Compute the circular standard deviation for samples assumed to be in
    the range [low to high].

    Parameters
    ----------
    samples : array_like
        Input array.
    low : float or int, optional
        Low boundary for circular standard deviation range.  Default is 0.
    high : float or int, optional
        High boundary for circular standard deviation range.
        Default is ``2*pi``.
    axis : int, optional
        Axis along which standard deviations are computed.  The default is
        to compute the standard deviation of the flattened array.

    Returns
    -------
    circstd : float
        Circular standard deviation.

    Notes
    -----
    This uses a definition of circular standard deviation that in the limit
    of small angles returns a number close to the 'linear' standard
    deviation.

    """
    samples, ang = _circfuncs_common(samples, high, low)
    # Length of the mean resultant vector; shorter means more spread.
    sin_mean = sin(ang).mean(axis=axis)
    cos_mean = cos(ang).mean(axis=axis)
    resultant = hypot(sin_mean, cos_mean)
    return ((high - low)/2.0/pi) * sqrt(-2*log(resultant))
|
bkendzior/scipy
|
scipy/stats/morestats.py
|
Python
|
bsd-3-clause
| 95,258
|
[
"Gaussian"
] |
7c0d0dd04489459b91a1f3f75abb7a420a7299820a15fca8b905bfb7d276be1e
|
# Orca
#
# Copyright 2018-2019 Igalia, S.L.
#
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom braille generator for Chromium."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2018-2019 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
from orca import debug
from orca import orca_state
from orca.scripts import web
class BrailleGenerator(web.BrailleGenerator):
    """Custom braille generator for Chromium."""

    def __init__(self, script):
        super().__init__(script)

    def _generateLabelOrName(self, obj, **args):
        # For frames of documents without a URI, use the parent's label
        # instead, which eliminates including "untitled" in the frame name.
        if obj.getRole() != pyatspi.ROLE_FRAME:
            return super()._generateLabelOrName(obj)

        document = self._script.utilities.activeDocument(obj)
        if document and not self._script.utilities.documentFrameURI(document):
            return super()._generateLabelOrName(obj.parent)

        return super()._generateLabelOrName(obj)

    def generateBraille(self, obj, **args):
        if self._script.utilities.inDocumentContent(obj):
            return super().generateBraille(obj, **args)

        # Outside document content, present menu-like items with the menu
        # role so the braille output matches the spoken presentation.
        overridden = None
        if self._script.utilities.treatAsMenu(obj):
            msg = "CHROMIUM: HACK? Displaying menu item as menu %s" % obj
            debug.println(debug.LEVEL_INFO, msg, True)
            overridden = self._overrideRole(pyatspi.ROLE_MENU, args)

        result = super().generateBraille(obj, **args)
        if overridden is not None:
            self._restoreRole(overridden, args)

        return result
|
GNOME/orca
|
src/orca/scripts/toolkits/Chromium/braille_generator.py
|
Python
|
lgpl-2.1
| 2,248
|
[
"ORCA"
] |
63bb6e434f6d35e61267f51ddf539451061f72f013571a8d478e958465ff87ed
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
""" sextractor.py:
Classes to read SExtractor table format
Built on daophot.py:
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft (aldcroft@head.cfa.harvard.edu)
"""
import re
from . import core
class SExtractorHeader(core.BaseHeader):
    """Read the header from a file produced by SExtractor."""

    # Comment lines are '#' lines NOT followed by a column number.
    comment = r'^\s*#\s*\S\D.*'  # Find lines that don't have "# digit"

    def get_cols(self, lines):
        """
        Initialize the header Column objects from the table ``lines`` for a SExtractor
        header.  The SExtractor header is specialized so that we just copy the entire BaseHeader
        get_cols routine and modify as needed.

        Parameters
        ----------
        lines : list
            List of table lines

        """
        # This assumes that the columns are listed in order, one per line with a
        # header comment string of the format: "# 1 ID short description [unit]"
        # However, some may be missing and must be inferred from skipped column numbers
        columns = {}
        # E.g. '# 1 ID identification number' (no units) or '# 2 MAGERR magnitude of error [mag]'
        # Updated along with issue #4603, for more robust parsing of unit
        re_name_def = re.compile(r"""^\s* \# \s*             # possible whitespace around #
                                     (?P<colnumber> [0-9]+)\s+   # number of the column in table
                                     (?P<colname> [-\w]+)        # name of the column
                                     # column description, match any character until...
                                     (?:\s+(?P<coldescr> \w .+)
                                     # ...until [non-space][space][unit] or [not-right-bracket][end]
                                     (?:(?<!(\]))$|(?=(?:(?<=\S)\s+\[.+\]))))?
                                     (?:\s*\[(?P<colunit>.+)\])?.*   # match units in brackets
                                     """, re.VERBOSE)
        dataline = None
        for line in lines:
            if not line.startswith('#'):
                dataline = line  # save for later to infer the actual number of columns
                break  # End of header lines
            else:
                match = re_name_def.search(line)
                if match:
                    colnumber = int(match.group('colnumber'))
                    colname = match.group('colname')
                    coldescr = match.group('coldescr')
                    colunit = match.group('colunit')  # If no units are given, colunit = None
                    columns[colnumber] = (colname, coldescr, colunit)

        # Handle skipped column numbers
        colnumbers = sorted(columns)
        # Handle the case where the last column is array-like by append a pseudo column
        # If there are more data columns than the largest column number
        # then add a pseudo-column that will be dropped later.  This allows
        # the array column logic below to work in all cases.
        if dataline is not None:
            n_data_cols = len(dataline.split())
        else:
            # handles no data, where we have to rely on the last column number
            n_data_cols = colnumbers[-1]

        # sextractor column number start at 1.
        columns[n_data_cols + 1] = (None, None, None)
        colnumbers.append(n_data_cols + 1)
        # only fill in skipped columns when there is genuine column initially
        if len(columns) > 1:
            previous_column = 0
            for n in colnumbers:
                if n != previous_column + 1:
                    # A numbering gap means the previous column is an array
                    # column: clone it with _1, _2, ... name suffixes.
                    for c in range(previous_column + 1, n):
                        column_name = (columns[previous_column][0]
                                       + f"_{c - previous_column}")
                        column_descr = columns[previous_column][1]
                        column_unit = columns[previous_column][2]
                        columns[c] = (column_name, column_descr, column_unit)
                previous_column = n

        # Add the columns in order to self.names
        colnumbers = sorted(columns)[:-1]  # drop the pseudo column
        self.names = []
        for n in colnumbers:
            self.names.append(columns[n][0])

        if not self.names:
            raise core.InconsistentTableError('No column names found in SExtractor header')

        self.cols = []
        for n in colnumbers:
            col = core.Column(name=columns[n][0])
            col.description = columns[n][1]
            col.unit = columns[n][2]
            self.cols.append(col)
class SExtractorData(core.BaseData):
    """Data reader for SExtractor tables."""
    # Data rows start immediately; the header consists only of '#' lines.
    start_line = 0
    delimiter = ' '
    comment = r'\s*#'
class SExtractor(core.BaseReader):
    """SExtractor format table.

    SExtractor is a package for faint-galaxy photometry (Bertin & Arnouts
    1996, A&A Supp. 317, 393.)

    See: http://www.astromatic.net/software/sextractor

    Example::

        # 1 NUMBER
        # 2 ALPHA_J2000
        # 3 DELTA_J2000
        # 4 FLUX_RADIUS
        # 7 MAG_AUTO [mag]
        # 8 X2_IMAGE Variance along x [pixel**2]
        # 9 X_MAMA Barycenter position along MAMA x axis [m**(-6)]
        # 10 MU_MAX Peak surface brightness above background [mag * arcsec**(-2)]
        1 32.23222 10.1211 0.8 1.2 1.4 18.1 1000.0 0.00304 -3.498
        2 38.12321 -88.1321 2.2 2.4 3.1 17.0 1500.0 0.00908 1.401

    Note the skipped numbers since flux_radius has 3 columns.  The three
    FLUX_RADIUS columns will be named FLUX_RADIUS, FLUX_RADIUS_1, FLUX_RADIUS_2
    Also note that a post-ID description (e.g. "Variance along x") is optional
    and that units may be specified at the end of a line in brackets.

    """
    _format_name = 'sextractor'
    # Writing is not supported (see the write method below).
    _io_registry_can_write = False
    _description = 'SExtractor format table'

    header_class = SExtractorHeader
    data_class = SExtractorData
    inputter_class = core.ContinuationLinesInputter

    def read(self, table):
        """
        Read input data (file-like object, filename, list of strings, or
        single string) into a Table and return the result.
        """
        out = super().read(table)
        # remove the comments
        if 'comments' in out.meta:
            del out.meta['comments']
        return out

    def write(self, table):
        # Writing SExtractor-format headers is not implemented.
        raise NotImplementedError
|
StuartLittlefair/astropy
|
astropy/io/ascii/sextractor.py
|
Python
|
bsd-3-clause
| 6,346
|
[
"Galaxy"
] |
033d6fd3b1ed191d2adcaa9d39833c9e4a47cf3c04b119a70359534084a969f4
|
from vtk import vtkRenderer, vtkConeSource, vtkPolyDataMapper, vtkActor, \
vtkImplicitPlaneWidget2, vtkImplicitPlaneRepresentation, \
vtkObject, vtkPNGReader, vtkImageActor, QVTKWidget2, \
vtkRenderWindow, vtkOrientationMarkerWidget, vtkAxesActor, \
vtkTransform, vtkPolyData, vtkPoints, vtkCellArray, \
vtkTubeFilter, vtkQImageToImageSource, vtkImageImport, \
vtkDiscreteMarchingCubes, vtkWindowedSincPolyDataFilter, \
vtkMaskFields, vtkGeometryFilter, vtkThreshold, vtkDataObject, \
vtkDataSetAttributes, vtkCutter, vtkPlane, vtkPropAssembly, \
vtkGenericOpenGLRenderWindow, QVTKWidget, vtkOBJExporter
from PyQt4.QtGui import QWidget, QVBoxLayout, QHBoxLayout, QPushButton, \
QSizePolicy, QSpacerItem, QIcon, QFileDialog
from PyQt4.QtCore import SIGNAL
import qimage2ndarray
from numpy2vtk import toVtkImageData
from GenerateModelsFromLabels_thread import *
import platform #to check whether we are running on a Mac
import copy
from ilastik.gui.slicingPlanesWidget import SlicingPlanesWidget
from ilastik.gui.iconMgr import ilastikIcons
def convertVTPtoOBJ(vtpFilename, objFilename):
    """Convert a legacy ASCII VTK polydata file to a Wavefront OBJ file.

    POINTS coordinate values are grouped in triples into ``v x y z``
    lines; POLYGONS triangle records (``3 a b c``) become 1-based
    ``f`` lines.

    NOTE(review): despite the name, the parser reads the legacy
    POINTS/POLYGONS keyword layout, not XML .vtp -- confirm callers pass
    that format.
    """
    # Use context managers so both handles are closed even on error
    # (the original implementation leaked both file objects).
    with open(vtpFilename, 'r') as src:
        lines = src.readlines()

    inPoints = False
    inPolygons = False
    numPoints = -1   # total number of coordinate values (3 * point count)
    readPoints = 0

    with open(objFilename, 'w') as out:
        for l in lines:
            l = l.strip()
            if l == "":
                continue
            if inPoints:
                # Accumulate coordinate tokens in groups of three.
                # NOTE(review): the triple counter resets at each line, so
                # a partial triple at a line break is dropped -- behavior
                # preserved from the original implementation.
                i = 0
                outLine = ""
                for n in l.split(" "):
                    if i == 0:
                        outLine = "v"
                    i += 1
                    outLine += " " + n
                    readPoints += 1
                    if i == 3:
                        out.write(outLine + "\n")
                        i = 0
                if readPoints == numPoints:
                    inPoints = False
            elif inPolygons:
                # "3 a b c" -> OBJ face with 1-based vertex indices.
                indices = l[2:].split(" ")
                out.write("f ")
                out.write(str(int(indices[0]) + 1) + " ")
                out.write(str(int(indices[1]) + 1) + " ")
                out.write(str(int(indices[2]) + 1) + " ")
                out.write("\n")
            else:
                if l.startswith("POINTS"):
                    m = l.split(" ")
                    numPoints = 3 * int(m[1])
                    inPoints = True
                    inPolygons = False
                elif l.startswith("POLYGONS"):
                    inPoints = False
                    inPolygons = True
#*******************************************************************************
# Q V T K O p e n G L W i d g e t *
#*******************************************************************************
class QVTKOpenGLWidget(QVTKWidget2):
    # Custom VTK render widget with wireframe toggling and point picking.
    # NOTE(review): this file is Python 2 (print statements below); several
    # names used here (vtkPropCollection, Qt, QEvent, vtkCellPicker) are
    # not in this module's import list -- presumably supplied at runtime by
    # a star import elsewhere; verify before refactoring.

    # Class-level flag tracking the current representation, flipped by 'w'.
    wireframe = False

    def __init__(self, parent = None):
        QVTKWidget2.__init__(self, parent)

    def init(self):
        # Deferred setup: build the renderer and GL render window, attach
        # them to this widget, and prepare the collection of pickable actors.
        self.renderer = vtkRenderer()
        self.renderer.SetUseDepthPeeling(1); ####
        self.renderer.SetBackground(1,1,1)
        self.renderWindow = vtkGenericOpenGLRenderWindow()
        self.renderWindow.SetAlphaBitPlanes(True) ####
        self.renderWindow.AddRenderer(self.renderer)
        self.SetRenderWindow(self.renderWindow)
        self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.actors = vtkPropCollection()
        #self.picker = vtkCellPicker()
        #self.picker = vtkPointPicker()
        #self.picker.PickFromListOn()

    def registerObject(self, o):
        # Track a prop so keyPressEvent can toggle its representation.
        #print "add item to prop collection"
        self.actors.AddItem(o)
        #self.picker.AddPickList(o)

    def update(self):
        QVTKWidget2.update(self)
        #Refresh the content, works around a bug on OS X
        self.paintGL()

    def keyPressEvent(self, e):
        # 'w' toggles all registered actors between wireframe and surface.
        if e.key() == Qt.Key_W:
            self.actors.InitTraversal();
            for i in range(self.actors.GetNumberOfItems()):
                if self.wireframe:
                    "to surface"
                    self.actors.GetNextProp().GetProperty().SetRepresentationToSurface()
                else:
                    self.actors.GetNextProp().GetProperty().SetRepresentationToWireframe()
            self.wireframe = not self.wireframe
            self.update()

    def mousePressEvent(self, e):
        # Double-click: pick the 3D point under the cursor and broadcast it
        # via the "objectPicked" signal; other presses go to the base class.
        if e.type() == QEvent.MouseButtonDblClick:
            print "double clicked"
            #self.picker.SetTolerance(0.05)
            picker = vtkCellPicker()
            picker.SetTolerance(0.05)
            res = picker.Pick(e.pos().x(), e.pos().y(), 0, self.renderer)
            if res > 0:
                c = picker.GetPickPosition()
                print " picked at coordinate =", c
                self.emit(SIGNAL("objectPicked"), c[0:3])
        else:
            QVTKWidget2.mousePressEvent(self, e)
#*******************************************************************************
# O u t l i n e r *
#*******************************************************************************
class Outliner(vtkPropAssembly):
    """
    Prop assembly that renders the cross-section of a cutting plane with a
    mesh as a tube outline (cutter -> tube filter -> mapper -> actor).
    """
    def SetPickable(self, pickable):
        # Forward pickability to every prop contained in this assembly.
        props = self.GetParts()
        props.InitTraversal();
        for i in range(props.GetNumberOfItems()):
            props.GetNextProp().SetPickable(pickable)
    def __init__(self, mesh):
        # Cut the mesh with a (placeholder) plane; SetPlane() installs the
        # real cut function later.
        self.cutter = vtkCutter()
        self.cutter.SetCutFunction(vtkPlane())
        # Thicken the resulting cut polyline into visible tubes.
        self.tubes = vtkTubeFilter()
        self.tubes.SetInputConnection(self.cutter.GetOutputPort())
        self.tubes.SetRadius(1)
        self.tubes.SetNumberOfSides(8)
        self.tubes.CappingOn()
        self.mapper = vtkPolyDataMapper()
        self.mapper.SetInputConnection(self.tubes.GetOutputPort())
        self.actor = vtkActor()
        self.actor.SetMapper(self.mapper)
        self.cutter.SetInput(mesh)
        self.AddPart(self.actor)
    def GetOutlineProperty(self):
        # Property (color, width, ...) of the tube actor.
        return self.actor.GetProperty()
    def SetPlane(self, plane):
        self.cutter.SetCutFunction(plane)
        self.cutter.Update()
#*******************************************************************************
# O v e r v i e w S c e n e *
#*******************************************************************************
class OverviewScene(QWidget):
    """
    3D overview widget: renders extracted object meshes together with three
    movable slicing planes, RGB arrow axes, optional cut-outline ribbons and
    optional anaglyph stereo.  Emits C{changedSlice(num, axis)} whenever a
    slicing plane is moved (interactively or via L{ChangeSlice}).
    """
    changedSlice = pyqtSignal(int,int)
    def resizeEvent(self, event):
        QWidget.resizeEvent(self,event)
        self.qvtk.update() #needed on OS X
    def slicingCallback(self, obj, event):
        # VTK observer for the SlicingPlanesWidget "CoordinatesEvent";
        # re-broadcasts the moved plane's position as a Qt signal.
        num = obj.coordinate[obj.lastChangedAxis]
        axis = obj.lastChangedAxis
        self.changedSlice.emit(num, axis)
    def ShowPlaneWidget(self, axis, show):
        self.planes.ShowPlane(axis, show)
        self.qvtk.update()
    def TogglePlaneWidgetX(self):
        self.planes.TogglePlaneWidget(0)
        self.qvtk.update()
    def TogglePlaneWidgetY(self):
        self.planes.TogglePlaneWidget(1)
        self.qvtk.update()
    def TogglePlaneWidgetZ(self):
        self.planes.TogglePlaneWidget(2)
        self.qvtk.update()
    def __init__(self, parent, shape):
        # shape: scene extent per axis -- presumably (x, y, z) voxel counts;
        # TODO confirm against SlicingPlanesWidget.
        super(OverviewScene, self).__init__(parent)
        self.colorTable = None
        self.anaglyph = False
        self.sceneShape = shape
        self.sceneItems = []
        # One Outliner per axis, created lazily in onObjectMeshesComputed.
        self.cutter = 3*[None]
        self.objects = []
        layout = QVBoxLayout()
        layout.setMargin(0)
        layout.setSpacing(0)
        self.qvtk = QVTKOpenGLWidget()
        layout.addWidget(self.qvtk)
        self.setLayout(layout)
        self.qvtk.init()
        # Button bar below the render widget.
        hbox = QHBoxLayout(None)
        hbox.setMargin(0)
        hbox.setSpacing(5)
        hbox.setContentsMargins(5,0,5,0)
        b1 = QToolButton(); b1.setText('X')
        b1.setToolTip("toggle displaying the plane indicating the position of the slice with normal x")
        b1.setCheckable(True); b1.setChecked(True)
        b2 = QToolButton(); b2.setText('Y')
        b2.setToolTip("toggle displaying the plane indicating the position of the slice with normal y")
        b2.setCheckable(True); b2.setChecked(True)
        b3 = QToolButton(); b3.setText('Z')
        b3.setToolTip("toggle displaying the plane indicating the position of the slice with normal z")
        b3.setCheckable(True); b3.setChecked(True)
        bAnaglyph = QToolButton(); bAnaglyph.setText('A')
        bAnaglyph.setToolTip("toggle anaglyph rendering for red-blue colored glasses")
        bAnaglyph.setCheckable(True); bAnaglyph.setChecked(False)
        bCutter = QToolButton(); bCutter.setText('use cutter')
        bCutter.setToolTip("Toggle showing colored ribbons where the shown x,y or z planes intersect the rendered 3D object. This is a slow operation for very large opjects.")
        bCutter.setCheckable(True); bCutter.setChecked(False)
        self.bCutter = bCutter
        bExportMesh = QToolButton()
        bExportMesh.setIcon(QIcon(ilastikIcons.SaveAs))
        bExportMesh.setToolTip("export the currently shown object as a wavefront OBJ mesh file")
        hbox.addWidget(b1)
        hbox.addWidget(b2)
        hbox.addWidget(b3)
        hbox.addWidget(bAnaglyph)
        hbox.addWidget(bCutter)
        hbox.addStretch()
        hbox.addWidget(bExportMesh)
        layout.addLayout(hbox)
        # Interactive slicing planes.
        self.planes = SlicingPlanesWidget(shape)
        self.planes.SetInteractor(self.qvtk.GetInteractor())
        self.planes.AddObserver("CoordinatesEvent", self.slicingCallback)
        self.planes.SetCoordinate([0,0,0])
        self.planes.SetPickable(False)
        ## Add RGB arrow axes
        self.axes = vtkAxesActor();
        self.axes.AxisLabelsOff()
        self.axes.SetTotalLength(0.5*shape[0], 0.5*shape[1], 0.5*shape[2])
        self.axes.SetShaftTypeToCylinder()
        self.qvtk.renderer.AddActor(self.axes)
        self.qvtk.renderer.AddActor(self.planes)
        self.qvtk.renderer.ResetCamera()
        # Wire buttons (old-style signals, matching the PyQt4 code base).
        self.connect(b1, SIGNAL("clicked()"), self.TogglePlaneWidgetX)
        self.connect(b2, SIGNAL("clicked()"), self.TogglePlaneWidgetY)
        self.connect(b3, SIGNAL("clicked()"), self.TogglePlaneWidgetZ)
        self.connect(bAnaglyph, SIGNAL("clicked()"), self.ToggleAnaglyph3D)
        self.connect(bExportMesh, SIGNAL("clicked()"), self.exportMesh)
        bCutter.toggled.connect(self.useCutterToggled)
        self.connect(self.qvtk, SIGNAL("objectPicked"), self.__onObjectPicked)
        self.qvtk.setFocus()
    @property
    def useCutter(self):
        # The 'use cutter' toolbar button is the single source of truth.
        return self.bCutter.isChecked()
    def useCutterToggled(self):
        # Add or remove the three outline actors to match the button state.
        self.__updateCutter()
        if self.useCutter:
            for i in range(3): self.qvtk.renderer.AddActor(self.cutter[i])
        else:
            for i in range(3): self.qvtk.renderer.RemoveActor(self.cutter[i])
        self.qvtk.update()
    def exportMesh(self):
        # Write every pickable registered actor as <name><NN>.vtp and
        # convert each to a wavefront <name><NN>.obj.
        filename = QFileDialog.getSaveFileName(self,"Save Meshes As")
        self.qvtk.actors.InitTraversal();
        for i in range(self.qvtk.actors.GetNumberOfItems()):
            p = self.qvtk.actors.GetNextProp()
            if p.GetPickable() and self.qvtk.actors.IsItemPresent(p):
                vtpFilename = "%s%02d.vtp" % (filename, i)
                objFilename = "%s%02d.obj" % (filename, i)
                print "writing VTP file '%s'" % vtpFilename
                d = p.GetMapper().GetInput()
                w = vtkPolyDataWriter()
                w.SetFileTypeToASCII()
                w.SetInput(d)
                w.SetFileName("%s%02d.vtp" % (filename, i))
                w.Write()
                print "converting to OBJ file '%s'" % objFilename
                convertVTPtoOBJ(vtpFilename, objFilename)
                #renWin = vtkRenderWindow()
                #ren = vtkRenderer()
                #renWin.AddRenderer(ren)
                #ren.AddActor(p)
                #exporter = vtkOBJExporter()
                #exporter.SetInput(renWin)
                #exporter.SetFilePrefix("%s%02d" % (filename, i))
                #exporter.Update()
    def __onObjectPicked(self, coor):
        # Move all three slicing planes to the picked world coordinate.
        self.ChangeSlice( coor[0], 0)
        self.ChangeSlice( coor[1], 1)
        self.ChangeSlice( coor[2], 2)
    def __onLeftButtonReleased(self):
        print "CLICK"
    def ToggleAnaglyph3D(self):
        # Switch red/blue stereo rendering on or off.
        self.anaglyph = not self.anaglyph
        if self.anaglyph:
            print 'setting stero mode ON'
            self.qvtk.renderWindow.StereoRenderOn()
            self.qvtk.renderWindow.SetStereoTypeToAnaglyph()
        else:
            print 'setting stero mode OFF'
            self.qvtk.renderWindow.StereoRenderOff()
        self.qvtk.update()
    def __updateCutter(self):
        # Re-slice the outline ribbons against the current plane positions;
        # skipped entirely while the cutter button is off (it is slow).
        if(self.useCutter):
            #print "Update cutter"
            for i in range(3):
                if self.cutter[i]: self.cutter[i].SetPlane(self.planes.Plane(i))
        else:
            pass
            #print "Do NOT update cutter"
    def ChangeSlice(self, num, axis):
        # Programmatically move one slicing plane; triggers slicingCallback
        # via the planes widget, which emits changedSlice.
        c = copy.copy(self.planes.coordinate)
        c[axis] = num
        self.planes.SetCoordinate(c)
        self.__updateCutter()
        self.qvtk.update()
    def display(self, axis):
        self.qvtk.update()
    def redisplay(self):
        self.qvtk.update()
    def DisplayObjectMeshes(self, v, suppressLabels=(), smooth=True):
        # Kick off asynchronous mesh extraction from label volume v;
        # onObjectMeshesComputed is invoked when the dialog finishes.
        print "OverviewScene::DisplayObjectMeshes", suppressLabels
        self.dlg = MeshExtractorDialog(self)
        self.connect(self.dlg, SIGNAL('done()'), self.onObjectMeshesComputed)
        self.dlg.show()
        self.dlg.run(v, suppressLabels, smooth)
    def SetColorTable(self, table):
        # table: label -> QRgb value used to color the meshes.
        self.colorTable = table
    def onObjectMeshesComputed(self):
        self.dlg.accept()
        print "*** Preparing 3D view ***"
        #Clean up possible previous 3D displays
        for c in self.cutter:
            if c: self.qvtk.renderer.RemoveActor(c)
        for a in self.objects:
            self.qvtk.renderer.RemoveActor(a)
        # Merge all meshes so the outliners cut through everything at once.
        self.polygonAppender = vtkAppendPolyData()
        for g in self.dlg.extractor.meshes.values():
            self.polygonAppender.AddInput(g)
        # Red/green/blue outlines for the x/y/z cutting planes.
        self.cutter[0] = Outliner(self.polygonAppender.GetOutput())
        self.cutter[0].GetOutlineProperty().SetColor(1,0,0)
        self.cutter[1] = Outliner(self.polygonAppender.GetOutput())
        self.cutter[1].GetOutlineProperty().SetColor(0,1,0)
        self.cutter[2] = Outliner(self.polygonAppender.GetOutput())
        self.cutter[2].GetOutlineProperty().SetColor(0,0,1)
        for c in self.cutter:
            c.SetPickable(False)
        ## 1. Use a render window with alpha bits (as initial value is 0 (false)):
        #self.renderWindow.SetAlphaBitPlanes(True);
        ## 2. Force to not pick a framebuffer with a multisample buffer
        ## (as initial value is 8):
        #self.renderWindow.SetMultiSamples(0);
        ## 3. Choose to use depth peeling (if supported) (initial value is 0 (false)):
        #self.renderer.SetUseDepthPeeling(True);
        ## 4. Set depth peeling parameters
        ## - Set the maximum number of rendering passes (initial value is 4):
        #self.renderer.SetMaximumNumberOfPeels(100);
        ## - Set the occlusion ratio (initial value is 0.0, exact image):
        #self.renderer.SetOcclusionRatio(0.0);
        # One actor per extracted object, colored via the color table.
        for i, g in self.dlg.extractor.meshes.items():
            print " - showing object with label =", i
            mapper = vtkPolyDataMapper()
            mapper.SetInput(g)
            actor = vtkActor()
            actor.SetMapper(mapper)
            self.qvtk.registerObject(actor)
            self.objects.append(actor)
            if self.colorTable:
                c = self.colorTable[i]
                c = QColor.fromRgba(c)
                actor.GetProperty().SetColor(c.red()/255.0, c.green()/255.0, c.blue()/255.0)
            self.qvtk.renderer.AddActor(actor)
        self.qvtk.update()
#*******************************************************************************
# i f _ _ n a m e _ _ = = " _ _ m a i n _ _ " *
#*******************************************************************************
if __name__ == '__main__':
    # Manual test harness: show a synthetic label volume with four cubes
    # (label 1, the background-ish cube, is suppressed from display).
    import numpy
    def updateSlice(num, axis):
        o.ChangeSlice(num,axis)
    from PyQt4.QtGui import QApplication
    import sys, h5py
    app = QApplication(sys.argv)
    o = OverviewScene(None, [100,100,100])
    o.changedSlice.connect(updateSlice)
    o.show()
    o.resize(600,600)
    #f=h5py.File("/home/thorben/phd/src/vtkqt-test/seg.h5")
    #seg=f['volume/data'][0,:,:,:,0]
    #f.close()
    seg = numpy.ones((120,120,120), dtype=numpy.uint8)
    seg[20:40,20:40,20:40] = 2
    seg[50:70,50:70,50:70] = 3
    seg[80:100,80:100,80:100] = 4
    seg[80:100,80:100,20:50] = 5
    colorTable = [qRgb(255,0,0), qRgb(0,255,0), qRgb(255,255,0), qRgb(255,0,255), qRgb(0,0,255), qRgb(128,0,128)]
    o.SetColorTable(colorTable)
    # Defer mesh extraction until the event loop is running.
    QTimer.singleShot(0, partial(o.DisplayObjectMeshes, seg, suppressLabels=(1,)))
    app.exec_()
# [vtkusers] Depth peeling not used, but I can't see why.
# http://public.kitware.com/pipermail/vtkusers/2010-August/111040.html
|
ilastik/ilastik-0.5
|
ilastik/gui/view3d.py
|
Python
|
bsd-2-clause
| 17,552
|
[
"VTK"
] |
3ffa58f0bc089256362fef91f9f78ffca4c7c0bd471f222bfb163d267a123083
|
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
from itertools import count
import re, os, cStringIO, time, cgi, string, urlparse
from xml.dom import minidom as dom
from xml.sax.handler import ErrorHandler, feature_validation
from xml.dom.pulldom import SAX2DOM
from xml.sax import make_parser
from xml.sax.xmlreader import InputSource
from twisted.python import htmlizer, text
from twisted.python.filepath import FilePath
from twisted.python.deprecate import deprecated
from twisted.python.versions import Version
from twisted.web import domhelpers
import process, latex, indexer, numberer, htmlbook
# relative links to html files
def fixLinks(document, ext):
    """
    Rewrite links to XHTML lore input documents so they point to lore XHTML
    output documents.

    Any C{href} that is not an absolute URL (http/https/ftp/mailto), is not
    marked C{class="absolute"} or C{class="...listing..."}, and points at an
    C{html} target has its filename extension replaced with C{ext}.

    @type document: A DOM Node or Document
    @param document: The input document containing the links to rewrite.
    @type ext: C{str}
    @param ext: The extension to substitute for C{html} in matching links.
    @return: C{None}
    """
    absoluteSchemes = ('http', 'https', 'ftp', 'mailto')
    for anchor in domhelpers.findElementsWithAttribute(document, 'href'):
        href = anchor.getAttribute("href")
        if urlparse.urlparse(href)[0] in absoluteSchemes:
            continue
        cssClass = anchor.getAttribute("class")
        if cssClass == "absolute" or 'listing' in cssClass:
            continue
        # This is a relative link into another lore document; swap the
        # extension (preserving any fragment after '#').
        if href.endswith('html') or href[:href.rfind('#')].endswith('html'):
            base, extension = os.path.splitext(href)
            if '#' in extension:
                extension = ext + '#' + extension.split('#', 1)[1]
            else:
                extension = ext
            anchor.setAttribute("href", base + extension)
def addMtime(document, fullpath):
    """
    Stamp every node of class C{mtime} in C{document} with the last
    modification time of the file at C{fullpath}.

    @type document: A DOM Node or Document
    @param document: The output template to stamp.
    @type fullpath: C{str}
    @param fullpath: The file whose modification time is used.
    @return: C{None}
    """
    # The mtime is identical for every target node, so format it once.
    mtimeText = time.ctime(os.path.getmtime(fullpath))
    for node in domhelpers.findElementsWithAttribute(document, "class","mtime"):
        textNode = dom.Text()
        textNode.data = mtimeText
        node.appendChild(textNode)
def _getAPI(node):
"""
Retrieve the fully qualified Python name represented by the given node.
The name is represented by one or two aspects of the node: the value of the
node's first child forms the end of the name. If the node has a C{base}
attribute, that attribute's value is prepended to the node's value, with
C{.} separating the two parts.
@rtype: C{str}
@return: The fully qualified Python name.
"""
base = ""
if node.hasAttribute("base"):
base = node.getAttribute("base") + "."
return base+node.childNodes[0].nodeValue
def fixAPI(document, url):
    """
    Replace API references with links to API documentation.

    @type document: A DOM Node or Document
    @param document: The input document containing C{class="API"} nodes.
    @type url: C{str}
    @param url: A format string interpolated with the fully qualified Python
    name of each API reference; the result becomes the link target.
    @return: C{None}
    """
    for node in domhelpers.findElementsWithAttribute(document, "class", "API"):
        fullname = _getAPI(node)
        anchor = dom.Element('a')
        anchor.setAttribute('href', url % (fullname,))
        anchor.setAttribute('title', fullname)
        # Reparent all of the reference's children into the new anchor,
        # then hang the anchor off the original node.
        while node.childNodes:
            anchor.appendChild(node.removeChild(node.childNodes[0]))
        node.appendChild(anchor)
        # The base attribute has been folded into the link text/title.
        if node.hasAttribute('base'):
            node.removeAttribute('base')
def fontifyPython(document):
    """
    Syntax color every C{<pre class="python">} listing in the document.

    @type document: A DOM Node or Document
    @param document: The input document to colorize in place.
    @return: C{None}
    """
    def isPythonListing(node):
        if node.nodeName != 'pre':
            return False
        return (node.hasAttribute('class')
                and node.getAttribute('class') == 'python')
    for listing in domhelpers.findElements(document, isPythonListing):
        fontifyPythonNode(listing)
def fontifyPythonNode(node):
    """
    Syntax color the given node containing Python source code.
    The node must have a parent.
    @return: C{None}
    """
    # Recover the raw source text from the DOM node, mapping the usual
    # HTML entities back to their characters.
    oldio = cStringIO.StringIO()
    latex.getLatexText(node, oldio.write,
                       entities={'lt': '<', 'gt': '>', 'amp': '&'})
    # Normalize to exactly one trailing newline so the line count and the
    # rendered listing agree.
    oldio = cStringIO.StringIO(oldio.getvalue().strip()+'\n')
    howManyLines = len(oldio.getvalue().splitlines())
    newio = cStringIO.StringIO()
    # Re-render the source with syntax highlighting markup.
    htmlizer.filter(oldio, newio, writer=htmlizer.SmallerHTMLWriter)
    lineLabels = _makeLineNumbers(howManyLines)
    newel = dom.parseString(newio.getvalue()).documentElement
    newel.setAttribute("class", "python")
    # Swap the colored listing in for the original node first (the node
    # must have a parent), then prepend the line-number column.
    node.parentNode.replaceChild(newel, node)
    newel.insertBefore(lineLabels, newel.firstChild)
def addPyListings(document, dir):
    """
    Insert Python source listings into the given document from files in the
    given directory based on C{py-listing} nodes.

    Any node in C{document} with a C{class} attribute set to C{py-listing} will
    have source lines taken from the file named in that node's C{href}
    attribute (searched for in C{dir}) inserted in place of that node.

    If a node has a C{skipLines} attribute, its value will be parsed as an
    integer and that many lines will be skipped at the beginning of the source
    file.

    @type document: A DOM Node or Document
    @param document: The document within which to make listing replacements.
    @type dir: C{str}
    @param dir: The directory in which to find source files containing the
    referenced Python listings.
    @return: C{None}
    """
    for node in domhelpers.findElementsWithAttribute(document, "class",
                                                     "py-listing"):
        filename = node.getAttribute("href")
        outfile = cStringIO.StringIO()
        # Close the source file deterministically instead of leaking the
        # handle until garbage collection (the original open(...).readlines()
        # never closed it).
        listingFile = open(os.path.join(dir, filename))
        try:
            lines = [line.rstrip() for line in listingFile]
        finally:
            listingFile.close()
        skip = node.getAttribute('skipLines') or 0
        lines = lines[int(skip):]
        howManyLines = len(lines)
        data = '\n'.join(lines)
        data = cStringIO.StringIO(text.removeLeadingTrailingBlanks(data))
        htmlizer.filter(data, outfile, writer=htmlizer.SmallerHTMLWriter)
        sourceNode = dom.parseString(outfile.getvalue()).documentElement
        sourceNode.insertBefore(_makeLineNumbers(howManyLines), sourceNode.firstChild)
        _replaceWithListing(node, sourceNode.toxml(), filename, "py-listing")
def _makeLineNumbers(howMany):
"""
Return an element which will render line numbers for a source listing.
@param howMany: The number of lines in the source listing.
@type howMany: C{int}
@return: An L{dom.Element} which can be added to the document before
the source listing to add line numbers to it.
"""
# Figure out how many digits wide the widest line number label will be.
width = len(str(howMany))
# Render all the line labels with appropriate padding
labels = ['%*d' % (width, i) for i in range(1, howMany + 1)]
# Create a p element with the right style containing the labels
p = dom.Element('p')
p.setAttribute('class', 'py-linenumber')
t = dom.Text()
t.data = '\n'.join(labels) + '\n'
p.appendChild(t)
return p
def _replaceWithListing(node, val, filename, class_):
    """
    Replace C{node} with a listing div of class C{class_} wrapping C{val},
    captioned with the node's text (or 'Source listing' when that text is
    just the file's base name) and a link to C{filename}.
    """
    caption = domhelpers.getNodeText(node)
    if caption == os.path.basename(filename):
        caption = 'Source listing'
    markup = ('<div class="%s">%s<div class="caption">%s - '
              '<a href="%s"><span class="filename">%s</span></a></div></div>' %
              (class_, val, caption, filename, filename))
    replacement = dom.parseString(markup).documentElement
    node.parentNode.replaceChild(replacement, node)
def addHTMLListings(document, dir):
    """
    Insert HTML source listings into the given document from files in the given
    directory based on C{html-listing} nodes.

    Any node in C{document} with a C{class} attribute set to C{html-listing}
    will have source lines taken from the file named in that node's C{href}
    attribute (searched for in C{dir}) inserted in place of that node.

    @type document: A DOM Node or Document
    @param document: The document within which to make listing replacements.
    @type dir: C{str}
    @param dir: The directory in which to find source files containing the
    referenced HTML listings.
    @return: C{None}
    """
    for node in domhelpers.findElementsWithAttribute(document, "class",
                                                     "html-listing"):
        filename = node.getAttribute("href")
        # Close the listing file promptly instead of leaking the handle
        # (the original open(...).read() never closed it).
        listingFile = open(os.path.join(dir, filename))
        try:
            source = listingFile.read()
        finally:
            listingFile.close()
        val = ('<pre class="htmlsource">\n%s</pre>' %
               cgi.escape(source))
        _replaceWithListing(node, val, filename, "html-listing")
def addPlainListings(document, dir):
    """
    Insert text listings into the given document from files in the given
    directory based on C{listing} nodes.

    Any node in C{document} with a C{class} attribute set to C{listing} will
    have source lines taken from the file named in that node's C{href}
    attribute (searched for in C{dir}) inserted in place of that node.

    @type document: A DOM Node or Document
    @param document: The document within which to make listing replacements.
    @type dir: C{str}
    @param dir: The directory in which to find source files containing the
    referenced text listings.
    @return: C{None}
    """
    for node in domhelpers.findElementsWithAttribute(document, "class",
                                                     "listing"):
        filename = node.getAttribute("href")
        # Close the listing file promptly instead of leaking the handle
        # (the original open(...).read() never closed it).
        listingFile = open(os.path.join(dir, filename))
        try:
            source = listingFile.read()
        finally:
            listingFile.close()
        val = ('<pre>\n%s</pre>' %
               cgi.escape(source))
        _replaceWithListing(node, val, filename, "listing")
def getHeaders(document):
    """
    Return all H2 and H3 nodes in the given document.

    @type document: A DOM Node or Document
    @rtype: C{list}
    """
    # Compile the tag-name pattern once instead of binding it as a lambda
    # default argument.
    headerName = re.compile('h[23]$')
    return domhelpers.findElements(
        document, lambda node: headerName.match(node.nodeName))
def generateToC(document):
    """
    Create a table of contents for the given document.

    @type document: A DOM Node or Document
    @rtype: A DOM Node
    @return: a Node containing a table of contents based on the headers of the
    given document.
    @raise ValueError: if an C{h3} appears before any C{h2}.
    """
    # Group the document's headers as (h2, [h3, ...]) pairs.
    headers = []
    currentSubHeaders = None
    for element in getHeaders(document):
        if element.tagName == 'h2':
            currentSubHeaders = []
            headers.append((element, currentSubHeaders))
        else:
            if currentSubHeaders is None:
                raise ValueError(
                    "No H3 element is allowed until after an H2 element")
            currentSubHeaders.append(element)

    anchorCounter = count()

    def addItem(headerElement, parent):
        # One ToC entry linking to a generated named anchor, which is also
        # appended to the header itself.
        name = 'auto%d' % (next(anchorCounter),)
        link = dom.Element('a')
        link.setAttribute('href', '#' + name)
        linkText = dom.Text()
        linkText.data = domhelpers.getNodeText(headerElement)
        link.appendChild(linkText)
        item = dom.Element('li')
        item.appendChild(link)
        parent.appendChild(item)
        target = dom.Element('a')
        target.setAttribute('name', name)
        headerElement.appendChild(target)

    toc = dom.Element('ol')
    for headerElement, subHeaders in headers:
        addItem(headerElement, toc)
        if subHeaders:
            subtoc = dom.Element('ul')
            toc.appendChild(subtoc)
            for subHeaderElement in subHeaders:
                addItem(subHeaderElement, subtoc)
    return toc
def putInToC(document, toc):
    """
    Insert the given table of contents into the given document.

    The first node with a C{class} attribute of C{toc} has its children
    replaced with C{toc}; if no such node exists, nothing happens.

    @type document: A DOM Node or Document
    @type toc: A DOM Node
    """
    targets = domhelpers.findElementsWithAttribute(document, 'class', 'toc')
    if not targets:
        return
    targets[0].childNodes = [toc]
def removeH1(document):
    """
    Replace all C{h1} nodes in the given document with empty C{span} nodes.

    C{h1} nodes mark up document sections and the output template is given an
    opportunity to present this information in a different way.

    @type document: A DOM Node or Document
    @param document: The input document which contains all of the content to be
    presented.

    @return: C{None}
    """
    for node in domhelpers.findNodesNamed(document, 'h1'):
        # Use a fresh span for every replacement: DOM replaceChild reparents
        # the new child, so re-using a single span (as before) left all but
        # the last h1's slot empty when a document had several h1 nodes.
        node.parentNode.replaceChild(dom.Element('span'), node)
def footnotes(document):
    """
    Find footnotes in the given document, move them to the end of the body, and
    generate links to them.

    A footnote is any node with a C{class} attribute set to C{footnote}.
    Footnote links are generated as superscript.  Footnotes are collected in a
    C{ol} node at the end of the document.

    @type document: A DOM Node or Document
    @param document: The input document which contains all of the content to be
    presented.

    @return: C{None}
    """
    footnotes = domhelpers.findElementsWithAttribute(document, "class",
                                                     "footnote")
    if not footnotes:
        return
    footnoteElement = dom.Element('ol')
    # Number footnotes from 1 with enumerate instead of a hand-maintained
    # counter that also shadowed the id() builtin.
    for number, footnote in enumerate(footnotes, 1):
        href = dom.parseString('<a href="#footnote-%(id)d">'
                               '<super>%(id)d</super></a>'
                               % {'id': number}).documentElement
        text = ' '.join(domhelpers.getNodeText(footnote).split())
        href.setAttribute('title', text)
        target = dom.Element('a')
        target.setAttribute('name', 'footnote-%d' % (number,))
        target.childNodes = [footnote]
        footnoteContent = dom.Element('li')
        footnoteContent.childNodes = [target]
        footnoteElement.childNodes.append(footnoteContent)
        # Replace the inline footnote body with the superscript link.
        footnote.parentNode.replaceChild(href, footnote)
    body = domhelpers.findNodesNamed(document, "body")[0]
    header = dom.parseString('<h2>Footnotes</h2>').documentElement
    body.childNodes.append(header)
    body.childNodes.append(footnoteElement)
def notes(document):
    """
    Find notes in the given document and mark them up as such.

    A note is any node with a C{class} attribute set to C{note}.

    (I think this is a very stupid feature.  When I found it I actually
    exclaimed out loud.  -exarkun)

    @type document: A DOM Node or Document
    @param document: The input document which contains all of the content to be
    presented.

    @return: C{None}
    """
    notes = domhelpers.findElementsWithAttribute(document, "class", "note")
    notePrefix = dom.parseString('<strong>Note: </strong>').documentElement
    for note in notes:
        # Insert an independent copy per note: sharing the single prefix
        # node between several parents corrupts the DOM's parent/child
        # links when a document contains more than one note.
        note.childNodes.insert(0, notePrefix.cloneNode(True))
def compareMarkPos(a, b):
    """
    Perform in every way identically to L{cmp} for valid inputs.
    """
    # a and b are (line, column) parse positions; compare lexicographically.
    # Relies on the Python 2 cmp() builtin.
    linecmp = cmp(a[0], b[0])
    if linecmp:
        return linecmp
    return cmp(a[1], b[1])
# Deprecated since Twisted 9.0; retained only for backwards compatibility.
compareMarkPos = deprecated(Version('Twisted', 9, 0, 0))(compareMarkPos)
def comparePosition(firstElement, secondElement):
    """
    Compare the two elements given by their position in the document or
    documents they were parsed from.

    @type firstElement: C{dom.Element}
    @type secondElement: C{dom.Element}

    @return: C{-1}, C{0}, or C{1}, with the same meanings as the return value
    of L{cmp}.
    """
    # _markpos is the (line, column) position recorded on the element at
    # parse time.  Relies on the Python 2 cmp() builtin.
    return cmp(firstElement._markpos, secondElement._markpos)
# Deprecated since Twisted 9.0; retained only for backwards compatibility.
comparePosition = deprecated(Version('Twisted', 9, 0, 0))(comparePosition)
def findNodeJustBefore(target, nodes):
    """
    Find the last Element which is a sibling of C{target} and is in C{nodes}.

    Preceding siblings of C{target} are checked first, then preceding
    siblings of each successive ancestor.

    @param target: A node the previous sibling of which to return.
    @param nodes: A list of nodes which might be the right node.
    @return: The previous sibling of C{target}.
    @raise RuntimeError: if no preceding node from C{nodes} exists.
    """
    ancestor = target
    while ancestor is not None:
        sibling = ancestor.previousSibling
        while sibling is not None:
            if sibling in nodes:
                return sibling
            sibling = sibling.previousSibling
        ancestor = ancestor.parentNode
    raise RuntimeError("Oops")
def getFirstAncestorWithSectionHeader(entry):
    """
    Visit the ancestors of C{entry} until one with at least one C{h2} child
    node is found, then return all of that node's C{h2} child nodes.

    @type entry: A DOM Node
    @param entry: The node from which to begin traversal.  This node itself is
    excluded from consideration.

    @rtype: C{list} of DOM Nodes
    @return: All C{h2} nodes of the ultimately selected parent node, or an
    empty list when no ancestor has any.
    """
    # getParents()[0] is entry itself, so skip it.
    for ancestor in domhelpers.getParents(entry)[1:]:
        headers = domhelpers.findNodesNamed(ancestor, "h2")
        if headers:
            return headers
    return []
def getSectionNumber(header):
    """
    Retrieve the section number of the given node.

    This is probably intended to interact in a rather specific way with
    L{numberDocument}.

    @type header: A DOM Node or L{None}
    @param header: The section from which to extract a number.  The section
    number is the value of this node's first child.

    @return: C{None} or a C{str} giving the section number.
    """
    if header:
        return domhelpers.gatherTextNodes(header.childNodes[0])
    return None
def getSectionReference(entry):
    """
    Find the section number which contains the given node.

    This function looks at the given node's ancestry until it finds a node
    which defines a section, then returns that section's number.

    @type entry: A DOM Node
    @param entry: The node for which to determine the section.

    @rtype: C{str}
    @return: The section number, as returned by C{getSectionNumber} of the
    first ancestor of C{entry} which defines a section, as determined by
    L{getFirstAncestorWithSectionHeader}.
    """
    candidateHeaders = getFirstAncestorWithSectionHeader(entry)
    return getSectionNumber(findNodeJustBefore(entry, candidateHeaders))
def index(document, filename, chapterReference):
    """
    Extract index entries from the given document and store them for later use
    and insert named anchors so that the index can link back to those entries.

    Any node with a C{class} attribute set to C{index} is considered an index
    entry.

    @type document: A DOM Node or Document
    @param document: The input document which contains all of the content to be
    presented.

    @type filename: C{str}
    @param filename: A link to the output for the given document which will be
    included in the index to link to any index entry found here.

    @type chapterReference: ???
    @param chapterReference: ???

    @return: C{None}
    """
    entries = domhelpers.findElementsWithAttribute(document, "class", "index")
    if not entries:
        return
    # Anchors are numbered from 1; use enumerate instead of a manual counter.
    for i, entry in enumerate(entries, 1):
        anchor = 'index%02d' % i
        if chapterReference:
            ref = getSectionReference(entry) or chapterReference
        else:
            ref = 'link'
        indexer.addEntry(filename, anchor, entry.getAttribute('value'), ref)
        # Turn the entry into a named <a> the generated index can link to.
        # (Setting nodeName as well as tagName mirrors the historical
        # behavior; it is unclear whether nodeName alone matters here.)
        entry.nodeName = entry.tagName = entry.endTagName = 'a'
        # Copy the key list so removing attributes during iteration is safe.
        for attrName in list(entry.attributes.keys()):
            entry.removeAttribute(attrName)
        entry.setAttribute('name', anchor)
def setIndexLink(template, indexFilename):
    """
    Insert a link to an index document.

    Any node with a C{class} attribute set to C{index-link} will have its tag
    name changed to C{a} and its C{href} attribute set to C{indexFilename}.

    @type template: A DOM Node or Document
    @param template: The output template which defines the presentation of the
    version information.

    @type indexFilename: C{str}
    @param indexFilename: The address of the index document to which to link.
    If C{None}, this function will instead remove all index-link nodes.

    @return: C{None}
    """
    indexLinks = domhelpers.findElementsWithAttribute(template,
                                                      "class",
                                                      "index-link")
    for link in indexLinks:
        if indexFilename is None:
            link.parentNode.removeChild(link)
        else:
            # Re-tag the placeholder node as an anchor and strip its old
            # attributes before setting the link target.
            link.nodeName = link.tagName = link.endTagName = 'a'
            for attrName in link.attributes.keys():
                link.removeAttribute(attrName)
            link.setAttribute('href', indexFilename)
def numberDocument(document, chapterNumber):
    """
    Number the sections of the given document.

    A dot-separated chapter, section number is added to the beginning of each
    section, as defined by C{h2} nodes.

    This is probably intended to interact in a rather specific way with
    L{getSectionNumber}.

    @type document: A DOM Node or Document
    @param document: The input document which contains all of the content to be
    presented.

    @type chapterNumber: C{int}
    @param chapterNumber: The chapter number of this content in an overall
    document.

    @return: C{None}
    """
    sections = domhelpers.findNodesNamed(document, "h2")
    for sectionNumber, node in enumerate(sections, 1):
        label = dom.Text()
        label.data = "%s.%d " % (chapterNumber, sectionNumber)
        node.insertBefore(label, node.firstChild)
def fixRelativeLinks(document, linkrel):
    """
    Replace relative links in C{src} and C{href} attributes with links relative
    to C{linkrel}.

    @type document: A DOM Node or Document
    @param document: The output template.

    @type linkrel: C{str}
    @param linkrel: A prefix to apply to all relative links in C{src} or
    C{href} attributes in the input document when generating the output
    document.
    """
    for attributeName in ('src', 'href'):
        for node in domhelpers.findElementsWithAttribute(document, attributeName):
            target = node.getAttribute(attributeName)
            # Leave absolute URLs and root-relative paths untouched.
            if not (target.startswith('http') or target.startswith('/')):
                node.setAttribute(attributeName, linkrel + target)
def setTitle(template, title, chapterNumber):
    """
    Add title and chapter number information to the template document.

    The title is added to the end of the first C{title} tag and the end of the
    first tag with a C{class} attribute set to C{title}.  If specified, the
    chapter is inserted before the title.

    @type template: A DOM Node or Document
    @param template: The output template which defines the presentation of the
    version information.

    @type title: C{list} of DOM Nodes
    @param title: Nodes from the input document defining its title.

    @type chapterNumber: C{int}
    @param chapterNumber: The chapter number of this content in an overall
    document.  If not applicable, any C{False} value will result in this
    information being omitted.

    @return: C{None}
    """
    if numberer.getNumberSections() and chapterNumber:
        titleNode = dom.Text()
        # This is necessary in order for cloning below to work. See Python
        # issue 4851.
        titleNode.ownerDocument = template.ownerDocument
        titleNode.data = '%s. ' % (chapterNumber,)
        title.insert(0, titleNode)
    # Copy the title nodes into both the <title> element and the first
    # class="title" element; clones are needed because one set of nodes is
    # inserted in two places.
    for nodeList in (domhelpers.findNodesNamed(template, "title"),
                     domhelpers.findElementsWithAttribute(template, "class",
                                                          'title')):
        if nodeList:
            for titleNode in title:
                nodeList[0].appendChild(titleNode.cloneNode(True))
def setAuthors(template, authors):
    """
    Add author information to the template document.

    Names and contact information for authors are added to each node with a
    C{class} attribute set to C{authors} and to the template head as C{link}
    nodes.

    @type template: A DOM Node or Document
    @param template: The output template which defines the presentation of the
    version information.

    @type authors: C{list} of two-tuples of C{str}
    @param authors: List of names and contact information for the authors of
    the input document.

    @return: C{None}
    """
    for node in domhelpers.findElementsWithAttribute(template,
                                                     "class", 'authors'):

        # First, similarly to setTitle, insert text into an <div
        # class="authors">
        container = dom.Element('span')
        for name, href in authors:
            anchor = dom.Element('a')
            anchor.setAttribute('href', href)
            anchorText = dom.Text()
            anchorText.data = name
            anchor.appendChild(anchorText)
            # Render the list in prose style: "A, B and C" -- the final
            # author is preceded by "and " (unless there is only one), all
            # earlier authors are followed by ", ".
            if (name, href) == authors[-1]:
                if len(authors) == 1:
                    container.appendChild(anchor)
                else:
                    andText = dom.Text()
                    andText.data = 'and '
                    container.appendChild(andText)
                    container.appendChild(anchor)
            else:
                container.appendChild(anchor)
                commaText = dom.Text()
                commaText.data = ', '
                container.appendChild(commaText)

        node.appendChild(container)

    # Second, add appropriate <link rel="author" ...> tags to the <head>.
    head = domhelpers.findNodesNamed(template, 'head')[0]
    authors = [dom.parseString('<link rel="author" href="%s" title="%s"/>'
                               % (href, name)).childNodes[0]
               for name, href in authors]
    head.childNodes.extend(authors)
def setVersion(template, version):
    """
    Stamp the documentation version into the given template.

    A text node containing C{version} is appended to every element whose
    C{class} attribute is C{version}.

    @type template: A DOM Node or Document
    @param template: The output template which defines the presentation of the
    version information.
    @type version: C{str}
    @param version: The version string to add to the template.
    @return: C{None}
    """
    versionNodes = domhelpers.findElementsWithAttribute(template, "class",
                                                        "version")
    for versionNode in versionNodes:
        textNode = dom.Text()
        textNode.data = version
        versionNode.appendChild(textNode)
def getOutputFileName(originalFileName, outputExtension, index=None):
    """
    Return a filename which is the same as C{originalFileName} except for the
    extension, which is replaced with C{outputExtension}.
    For example, if C{originalFileName} is C{'/foo/bar.baz'} and
    C{outputExtension} is C{'quux'}, the return value will be
    C{'/foo/bar.quux'}.
    @type originalFileName: C{str}
    @type outputExtension: C{str}
    @param index: ignored, never passed.
    @rtype: C{str}
    """
    base, _unusedExt = os.path.splitext(originalFileName)
    return base + outputExtension
def munge(document, template, linkrel, dir, fullpath, ext, url, config, outfileGenerator=getOutputFileName):
    """
    Mutate C{template} until it resembles C{document}.
    @type document: A DOM Node or Document
    @param document: The input document which contains all of the content to be
    presented.
    @type template: A DOM Node or Document
    @param template: The template document which defines the desired
    presentation format of the content.
    @type linkrel: C{str}
    @param linkrel: A prefix to apply to all relative links in C{src} or
    C{href} attributes in the input document when generating the output
    document.
    @type dir: C{str}
    @param dir: The directory in which to search for source listing files.
    @type fullpath: C{str}
    @param fullpath: The file name which contained the input document.
    @type ext: C{str}
    @param ext: The extension to use when selecting an output file name. This
    replaces the extension of the input file name.
    @type url: C{str}
    @param url: A string which will be interpolated with the fully qualified
    Python name of any API reference encountered in the input document, the
    result of which will be used as a link to API documentation for that name
    in the output document.
    @type config: C{dict}
    @param config: Further specification of the desired form of the output.
    Valid keys in this dictionary::
        noapi: If present and set to a True value, links to API documentation
               will not be generated.
        version: A string which will be included in the output to indicate the
                 version of this documentation.
    @type outfileGenerator: Callable of C{str}, C{str} returning C{str}
    @param outfileGenerator: Output filename factory. This is invoked with the
    input filename and C{ext} and the output document is serialized to the
    file with the name returned.
    @return: C{None}
    """
    # Prepare the template and strip the input document's top-level heading.
    fixRelativeLinks(template, linkrel)
    addMtime(template, fullpath)
    removeH1(document)
    if not config.get('noapi', False):
        fixAPI(document, url)
    # Transform the content: syntax highlighting, link rewriting, source
    # listings, table of contents, footnotes and notes.
    fontifyPython(document)
    fixLinks(document, ext)
    addPyListings(document, dir)
    addHTMLListings(document, dir)
    addPlainListings(document, dir)
    putInToC(template, generateToC(document))
    footnotes(document)
    notes(document)
    setIndexLink(template, indexer.getIndexFilename())
    setVersion(template, config.get('version', ''))
    # Insert the document into the template
    chapterNumber = htmlbook.getNumber(fullpath)
    title = domhelpers.findNodesNamed(document, 'title')[0].childNodes
    setTitle(template, title, chapterNumber)
    if numberer.getNumberSections() and chapterNumber:
        numberDocument(document, chapterNumber)
    index(document, outfileGenerator(os.path.split(fullpath)[1], ext),
          htmlbook.getReference(fullpath))
    # Collect (title, href) pairs from the input's <link rel="author"> nodes.
    authors = domhelpers.findNodesNamed(document, 'link')
    authors = [(node.getAttribute('title') or '',
                node.getAttribute('href') or '')
               for node in authors
               if node.getAttribute('rel') == 'author']
    setAuthors(template, authors)
    # Graft the input document's body into the template's class="body" node,
    # then re-tag it as "content".
    body = domhelpers.findNodesNamed(document, "body")[0]
    tmplbody = domhelpers.findElementsWithAttribute(template, "class",
                                                    "body")[0]
    tmplbody.childNodes = body.childNodes
    tmplbody.setAttribute("class", "content")
class _LocationReportingErrorHandler(ErrorHandler):
    """
    Define a SAX error handler which can report the location of fatal
    errors.
    Unlike the errors reported during parsing by other APIs in the xml
    package, this one tries to report mismatched tag errors by including the
    location of both the relevant opening and closing tags.
    """
    def __init__(self, contentHandler):
        # The content handler tracks open-tag locations on _locationStack.
        self.contentHandler = contentHandler
    def fatalError(self, err):
        # Unfortunately, the underlying expat error code is only exposed as
        # a string. I surely do hope no one ever goes and localizes expat.
        if err.getMessage() == 'mismatched tag':
            # Pair the closing-tag position (from the error) with the
            # opening-tag position recorded by the content handler.
            expect, begLine, begCol = self.contentHandler._locationStack[-1]
            endLine, endCol = err.getLineNumber(), err.getColumnNumber()
            raise process.ProcessingFailure(
                "mismatched close tag at line %d, column %d; expected </%s> "
                "(from line %d, column %d)" % (
                    endLine, endCol, expect, begLine, begCol))
        raise process.ProcessingFailure(
            '%s at line %d, column %d' % (err.getMessage(),
                                          err.getLineNumber(),
                                          err.getColumnNumber()))
class _TagTrackingContentHandler(SAX2DOM):
    """
    Define a SAX content handler which keeps track of the start location of
    all open tags. This information is used by the above defined error
    handler to report useful locations when a fatal error is encountered.
    """
    def __init__(self):
        SAX2DOM.__init__(self)
        # Stack of (tag name, line, column) for currently-open elements.
        self._locationStack = []
    def setDocumentLocator(self, locator):
        # Keep the locator so start-element events can record their position.
        self._docLocator = locator
        SAX2DOM.setDocumentLocator(self, locator)
    def startElement(self, name, attrs):
        self._locationStack.append((name, self._docLocator.getLineNumber(), self._docLocator.getColumnNumber()))
        SAX2DOM.startElement(self, name, attrs)
    def endElement(self, name):
        # A well-formed close pops its matching open tag's location.
        self._locationStack.pop()
        SAX2DOM.endElement(self, name)
class _LocalEntityResolver(object):
    """
    Implement DTD loading (from a local source) for the limited number of
    DTDs which are allowed for Lore input documents.
    @ivar filename: The name of the file containing the lore input
    document.
    @ivar knownDTDs: A mapping from DTD system identifiers to L{FilePath}
    instances pointing to the corresponding DTD.
    """
    # Map each permitted system identifier to a locally-bundled copy.  The
    # None key handles the UseForeignDTD case where the document declares
    # no DTD at all.
    s = FilePath(__file__).sibling
    knownDTDs = {
        None: s("xhtml1-strict.dtd"),
        "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd": s("xhtml1-strict.dtd"),
        "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd": s("xhtml1-transitional.dtd"),
        "xhtml-lat1.ent": s("xhtml-lat1.ent"),
        "xhtml-symbol.ent": s("xhtml-symbol.ent"),
        "xhtml-special.ent": s("xhtml-special.ent"),
    }
    del s
    def __init__(self, filename):
        self.filename = filename
    def resolveEntity(self, publicId, systemId):
        source = InputSource()
        source.setSystemId(systemId)
        try:
            dtdPath = self.knownDTDs[systemId]
        except KeyError:
            # NOTE(review): the message mentions only the transitional DTD,
            # although knownDTDs actually accepts several identifiers.
            raise process.ProcessingFailure(
                "Invalid DTD system identifier (%r) in %s. Only "
                "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd "
                "is allowed." % (systemId, self.filename))
        source.setByteStream(dtdPath.open())
        return source
def parseFileAndReport(filename, _open=file):
    """
    Parse and return the contents of the given lore XHTML document.
    @type filename: C{str}
    @param filename: The name of a file containing a lore XHTML document to
    load.
    @raise process.ProcessingFailure: When the contents of the specified file
    cannot be parsed.
    @rtype: A DOM Document
    @return: The document contained in C{filename}.
    """
    # _open is parameterized so tests can substitute a fake file factory.
    content = _TagTrackingContentHandler()
    error = _LocationReportingErrorHandler(content)
    parser = make_parser()
    parser.setContentHandler(content)
    parser.setErrorHandler(error)
    # In order to call a method on the expat parser which will be used by this
    # parser, we need the expat parser to be created. This doesn't happen
    # until reset is called, normally by the parser's parse method. That's too
    # late for us, since it will then go on to parse the document without
    # letting us do any extra set up. So, force the expat parser to be created
    # here, and then disable reset so that the parser created is the one
    # actually used to parse our document. Resetting is only needed if more
    # than one document is going to be parsed, and that isn't the case here.
    parser.reset()
    parser.reset = lambda: None
    # This is necessary to make the xhtml1 transitional declaration optional.
    # It causes LocalEntityResolver.resolveEntity(None, None) to be called.
    # LocalEntityResolver handles that case by giving out the xhtml1
    # transitional dtd. Unfortunately, there is no public API for manipulating
    # the expat parser when using xml.sax. Using the private _parser attribute
    # may break. It's also possible that make_parser will return a parser
    # which doesn't use expat, but uses some other parser. Oh well. :(
    # -exarkun
    parser._parser.UseForeignDTD(True)
    parser.setEntityResolver(_LocalEntityResolver(filename))
    # This is probably no-op because expat is not a validating parser. Who
    # knows though, maybe you figured out a way to not use expat.
    parser.setFeature(feature_validation, False)
    fObj = _open(filename)
    try:
        try:
            parser.parse(fObj)
        except IOError, e:
            # Re-raise I/O problems as ProcessingFailure with the filename
            # attached for a more helpful message.
            raise process.ProcessingFailure(
                e.strerror + ", filename was '" + filename + "'")
    finally:
        fObj.close()
    return content.document
def makeSureDirectoryExists(filename):
    """
    Create the directory which will contain C{filename}, if it is missing.
    @param filename: A (possibly relative) path; its parent directory is
    created, including intermediate directories, when absent.
    @return: C{None}
    """
    containingDir = os.path.dirname(os.path.abspath(filename))
    if not os.path.exists(containingDir):
        os.makedirs(containingDir)
def doFile(filename, linkrel, ext, url, templ, options={}, outfileGenerator=getOutputFileName):
    """
    Process the input document at C{filename} and write an output document.
    @type filename: C{str}
    @param filename: The path to the input file which will be processed.
    @type linkrel: C{str}
    @param linkrel: A prefix to apply to all relative links in C{src} or
    C{href} attributes in the input document when generating the output
    document.
    @type ext: C{str}
    @param ext: The extension to use when selecting an output file name. This
    replaces the extension of the input file name.
    @type url: C{str}
    @param url: A string which will be interpolated with the fully qualified
    Python name of any API reference encountered in the input document, the
    result of which will be used as a link to API documentation for that name
    in the output document.
    @type templ: A DOM Node or Document
    @param templ: The template on which the output document will be based.
    This is mutated and then serialized to the output file.
    @type options: C{dict}
    @param options: Further specification of the desired form of the output.
    Valid keys in this dictionary::
        noapi: If present and set to a True value, links to API documentation
               will not be generated.
        version: A string which will be included in the output to indicate the
                 version of this documentation.
    @type outfileGenerator: Callable of C{str}, C{str} returning C{str}
    @param outfileGenerator: Output filename factory. This is invoked with the
    input filename and C{ext} and the output document is serialized to the
    file with the name returned.
    @return: C{None}
    """
    # NOTE(review): `options={}` is a mutable default; harmless here since it
    # is only read (config.get), but worth keeping in mind.
    doc = parseFileAndReport(filename)
    # Deep-copy the template so the caller can reuse it for other inputs.
    clonedNode = templ.cloneNode(1)
    munge(doc, clonedNode, linkrel, os.path.dirname(filename), filename, ext,
          url, options, outfileGenerator)
    newFilename = outfileGenerator(filename, ext)
    _writeDocument(newFilename, clonedNode)
def _writeDocument(newFilename, clonedNode):
    """
    Serialize the given node to XML into the named file.
    @param newFilename: The name of the file to which the XML will be
    written. If this is in a directory which does not exist, the
    directory will be created.
    @param clonedNode: The root DOM node which will be serialized.
    @return: C{None}
    """
    makeSureDirectoryExists(newFilename)
    f = open(newFilename, 'w')
    try:
        # Close the file even when serialization fails; previously the
        # handle was leaked if toxml() raised.
        f.write(clonedNode.toxml('utf-8'))
    finally:
        f.close()
|
movmov/cc
|
vendor/Twisted-10.0.0/twisted/lore/tree.py
|
Python
|
apache-2.0
| 40,056
|
[
"VisIt"
] |
77d2a505e830f5589e2fad4e240bf0b4457613c6823bc968bc83017b5bc9929f
|
# Copyright (C) 2013, Walter Bender - Raul Gutierrez Segales
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gettext import gettext as _
from gi.repository import GLib
from gi.repository import Gtk
from gi.repository import Gdk
from jarabe.webservice.accountsmanager import get_webaccount_services
from jarabe.controlpanel.sectionview import SectionView
from sugar3.graphics.icon import CanvasIcon, Icon
from sugar3.graphics import style
def get_service_name(service):
    """Return the description of the service's account, or '' if absent."""
    account = getattr(service, '_account', None)
    describe = getattr(account, 'get_description', None)
    if describe is not None:
        return describe()
    return ''
class WebServicesConfig(SectionView):
    """
    Control-panel section listing the installed web-account services.
    Shows a placeholder message when no services are installed; otherwise
    lays the service icons out in a screen-width-dependent grid.
    """
    def __init__(self, model, alerts):
        SectionView.__init__(self)
        self._model = model
        self.restart_alerts = alerts
        services = get_webaccount_services()
        grid = Gtk.Grid()
        if len(services) == 0:
            # No services installed: show an icon plus an explanatory label
            # and return early without building the service grid.
            grid.set_row_spacing(style.DEFAULT_SPACING)
            icon = Icon(pixel_size=style.LARGE_ICON_SIZE,
                        icon_name='module-webaccount',
                        stroke_color=style.COLOR_BUTTON_GREY.get_svg(),
                        fill_color=style.COLOR_TRANSPARENT.get_svg())
            grid.attach(icon, 0, 0, 1, 1)
            icon.show()
            label = Gtk.Label()
            label.set_justify(Gtk.Justification.CENTER)
            # NOTE(review): the URL is %-interpolated into the msgid *before*
            # gettext lookup (`_('... %s ...' % url)`), which defeats
            # translation; should be `_('... %s ...') % url`.
            label.set_markup(
                '<span foreground="%s" size="large">%s</span>'
                % (style.COLOR_BUTTON_GREY.get_html(),
                   GLib.markup_escape_text(
                       _('No web services are installed.\n'
                         'Please visit %s for more details.' %
                         'http://wiki.sugarlabs.org/go/WebServices'))))
            label.show()
            grid.attach(label, 0, 1, 1, 1)
            alignment = Gtk.Alignment.new(0.5, 0.5, 0.1, 0.1)
            alignment.add(grid)
            grid.show()
            self.add(alignment)
            alignment.show()
            return
        grid.set_row_spacing(style.DEFAULT_SPACING * 4)
        grid.set_column_spacing(style.DEFAULT_SPACING * 4)
        grid.set_border_width(style.DEFAULT_SPACING * 2)
        grid.set_column_homogeneous(True)
        # Number of columns is derived from the available screen width.
        width = Gdk.Screen.width() - 2 * style.GRID_CELL_SIZE
        nx = int(width / (style.GRID_CELL_SIZE + style.DEFAULT_SPACING * 4))
        self._service_config_box = Gtk.VBox()
        x = 0
        y = 0
        for service in services:
            # Each service gets an activatable icon with its name below it.
            service_grid = Gtk.Grid()
            icon = CanvasIcon(icon_name=service.get_icon_name())
            icon.show()
            service_grid.attach(icon, x, y, 1, 1)
            icon.connect('activate', service.config_service_cb, None,
                         self._service_config_box)
            label = Gtk.Label()
            label.set_justify(Gtk.Justification.CENTER)
            name = get_service_name(service)
            label.set_markup(name)
            service_grid.attach(label, x, y + 1, 1, 1)
            label.show()
            grid.attach(service_grid, x, y, 1, 1)
            service_grid.show()
            # Wrap to the next row after nx columns.
            x += 1
            if x == nx:
                x = 0
                y += 1
        alignment = Gtk.Alignment.new(0.5, 0, 0, 0)
        alignment.add(grid)
        grid.show()
        vbox = Gtk.VBox()
        vbox.pack_start(alignment, False, False, 0)
        alignment.show()
        # The per-service configuration UI lives below the grid, inside a
        # vertically-scrolling viewport.
        scrolled = Gtk.ScrolledWindow()
        vbox.pack_start(scrolled, True, True, 0)
        self.add(vbox)
        scrolled.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
        scrolled.show()
        workspace = Gtk.VBox()
        scrolled.add_with_viewport(workspace)
        workspace.show()
        workspace.add(self._service_config_box)
        workspace.show_all()
        vbox.show()
    def undo(self):
        # Nothing to revert: this section applies no persistent changes.
        pass
|
icarito/sugar
|
extensions/cpsection/webaccount/view.py
|
Python
|
gpl-3.0
| 4,477
|
[
"VisIt"
] |
6e8a43360437a254874521423aa6e88ad865db04d83b2c96e4846b128f2d3994
|
import os
from os import path
import numpy as np
from netCDF4 import Dataset
import pyproj
from shapely.ops import transform
from shapely.geometry import MultiPoint, Polygon, MultiPolygon
from shapely.prepared import prep
from functools import partial
from shyft import api
from shyft import shyftdata_dir
from .. import interfaces
from .time_conversion import convert_netcdf_time
UTC = api.Calendar()
class AromeConcatDataRepositoryError(Exception):
    """Raised for configuration or data-extraction errors in AromeConcatDataRepository."""
    pass
class AromeConcatDataRepository(interfaces.GeoTsRepository):
    """
    GeoTsRepository reading geo-located time series from a netCDF file
    containing concatenated AROME forecasts.
    """
    _G = 9.80665 # WMO-defined gravity constant to calculate the height in metres from geopotential
    def __init__(self, epsg, filename, nb_fc_to_drop=0, selection_criteria=None, padding=5000.):
        """
        Parameters
        ----------
        epsg: int or str
            EPSG code of the target (shyft) coordinate system.
        filename: str
            Path to the netCDF file; environment variables are expanded and
            relative paths are resolved against shyftdata_dir.
        nb_fc_to_drop: int
            Index of the first lead time to use; starts from 0.
        selection_criteria: dict, optional
            One of {'unique_id': [...]}, {'polygon': shapely geometry} or
            {'bbox': (x_array, y_array)}; validated on construction.
        padding: float
            Buffer (presumably metres — matches the coordinate units) applied
            around polygon/bbox selections.
        """
        self.selection_criteria = selection_criteria
        #filename = filename.replace('${SHYFTDATA}', os.getenv('SHYFTDATA', '.'))
        filename = os.path.expandvars(filename)
        if not path.isabs(filename):
            # Relative paths will be prepended the data_dir
            filename = path.join(shyftdata_dir, filename)
        if not path.isfile(filename):
            raise AromeConcatDataRepositoryError("No such file '{}'".format(filename))
        self._filename = filename
        self.nb_fc_to_drop = nb_fc_to_drop # index of first lead time: starts from 0
        self.nb_fc_interval_to_concat = 1 # given as number of forecast intervals
        self.shyft_cs = "+init=EPSG:{}".format(epsg)
        self.padding = padding
        # Field names and mappings netcdf_name: shyft_name
        self._arome_shyft_map = {"relative_humidity_2m": "relative_humidity",
                                 "air_temperature_2m": "temperature",
                                 "precipitation_amount": "precipitation",
                                 "precipitation_amount_acc": "precipitation",
                                 "x_wind_10m": "x_wind",
                                 "y_wind_10m": "y_wind",
                                 "windspeed_10m": "wind_speed",
                                 "integral_of_surface_downwelling_shortwave_flux_in_air_wrt_time": "radiation"}
        # Accepted unit strings per netCDF variable; checked at read time.
        self.var_units = {"air_temperature_2m": ['K'],
                          "relative_humidity_2m": ['1'],
                          "precipitation_amount_acc": ['kg/m^2'],
                          "precipitation_amount": ['kg/m^2'],
                          "x_wind_10m": ['m/s'],
                          "y_wind_10m": ['m/s'],
                          "windspeed_10m" : ['m/s'],
                          "integral_of_surface_downwelling_shortwave_flux_in_air_wrt_time": ['W s/m^2']}
        # Accumulated fields that need one extra lead time for deaccumulation.
        self._shift_fields = ("precipitation_amount_acc","precipitation_amount",
                              "integral_of_surface_downwelling_shortwave_flux_in_air_wrt_time")
        self.source_type_map = {"relative_humidity": api.RelHumSource,
                                "temperature": api.TemperatureSource,
                                "precipitation": api.PrecipitationSource,
                                "radiation": api.RadiationSource,
                                "wind_speed": api.WindSpeedSource}
        self.series_type = {"relative_humidity": api.POINT_INSTANT_VALUE,
                            "temperature": api.POINT_INSTANT_VALUE,
                            "precipitation": api.POINT_AVERAGE_VALUE,
                            "radiation": api.POINT_AVERAGE_VALUE,
                            "wind_speed": api.POINT_INSTANT_VALUE}
        if self.selection_criteria is not None: self._validate_selection_criteria()
def _validate_selection_criteria(self):
s_c = self.selection_criteria
if list(s_c)[0] == 'unique_id':
if not isinstance(s_c['unique_id'], list):
raise AromeConcatDataRepositoryError("Unique_id selection criteria should be a list.")
elif list(s_c)[0] == 'polygon':
if not isinstance(s_c['polygon'], (Polygon, MultiPolygon)):
raise AromeConcatDataRepositoryError("polygon selection criteria should be one of these shapley objects: (Polygon, MultiPolygon).")
elif list(s_c)[0] == 'bbox':
if not (isinstance(s_c['bbox'], tuple) and len(s_c['bbox']) == 2):
raise AromeConcatDataRepositoryError("bbox selection criteria should be a tuple with two numpy arrays.")
self._bounding_box = s_c['bbox']
else:
raise AromeConcatDataRepositoryError("Unrecognized selection criteria.")
    def get_timeseries(self, input_source_types, utc_period, geo_location_criteria=None):
        """Get shyft source vectors of time series for input_source_types
        Parameters
        ----------
        input_source_types: list
            List of source types to retrieve (precipitation, temperature..)
        geo_location_criteria: object, optional
            bbox or shapely polygon
        utc_period: api.UtcPeriod
            The utc time period that should (as a minimum) be covered.
        Returns
        -------
        geo_loc_ts: dictionary
            dictionary keyed by time series name, where values are api vectors of geo
            located timeseries.
        """
        # concat=True stitches successive forecasts into one continuous
        # series; the dataset is opened and closed per call.
        with Dataset(self._filename) as dataset:
            return self._get_data_from_dataset(dataset, input_source_types,
                                               utc_period, geo_location_criteria, concat=True)
    def get_forecasts(self, input_source_types, fc_selection_criteria, geo_location_criteria):
        """
        Get shyft source vectors of time series, one per selected forecast.
        fc_selection_criteria must be a single-entry dict keyed by one of:
        'forecasts_within_period' (api.UtcPeriod),
        'latest_available_forecasts' (dict with 'number of forecasts' and
        'forecasts_older_than') or 'forecasts_at_reference_times' (list).
        """
        # Only the first key/value pair of the criteria dict is considered.
        k, v = list(fc_selection_criteria.items())[0]
        if k == 'forecasts_within_period':
            if not isinstance(v, api.UtcPeriod):
                raise AromeConcatDataRepositoryError("'forecasts_within_period' selection criteria should be of type api.UtcPeriod.")
        elif k == 'latest_available_forecasts':
            if not all([isinstance(v, dict), isinstance(v['number of forecasts'], int), isinstance(v['forecasts_older_than'], int)]):
                raise AromeConcatDataRepositoryError("'latest_available_forecasts' selection criteria should be of type dict.")
        elif k == 'forecasts_at_reference_times':
            if not isinstance(v, list):
                raise AromeConcatDataRepositoryError("'forecasts_at_reference_times' selection criteria should be of type list.")
        else:
            raise AromeConcatDataRepositoryError("Unrecognized forecast selection criteria.")
        # concat=False keeps each forecast as its own series.
        with Dataset(self._filename) as dataset:
            return self._get_data_from_dataset(dataset, input_source_types,
                                               v, geo_location_criteria, concat=False)
    def get_forecast(self, input_source_types, utc_period, t_c, geo_location_criteria):
        """
        Parameters:
        see get_timeseries
        semantics for utc_period: Get the forecast closest up to utc_period.start
        """
        # Not implemented for this repository; use get_forecasts instead.
        raise NotImplementedError("get_forecast")
    def get_forecast_ensemble(self, input_source_types, utc_period,
                              t_c, geo_location_criteria=None):
        # Ensemble extraction is not supported by this repository.
        raise NotImplementedError("get_forecast_ensemble")
def _convert_to_timeseries(self, data, concat):
"""Convert timeseries from numpy structures to shyft.api timeseries.
Returns
-------
timeseries: dict
Time series arrays keyed by type
"""
tsc = api.TsFactory().create_point_ts
time_series = {}
if concat:
for key, (data, ta) in data.items():
nb_timesteps, nb_pts = data.shape
def construct(d):
if ta.size() != d.size:
raise AromeConcatDataRepositoryError("Time axis size {} not equal to the number of "
"data points ({}) for {}"
"".format(ta.size(), d.size, key))
return tsc(ta.size(), ta.start, ta.delta_t,
api.DoubleVector.FromNdArray(d.flatten()), self.series_type[key])
time_series[key] = np.array([construct(data[:, j]) for j in range(nb_pts)])
else:
def construct(d, tax):
if tax.size() != d.size:
raise AromeConcatDataRepositoryError("Time axis size {} not equal to the number of "
"data points ({}) for {}"
"".format(tax.size(), d.size, key))
return tsc(tax.size(), tax.start, tax.delta_t,
api.DoubleVector.FromNdArray(d.flatten()), self.series_type[key])
for key, (data, ta) in data.items():
nb_forecasts, nb_timesteps, nb_pts = data.shape
time_series[key] = np.array([construct(data[i, :, j], ta[i]) for i in range(nb_forecasts) for j in range(nb_pts)])
return time_series
    def _limit(self, x, y, data_cs, target_cs, ts_id):
        """
        Select the dataset points that satisfy self.selection_criteria and
        reproject them to the target coordinate system.
        Parameters
        ----------
        x: np.ndarray
            X coordinates in meters in cartesian coordinate system
            specified by data_cs
        y: np.ndarray
            Y coordinates in meters in cartesian coordinate system
            specified by data_cs
        data_cs: string
            Proj4 string specifying the cartesian coordinate system
            of x and y
        target_cs: string
            Proj4 string specifying the target coordinate system
        ts_id: np.ndarray or None
            Time series identifiers; only used for 'unique_id' criteria.
        Returns
        -------
        xx: np.ndarray
            Selected X coordinates in target coordinate system
        yy: np.ndarray
            Selected Y coordinates in target coordinate system
        xy_mask: np.ndarray
            Boolean index array over the original points
        xy_slice: slice
            Contiguous slice covering all selected indices
        """
        # Get coordinate system for netcdf data
        data_proj = pyproj.Proj(data_cs)
        target_proj = pyproj.Proj(target_cs)
        if(list(self.selection_criteria)[0]=='bbox'):
            # Find bounding box in netcdf projection
            bbox = np.array(self.selection_criteria['bbox'])
            # Expand the box outward by `padding`; the sign pattern assumes
            # the corners are ordered (ll, lr, ur, ul) -- NOTE(review):
            # confirm against callers supplying the bbox arrays.
            bbox[0][0] -= self.padding
            bbox[0][1] += self.padding
            bbox[0][2] += self.padding
            bbox[0][3] -= self.padding
            bbox[1][0] -= self.padding
            bbox[1][1] -= self.padding
            bbox[1][2] += self.padding
            bbox[1][3] += self.padding
            bb_proj = pyproj.transform(target_proj, data_proj, bbox[0], bbox[1])
            x_min, x_max = min(bb_proj[0]), max(bb_proj[0])
            y_min, y_max = min(bb_proj[1]), max(bb_proj[1])
            # Limit data
            xy_mask = ((x <= x_max) & (x >= x_min) & (y <= y_max) & (y >= y_min))
        if (list(self.selection_criteria)[0] == 'polygon'):
            # Keep points contained in the (padded) polygon, reprojected to
            # the data coordinate system.
            poly = self.selection_criteria['polygon']
            pts_in_file = MultiPoint(np.dstack((x, y)).reshape(-1, 2))
            project = partial(pyproj.transform, target_proj, data_proj)
            poly_prj = transform(project, poly)
            p_poly = prep(poly_prj.buffer(self.padding))
            xy_mask = np.array(list(map(p_poly.contains, pts_in_file)))
        if (list(self.selection_criteria)[0] == 'unique_id'):
            xy_mask = np.array([id in self.selection_criteria['unique_id'] for id in ts_id])
        # Check if there is at least one point extracted and raise error if there isn't
        if not xy_mask.any():
            raise AromeConcatDataRepositoryError("No points in dataset which satisfy selection criterion '{}'.".
                                                 format(list(self.selection_criteria)[0]))
        xy_inds = np.nonzero(xy_mask)[0]
        # Transform from source coordinates to target coordinates
        xx, yy = pyproj.transform(data_proj, target_proj, x[xy_mask], y[xy_mask])
        return xx, yy, xy_mask, slice(xy_inds[0], xy_inds[-1] + 1)
def _make_time_slice(self, time, lead_time, lead_times_in_sec, fc_selection_criteria_v, concat):
v = fc_selection_criteria_v
nb_extra_intervals = 0
if concat: # make continuous timeseries
self.fc_len_to_concat = self.nb_fc_interval_to_concat * self.fc_interval
utc_period = v # TODO: verify that fc_selection_criteria_v is of type api.UtcPeriod
time_after_drop = time + lead_times_in_sec[self.nb_fc_to_drop]
# idx_min = np.searchsorted(time, utc_period.start, side='left')
idx_min = np.argmin(time_after_drop <= utc_period.start) - 1 # raise error if result is -1
#idx_max = np.searchsorted(time, utc_period.end, side='right')
idx_max = np.argmax(time_after_drop >= utc_period.end) # raise error if result is 0
if idx_min<0:
first_lead_time_of_last_fc = int(time_after_drop[-1])
if first_lead_time_of_last_fc <= utc_period.start:
idx_min = len(time)-1
else:
raise AromeConcatDataRepositoryError(
"The earliest time in repository ({}) is later than the start of the period for which data is "
"requested ({})".format(UTC.to_string(int(time_after_drop[0])), UTC.to_string(utc_period.start)))
if idx_max == 0:
last_lead_time_of_last_fc = int(time[-1] + lead_times_in_sec[-1])
if last_lead_time_of_last_fc < utc_period.end:
raise AromeConcatDataRepositoryError(
"The latest time in repository ({}) is earlier than the end of the period for which data is "
"requested ({})".format(UTC.to_string(last_lead_time_of_last_fc), UTC.to_string(utc_period.end)))
else:
idx_max = len(time)-1
#issubset = True if idx_max < len(time) - 1 else False # For a concat repo 'issubset' is related to the lead_time axis and not the main time axis
issubset = True if self.nb_fc_to_drop + self.fc_len_to_concat < len(lead_time)-1 else False
time_slice = slice(idx_min, idx_max+1)
last_time = int(time[idx_max]+lead_times_in_sec[self.nb_fc_to_drop + self.fc_len_to_concat - 1])
if utc_period.end > last_time:
nb_extra_intervals = int(0.5+(utc_period.end-last_time)/(self.fc_len_to_concat*self.fc_time_res))
else:
#self.fc_len_to_concat = len(lead_time) # Take all lead_times for now
#self.nb_fc_to_drop = 0 # Take all lead_times for now
self.fc_len_to_concat = len(lead_time) - self.nb_fc_to_drop
if isinstance(v, api.UtcPeriod):
time_slice = ((time >= v.start)&(time <= v.end))
if not any(time_slice):
raise AromeConcatDataRepositoryError(
"No forecasts found with start time within period {}.".format(v.to_string()))
elif isinstance(v, list):
raise AromeConcatDataRepositoryError(
"'forecasts_at_reference_times' selection criteria not supported yet.")
elif isinstance(v, dict): # get the latest forecasts
t = v['forecasts_older_than']
n = v['number of forecasts']
idx = np.argmin(time <= t) - 1
if idx < 0:
first_lead_time_of_last_fc = int(time[-1])
if first_lead_time_of_last_fc <= t:
idx = len(time) - 1
else:
raise AromeConcatDataRepositoryError(
"The earliest time in repository ({}) is later than or at the start of the period for which data is "
"requested ({})".format(UTC.to_string(int(time[0])), UTC.to_string(t)))
if idx+1 < n:
raise AromeConcatDataRepositoryError(
"The number of forecasts available in repo ({}) and earlier than the parameter "
"'forecasts_older_than' ({}) is less than the number of forecasts requested ({})".format(
idx+1, UTC.to_string(t), n))
time_slice = slice(idx-n+1, idx+1)
issubset = False # Since we take all the lead_times for now
lead_time_slice = slice(self.nb_fc_to_drop, self.nb_fc_to_drop + self.fc_len_to_concat)
#For checking
# print('Time slice:', UTC.to_string(int(time[time_slice][0])), UTC.to_string(int(time[time_slice][-1])))
return time_slice, lead_time_slice, issubset, self.fc_len_to_concat, nb_extra_intervals
    def _get_data_from_dataset(self, dataset, input_source_types, fc_selection_criteria_v,
                               geo_location_criteria, concat=True, ensemble_member=None):
        """
        Extract the requested source types from an open netCDF dataset and
        return them as geo-located shyft time series vectors.
        """
        ts_id = None
        if geo_location_criteria is not None:
            self.selection_criteria = geo_location_criteria
        self._validate_selection_criteria()
        if list(self.selection_criteria)[0]=='unique_id':
            # Locate the variable flagged with cf_role=timeseries_id.
            ts_id_key = [k for (k, v) in dataset.variables.items() if getattr(v, 'cf_role', None) == 'timeseries_id'][0]
            ts_id = dataset.variables[ts_id_key][:]
        if "wind_speed" in input_source_types and "x_wind_10m" in dataset.variables:
            # Wind speed must be derived from the x/y components.
            input_source_types = list(input_source_types)  # We change input list, so take a copy
            input_source_types.remove("wind_speed")
            input_source_types.append("x_wind")
            input_source_types.append("y_wind")
        unit_ok = {k:dataset.variables[k].units in self.var_units[k]
                   for k in dataset.variables.keys() if self._arome_shyft_map.get(k, None) in input_source_types}
        if not all(unit_ok.values()):
            raise AromeConcatDataRepositoryError("The following variables have wrong unit: {}.".format(
                ', '.join([k for k, v in unit_ok.items() if not v])))
        raw_data = {}
        x = dataset.variables.get("x", None)
        y = dataset.variables.get("y", None)
        time = dataset.variables.get("time", None)
        lead_time = dataset.variables.get("lead_time", None)
        dim_nb_series = [dim.name for dim in dataset.dimensions.values() if dim.name not in ['time', 'lead_time']][0]
        if not all([x, y, time, lead_time]):
            raise AromeConcatDataRepositoryError("Something is wrong with the dataset."
                                                 " x/y coords or time not found.")
        # NOTE(review): this second check is redundant — it is implied by the
        # check above, which already includes lead_time.
        if not all([x, y, time]):
            raise AromeConcatDataRepositoryError("Something is wrong with the dataset."
                                                 " x/y coords or time not found.")
        # NOTE(review): the `and x.units == y.units` makes this raise only
        # when units are invalid AND equal; mismatched x/y units slip through.
        # Likely intended: raise when units invalid OR x.units != y.units.
        if not all([var.units in ['km', 'm'] for var in [x, y]]) and x.units == y.units:
            raise AromeConcatDataRepositoryError("The unit for x and y coordinates should be either m or km.")
        coord_conv = 1.
        if x.units == 'km':
            # Convert km coordinates to metres for the projection step.
            coord_conv = 1000.
        data_cs = dataset.variables.get("crs", None)
        if data_cs is None:
            raise AromeConcatDataRepositoryError("No coordinate system information in dataset.")
        time = convert_netcdf_time(time.units,time)
        # Lead times are stored in hours; convert to seconds.
        lead_times_in_sec = lead_time[:]*3600.
        self.fc_time_res = (lead_time[1]-lead_time[0])*3600. # in seconds
        self.fc_interval = int((time[1]-time[0])/self.fc_time_res) # in-terms of self.fc_time_res
        time_slice, lead_time_slice, issubset, self.fc_len_to_concat, nb_extra_intervals = \
            self._make_time_slice(time, lead_time, lead_times_in_sec,fc_selection_criteria_v, concat)
        time_ext = time[time_slice]
        # NOTE(review): debug print left in production code.
        print('nb_extra_intervals:',nb_extra_intervals)
        if nb_extra_intervals > 0:
            # Extend the time axis with synthetic intervals taken from the
            # last available forecast.
            time_extra = time_ext[-1]+np.arange(1, nb_extra_intervals+1)*self.fc_len_to_concat*self.fc_time_res
            time_ext = np.concatenate((time_ext, time_extra))
            # print('Extra time:', time_ext)
        x, y, m_xy, xy_slice = self._limit(x[:]*coord_conv, y[:]*coord_conv, data_cs.proj4, self.shyft_cs, ts_id)
        for k in dataset.variables.keys():
            if self._arome_shyft_map.get(k, None) in input_source_types:
                if k in self._shift_fields and issubset:  # Add one to lead_time slice
                    # Accumulated fields need one extra lead time so they can
                    # be deaccumulated later.
                    data_lead_time_slice = slice(lead_time_slice.start, lead_time_slice.stop + 1)
                else:
                    data_lead_time_slice = lead_time_slice
                data = dataset.variables[k]
                dims = data.dimensions
                data_slice = len(data.dimensions)*[slice(None)]
                if ensemble_member is not None:
                    data_slice[dims.index("ensemble_member")] = ensemble_member
                data_slice[dims.index(dim_nb_series)] = xy_slice
                data_slice[dims.index("lead_time")] = data_lead_time_slice
                data_slice[dims.index("time")] = time_slice  # data_time_slice
                # Read a contiguous slab first, then refine with the boolean
                # mask (faster than fancy-indexing the netCDF variable).
                new_slice = [m_xy[xy_slice] if dim==dim_nb_series else slice(None) for dim in dims ]
                print('Reading', k)
                pure_arr = data[data_slice][new_slice]
                print('Finished reading', k)
                # To check equality of the two extraction methods
                # data_slice[dims.index(dim_nb_series)] = m_xy
                # print('Diff:', np.sum(data[data_slice]-pure_arr)) # This should be 0.0
                if isinstance(pure_arr, np.ma.core.MaskedArray):
                    pure_arr = pure_arr.filled(np.nan)
                if nb_extra_intervals > 0:
                    # Fill the synthetic intervals with data from the last
                    # forecast's later lead times.
                    data_slice[dims.index("time")] = [time_slice.stop - 1]
                    data_slice[dims.index("lead_time")] = slice(data_lead_time_slice.stop,
                                                                data_lead_time_slice.stop + (nb_extra_intervals+1) * self.fc_len_to_concat)
                    data_extra = data[data_slice][new_slice].reshape(nb_extra_intervals+1, self.fc_len_to_concat, -1)
                    if k in self._shift_fields:
                        data_extra_ = np.zeros((nb_extra_intervals, self.fc_len_to_concat+1, len(x)), dtype=data_extra.dtype)
                        data_extra_[:, 0:-1, :] = data_extra[:-1, :, :]
                        data_extra_[:, -1, :] = data_extra[1:, -1, :]
                        data_extra = data_extra_
                    else:
                        data_extra = data_extra[:-1]
                    #print('Extra data shape:', data_extra.shape)
                    #print('Main data shape:', pure_arr.shape)
                    raw_data[self._arome_shyft_map[k]] = np.concatenate((pure_arr, data_extra)), k
                else:
                    raw_data[self._arome_shyft_map[k]] = pure_arr, k
        if 'z' in dataset.variables.keys():
            # Elevation per point, masked to the selected series.
            data = dataset.variables['z']
            dims = data.dimensions
            data_slice = len(data.dimensions) * [slice(None)]
            data_slice[dims.index(dim_nb_series)] = m_xy
            z = data[data_slice]
        else:
            raise AromeConcatDataRepositoryError("No elevations found in dataset")
        pts = np.dstack((x, y, z)).reshape(-1,3)
        if not concat:
            # One copy of the point set per forecast.
            pts = np.tile(pts, (len(time[time_slice]), 1))
        self.pts = pts
        if set(("x_wind", "y_wind")).issubset(raw_data):
            # Derive wind speed from the components.
            x_wind, _ = raw_data.pop("x_wind")
            y_wind, _ = raw_data.pop("y_wind")
            raw_data["wind_speed"] = np.sqrt(np.square(x_wind) + np.square(y_wind)), "windspeed_10m"
        extracted_data = self._transform_raw(raw_data, time_ext, lead_times_in_sec[lead_time_slice], concat)
        return self._geo_ts_to_vec(self._convert_to_timeseries(extracted_data, concat), pts)
    def _transform_raw(self, data, time, lead_time, concat):
        """Convert raw netCDF arrays into (values, time-axis) pairs.

        Parameters
        ----------
        data: dict
            Maps shyft variable name -> (raw array, source netCDF key).
        time: array-like
            Forecast reference times in seconds; the full series is needed
            when de-accumulating (precipitation/radiation).
        lead_time: array-like
            Lead times (seconds) within one forecast.
        concat: bool
            True  -> forecasts are concatenated into one continuous series
                     with a single fixed-delta time axis;
            False -> one time axis is built per forecast.
        """
        def concat_t(t):
            # Stretch every forecast start over its concatenated lead times,
            # then flatten into one continuous time vector.
            t_stretch = np.ravel(np.repeat(t, self.fc_len_to_concat).reshape(len(t), self.fc_len_to_concat) + lead_time[
                0:self.fc_len_to_concat])
            return api.TimeAxisFixedDeltaT(int(t_stretch[0]), int(t_stretch[1]) - int(t_stretch[0]), len(t_stretch))
        def forecast_t(t, daccumulated_var=False):
            # One time axis per forecast. De-accumulated variables lose one
            # interval (the difference consumes the first lead time).
            nb_ext_lead_times = self.fc_len_to_concat - 1 if daccumulated_var else self.fc_len_to_concat
            t_all = np.repeat(t, nb_ext_lead_times).reshape(len(t), nb_ext_lead_times) + lead_time[0:nb_ext_lead_times]
            dt = lead_time[1] - lead_time[0] # or self.fc_time_res
            t_one_len = nb_ext_lead_times
            return [api.TimeAxisFixedDeltaT(int(t_one[0]), int(dt), t_one_len) for t_one in t_all]
        def concat_v(x):
            return x.reshape(-1, x.shape[-1]) # shape = (nb_forecasts*nb_lead_times, nb_points)
        def forecast_v(x):
            return x # shape = (nb_forecasts, nb_lead_times, nb_points)
        def air_temp_conv(T, fcn):
            # Kelvin -> Celsius.
            return fcn(T - 273.15)
        def prec_conv(p, fcn):
            # Drop the first lead time so values align with the intervals.
            return fcn(p[:, 1:, :])
        def prec_acc_conv(p, fcn):
            # De-accumulate: difference of successive accumulated values,
            # clipped to [0, 1000] to guard against noise/counter resets.
            return fcn(np.clip(p[:, 1:, :] - p[:, :-1, :], 0.0, 1000.0))
        def rad_conv(r, fcn):
            # De-accumulate time-integrated radiation and divide by the
            # interval length to obtain a mean flux, clipped to a physical range.
            dr = r[:, 1:, :] - r[:, :-1, :]
            return fcn(np.clip(dr / (lead_time[1] - lead_time[0]), 0.0, 5000.0))
        # Unit- and aggregation-dependent conversions go here
        if concat:
            convert_map = {"windspeed_10m": lambda x, t: (concat_v(x), concat_t(t)),
                           "relative_humidity_2m": lambda x, t: (concat_v(x), concat_t(t)),
                           "air_temperature_2m": lambda x, t: (air_temp_conv(x, concat_v), concat_t(t)),
                           "integral_of_surface_downwelling_shortwave_flux_in_air_wrt_time":
                               lambda x, t: (rad_conv(x, concat_v), concat_t(t)),
                           "precipitation_amount_acc": lambda x, t: (prec_acc_conv(x, concat_v), concat_t(t)),
                           "precipitation_amount": lambda x, t: (prec_conv(x, concat_v), concat_t(t))}
        else:
            convert_map = {"windspeed_10m": lambda x, t: (forecast_v(x), forecast_t(t)),
                           "relative_humidity_2m": lambda x, t: (forecast_v(x), forecast_t(t)),
                           "air_temperature_2m": lambda x, t: (air_temp_conv(x, forecast_v), forecast_t(t)),
                           "integral_of_surface_downwelling_shortwave_flux_in_air_wrt_time":
                               lambda x, t: (rad_conv(x, forecast_v), forecast_t(t, True)),
                           "precipitation_amount_acc": lambda x, t: (prec_acc_conv(x, forecast_v), forecast_t(t, True)),
                           "precipitation_amount": lambda x, t: (prec_conv(x, forecast_v), forecast_t(t, True))}
        res = {}
        # ak is the original netCDF key, used to pick the right conversion.
        for k, (v, ak) in data.items():
            res[k] = convert_map[ak](v, time)
        return res
def _geo_ts_to_vec(self, data, pts):
res = {}
for name, ts in data.items():
tpe = self.source_type_map[name]
tpe_v = tpe.vector_t()
for idx in np.ndindex(pts.shape[:-1]):
tpe_v.append(tpe(api.GeoPoint(*pts[idx]), ts[idx]))
res[name] = tpe_v
return res
|
felixmatt/shyft
|
shyft/repository/netcdf/arome_concat_data_repository.py
|
Python
|
lgpl-3.0
| 27,396
|
[
"NetCDF"
] |
52725118553d19b4bf1c76a927791f5b201bda4abc77754e6f61a316b469577c
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, date
from lxml import etree
import time
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.addons.resource.faces import task as Task
from openerp.osv import fields, osv
from openerp.tools import float_is_zero
from openerp.tools.translate import _
class project_task_type(osv.osv):
    """Kanban stage for project tasks."""
    _name = 'project.task.type'
    _description = 'Task Stage'
    _order = 'sequence'
    _columns = {
        'name': fields.char('Stage Name', required=True, translate=True),
        'description': fields.text('Description'),
        'sequence': fields.integer('Sequence'),
        'case_default': fields.boolean('Default for New Projects',
                        help="If you check this field, this stage will be proposed by default on each new project. It will not assign this stage to existing projects."),
        'project_ids': fields.many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id', 'Projects'),
        # Fixed missing space in the concatenated help string
        # ("...view when" + "there are..." rendered as "whenthere").
        'fold': fields.boolean('Folded in Kanban View',
                               help='This stage is folded in the kanban view when '
                                    'there are no records in that stage to display.'),
    }

    def _get_default_project_ids(self, cr, uid, ctx=None):
        """Return the contextual default project as a one-element list, or None."""
        # Mutable default argument ({}) replaced by None to avoid sharing
        # one dict object across calls.
        if ctx is None:
            ctx = {}
        project_id = self.pool['project.task']._get_default_project_id(cr, uid, context=ctx)
        if project_id:
            return [project_id]
        return None

    _defaults = {
        'sequence': 1,
        'project_ids': _get_default_project_ids,
    }
class project(osv.osv):
_name = "project.project"
_description = "Project"
_inherits = {'account.analytic.account': "analytic_account_id",
"mail.alias": "alias_id"}
_inherit = ['mail.thread', 'ir.needaction_mixin']
def _auto_init(self, cr, context=None):
""" Installation hook: aliases, project.project """
# create aliases for all projects and avoid constraint errors
alias_context = dict(context, alias_model_name='project.task')
return self.pool.get('mail.alias').migrate_to_alias(cr, self._name, self._table, super(project, self)._auto_init,
'project.task', self._columns['alias_id'], 'id', alias_prefix='project+', alias_defaults={'project_id':'id'}, context=alias_context)
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
if user == 1:
return super(project, self).search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
if context and context.get('user_preference'):
cr.execute("""SELECT project.id FROM project_project project
LEFT JOIN account_analytic_account account ON account.id = project.analytic_account_id
LEFT JOIN project_user_rel rel ON rel.project_id = project.id
WHERE (account.user_id = %s or rel.uid = %s)"""%(user, user))
return [(r[0]) for r in cr.fetchall()]
return super(project, self).search(cr, user, args, offset=offset, limit=limit, order=order,
context=context, count=count)
def onchange_partner_id(self, cr, uid, ids, part=False, context=None):
partner_obj = self.pool.get('res.partner')
val = {}
if not part:
return {'value': val}
if 'pricelist_id' in self.fields_get(cr, uid, context=context):
pricelist = partner_obj.read(cr, uid, part, ['property_product_pricelist'], context=context)
pricelist_id = pricelist.get('property_product_pricelist', False) and pricelist.get('property_product_pricelist')[0] or False
val['pricelist_id'] = pricelist_id
return {'value': val}
def _get_projects_from_tasks(self, cr, uid, task_ids, context=None):
tasks = self.pool.get('project.task').browse(cr, uid, task_ids, context=context)
project_ids = [task.project_id.id for task in tasks if task.project_id]
return self.pool.get('project.project')._get_project_and_parents(cr, uid, project_ids, context)
def _get_project_and_parents(self, cr, uid, ids, context=None):
""" return the project ids and all their parent projects """
res = set(ids)
while ids:
cr.execute("""
SELECT DISTINCT parent.id
FROM project_project project, project_project parent, account_analytic_account account
WHERE project.analytic_account_id = account.id
AND parent.analytic_account_id = account.parent_id
AND project.id IN %s
""", (tuple(ids),))
ids = [t[0] for t in cr.fetchall()]
res.update(ids)
return list(res)
def _get_project_and_children(self, cr, uid, ids, context=None):
""" retrieve all children projects of project ids;
return a dictionary mapping each project to its parent project (or None)
"""
res = dict.fromkeys(ids, None)
while ids:
cr.execute("""
SELECT project.id, parent.id
FROM project_project project, project_project parent, account_analytic_account account
WHERE project.analytic_account_id = account.id
AND parent.analytic_account_id = account.parent_id
AND parent.id IN %s
""", (tuple(ids),))
dic = dict(cr.fetchall())
res.update(dic)
ids = dic.keys()
return res
    def _progress_rate(self, cr, uid, ids, names, arg, context=None):
        """Function-field computation of planned/total/effective hours and
        progress rate, rolled up over each project and all its children.

        Tasks in folded stages are excluded from the aggregation.
        """
        # Map every child project to its parent so per-child figures can be
        # accumulated into all requested ancestor projects below.
        child_parent = self._get_project_and_children(cr, uid, ids, context)
        # compute planned_hours, total_hours, effective_hours specific to each project
        cr.execute("""
            SELECT project_id, COALESCE(SUM(planned_hours), 0.0),
                COALESCE(SUM(total_hours), 0.0), COALESCE(SUM(effective_hours), 0.0)
            FROM project_task
            LEFT JOIN project_task_type ON project_task.stage_id = project_task_type.id
            WHERE project_task.project_id IN %s AND project_task_type.fold = False
            GROUP BY project_id
            """, (tuple(child_parent.keys()),))
        # aggregate results into res
        res = dict([(id, {'planned_hours':0.0, 'total_hours':0.0, 'effective_hours':0.0}) for id in ids])
        for id, planned, total, effective in cr.fetchall():
            # add the values specific to id to all parent projects of id in the result
            while id:
                if id in ids:
                    res[id]['planned_hours'] += planned
                    res[id]['total_hours'] += total
                    res[id]['effective_hours'] += effective
                id = child_parent[id]
        # compute progress rates
        for id in ids:
            if res[id]['total_hours']:
                res[id]['progress_rate'] = round(100.0 * res[id]['effective_hours'] / res[id]['total_hours'], 2)
            else:
                res[id]['progress_rate'] = 0.0
        return res
def unlink(self, cr, uid, ids, context=None):
alias_ids = []
mail_alias = self.pool.get('mail.alias')
for proj in self.browse(cr, uid, ids, context=context):
if proj.tasks:
raise osv.except_osv(_('Invalid Action!'),
_('You cannot delete a project containing tasks. You can either delete all the project\'s tasks and then delete the project or simply deactivate the project.'))
elif proj.alias_id:
alias_ids.append(proj.alias_id.id)
res = super(project, self).unlink(cr, uid, ids, context=context)
mail_alias.unlink(cr, uid, alias_ids, context=context)
return res
def _get_attached_docs(self, cr, uid, ids, field_name, arg, context):
res = {}
attachment = self.pool.get('ir.attachment')
task = self.pool.get('project.task')
for id in ids:
project_attachments = attachment.search(cr, uid, [('res_model', '=', 'project.project'), ('res_id', '=', id)], context=context, count=True)
task_ids = task.search(cr, uid, [('project_id', '=', id)], context=context)
task_attachments = attachment.search(cr, uid, [('res_model', '=', 'project.task'), ('res_id', 'in', task_ids)], context=context, count=True)
res[id] = (project_attachments or 0) + (task_attachments or 0)
return res
def _task_count(self, cr, uid, ids, field_name, arg, context=None):
res={}
for tasks in self.browse(cr, uid, ids, dict(context, active_test=False)):
res[tasks.id] = len(tasks.task_ids)
return res
def _get_alias_models(self, cr, uid, context=None):
""" Overriden in project_issue to offer more options """
return [('project.task', "Tasks")]
def _get_visibility_selection(self, cr, uid, context=None):
""" Overriden in portal_project to offer more options """
return [('public', _('Public project')),
('employees', _('Internal project: all employees can access')),
('followers', _('Private project: followers Only'))]
def attachment_tree_view(self, cr, uid, ids, context):
task_ids = self.pool.get('project.task').search(cr, uid, [('project_id', 'in', ids)])
domain = [
'|',
'&', ('res_model', '=', 'project.project'), ('res_id', 'in', ids),
'&', ('res_model', '=', 'project.task'), ('res_id', 'in', task_ids)]
res_id = ids and ids[0] or False
return {
'name': _('Attachments'),
'domain': domain,
'res_model': 'ir.attachment',
'type': 'ir.actions.act_window',
'view_id': False,
'view_mode': 'kanban,tree,form',
'view_type': 'form',
'limit': 80,
'context': "{'default_res_model': '%s','default_res_id': %d}" % (self._name, res_id)
}
# Lambda indirection method to avoid passing a copy of the overridable method when declaring the field
_alias_models = lambda self, *args, **kwargs: self._get_alias_models(*args, **kwargs)
_visibility_selection = lambda self, *args, **kwargs: self._get_visibility_selection(*args, **kwargs)
_columns = {
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the project without removing it."),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of Projects."),
'analytic_account_id': fields.many2one(
'account.analytic.account', 'Contract/Analytic',
help="Link this project to an analytic account if you need financial management on projects. "
"It enables you to connect projects with budgets, planning, cost and revenue analysis, timesheets on projects, etc.",
ondelete="cascade", required=True, auto_join=True),
'members': fields.many2many('res.users', 'project_user_rel', 'project_id', 'uid', 'Project Members',
help="Project's members are users who can have an access to the tasks related to this project.", states={'close':[('readonly',True)], 'cancelled':[('readonly',True)]}),
'tasks': fields.one2many('project.task', 'project_id', "Task Activities"),
'planned_hours': fields.function(_progress_rate, multi="progress", string='Planned Time', help="Sum of planned hours of all tasks related to this project and its child projects.",
store = {
'project.project': (_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
'project.task': (_get_projects_from_tasks, ['planned_hours', 'remaining_hours', 'work_ids', 'stage_id'], 20),
}),
'effective_hours': fields.function(_progress_rate, multi="progress", string='Time Spent', help="Sum of spent hours of all tasks related to this project and its child projects.",
store = {
'project.project': (_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
'project.task': (_get_projects_from_tasks, ['planned_hours', 'remaining_hours', 'work_ids', 'stage_id'], 20),
}),
'total_hours': fields.function(_progress_rate, multi="progress", string='Total Time', help="Sum of total hours of all tasks related to this project and its child projects.",
store = {
'project.project': (_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
'project.task': (_get_projects_from_tasks, ['planned_hours', 'remaining_hours', 'work_ids', 'stage_id'], 20),
}),
'progress_rate': fields.function(_progress_rate, multi="progress", string='Progress', type='float', group_operator="avg", help="Percent of tasks closed according to the total of tasks todo.",
store = {
'project.project': (_get_project_and_parents, ['tasks', 'parent_id', 'child_ids'], 10),
'project.task': (_get_projects_from_tasks, ['planned_hours', 'remaining_hours', 'work_ids', 'stage_id'], 20),
}),
'resource_calendar_id': fields.many2one('resource.calendar', 'Working Time', help="Timetable working hours to adjust the gantt diagram report", states={'close':[('readonly',True)]} ),
'type_ids': fields.many2many('project.task.type', 'project_task_type_rel', 'project_id', 'type_id', 'Tasks Stages', states={'close':[('readonly',True)], 'cancelled':[('readonly',True)]}),
'task_count': fields.function(_task_count, type='integer', string="Tasks",),
'task_ids': fields.one2many('project.task', 'project_id',
domain=[('stage_id.fold', '=', False)]),
'color': fields.integer('Color Index'),
'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
help="Internal email associated with this project. Incoming emails are automatically synchronized"
"with Tasks (or optionally Issues if the Issue Tracker module is installed)."),
'alias_model': fields.selection(_alias_models, "Alias Model", select=True, required=True,
help="The kind of document created when an email is received on this project's email alias"),
'privacy_visibility': fields.selection(_visibility_selection, 'Privacy / Visibility', required=True,
help="Holds visibility of the tasks or issues that belong to the current project:\n"
"- Public: everybody sees everything; if portal is activated, portal users\n"
" see all tasks or issues; if anonymous portal is activated, visitors\n"
" see all tasks or issues\n"
"- Portal (only available if Portal is installed): employees see everything;\n"
" if portal is activated, portal users see the tasks or issues followed by\n"
" them or by someone of their company\n"
"- Employees Only: employees see all tasks or issues\n"
"- Followers Only: employees see only the followed tasks or issues; if portal\n"
" is activated, portal users see the followed tasks or issues."),
'state': fields.selection([('template', 'Template'),
('draft','New'),
('open','In Progress'),
('cancelled', 'Cancelled'),
('pending','Pending'),
('close','Closed')],
'Status', required=True, copy=False),
'doc_count': fields.function(
_get_attached_docs, string="Number of documents attached", type='integer'
)
}
def _get_type_common(self, cr, uid, context):
ids = self.pool.get('project.task.type').search(cr, uid, [('case_default','=',1)], context=context)
return ids
_order = "sequence, id"
_defaults = {
'active': True,
'type': 'contract',
'state': 'open',
'sequence': 10,
'type_ids': _get_type_common,
'alias_model': 'project.task',
'privacy_visibility': 'employees',
}
# TODO: Why not using a SQL contraints ?
def _check_dates(self, cr, uid, ids, context=None):
for leave in self.read(cr, uid, ids, ['date_start', 'date'], context=context):
if leave['date_start'] and leave['date']:
if leave['date_start'] > leave['date']:
return False
return True
_constraints = [
(_check_dates, 'Error! project start-date must be lower than project end-date.', ['date_start', 'date'])
]
def set_template(self, cr, uid, ids, context=None):
return self.setActive(cr, uid, ids, value=False, context=context)
def set_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'close'}, context=context)
def set_cancel(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'cancelled'}, context=context)
def set_pending(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'pending'}, context=context)
def set_open(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'open'}, context=context)
def reset_project(self, cr, uid, ids, context=None):
return self.setActive(cr, uid, ids, value=True, context=context)
def map_tasks(self, cr, uid, old_project_id, new_project_id, context=None):
""" copy and map tasks from old to new project """
if context is None:
context = {}
map_task_id = {}
task_obj = self.pool.get('project.task')
proj = self.browse(cr, uid, old_project_id, context=context)
for task in proj.tasks:
# preserve task name and stage, normally altered during copy
defaults = {'stage_id': task.stage_id.id,
'name': task.name}
map_task_id[task.id] = task_obj.copy(cr, uid, task.id, defaults, context=context)
self.write(cr, uid, [new_project_id], {'tasks':[(6,0, map_task_id.values())]})
task_obj.duplicate_task(cr, uid, map_task_id, context=context)
return True
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
context = dict(context or {})
context['active_test'] = False
proj = self.browse(cr, uid, id, context=context)
if not default.get('name'):
default.update(name=_("%s (copy)") % (proj.name))
res = super(project, self).copy(cr, uid, id, default, context)
self.map_tasks(cr, uid, id, res, context=context)
return res
def duplicate_template(self, cr, uid, ids, context=None):
context = dict(context or {})
data_obj = self.pool.get('ir.model.data')
result = []
for proj in self.browse(cr, uid, ids, context=context):
parent_id = context.get('parent_id', False)
context.update({'analytic_project_copy': True})
new_date_start = time.strftime('%Y-%m-%d')
new_date_end = False
if proj.date_start and proj.date:
start_date = date(*time.strptime(proj.date_start,'%Y-%m-%d')[:3])
end_date = date(*time.strptime(proj.date,'%Y-%m-%d')[:3])
new_date_end = (datetime(*time.strptime(new_date_start,'%Y-%m-%d')[:3])+(end_date-start_date)).strftime('%Y-%m-%d')
context.update({'copy':True})
new_id = self.copy(cr, uid, proj.id, default = {
'name':_("%s (copy)") % (proj.name),
'state':'open',
'date_start':new_date_start,
'date':new_date_end,
'parent_id':parent_id}, context=context)
result.append(new_id)
child_ids = self.search(cr, uid, [('parent_id','=', proj.analytic_account_id.id)], context=context)
parent_id = self.read(cr, uid, new_id, ['analytic_account_id'])['analytic_account_id'][0]
if child_ids:
self.duplicate_template(cr, uid, child_ids, context={'parent_id': parent_id})
if result and len(result):
res_id = result[0]
form_view_id = data_obj._get_id(cr, uid, 'project', 'edit_project')
form_view = data_obj.read(cr, uid, form_view_id, ['res_id'])
tree_view_id = data_obj._get_id(cr, uid, 'project', 'view_project')
tree_view = data_obj.read(cr, uid, tree_view_id, ['res_id'])
search_view_id = data_obj._get_id(cr, uid, 'project', 'view_project_project_filter')
search_view = data_obj.read(cr, uid, search_view_id, ['res_id'])
return {
'name': _('Projects'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'project.project',
'view_id': False,
'res_id': res_id,
'views': [(form_view['res_id'],'form'),(tree_view['res_id'],'tree')],
'type': 'ir.actions.act_window',
'search_view_id': search_view['res_id'],
'nodestroy': True
}
# set active value for a project, its sub projects and its tasks
def setActive(self, cr, uid, ids, value=True, context=None):
task_obj = self.pool.get('project.task')
for proj in self.browse(cr, uid, ids, context=None):
self.write(cr, uid, [proj.id], {'state': value and 'open' or 'template'}, context)
cr.execute('select id from project_task where project_id=%s', (proj.id,))
tasks_id = [x[0] for x in cr.fetchall()]
if tasks_id:
task_obj.write(cr, uid, tasks_id, {'active': value}, context=context)
child_ids = self.search(cr, uid, [('parent_id','=', proj.analytic_account_id.id)])
if child_ids:
self.setActive(cr, uid, child_ids, value, context=None)
return True
def _schedule_header(self, cr, uid, ids, force_members=True, context=None):
context = context or {}
if type(ids) in (long, int,):
ids = [ids]
projects = self.browse(cr, uid, ids, context=context)
for project in projects:
if (not project.members) and force_members:
raise osv.except_osv(_('Warning!'),_("You must assign members on the project '%s'!") % (project.name,))
resource_pool = self.pool.get('resource.resource')
result = "from openerp.addons.resource.faces import *\n"
result += "import datetime\n"
for project in self.browse(cr, uid, ids, context=context):
u_ids = [i.id for i in project.members]
if project.user_id and (project.user_id.id not in u_ids):
u_ids.append(project.user_id.id)
for task in project.tasks:
if task.user_id and (task.user_id.id not in u_ids):
u_ids.append(task.user_id.id)
calendar_id = project.resource_calendar_id and project.resource_calendar_id.id or False
resource_objs = resource_pool.generate_resources(cr, uid, u_ids, calendar_id, context=context)
for key, vals in resource_objs.items():
result +='''
class User_%s(Resource):
efficiency = %s
''' % (key, vals.get('efficiency', False))
result += '''
def Project():
'''
return result
def _schedule_project(self, cr, uid, project, context=None):
resource_pool = self.pool.get('resource.resource')
calendar_id = project.resource_calendar_id and project.resource_calendar_id.id or False
working_days = resource_pool.compute_working_calendar(cr, uid, calendar_id, context=context)
# TODO: check if we need working_..., default values are ok.
puids = [x.id for x in project.members]
if project.user_id:
puids.append(project.user_id.id)
result = """
def Project_%d():
start = \'%s\'
working_days = %s
resource = %s
""" % (
project.id,
project.date_start or time.strftime('%Y-%m-%d'), working_days,
'|'.join(['User_'+str(x) for x in puids]) or 'None'
)
vacation = calendar_id and tuple(resource_pool.compute_vacation(cr, uid, calendar_id, context=context)) or False
if vacation:
result+= """
vacation = %s
""" % ( vacation, )
return result
#TODO: DO Resource allocation and compute availability
def compute_allocation(self, rc, uid, ids, start_date, end_date, context=None):
if context == None:
context = {}
allocation = {}
return allocation
def schedule_tasks(self, cr, uid, ids, context=None):
context = context or {}
if type(ids) in (long, int,):
ids = [ids]
projects = self.browse(cr, uid, ids, context=context)
result = self._schedule_header(cr, uid, ids, False, context=context)
for project in projects:
result += self._schedule_project(cr, uid, project, context=context)
result += self.pool.get('project.task')._generate_task(cr, uid, project.tasks, ident=4, context=context)
local_dict = {}
exec result in local_dict
projects_gantt = Task.BalancedProject(local_dict['Project'])
for project in projects:
project_gantt = getattr(projects_gantt, 'Project_%d' % (project.id,))
for task in project.tasks:
if task.stage_id and task.stage_id.fold:
continue
p = getattr(project_gantt, 'Task_%d' % (task.id,))
self.pool.get('project.task').write(cr, uid, [task.id], {
'date_start': p.start.strftime('%Y-%m-%d %H:%M:%S'),
'date_end': p.end.strftime('%Y-%m-%d %H:%M:%S')
}, context=context)
if (not task.user_id) and (p.booked_resource):
self.pool.get('project.task').write(cr, uid, [task.id], {
'user_id': int(p.booked_resource[0].name[5:]),
}, context=context)
return True
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
# Prevent double project creation when 'use_tasks' is checked + alias management
create_context = dict(context, project_creation_in_progress=True,
alias_model_name=vals.get('alias_model', 'project.task'),
alias_parent_model_name=self._name)
if vals.get('type', False) not in ('template', 'contract'):
vals['type'] = 'contract'
project_id = super(project, self).create(cr, uid, vals, context=create_context)
project_rec = self.browse(cr, uid, project_id, context=context)
self.pool.get('mail.alias').write(cr, uid, [project_rec.alias_id.id], {'alias_parent_thread_id': project_id, 'alias_defaults': {'project_id': project_id}}, context)
return project_id
def write(self, cr, uid, ids, vals, context=None):
# if alias_model has been changed, update alias_model_id accordingly
if vals.get('alias_model'):
model_ids = self.pool.get('ir.model').search(cr, uid, [('model', '=', vals.get('alias_model', 'project.task'))])
vals.update(alias_model_id=model_ids[0])
return super(project, self).write(cr, uid, ids, vals, context=context)
class task(osv.osv):
_name = "project.task"
_description = "Task"
_date_name = "date_start"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_mail_post_access = 'read'
_track = {
'stage_id': {
# this is only an heuristics; depending on your particular stage configuration it may not match all 'new' stages
'project.mt_task_new': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence <= 1,
'project.mt_task_stage': lambda self, cr, uid, obj, ctx=None: obj.stage_id.sequence > 1,
},
'user_id': {
'project.mt_task_assigned': lambda self, cr, uid, obj, ctx=None: obj.user_id and obj.user_id.id,
},
'kanban_state': {
'project.mt_task_blocked': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'blocked',
'project.mt_task_ready': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'done',
},
}
def _get_default_partner(self, cr, uid, context=None):
project_id = self._get_default_project_id(cr, uid, context)
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return project.partner_id.id
return False
def _get_default_project_id(self, cr, uid, context=None):
""" Gives default section by checking if present in the context """
return (self._resolve_project_id_from_context(cr, uid, context=context) or False)
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
project_id = self._get_default_project_id(cr, uid, context=context)
return self.stage_find(cr, uid, [], project_id, [('fold', '=', False)], context=context)
def _resolve_project_id_from_context(self, cr, uid, context=None):
""" Returns ID of project based on the value of 'default_project_id'
context key, or None if it cannot be resolved to a single
project.
"""
if context is None:
context = {}
if type(context.get('default_project_id')) in (int, long):
return context['default_project_id']
if isinstance(context.get('default_project_id'), basestring):
project_name = context['default_project_id']
project_ids = self.pool.get('project.project').name_search(cr, uid, name=project_name, context=context)
if len(project_ids) == 1:
return project_ids[0][0]
return None
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
stage_obj = self.pool.get('project.task.type')
order = stage_obj._order
access_rights_uid = access_rights_uid or uid
if read_group_order == 'stage_id desc':
order = '%s desc' % order
search_domain = []
project_id = self._resolve_project_id_from_context(cr, uid, context=context)
if project_id:
search_domain += ['|', ('project_ids', '=', project_id)]
search_domain += [('id', 'in', ids)]
stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
# restore order of the search
result.sort(lambda x,y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
fold = {}
for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
fold[stage.id] = stage.fold or False
return result, fold
def _read_group_user_id(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
res_users = self.pool.get('res.users')
project_id = self._resolve_project_id_from_context(cr, uid, context=context)
access_rights_uid = access_rights_uid or uid
if project_id:
ids += self.pool.get('project.project').read(cr, access_rights_uid, project_id, ['members'], context=context)['members']
order = res_users._order
# lame way to allow reverting search, should just work in the trivial case
if read_group_order == 'user_id desc':
order = '%s desc' % order
# de-duplicate and apply search order
ids = res_users._search(cr, uid, [('id','in',ids)], order=order, access_rights_uid=access_rights_uid, context=context)
result = res_users.name_get(cr, access_rights_uid, ids, context=context)
# restore order of the search
result.sort(lambda x,y: cmp(ids.index(x[0]), ids.index(y[0])))
return result, {}
_group_by_full = {
'stage_id': _read_group_stage_ids,
'user_id': _read_group_user_id,
}
def _str_get(self, task, level=0, border='***', context=None):
return border+' '+(task.user_id and task.user_id.name.upper() or '')+(level and (': L'+str(level)) or '')+(' - %.1fh / %.1fh'%(task.effective_hours or 0.0,task.planned_hours))+' '+border+'\n'+ \
border[0]+' '+(task.name or '')+'\n'+ \
(task.description or '')+'\n\n'
    # Compute: effective_hours, total_hours, progress
    def _hours_get(self, cr, uid, ids, field_names, args, context=None):
        """Function field: compute effective_hours, total_hours, delay_hours
        and progress for each task from its aggregated work lines."""
        res = {}
        # aggregate logged work per task in SQL rather than browsing work lines
        cr.execute("SELECT task_id, COALESCE(SUM(hours),0) FROM project_task_work WHERE task_id IN %s GROUP BY task_id",(tuple(ids),))
        hours = dict(cr.fetchall())
        for task in self.browse(cr, uid, ids, context=context):
            res[task.id] = {'effective_hours': hours.get(task.id, 0.0), 'total_hours': (task.remaining_hours or 0.0) + hours.get(task.id, 0.0)}
            res[task.id]['delay_hours'] = res[task.id]['total_hours'] - task.planned_hours
            res[task.id]['progress'] = 0.0
            if not float_is_zero(res[task.id]['total_hours'], precision_digits=2):
                # cap at 99.99%: only a folded (closed) stage counts as 100%
                res[task.id]['progress'] = round(min(100.0 * hours.get(task.id, 0.0) / res[task.id]['total_hours'], 99.99),2)
            if task.stage_id and task.stage_id.fold:
                res[task.id]['progress'] = 100.0
        return res
def onchange_remaining(self, cr, uid, ids, remaining=0.0, planned=0.0):
if remaining and not planned:
return {'value': {'planned_hours': remaining}}
return {}
def onchange_planned(self, cr, uid, ids, planned=0.0, effective=0.0):
return {'value': {'remaining_hours': planned - effective}}
    def onchange_project(self, cr, uid, id, project_id, context=None):
        """On project change, propose the project's customer as the task's
        customer (when the project has one)."""
        if project_id:
            project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
            if project and project.partner_id:
                return {'value': {'partner_id': project.partner_id.id}}
        return {}
    def onchange_user_id(self, cr, uid, ids, user_id, context=None):
        """When a task is assigned, stamp the starting date with 'now'."""
        vals = {}
        if user_id:
            vals['date_start'] = fields.datetime.now()
        return {'value': vals}
def onchange_date_deadline(
self, cr, uid, ids, date_end, date_deadline, context=None):
if not date_end or (date_end[:10] == self.browse(
cr, uid, ids, context=context).date_deadline):
return {'value': {'date_end': date_deadline}}
    def duplicate_task(self, cr, uid, map_ids, context=None):
        """Remap parent/child links of duplicated tasks.

        ``map_ids`` maps original task ids to the ids of their copies; for
        every copy, relations pointing at an original task are redirected to
        the corresponding copy (ids absent from the map are kept as-is).
        """
        mapper = lambda t: map_ids.get(t.id, t.id)
        for task in self.browse(cr, uid, map_ids.values(), context):
            new_child_ids = set(map(mapper, task.child_ids))
            new_parent_ids = set(map(mapper, task.parent_ids))
            if new_child_ids or new_parent_ids:
                # (6, 0, ids): replace the whole m2m relation
                task.write({'parent_ids': [(6,0,list(new_parent_ids))],
                            'child_ids':  [(6,0,list(new_child_ids))]})
    def copy_data(self, cr, uid, id, default=None, context=None):
        """Override: on duplication, suffix the name with '(copy)' and reset
        remaining hours to the planned hours (work lines are not copied)."""
        if default is None:
            default = {}
        current = self.browse(cr, uid, id, context=context)
        if not default.get('name'):
            default['name'] = _("%s (copy)") % current.name
        if 'remaining_hours' not in default:
            default['remaining_hours'] = current.planned_hours
        return super(task, self).copy_data(cr, uid, id, default, context)
    def _is_template(self, cr, uid, ids, field_name, arg, context=None):
        """Function field for 'active': a task is hidden (inactive) when its
        project is inactive or is a template project."""
        res = {}
        for task in self.browse(cr, uid, ids, context=context):
            res[task.id] = True
            if task.project_id:
                if task.project_id.active == False or task.project_id.state == 'template':
                    res[task.id] = False
        return res
    def _get_task(self, cr, uid, ids, context=None):
        """'store' trigger helper: map modified project.task.work ids to the
        ids of the tasks whose hour totals must be recomputed."""
        result = {}
        for work in self.pool.get('project.task.work').browse(cr, uid, ids, context=context):
            if work.task_id: result[work.task_id.id] = True
        return result.keys()
_columns = {
'active': fields.function(_is_template, store=True, string='Not a Template Task', type='boolean', help="This field is computed automatically and have the same behavior than the boolean 'active' field: if the task is linked to a template or unactivated project, it will be hidden unless specifically asked."),
'name': fields.char('Task Summary', track_visibility='onchange', size=128, required=True, select=True),
'description': fields.text('Description'),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority', select=True),
'sequence': fields.integer('Sequence', select=True, help="Gives the sequence order when displaying a list of tasks."),
'stage_id': fields.many2one('project.task.type', 'Stage', track_visibility='onchange', select=True,
domain="[('project_ids', '=', project_id)]", copy=False),
'categ_ids': fields.many2many('project.category', string='Tags'),
'kanban_state': fields.selection([('normal', 'In Progress'),('blocked', 'Blocked'),('done', 'Ready for next stage')], 'Kanban State',
track_visibility='onchange',
help="A task's kanban state indicates special situations affecting it:\n"
" * Normal is the default situation\n"
" * Blocked indicates something is preventing the progress of this task\n"
" * Ready for next stage indicates the task is ready to be pulled to the next stage",
required=False, copy=False),
'create_date': fields.datetime('Create Date', readonly=True, select=True),
'write_date': fields.datetime('Last Modification Date', readonly=True, select=True), #not displayed in the view but it might be useful with base_action_rule module (and it needs to be defined first for that)
'date_start': fields.datetime('Starting Date', select=True, copy=False),
'date_end': fields.datetime('Ending Date', select=True, copy=False),
'date_deadline': fields.date('Deadline', select=True, copy=False),
'date_last_stage_update': fields.datetime('Last Stage Update', select=True, copy=False),
'project_id': fields.many2one('project.project', 'Project', ondelete='set null', select=True, track_visibility='onchange', change_default=True),
'parent_ids': fields.many2many('project.task', 'project_task_parent_rel', 'task_id', 'parent_id', 'Parent Tasks'),
'child_ids': fields.many2many('project.task', 'project_task_parent_rel', 'parent_id', 'task_id', 'Delegated Tasks'),
'notes': fields.text('Notes'),
'planned_hours': fields.float('Initially Planned Hours', help='Estimated time to do the task, usually set by the project manager when the task is in draft state.'),
'effective_hours': fields.function(_hours_get, string='Hours Spent', multi='hours', help="Computed using the sum of the task work done.",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids', 'remaining_hours', 'planned_hours'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'remaining_hours': fields.float('Remaining Hours', digits=(16,2), help="Total remaining time, can be re-estimated periodically by the assignee of the task."),
'total_hours': fields.function(_hours_get, string='Total', multi='hours', help="Computed as: Time Spent + Remaining Time.",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids', 'remaining_hours', 'planned_hours'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'progress': fields.function(_hours_get, string='Working Time Progress (%)', multi='hours', group_operator="avg", help="If the task has a progress of 99.99% you should close the task if it's finished or reevaluate the time",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids', 'remaining_hours', 'planned_hours', 'state', 'stage_id'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'delay_hours': fields.function(_hours_get, string='Delay Hours', multi='hours', help="Computed as difference between planned hours by the project manager and the total hours of the task.",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['work_ids', 'remaining_hours', 'planned_hours'], 10),
'project.task.work': (_get_task, ['hours'], 10),
}),
'reviewer_id': fields.many2one('res.users', 'Reviewer', select=True, track_visibility='onchange'),
'user_id': fields.many2one('res.users', 'Assigned to', select=True, track_visibility='onchange'),
'delegated_user_id': fields.related('child_ids', 'user_id', type='many2one', relation='res.users', string='Delegated To'),
'partner_id': fields.many2one('res.partner', 'Customer'),
'work_ids': fields.one2many('project.task.work', 'task_id', 'Work done'),
'manager_id': fields.related('project_id', 'analytic_account_id', 'user_id', type='many2one', relation='res.users', string='Project Manager'),
'company_id': fields.many2one('res.company', 'Company'),
'id': fields.integer('ID', readonly=True),
'color': fields.integer('Color Index'),
'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
}
_defaults = {
'stage_id': _get_default_stage_id,
'project_id': _get_default_project_id,
'date_last_stage_update': fields.datetime.now,
'kanban_state': 'normal',
'priority': '0',
'progress': 0,
'sequence': 10,
'active': True,
'reviewer_id': lambda obj, cr, uid, ctx=None: uid,
'user_id': lambda obj, cr, uid, ctx=None: uid,
'company_id': lambda self, cr, uid, ctx=None: self.pool.get('res.company')._company_default_get(cr, uid, 'project.task', context=ctx),
'partner_id': lambda self, cr, uid, ctx=None: self._get_default_partner(cr, uid, context=ctx),
}
_order = "priority desc, sequence, date_start, name, id"
def _check_recursion(self, cr, uid, ids, context=None):
for id in ids:
visited_branch = set()
visited_node = set()
res = self._check_cycle(cr, uid, id, visited_branch, visited_node, context=context)
if not res:
return False
return True
    def _check_cycle(self, cr, uid, id, visited_branch, visited_node, context=None):
        """DFS helper for _check_recursion: return False when a cycle is
        reachable through ``child_ids``, True otherwise.

        ``visited_branch`` holds the ids on the current DFS path (seeing an
        id twice means a cycle); ``visited_node`` memoizes ids already
        proven cycle-free so they are not re-explored.
        """
        if id in visited_branch: #Cycle
            return False
        if id in visited_node: #Already tested don't work one more time for nothing
            return True
        visited_branch.add(id)
        visited_node.add(id)
        #visit child using DFS
        task = self.browse(cr, uid, id, context=context)
        for child in task.child_ids:
            res = self._check_cycle(cr, uid, child.id, visited_branch, visited_node, context=context)
            if not res:
                return False
        visited_branch.remove(id)
        return True
def _check_dates(self, cr, uid, ids, context=None):
if context == None:
context = {}
obj_task = self.browse(cr, uid, ids[0], context=context)
start = obj_task.date_start or False
end = obj_task.date_end or False
if start and end :
if start > end:
return False
return True
_constraints = [
(_check_recursion, 'Error ! You cannot create recursive tasks.', ['parent_ids']),
(_check_dates, 'Error ! Task end-date must be greater than task start-date', ['date_start','date_end'])
]
    # Override view according to the company definition
    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """Override: when the company's project time unit is not hours,
        switch 'float_time' widgets to plain floats and relabel
        'Hours' fields with the unit's name in the returned view."""
        users_obj = self.pool.get('res.users')
        if context is None: context = {}
        res = super(task, self).fields_view_get(cr, uid, view_id, view_type, context=context, toolbar=toolbar, submenu=submenu)
        # read uom as admin to avoid access rights issues, e.g. for portal/share users,
        # this should be safe (no context passed to avoid side-effects)
        obj_tm = users_obj.browse(cr, SUPERUSER_ID, uid, context=context).company_id.project_time_mode_id
        try:
            # using get_object to get translation value
            uom_hour = self.pool['ir.model.data'].get_object(cr, uid, 'product', 'product_uom_hour', context=context)
        except ValueError:
            uom_hour = False
        if not obj_tm or not uom_hour or obj_tm.id == uom_hour.id:
            # company counts in hours (or data missing): view is fine as-is
            return res
        eview = etree.fromstring(res['arch'])
        # if the project_time_mode_id is not in hours (so in days), display it as a float field
        def _check_rec(eview):
            if eview.attrib.get('widget','') == 'float_time':
                eview.set('widget','float')
            for child in eview:
                _check_rec(child)
            return True
        _check_rec(eview)
        res['arch'] = etree.tostring(eview)
        # replace reference of 'Hours' to 'Day(s)'
        for f in res['fields']:
            # TODO this NOT work in different language than english
            # the field 'Initially Planned Hours' should be replaced by 'Initially Planned Days'
            # but string 'Initially Planned Days' is not available in translation
            if 'Hours' in res['fields'][f]['string']:
                res['fields'][f]['string'] = res['fields'][f]['string'].replace('Hours', obj_tm.name)
        return res
    def get_empty_list_help(self, cr, uid, help, context=None):
        """Override: point the empty-list helper at the current project so
        the 'no tasks' message can mention the project by name."""
        context = dict(context or {})
        context['empty_list_help_id'] = context.get('default_project_id')
        context['empty_list_help_model'] = 'project.project'
        context['empty_list_help_document_name'] = _("tasks")
        return super(task, self).get_empty_list_help(cr, uid, help, context=context)
    # ----------------------------------------
    # Case management
    # ----------------------------------------
    def stage_find(self, cr, uid, cases, section_id, domain=[], order='sequence', context=None):
        """ Override of the base.stage method
            Parameter of the stage search taken from the lead:
            - section_id: if set, stages must belong to this section or
              be a default stage; if not set, stages must be default
              stages

            Returns the id of the first matching stage (per ``order``), or
            False when none matches.
        """
        if isinstance(cases, (int, long)):
            cases = self.browse(cr, uid, cases, context=context)
        # collect all section_ids
        section_ids = []
        if section_id:
            section_ids.append(section_id)
        for task in cases:
            if task.project_id:
                section_ids.append(task.project_id.id)
        search_domain = []
        if section_ids:
            # prefix-notation domain: n-1 '|' operators OR together the
            # project_ids terms below
            search_domain = [('|')] * (len(section_ids) - 1)
            for section_id in section_ids:
                search_domain.append(('project_ids', '=', section_id))
        search_domain += list(domain)
        # perform search, return the first found
        stage_ids = self.pool.get('project.task.type').search(cr, uid, search_domain, order=order, context=context)
        if stage_ids:
            return stage_ids[0]
        return False
    def _check_child_task(self, cr, uid, ids, context=None):
        """Raise when any of the given tasks still has an open (non-folded
        stage) delegated child task; used as a guard before unlink."""
        if context == None:
            context = {}
        tasks = self.browse(cr, uid, ids, context=context)
        for task in tasks:
            if task.child_ids:
                for child in task.child_ids:
                    if child.stage_id and not child.stage_id.fold:
                        raise osv.except_osv(_("Warning!"), _("Child task still open.\nPlease cancel or complete child task first."))
        return True
    def _delegate_task_attachments(self, cr, uid, task_id, delegated_task_id, context=None):
        """Copy every attachment of ``task_id`` onto the delegated task;
        return the ids of the new attachments."""
        attachment = self.pool.get('ir.attachment')
        attachment_ids = attachment.search(cr, uid, [('res_model', '=', self._name), ('res_id', '=', task_id)], context=context)
        new_attachment_ids = []
        for attachment_id in attachment_ids:
            new_attachment_ids.append(attachment.copy(cr, uid, attachment_id, default={'res_id': delegated_task_id}, context=context))
        return new_attachment_ids
    def do_delegate(self, cr, uid, ids, delegate_data=None, context=None):
        """
        Delegate Task to another users.

        For each task: create a copy assigned to the delegatee (linked back
        via parent_ids, with its own attachments), then rewrite the original
        task's name/hours per ``delegate_data``.  Returns a dict mapping
        original task id -> delegated task id.
        """
        if delegate_data is None:
            delegate_data = {}
        assert delegate_data['user_id'], _("Delegated User should be specified")
        delegated_tasks = {}
        for task in self.browse(cr, uid, ids, context=context):
            # delegate_data holds (id, name) pairs for relational fields,
            # hence the [0] indexing below
            delegated_task_id = self.copy(cr, uid, task.id, {
                'name': delegate_data['name'],
                'project_id': delegate_data['project_id'] and delegate_data['project_id'][0] or False,
                'stage_id': delegate_data.get('stage_id') and delegate_data.get('stage_id')[0] or False,
                'user_id': delegate_data['user_id'] and delegate_data['user_id'][0] or False,
                'planned_hours': delegate_data['planned_hours'] or 0.0,
                'parent_ids': [(6, 0, [task.id])],
                'description': delegate_data['new_task_description'] or '',
                'child_ids': [],
                'work_ids': []
            }, context=context)
            self._delegate_task_attachments(cr, uid, task.id, delegated_task_id, context=context)
            newname = delegate_data['prefix'] or ''
            task.write({
                'remaining_hours': delegate_data['planned_hours_me'],
                'planned_hours': delegate_data['planned_hours_me'] + (task.effective_hours or 0.0),
                'name': newname,
            }, context=context)
            delegated_tasks[task.id] = delegated_task_id
        return delegated_tasks
    def set_remaining_time(self, cr, uid, ids, remaining_time=1.0, context=None):
        """Set the remaining hours of the given tasks; for tasks still in an
        early stage (sequence <= 1) or without planned hours, also bump the
        planned hours to remaining + already-spent time."""
        for task in self.browse(cr, uid, ids, context=context):
            if (task.stage_id and task.stage_id.sequence <= 1) or (task.planned_hours == 0.0):
                self.write(cr, uid, [task.id], {'planned_hours': remaining_time + task.effective_hours}, context=context)
        self.write(cr, uid, ids, {'remaining_hours': remaining_time}, context=context)
        return True
    # Convenience shortcuts bound to kanban quick-actions: set the remaining
    # time to a fixed number of hours.
    def set_remaining_time_1(self, cr, uid, ids, context=None):
        return self.set_remaining_time(cr, uid, ids, 1.0, context)
    def set_remaining_time_2(self, cr, uid, ids, context=None):
        return self.set_remaining_time(cr, uid, ids, 2.0, context)
    def set_remaining_time_5(self, cr, uid, ids, context=None):
        return self.set_remaining_time(cr, uid, ids, 5.0, context)
    def set_remaining_time_10(self, cr, uid, ids, context=None):
        return self.set_remaining_time(cr, uid, ids, 10.0, context)
    def _store_history(self, cr, uid, ids, context=None):
        """Snapshot each task's hours/stage/assignee into project.task.history
        (feeds the cumulative-flow reporting view)."""
        for task in self.browse(cr, uid, ids, context=context):
            self.pool.get('project.task.history').create(cr, uid, {
                'task_id': task.id,
                'remaining_hours': task.remaining_hours,
                'planned_hours': task.planned_hours,
                'kanban_state': task.kanban_state,
                'type_id': task.stage_id.id,
                'user_id': task.user_id.id
            }, context=context)
        return True
    # ------------------------------------------------
    # CRUD overrides
    # ------------------------------------------------
    def create(self, cr, uid, vals, context=None):
        """Override: seed the default-stage context from the project, stamp
        date_start on assignment, and record an initial history entry."""
        context = dict(context or {})
        # for default stage
        if vals.get('project_id') and not context.get('default_project_id'):
            context['default_project_id'] = vals.get('project_id')
        # user_id change: update date_start
        if vals.get('user_id') and not vals.get('date_start'):
            vals['date_start'] = fields.datetime.now()
        # context: no_log, because subtype already handle this
        create_context = dict(context, mail_create_nolog=True)
        task_id = super(task, self).create(cr, uid, vals, context=create_context)
        self._store_history(cr, uid, [task_id], context=context)
        return task_id
    def write(self, cr, uid, ids, vals, context=None):
        """Override: maintain date_last_stage_update / date_start, reset the
        kanban state on genuine stage changes, and record history when any
        tracked field moved."""
        if isinstance(ids, (int, long)):
            ids = [ids]
        # stage change: update date_last_stage_update
        if 'stage_id' in vals:
            vals['date_last_stage_update'] = fields.datetime.now()
        # user_id change: update date_start
        if vals.get('user_id') and 'date_start' not in vals:
            vals['date_start'] = fields.datetime.now()
        # Overridden to reset the kanban_state to normal whenever
        # the stage (stage_id) of the task changes.
        if vals and not 'kanban_state' in vals and 'stage_id' in vals:
            new_stage = vals.get('stage_id')
            vals_reset_kstate = dict(vals, kanban_state='normal')
            for t in self.browse(cr, uid, ids, context=context):
                # only reset kanban_state for tasks actually changing stage
                write_vals = vals_reset_kstate if t.stage_id.id != new_stage else vals
                super(task, self).write(cr, uid, [t.id], write_vals, context=context)
            result = True
        else:
            result = super(task, self).write(cr, uid, ids, vals, context=context)
        if any(item in vals for item in ['stage_id', 'remaining_hours', 'user_id', 'kanban_state']):
            self._store_history(cr, uid, ids, context=context)
        return result
    def unlink(self, cr, uid, ids, context=None):
        """Override: refuse deletion while a delegated child task is still
        open (see _check_child_task)."""
        if context == None:
            context = {}
        self._check_child_task(cr, uid, ids, context=context)
        res = super(task, self).unlink(cr, uid, ids, context)
        return res
    def _generate_task(self, cr, uid, tasks, ident=4, context=None):
        """Render the given tasks as TaskJuggler-style 'def Task_<id>()'
        snippets (used by the long-term-planning export).

        Folded-stage (closed) tasks are skipped; parent tasks become
        ``start = max(up.Task_<id>.end, ...)`` dependencies and the assignee
        becomes the resource.  ``ident`` is the indentation width in spaces.
        """
        context = context or {}
        result = ""
        ident = ' '*ident
        company = self.pool["res.users"].browse(cr, uid, uid, context=context).company_id
        # map the company time unit's (translated) name to the single-letter
        # duration suffix understood by the planner
        duration_uom = {
            'day(s)': 'd', 'days': 'd', 'day': 'd', 'd': 'd',
            'month(s)': 'm', 'months': 'm', 'month': 'month', 'm': 'm',
            'week(s)': 'w', 'weeks': 'w', 'week': 'w', 'w': 'w',
            'hour(s)': 'H', 'hours': 'H', 'hour': 'H', 'h': 'H',
        }.get(company.project_time_mode_id.name.lower(), "hour(s)")
        for task in tasks:
            if task.stage_id and task.stage_id.fold:
                continue
            result += '''
%sdef Task_%s():
%s  todo = \"%.2f%s\"
%s  effort = \"%.2f%s\"''' % (ident, task.id, ident, task.remaining_hours, duration_uom, ident, task.total_hours, duration_uom)
            start = []
            for t2 in task.parent_ids:
                start.append("up.Task_%s.end" % (t2.id,))
            if start:
                result += '''
%s  start = max(%s)
''' % (ident,','.join(start))
            if task.user_id:
                result += '''
%s  resource = %s
''' % (ident, 'User_'+str(task.user_id.id))
        result += "\n"
        return result
    # ---------------------------------------------------
    # Mail gateway
    # ---------------------------------------------------
    def _message_get_auto_subscribe_fields(self, cr, uid, updated_fields, auto_follow_fields=None, context=None):
        """Override: also auto-subscribe the reviewer (not only the
        assignee) when those fields change."""
        if auto_follow_fields is None:
            auto_follow_fields = ['user_id', 'reviewer_id']
        return super(task, self)._message_get_auto_subscribe_fields(cr, uid, updated_fields, auto_follow_fields, context=context)
    def message_get_reply_to(self, cr, uid, ids, context=None):
        """ Override to get the reply_to of the parent project. """
        # browse as superuser: the task's project may not be readable by uid
        tasks = self.browse(cr, SUPERUSER_ID, ids, context=context)
        project_ids = set([task.project_id.id for task in tasks if task.project_id])
        aliases = self.pool['project.project'].message_get_reply_to(cr, uid, list(project_ids), context=context)
        return dict((task.id, aliases.get(task.project_id and task.project_id.id or 0, False)) for task in tasks)
    def message_new(self, cr, uid, msg, custom_values=None, context=None):
        """ Override to updates the document according to the email.

        Incoming mail creates a task named after the subject, with no
        planned hours; explicit ``custom_values`` win over these defaults.
        """
        if custom_values is None:
            custom_values = {}
        defaults = {
            'name': msg.get('subject'),
            'planned_hours': 0.0,
        }
        defaults.update(custom_values)
        return super(task, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
    def message_update(self, cr, uid, ids, msg, update_vals=None, context=None):
        """ Override to update the task according to the email.

        Scans the mail body for command lines (tools.command_re) and maps
        the recognized command 'cost' to planned_hours; non-numeric values
        are silently ignored.
        """
        if update_vals is None:
            update_vals = {}
        maps = {
            'cost': 'planned_hours',
        }
        for line in msg['body'].split('\n'):
            line = line.strip()
            res = tools.command_re.match(line)
            if res:
                match = res.group(1).lower()
                field = maps.get(match)
                if field:
                    try:
                        update_vals[field] = float(res.group(2).lower())
                    except (ValueError, TypeError):
                        pass
        return super(task, self).message_update(cr, uid, ids, msg, update_vals=update_vals, context=context)
class project_work(osv.osv):
    """Work line logged on a task.

    Creating/updating/deleting a work line adjusts the parent task's
    remaining_hours directly in SQL (and invalidates the ORM cache), so the
    remaining time shrinks as work is logged.
    """
    _name = "project.task.work"
    _description = "Project Task Work"
    _columns = {
        'name': fields.char('Work summary'),
        'date': fields.datetime('Date', select="1"),
        'task_id': fields.many2one('project.task', 'Task', ondelete='cascade', required=True, select="1"),
        'hours': fields.float('Time Spent'),
        'user_id': fields.many2one('res.users', 'Done by', required=True, select="1"),
        'company_id': fields.related('task_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True)
    }
    _defaults = {
        'user_id': lambda obj, cr, uid, context: uid,
        'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S')
    }
    _order = "date desc"
    def create(self, cr, uid, vals, context=None):
        """Override: subtract the logged hours from the task's remaining hours."""
        if 'hours' in vals and (not vals['hours']):
            vals['hours'] = 0.00
        if 'task_id' in vals:
            cr.execute('update project_task set remaining_hours=remaining_hours - %s where id=%s', (vals.get('hours',0.0), vals['task_id']))
            self.pool.get('project.task').invalidate_cache(cr, uid, ['remaining_hours'], [vals['task_id']], context=context)
        return super(project_work,self).create(cr, uid, vals, context=context)
    def write(self, cr, uid, ids, vals, context=None):
        """Override: re-balance the task's remaining hours by the delta
        between the old and new hours of each work line."""
        if 'hours' in vals and (not vals['hours']):
            vals['hours'] = 0.00
        if 'hours' in vals:
            task_obj = self.pool.get('project.task')
            for work in self.browse(cr, uid, ids, context=context):
                cr.execute('update project_task set remaining_hours=remaining_hours - %s + (%s) where id=%s', (vals.get('hours',0.0), work.hours, work.task_id.id))
                task_obj.invalidate_cache(cr, uid, ['remaining_hours'], [work.task_id.id], context=context)
        return super(project_work,self).write(cr, uid, ids, vals, context)
    def unlink(self, cr, uid, ids, context=None):
        """Override: give the deleted line's hours back to the task's
        remaining hours."""
        task_obj = self.pool.get('project.task')
        for work in self.browse(cr, uid, ids):
            cr.execute('update project_task set remaining_hours=remaining_hours + %s where id=%s', (work.hours, work.task_id.id))
            task_obj.invalidate_cache(cr, uid, ['remaining_hours'], [work.task_id.id], context=context)
        return super(project_work,self).unlink(cr, uid, ids, context=context)
class account_analytic_account(osv.osv):
    """Extension of analytic accounts: the 'use_tasks' flag turns an account
    into a project-backed contract, auto-creating the linked project."""
    _inherit = 'account.analytic.account'
    _description = 'Analytic Account'
    _columns = {
        'use_tasks': fields.boolean('Tasks',help="If checked, this contract will be available in the project menu and you will be able to manage tasks or track issues"),
        'company_uom_id': fields.related('company_id', 'project_time_mode_id', type='many2one', relation='product.uom'),
    }
    def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
        """Override: propagate the template's use_tasks flag to the new account."""
        res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
        if template_id and 'value' in res:
            template = self.browse(cr, uid, template_id, context=context)
            res['value']['use_tasks'] = template.use_tasks
        return res
    def _trigger_project_creation(self, cr, uid, vals, context=None):
        '''
        This function is used to decide if a project needs to be automatically created or not when an analytic account is created. It returns True if it needs to be so, False otherwise.
        '''
        if context is None: context = {}
        return vals.get('use_tasks') and not 'project_creation_in_progress' in context
    def project_create(self, cr, uid, analytic_account_id, vals, context=None):
        '''
        This function is called at the time of analytic account creation and is used to create a project automatically linked to it if the conditions are met.
        '''
        project_pool = self.pool.get('project.project')
        project_id = project_pool.search(cr, uid, [('analytic_account_id','=', analytic_account_id)])
        if not project_id and self._trigger_project_creation(cr, uid, vals, context=context):
            project_values = {
                'name': vals.get('name'),
                'analytic_account_id': analytic_account_id,
                'type': vals.get('type','contract'),
            }
            return project_pool.create(cr, uid, project_values, context=context)
        return False
    def create(self, cr, uid, vals, context=None):
        """Override: create the account, then possibly its linked project."""
        if context is None:
            context = {}
        if vals.get('child_ids', False) and context.get('analytic_project_copy', False):
            # children are re-created by the project copy flow; avoid duplicates
            vals['child_ids'] = []
        analytic_account_id = super(account_analytic_account, self).create(cr, uid, vals, context=context)
        self.project_create(cr, uid, analytic_account_id, vals, context=context)
        return analytic_account_id
    def write(self, cr, uid, ids, vals, context=None):
        """Override: (re)trigger project creation on write, completing the
        name/type from the existing account when they are not in vals."""
        if isinstance(ids, (int, long)):
            ids = [ids]
        vals_for_project = vals.copy()
        for account in self.browse(cr, uid, ids, context=context):
            if not vals.get('name'):
                vals_for_project['name'] = account.name
            if not vals.get('type'):
                vals_for_project['type'] = account.type
            self.project_create(cr, uid, account.id, vals_for_project, context=context)
        return super(account_analytic_account, self).write(cr, uid, ids, vals, context=context)
    def unlink(self, cr, uid, ids, *args, **kwargs):
        """Override: refuse deletion while a project is still linked."""
        project_obj = self.pool.get('project.project')
        analytic_ids = project_obj.search(cr, uid, [('analytic_account_id','in',ids)])
        if analytic_ids:
            raise osv.except_osv(_('Warning!'), _('Please delete the project linked with this account first.'))
        return super(account_analytic_account, self).unlink(cr, uid, ids, *args, **kwargs)
    def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
        """Override: when searched from the project form, match on the plain
        account name (skipping the hierarchical name logic of the parent)."""
        if args is None:
            args = []
        if context is None:
            context={}
        if context.get('current_model') == 'project.project':
            project_ids = self.search(cr, uid, args + [('name', operator, name)], limit=limit, context=context)
            return self.name_get(cr, uid, project_ids, context=context)
        return super(account_analytic_account, self).name_search(cr, uid, name, args=args, operator=operator, context=context, limit=limit)
class project_project(osv.osv):
    # Projects created through this module manage tasks by default.
    _inherit = 'project.project'
    _defaults = {
        'use_tasks': True
    }
class project_task_history(osv.osv):
    """
    Tasks History, used for cumulative flow charts (Lean/Agile)

    One row per task snapshot (written by task._store_history); end_date is
    derived as the date of the *next* snapshot for the same task, or the
    snapshot's own date when its stage is folded (task closed).
    """
    _name = 'project.task.history'
    _description = 'History of Tasks'
    _rec_name = 'task_id'
    _log_access = False
    def _get_date(self, cr, uid, ids, name, arg, context=None):
        """Function field: compute end_date for each history line."""
        result = {}
        for history in self.browse(cr, uid, ids, context=context):
            if history.type_id and history.type_id.fold:
                # closed stage: the snapshot's own date ends the interval
                result[history.id] = history.date
                continue
            # otherwise the interval ends at the next snapshot of the task
            cr.execute('''select
                    date
                from
                    project_task_history
                where
                    task_id=%s and
                    id>%s
                order by id limit 1''', (history.task_id.id, history.id))
            res = cr.fetchone()
            result[history.id] = res and res[0] or False
        return result
    def _get_related_date(self, cr, uid, ids, context=None):
        """store trigger: when a snapshot changes, the *previous* snapshot of
        the same task must recompute its end_date."""
        result = []
        for history in self.browse(cr, uid, ids, context=context):
            cr.execute('''select
                    id
                from
                    project_task_history
                where
                    task_id=%s and
                    id<%s
                order by id desc limit 1''', (history.task_id.id, history.id))
            res = cr.fetchone()
            if res:
                result.append(res[0])
        return result
    _columns = {
        'task_id': fields.many2one('project.task', 'Task', ondelete='cascade', required=True, select=True),
        'type_id': fields.many2one('project.task.type', 'Stage'),
        'kanban_state': fields.selection([('normal', 'Normal'), ('blocked', 'Blocked'), ('done', 'Ready for next stage')], 'Kanban State', required=False),
        'date': fields.date('Date', select=True),
        'end_date': fields.function(_get_date, string='End Date', type="date", store={
            'project.task.history': (_get_related_date, None, 20)
        }),
        'remaining_hours': fields.float('Remaining Time', digits=(16, 2)),
        'planned_hours': fields.float('Planned Time', digits=(16, 2)),
        'user_id': fields.many2one('res.users', 'Responsible'),
    }
    _defaults = {
        'date': fields.date.context_today,
    }
class project_task_history_cumulative(osv.osv):
    """Read-only SQL view over project.task.history: each snapshot is
    expanded into one row per day between its date and its end_date (or
    today), which gives the per-day task counts behind the cumulative flow
    chart."""
    _name = 'project.task.history.cumulative'
    _table = 'project_task_history_cumulative'
    _inherit = 'project.task.history'
    _auto = False  # backed by the SQL view created in init(), not a real table
    _columns = {
        'end_date': fields.date('End Date'),
        'nbr_tasks': fields.integer('# of Tasks', readonly=True),
        'project_id': fields.many2one('project.project', 'Project'),
    }
    def init(self, cr):
        """(Re)create the backing SQL view on module install/update."""
        tools.drop_view_if_exists(cr, 'project_task_history_cumulative')
        cr.execute(""" CREATE VIEW project_task_history_cumulative AS (
            SELECT
                history.date::varchar||'-'||history.history_id::varchar AS id,
                history.date AS end_date,
                *
            FROM (
                SELECT
                    h.id AS history_id,
                    h.date+generate_series(0, CAST((coalesce(h.end_date, DATE 'tomorrow')::date - h.date) AS integer)-1) AS date,
                    h.task_id, h.type_id, h.user_id, h.kanban_state,
                    count(h.task_id) as nbr_tasks,
                    greatest(h.remaining_hours, 1) AS remaining_hours, greatest(h.planned_hours, 1) AS planned_hours,
                    t.project_id
                FROM
                    project_task_history AS h
                    JOIN project_task AS t ON (h.task_id = t.id)
                GROUP BY
                    h.id,
                    h.task_id,
                    t.project_id

            ) AS history
        )
        """)
class project_category(osv.osv):
    """ Category of project's task (or issue) """
    _name = "project.category"
    _description = "Category of project's task, issue, ..."
    _columns = {
        # translate=True: tag names are translatable per language
        'name': fields.char('Name', required=True, translate=True),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
factorlibre/OCB
|
addons/project/project.py
|
Python
|
agpl-3.0
| 70,511
|
[
"VisIt"
] |
a7c632d17447a2b573e458db1e7dec1aea7a1abe9be4e67a71640e91fc8f0ed0
|
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# NOTE(review): this is a Python 2 script (bare `print` statement at the end
# and integer division in the charge-assignment loop).
import espressomd
from espressomd import thermostat
from espressomd import integrate
from espressomd import electrostatics
import numpy
# System parameters
#############################################################
box_l = 10.7437
density = 0.7
# Interaction parameters (repulsive Lennard Jones)
#############################################################
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 1.12246
lj_cap = 20
# Integration parameters
#############################################################
system = espressomd.System()
system.time_step = 0.01
system.skin = 0.4
system.box_l = [box_l, box_l, box_l]
thermostat.Thermostat().set_langevin(1.0, 1.0)
# warmup integration (with capped LJ potential)
warm_steps = 100
warm_n_times = 30
# do the warmup until the particles have at least the distance min_dist
min_dist = 0.9
# integration
int_steps = 1000
int_n_times = 10
# Non-Bonded Interaction setup
#############################################################
system.non_bonded_inter[0, 0].lennard_jones.set_params(
    epsilon=lj_eps, sigma=lj_sig,
    cutoff=lj_cut, shift="auto")
system.non_bonded_inter.set_force_cap(lj_cap)
# Particle setup: place n_part particles at uniformly random positions
#############################################################
volume = box_l * box_l * box_l
n_part = int(volume * density)
for i in range(n_part):
    system.part.add(id=i, pos=numpy.random.random(3) * system.box_l)
# Assign alternating -1/+1 charges to particle pairs.
# NOTE(review): `n_part / 2 - 1` relies on Python 2 integer division and the
# `- 1` leaves the last pair uncharged — confirm this is intentional.
for i in range(n_part / 2 - 1):
    system.part[2 * i].q = -1.0
    system.part[2 * i + 1].q = 1.0
# Warmup: integrate with a force cap that is raised each round until the
# minimal pair distance exceeds min_dist (or warm_n_times rounds elapse).
#############################################################
lj_cap = 20
system.non_bonded_inter.set_force_cap(lj_cap)
i = 0
act_min_dist = system.analysis.mindist()
while (i < warm_n_times and act_min_dist < min_dist):
    integrate.integrate(warm_steps)
    # Warmup criterion
    act_min_dist = system.analysis.mindist()
    i += 1
    lj_cap = lj_cap + 10
    system.non_bonded_inter.set_force_cap(lj_cap)
# A cap of 0 disables force capping for the production run.
lj_cap = 0
system.non_bonded_inter.set_force_cap(lj_cap)
# P3M setup after charge assigned
#############################################################
p3m = electrostatics.P3M(bjerrum_length=1.0, accuracy=1e-2)
system.actors.add(p3m)
#############################################################
#      Integration                                          #
#############################################################
for i in range(0, int_n_times):
    integrate.integrate(int_steps)
    energies = system.analysis.energy()
    print energies
|
tbereau/espresso
|
samples/python/minimal-charged-particles.py
|
Python
|
gpl-3.0
| 3,219
|
[
"ESPResSo"
] |
cbc6dd00cfb2b3b317ca367e5b35d32e5187cf150b99e1872d49df52a5baeb5f
|
"""
scikit-learn style implementation of Relevance Vector Machine
based regression plus helper functions and example.
Eric Schmidt
e.schmidt@cantab.net
2017-10-12
"""
from __future__ import print_function
from sklearn import linear_model, utils, preprocessing
import sklearn
import numpy as np
from scipy import stats, misc, linalg
import time
import matplotlib.pylab as plt
from math import log
def fun_wrapper(fun, k, k_der=0, dx=1.):
    """Return a callable evaluating the k_der-th derivative of ``fun`` at x*k.

    Fixes the frequency factor ``k`` and the finite-difference settings once,
    delegating the numerical differentiation to ``scipy.misc.derivative``.
    """
    def _wrapped(x):
        scaled = x * k
        return misc.derivative(fun, scaled, dx=dx, n=k_der)
    return _wrapped
def dis_wrapper(dis, dx=1., k_der=1):
    """Return a callable evaluating the k_der-th derivative of ``dis.pdf``.

    ``dis`` is a frozen scipy.stats distribution; differentiation is done
    numerically via ``scipy.misc.derivative``.
    """
    pdf = dis.pdf
    def _wrapped(x):
        return misc.derivative(pdf, x, dx=dx, n=k_der)
    return _wrapped
def cheb_wrapper(i, k):
    """Return a callable evaluating the i-th Chebyshev polynomial T_i.

    A length-``k`` coefficient vector (``k`` counts the bias term as well) is
    built with a single 1 at position ``i`` and closed over, so the returned
    function is ``x -> chebval(x, e_i)``.
    """
    coeffs = np.zeros(k)
    coeffs[i] = 1
    def _wrapped(x):
        return np.polynomial.chebyshev.chebval(x, coeffs)
    return _wrapped
class GaussianFeatures(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):
    """Generate Gaussian features.
    Generate a design matrix of k Gaussians starting at mu0, separated
    by dmu all with the same scale.
    Parameters
    ----------
    k : int, optional, default 10
        The number of Gaussians.
    mu0 : float, optional, default 0
        The starting point for placing the first Gaussian.
    dmu : float, optional, default 1
        The increment to use separating the Gaussians.
    scale : float, optional, default 1
        The scale of all Gaussians.
    include_bias : boolean, optional, default True
        The design matrix includes a bias column if True.
    k_der : int, optional, default 0
        Order of the derivative of each basis function to evaluate
        (0 = the Gaussians themselves).
    Example
    --------
    >>> x = np.linspace(-np.pi,np.pi,100)
    >>> trafo = GaussianFeatures(k=30,mu0=-3,dmu=.2)
    >>> X = trafo.fit_transform(x.reshape((-1,1)))
    """
    def __init__(self,k=10,mu0=0,dmu=1.,scale=1.,include_bias=True,k_der=0):
        # sklearn convention: store constructor parameters verbatim,
        # no validation or derived state in __init__.
        self.k = k
        self.mu0 = mu0
        self.dmu = dmu
        self.scale = scale
        self.include_bias = include_bias
        self.k_der = k_der
    @staticmethod
    def _basis_functions(n_features, k, include_bias=True, mu0=0., dmu=.5, scale=1.,
            k_der=0):
        """Generates a np.ndarray of Gaussian basis functions.
        Parameters
        ----------
        n_features : int
            number of features for each observation
        k : int
            number of basis functions
        include_bias : boolean, optional, default True
            whether or not to include a bias function (function that returns 1)
        mu0 : float, optional, default 0
            position of the first Gaussian
        dmu : float, optional, default .5
            increment to shift the Gaussians by
        scale : float, optional, default 1
            scale of all Gaussians
        k_der : int, optional, default 0
            kth Derivative of the basis functions.
        Returns
        -------
        basis : np.ndarray of callables of shape (k(+1),)
        """
        if k_der == 0:
            bias = np.array([lambda x: np.ones(x.shape[0])])
        else: # the bias is a constant for X_0 and therefore 0 for X_1
            bias = np.array([lambda x: np.zeros(x.shape[0])])
        # One frozen normal pdf (or its k_der-th derivative) per basis
        # function, centered at mu0, mu0+dmu, mu0+2*dmu, ...
        G = np.array([dis_wrapper(stats.norm(loc=mu0+_k*dmu,scale=scale),k_der=k_der) for _k in range(k)])
        if include_bias:
            basis = np.concatenate((bias,G))
        else:
            basis = G
        return basis
    def fit(self,X,y=None):
        """Compute number of output features.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The data.
        Returns
        -------
        self : instance
        """
        n_samples, n_features = utils.check_array(X).shape
        self.n_input_features_ = n_features
        # Trailing-underscore attributes mark fitted state (sklearn idiom);
        # transform() checks for them via check_is_fitted.
        self.n_output_features_ = len(self._basis_functions(n_features,self.k,
            self.include_bias, self.mu0, self.dmu, self.scale, k_der=self.k_der))
        return self
    def transform(self,X):
        """Applies the basis functions.
        Parameters
        ----------
        X : np.ndarray of shape (n_samples, n_input_features)
        Returns
        -------
        XP : np.ndarray of shape (n_samples, n_output_features)
            The design matrix.
        Note
        ----
        Requires prior execution of self.fit.
        """
        sklearn.utils.validation.check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
        X = sklearn.utils.validation.check_array(X, dtype=sklearn.utils.validation.FLOAT_DTYPES)
        n_samples, n_features = X.shape
        if n_features != self.n_input_features_:
            raise ValueError("X shape does not match training shape")
        # allocate output data
        XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
        basis = self._basis_functions(self.n_input_features_, self.k, self.include_bias,
            self.mu0, self.dmu, self.scale, k_der=self.k_der)
        # Each basis callable maps the full X to one design-matrix column.
        for i,b in enumerate(basis):
            XP[:,i] = b(X).ravel()
        return XP
    def fit_transform(self,X):
        """Calls fit and transform on X.
        """
        # NOTE(review): shadows TransformerMixin.fit_transform but does not
        # accept y / **fit_params, so it breaks sklearn Pipeline usage that
        # passes y — confirm whether the override is needed at all.
        self.fit(X)
        return self.transform(X)
class FourierFeatures(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):
    """Creates the design matrix X from x using the Fourier basis set.
    Parameters
    ----------
    k : int, optional, default 10
        number of basis functions for both sine and cosine, plus the possible bias
        (note: the implementation generates k-1 sines and k-1 cosines, since
        the frequency index runs over range(1, k))
    include_bias : boolean, optional, default True
        whether or not to include a bias function (function that returns 1)
    k_der : int, optional, default 0
        kth Derivative of the basis functions.
    Example
    -------
    >>> x = np.linspace(-np.pi,np.pi,100)
    >>> trafo = FourierFeatures(k=10)
    >>> X = trafo.fit_transform(x.reshape((-1,1)))
    """
    def __init__(self, k=10, include_bias=True, k_der=0):
        # sklearn convention: store constructor parameters verbatim.
        self.k = k
        self.k_der = k_der
        self.include_bias = include_bias
    @staticmethod
    def _basis_functions(n_features, k, include_bias, k_der=0):
        """Generates a np.ndarray of sine and cosine basis functions.
        Parameters
        ----------
        n_features : int
            number of features for each observation
        k : int
            number of basis functions for each sine and cosine
            (k-1 of each are actually produced, frequencies 1..k-1)
        include_bias : boolean, optional, default True
            whether or not to include a bias function (function that returns 1)
        Returns
        -------
        basis : np.ndarray of callables of shape (2*(k-1)(+1),)
        """
        # For derivatives the bias column becomes identically zero.
        bias = np.array([lambda x: np.ones(x.shape[0])]) if k_der==0 \
            else np.array([lambda x: np.zeros(x.shape[0])])
        # fun_wrapper fixes the frequency _k; k_der selects the derivative order.
        sin = np.array([fun_wrapper(np.sin,_k, k_der=k_der) for _k in range(1,k)])
        cos = np.array([fun_wrapper(np.cos,_k, k_der=k_der) for _k in range(1,k)])
        if include_bias:
            basis = np.concatenate((bias,sin,cos))
        else:
            basis = np.concatenate((sin,cos))
        return basis
    def fit(self,X,y=None):
        """
        Compute number of output features.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The data.
        Returns
        -------
        self : instance
        """
        n_samples, n_features = utils.check_array(X).shape
        self.n_input_features_ = n_features
        # k_der is omitted here; harmless because the number of basis
        # functions does not depend on the derivative order.
        self.n_output_features_ = len(self._basis_functions(n_features,self.k,self.include_bias))
        return self
    def transform(self,X):
        """Applies the basis functions.
        Parameters
        ----------
        X : np.ndarray of shape (n_samples, n_input_features)
        Returns
        -------
        XP : np.ndarray of shape (n_samples, n_output_features)
            The design matrix.
        Note
        ----
        Requires prior execution of self.fit.
        """
        sklearn.utils.validation.check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
        X = sklearn.utils.validation.check_array(X, dtype=sklearn.utils.validation.FLOAT_DTYPES)
        n_samples, n_features = X.shape
        if n_features != self.n_input_features_:
            raise ValueError("X shape does not match training shape")
        # allocate output data
        XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
        basis = self._basis_functions(self.n_input_features_, self.k, self.include_bias,\
            k_der=self.k_der)
        # Each basis callable fills one column of the design matrix.
        for i,b in enumerate(basis):
            XP[:,i] = b(X).ravel()
        return XP
    def fit_transform(self,X):
        """Calls fit and transform on X.
        """
        # NOTE(review): does not accept y — see GaussianFeatures.fit_transform.
        self.fit(X)
        return self.transform(X)
class ChebyshevFeatures(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):
    """Creates the design matrix X from x using Chebyshev polynomials.
    Parameters
    ----------
    k : int, optional, default 10
        number of basis functions, plus the possible bias
        (note: the implementation generates T_1..T_{k-1}; the bias column,
        when included, plays the role of T_0)
    include_bias : boolean, optional, default True
        whether or not to include a bias function (function that returns 1)
    Example
    -------
    >>> x = np.linspace(-np.pi,np.pi,100)
    >>> trafo = ChebyshevFeatures(k=10)
    >>> X = trafo.fit_transform(x.reshape((-1,1)))
    """
    def __init__(self,k=10,include_bias=True):
        # sklearn convention: store constructor parameters verbatim.
        self.k = k
        self.include_bias = include_bias
    @staticmethod
    def _basis_functions(n_features, k, include_bias):
        """Generates a np.ndarray of Chebyshev polynomials.
        Parameters
        ----------
        n_features : int
            number of features for each observation
        k : int
            number of basis functions (T_1..T_{k-1} are produced)
        include_bias : boolean, optional, default True
            whether or not to include a bias function (function that returns 1)
        Returns
        -------
        basis : np.ndarray of callables of shape (k(+1),)
        """
        bias = np.array([lambda x: np.ones(x.shape[0])])
        # cheb_wrapper(_k, k) evaluates the _k-th Chebyshev polynomial.
        T = np.array([cheb_wrapper(_k,k) for _k in range(1,k)])
        if include_bias:
            basis = np.concatenate((bias,T))
        else:
            basis = T
        return basis
    def fit(self,X,y=None):
        """
        Compute number of output features.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The data.
        Returns
        -------
        self : instance
        """
        n_samples, n_features = utils.check_array(X).shape
        self.n_input_features_ = n_features
        self.n_output_features_ = len(self._basis_functions(n_features,self.k,self.include_bias))
        return self
    def transform(self,X):
        """Applies the basis functions.
        Parameters
        ----------
        X : np.ndarray of shape (n_samples, n_input_features)
        Returns
        -------
        XP : np.ndarray of shape (n_samples, n_output_features)
            The design matrix.
        Note
        ----
        Requires prior execution of self.fit.
        """
        sklearn.utils.validation.check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
        X = sklearn.utils.validation.check_array(X, dtype=sklearn.utils.validation.FLOAT_DTYPES)
        n_samples, n_features = X.shape
        if n_features != self.n_input_features_:
            raise ValueError("X shape does not match training shape")
        # allocate output data
        XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
        basis = self._basis_functions(self.n_input_features_,self.k,self.include_bias)
        # Each basis callable fills one column of the design matrix.
        for i,b in enumerate(basis):
            XP[:,i] = b(X).ravel()
        return XP
    def fit_transform(self,X):
        """Calls fit and transform on X.
        """
        # NOTE(review): does not accept y — see GaussianFeatures.fit_transform.
        self.fit(X)
        return self.transform(X)
def full_weight_vector(w, active, inactive):
    """Expand a pruned RVM weights vector to full length by zero padding.

    Parameters
    ----------
    w : float np.ndarray of shape [n_active]
        Non-zero weights produced by the RVM.
    active : int np.ndarray of shape [n_active]
        Positions in the full vector that the entries of ``w`` occupy.
    inactive : int np.ndarray of shape [n_features - n_active]
        Positions that are zero in the full vector (only its length is used).

    Returns
    -------
    w_full : float np.ndarray of shape [n_features]
        Full weights vector with zeros at the inactive positions.
    """
    n_features = len(active) + len(inactive)
    padded = np.zeros(n_features)
    padded[active] = w
    return padded
class RelevanceVectorMachine(linear_model.base.LinearModel,sklearn.base.RegressorMixin):
    """Relevance vector machine regression.
    Fits the weights of a linear model. The weights of the model are assumed to
    be normally distributed. RVMs also estimate the parameters alpha (precisions
    of the distributions of the weights) and beta (precision of the distribution
    of the noise) using type-II maximum likelihood or evidence maximization pruning
    weights, thus leading to sparse weights vectors.
    The algorithm is implemented as described by Faul and Tipping, 2003, AISTAT,
    https://pdfs.semanticscholar.org/11f4/d997de8e35a1daf8b115439345d9994cfb69.pdf.
    Parameters
    ----------
    n_iter : int
        maximum number of iterations
    tol : float, optional, default 1.e-3
        weights convergence tolerance threshold
    compute_score : boolean, optional, default True
        whether or not to compute mse and estimate and standard
        deviation of the deviation
    fit_intercept : boolean, optional, default True
        whether or not to fit the intercept
    normalize : boolean, optional, default False
    copy_X : boolean, optional, default True
    verbose : boolean, optional, default False
    init_beta : float or callable, optional, default None
        if float needs to be bigger than 0
        elif callable then the function needs to return a single value
    init_alphas : np.ndarray list or tuple of float or callable, optional, default None
        same as for init_beta but for a vector of values
    do_logbook : boolean
        Whether or not to keep the logbook during regression.
        Format logbook = {"L":[],"alphas":[],"beta":[],"weights":[],"weights_full":[],"mse":[],"tse":[],"min":[],"max":[],"Sigma":[],
                          "dev_est":[],"dev_std":[],"median_se":[]}
        Note that if do_logbook is True then the weights and hyperparameters with the smallest
        mse will be stored for future predictions. This is because under some circumstances
        the convergence may fluctuate strongly.
    beta_every : int, optional, default 1
        The noise precision is updated every 'beta_every' iterations.
    update_pct : float, optional, default 1
        The percentage of alphas to be updated every iteration. This can be useful to prevent
        RVMs removing all weights in one iteration.
    Attributes
    ----------
    beta_ : float
        noise precision
    alphas_ : np.ndarray (n_features,) of float
        weight precisions
    active : np.ndarray (n_active,) of int
        indices to places in the full weights vector to currently active weights
    inactive : np.ndarray (n_active,) of int
        indices to places in the full weights vector to currently inactive weights
    n_iter : int
        maximum number of iterations
    tol : float
        weight convergence tolerance
    compute_score : boolean
        stores mse_, dev_est and dev_std if true
    mse_ : list of float
        mean square errors = (t-y)**2/n_samples
    dev_est : list of float
        estimate of deviation = (t-y)/n_samples
    dev_std : list of float
        one standard deviation of the deviations = np.std(t-y,ddof=1)
    sigma_ : np.ndarray (n_features,n_features) of float
        contains the posterior covariance matrix of p(t|Xw,beta)*p(w|alphas)
    do_logbook : boolean
    logbook : dict of lists
    Example
    -------
    >>> from my_linear_model import RelevanceVectorMachine
    >>> from sklearn import preprocessing
    >>> import numpy as np
    >>> from scipy import stats
    >>> x = np.linspace(-np.pi,np.pi,100)
    >>> x_pred = np.linspace(-np.pi,np.pi,200)
    >>> epsilon = stats.norm(loc=0,scale=0.01)
    >>> t = np.exp(-x**2) + epsilon.rvs(size=x.shape[0])
    >>> k = 5
    >>> trafo = preprocessing.PolynomialFeatures(k)
    >>> X = trafo.fit_transform(x.reshape((-1,1)))
    >>> init_beta = 1./ np.var(t) # (that's the default start)
    >>> init_alphas = np.ones(X.shape[1])
    >>> init_alphas[1:] = np.inf
    >>> model = RelevanceVectorMachine(n_iter=50,verbose=False,compute_score=True,init_beta=init_beta,
    ...     init_alphas=init_alphas)
    >>> model.fit(X,t)
    RelevanceVectorMachine(compute_score=True, copy_X=True, fit_intercept=True,
                init_alphas=array([  1.,  inf,  inf,  inf,  inf,  inf]),
                init_beta=8.2821399938358535, n_iter=50, normalize=False,
                tol=0.001, verbose=False)
    >>> y, yerr = model.predict(X,return_std=True)
    Notes
    -----
    The notation here is adopted from Tipping 2001, Faul and Tipping 2003 and Bishop's "Pattern
    Recognition and Machine Learning" book. No jumping in the sewer!
    NOTE(review): relies on ``sklearn.linear_model.base`` (private module,
    renamed ``_base`` in sklearn >= 0.22) — confirm the pinned sklearn version.
    References
    ----------
    Mike Tipping's favorite implementation: http://www.miketipping.com/downloads.htm
    David MacKay's 1992, Bayesian Interpolation
    http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf
    http://statweb.stanford.edu/~tibs/sta305files/Rudyregularization.pdf -> Ridge regression and SVD
    http://www.statisticshowto.com/wp-content/uploads/2017/07/lecture-notes.pdf -> Ridge regression and SVD and Woodbury
    """
    def __init__(self, n_iter=300, tol=1.e-3, compute_score=False,
                 fit_intercept=False, normalize=False, copy_X=True,
                 verbose=False,init_beta=None,init_alphas=None,do_logbook=False,
                 convergence_condition=None, beta_every=1, update_pct=1.):
        self.n_iter = n_iter
        self.tol = tol
        self.compute_score = compute_score
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.verbose = verbose
        self.init_beta = init_beta
        self.init_alphas = init_alphas
        self.beta_every = int(beta_every)
        assert 0<=update_pct<=1, "'update_pct' has to be between 0 and 1."
        self.update_pct = update_pct
        self.mse_ = []
        self.dev_est = [] # deviation estimate
        self.dev_std = [] # deviation standard deviation
        self.dev_mvs_95pct = [] # bayesian mean, variance and standard deviations and confidence intervals
        self.do_logbook = do_logbook
        self.logbook = {"L":[],"alphas":[],"beta":[],"weights":[],"weights_full":[],"mse":[],"tse":[],"min":[],"max":[],"Sigma":[],
                        "dev_est":[],"dev_std":[],"median_se":[],"dev_mvs_95pct":[]}
        # convergence_condition, when given, must be a callable taking
        # (coef_new_full, coef_old_, self) and returning a truthy value on
        # convergence (see the check inside fit()).
        if not convergence_condition is None:
            assert callable(convergence_condition), "The passed 'convergence_condition' parameter needs to be callable."
        self.convergence_condition = convergence_condition
    @staticmethod
    def _initialize_beta(y, init_beta=None, verbose=False):
        # Initialize the noise precision: default 1/var(y), or from a
        # user-supplied constant / factory callable.
        beta_ = 1. / np.var(y) # default
        if not init_beta is None:
            if callable(init_beta):
                if verbose: print("Setting beta_ = init_beta()")
                beta_ = init_beta()
                assert beta_ > 0., "init_beta() produced an invalid beta_ value = {}".format(beta_)
            elif isinstance(init_beta,(int,float)):
                if verbose: print("Setting beta_ = init_beta")
                beta_ = np.copy(init_beta)
            else:
                raise ValueError("Do not understand self.init_beta = {}".format(init_beta))
        else:
            if verbose:
                print("Setting default beta_ = 1/var(t)")
        return beta_
    @staticmethod
    def _initialize_alphas(X, init_alphas=None, verbose=False):
        # Initialize the weight precisions. np.inf marks a pruned (inactive)
        # basis function; by default only the first one starts active.
        n_samples, n_features = X.shape
        alphas_ = np.ones(n_features) # default
        alphas_[1:] = np.inf # setting all but one basis function as inactive (see Faul and Tipping 2003 p.4)
        if not init_alphas is None:
            if callable(init_alphas):
                if verbose: print("Setting alphas_ = init_alphas()")
                alphas_ = init_alphas(X)
                assert (alphas_ > 0.).all(), "init_alphas() produced an invalid alphas_ array = {}".format(alphas_)
            elif isinstance(init_alphas,(list,tuple,np.ndarray)):
                if verbose: print("Setting alphas_ = init_alphas")
                alphas_ = np.copy(init_alphas)
            else:
                raise ValueError("Do not understand self.init_alphas = {}".format(init_alphas))
        else:
            if verbose:
                print("Setting default alphas_ = [1,inf,inf,...]")
        return alphas_
    def fit(self, X, y):
        """Fit the model
        Parameters
        ----------
        X : numpy array of shape [n_samples,n_features]
            Training data
        y : numpy array of shape [n_samples]
            Target values. Will be cast to X's dtype if necessary
        Returns
        -------
        self : returns an instance of self.
        """
        # Reset per-fit diagnostics so repeated fit() calls do not accumulate.
        self.mse_ = []
        self.dev_est = []
        self.dev_std = []
        X, y = utils.check_X_y(X, y, dtype=np.float64, y_numeric=True)
        X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        self.X_offset_ = X_offset_
        self.X_scale_ = X_scale_
        n_samples, n_features = X.shape
        verbose = self.verbose
        # Initialization of the hyperparameters
        beta_ = self._initialize_beta(y,init_beta=self.init_beta,verbose=self.verbose)
        alphas_ = self._initialize_alphas(X,init_alphas=self.init_alphas,verbose=self.verbose)
        new_alphas_ = np.copy(alphas_)
        self.scores_ = list()
        coef_old_ = None
        # NOTE(review): XT_y appears to be unused below.
        XT_y = np.dot(X.T, y)
        # Convergence loop of the RVM regression
        N, M = X.shape
        for iter_ in range(self.n_iter):
            # (in-)active basis functions
            active = np.where(np.isfinite(alphas_))[0]
            n_active = active.shape[0]
            inactive = np.where(np.isinf(alphas_))[0]
            if verbose:
                print("{}: active / inactive functions = {} / {} ".format(iter_+1, len(active),len(inactive)))
            # corresponding Sigma matrix (weights hyperprior covariance matrix)
            Sigma = np.diag(alphas_)
            Sigma_a = np.diag(alphas_[active]) # active part of Sigma -> numpy select?
            X_a = X[:,active] # active part of the design matrix
            # weights posterior mean (w_new) and covariance (A_new)
            A_new = np.linalg.inv(beta_ * X_a.T.dot(X_a) + Sigma_a)
            w_new = beta_ * A_new.dot(X_a.T.dot(y))
            # mse (actually the total squared error; divided by n_samples below)
            dt = y - np.dot(X_a, w_new)
            #mse_ = np.linalg.norm(dt)**2
            mse_ = (dt**2).sum()
            # Compute objective function: Gaussian for p(w|X,t,alphas,beta) \propto p(t|Xw,beta)p(w|alphas)
            if self.compute_score:
                log_prefactor = n_features*(beta_ - 2.*np.pi) - alphas_[active].sum() - 2.*np.pi
                log_likelihood = -beta_ * mse_
                log_prior = - w_new.T.dot(Sigma_a.dot(w_new))
                log_posterior = .5 * (log_prefactor + log_likelihood + log_prior)
                self.scores_.append(log_posterior)
                self.mse_.append(float(mse_/n_samples))
                self.dev_est.append(dt.mean())
                self.dev_std.append(dt.std(ddof=1))
                self.dev_mvs_95pct.append(stats.bayes_mvs(dt,alpha=.95))
            if self.do_logbook:
                # NOTE(review): this local 'logbook' dict is created but never
                # used; the live record is self.logbook below.
                logbook = {"L":[],"alphas":[],"beta":[],
                           "weights":[],"weights_full":[],"mse":[],"tse":[],"min":[],"max":[],"Sigma":[],
                           "dev_est":[],"dev_std":[],"median_se":[]}
                if self.compute_score:
                    self.logbook["L"].append(self.scores_[-1])
                else:
                    log_prefactor = n_features*(beta_ - 2.*np.pi) - alphas_[active].sum() - 2.*np.pi
                    log_likelihood = -beta_ * mse_
                    log_prior = - w_new.T.dot(Sigma_a.dot(w_new))
                    log_posterior = .5 * (log_prefactor + log_likelihood + log_prior)
                    self.logbook["L"].append(log_posterior)
                self.logbook["alphas"].append(alphas_)
                self.logbook["beta"].append(beta_)
                self.logbook["weights"].append(w_new)
                self.logbook["weights_full"].append(full_weight_vector(w_new,active,inactive))
                self.logbook["mse"].append(mse_/n_samples)
                self.logbook["tse"].append(mse_)
                self.logbook["min"].append(np.amin(dt))
                self.logbook["max"].append(np.amax(dt))
                self.logbook["dev_est"].append(dt.mean())
                self.logbook["dev_std"].append(dt.std())
                self.logbook["dev_mvs_95pct"].append(stats.bayes_mvs(dt,alpha=.95))
                self.logbook["median_se"].append(np.median(dt))
            # Check for convergence
            if iter_ != 0:
                coef_new_full = full_weight_vector(np.copy(w_new),active,inactive)
                if self.convergence_condition is None:
                    # default criterion: L1 change of the full weights vector
                    if np.sum(np.abs(coef_new_full - coef_old_)) < self.tol:
                        if verbose:
                            print("Convergence after ", str(iter_), " iterations")
                        break
                else:
                    if self.convergence_condition(coef_new_full,coef_old_,self):
                        if verbose:
                            print("Convergence after ", str(iter_), " iterations")
                        break
            # end of the rope
            if iter_ >= self.n_iter-1:
                if verbose:
                    print("Iteration terminated after n_iter = {} step(s)".format(self.n_iter))
                break
            coef_old_ = full_weight_vector(np.copy(w_new),active,inactive)
            # Recompute beta
            beta_old_ = np.copy(beta_)
            if iter_ % self.beta_every == 0:
                beta_ = (n_samples - n_active + np.sum(alphas_[active]*np.diag(A_new)))
                beta_ /= mse_
            """
            # Compute S and Q (Faul and Tipping 2003 eqs. 24 & 25)
            S0_tilde = beta_old_ * np.einsum("nm,nm->m", X, X) # in R^(n_features)
            S1_tilde = - beta_old_**2 * np.einsum("mn,na->ma",X.T,np.dot(X_a,A_new)) # in R^(n_features x n_active)
            S2_tilde = np.einsum("na,nm->am",X_a, X) # in R^(n_active x n_features)
            S = S0_tilde + np.einsum("ma,am->m",S1_tilde,S2_tilde)
            Q0_tilde = beta_old_ * np.einsum("nm,n->m", X, y) # in R^(n_features)
            Q2_tilde = np.einsum("na,n->a",X_a, y) # in R^(n_active)
            Q = Q0_tilde + np.einsum("ma,a->m",S1_tilde,Q2_tilde)
            # Compute s and q (note the lower case)
            s = np.copy(S)
            q = np.copy(Q)
            s[active] = alphas_[active]*S[active]/(alphas_[active]-S[active])
            q[active] = alphas_[active]*Q[active]/(alphas_[active]-S[active])
            # Recompute alphas using pruning
            active = np.where(q**2>s)[0]
            inactive = np.where(np.logical_not(q**2>s))[0]
            new_alphas_[inactive] = np.inf
            new_alphas_[active] = s[active]**2/(q[active]**2-s[active])
            """
            # alternative version
            tmp = beta_old_ * X.T - beta_old_**2 * np.dot(X.T.dot(X_a), A_new.dot(X_a.T))
            Q = np.dot(tmp,y)
            Q = np.reshape(Q,(-1,))
            S = np.einsum("ij,ji->i",tmp,X)
            q, s = np.zeros(M), np.zeros(M)
            q[inactive] = Q[inactive]
            q[active] = alphas_[active]*Q[active]/(alphas_[active]-S[active])
            s[inactive] = S[inactive]
            s[active] = alphas_[active]*S[active]/(alphas_[active]-S[active])
            # Pruning rule (Faul & Tipping): a basis function stays active
            # only while q**2 > s; otherwise its alpha is set to infinity.
            q2_larger_s = np.where(q**2>s)[0]
            q2_smaller_s = np.where(q**2<=s)[0]
            new_alphas_[q2_larger_s] = s[q2_larger_s]**2/(q[q2_larger_s]**2-s[q2_larger_s])
            new_alphas_[q2_smaller_s] = np.inf
            if self.update_pct < 1:
                # update only a random subset of alphas to damp pruning
                ix = np.random.choice(np.arange(M),replace=False,size=int(M*self.update_pct))
                alphas_[ix] = new_alphas_[ix]
            else:
                alphas_ = np.copy(new_alphas_)
        if self.do_logbook:
            # Restore the hyperparameters of the iteration with the smallest
            # mse (convergence can fluctuate; see class docstring).
            ix = np.argsort(np.array(self.logbook["mse"]))[0]
            alphas_ = np.array(self.logbook["alphas"][ix])
            beta_ = self.logbook["beta"][ix]
            active = np.where(np.isfinite(alphas_))[0]
            inactive = np.where(np.isinf(alphas_))[0]
        Sigma = np.diag(alphas_)
        Sigma_a = np.diag(alphas_[active]) # active part of Sigma -> numpy select?
        X_a = X[:,active] # active part of the design matrix
        # weights posterior mean (w_new) and covariance (A_new)
        A_new = np.linalg.inv(beta_ * X_a.T.dot(X_a) + Sigma_a)
        w_new = beta_ * A_new.dot(X_a.T.dot(y))
        self.coef_ = w_new
        self.active = active
        self.inactive = inactive
        self.sigma_ = A_new
        self.beta_ = beta_
        self._set_intercept(X_offset_[active], y_offset_, X_scale_[active])
        return self
    def predict(self, X, return_std=False):
        """Predict using the linear model.
        In addition to the mean of the predictive distribution, also its
        standard deviation can be returned.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = (n_samples, n_features)
            Samples.
        return_std : boolean, optional
            Whether to return the standard deviation of posterior prediction.
        Returns
        -------
        y_mean : array, shape = (n_samples,)
            Mean of predictive distribution of query points.
        y_std : array, shape = (n_samples,)
            Standard deviation of predictive distribution of query points.
        """
        # Only the active (non-pruned) columns participate in prediction.
        X_a = X[:,self.active]
        y_mean = self._decision_function(X_a)
        if return_std is False:
            return y_mean
        else:
            if self.normalize:
                X_a = (X_a - self.X_offset_) / self.X_scale_
            # predictive variance = data term + noise term 1/beta
            sigmas_squared_data = (X_a.dot(self.sigma_) * X_a).sum(axis=1)
            y_std = np.sqrt(sigmas_squared_data + (1. / self.beta_))
            return y_mean, y_std
    def get_full_weights_vector(self):
        # Zero-padded weights over all basis functions (pruned ones are 0).
        return full_weight_vector(self.coef_,self.active,self.inactive)
    def get_logbook(self):
        assert self.do_logbook, "Logbook empty because do_logbook = {}.".format(self.do_logbook)
        return self.logbook
def iscomplex(a, verbose=False, atol=1e-20):
    """Return True if array ``a`` carries a non-negligible imaginary part.

    The summed absolute imaginary component is compared to zero with
    tolerance ``atol``. With ``verbose=True`` the result is the tuple
    ``(flag, summed_imag)`` instead of the bare flag.
    """
    imag_mass = np.absolute(a.imag).sum()
    has_imag = not np.isclose(imag_mass, 0, atol=atol)
    if verbose:
        return has_imag, imag_mass
    return has_imag
class BayesianRidge(linear_model.base.LinearModel, sklearn.base.RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : float
estimated precision of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
For an example, see :ref:`examples/linear_model/plot_bayesian_ridge.py
<sphx_glr_auto_examples_linear_model_plot_bayesian_ridge.py>`.
References
----------
D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems,
Vol. 4, No. 3, 1992.
R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
Their beta is our ``self.alpha_``
Their alpha is our ``self.lambda_``
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False, kind="sk"):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
# modification
implemented_kinds = ["sk","naive"]
assert kind in implemented_kinds, "Given 'kind' parameter (%s) not recognized! Implemented kinds: %s" % (kind,implemented_kinds)
self.kind = kind
    def fit(self, X, y):
        """Fit the Bayesian ridge model.

        Accepts either a single dataset (``X``/``y`` as numpy arrays) or a
        list of datasets ("multiple alphas" mode) that share one coefficient
        vector but get an individual noise precision ``alpha_`` each.

        Parameters
        ----------
        X : numpy array of shape [n_samples,n_features], or list thereof
            Training data
        y : numpy array of shape [n_samples], or list thereof
            Target values. Will be cast to X's dtype if necessary
        Returns
        -------
        self : returns an instance of self.
        """
        # --- input validation: one (X, y) pair, or matched lists of pairs.
        if all([isinstance(X,np.ndarray), isinstance(y,np.ndarray)]):
            multiple_alphas = False
        elif all([isinstance(X,list), isinstance(y,list)]):
            assert len(X) == len(y), "The length of X (%i) and y (%i) have to be identical!" % (len(X), len(y))
            assert len(set([_x.shape[1] for _x in X]))==1, "The number of features has to be the same for all X!"
            N_alphas = len(X)
            if all([isinstance(_x, np.ndarray) for _x in X]) and all([isinstance(_y, np.ndarray) for _y in y]):
                for i in range(N_alphas):
                    # NOTE(review): the message has four %i placeholders but only
                    # two arguments, so a failing assertion raises TypeError
                    # instead of showing this message -- TODO fix the arguments.
                    assert len(X[i])==len(y[i]), "The number of entries in X[%i] (%i) and y[%i] (%i) has to be equal!" % (len(X[i]), len(y[i]))
                multiple_alphas = True
            else:
                raise ValueError("X (%s) and y (%s) both have to contain only np.ndarrays!" %([type(_x) for _x in X], [type(_y) for _y in y]))
        else:
            raise ValueError("X (%s) and y (%s) both need to be either numpy arrays of lists or numpy arrays!" %(type(X),type(y)))
        # --- validation + centering/scaling, per dataset.
        if not multiple_alphas:
            X, y = utils.check_X_y(X, y, dtype=np.float64, y_numeric=True)
            X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
                X, y, self.fit_intercept, self.normalize, self.copy_X)
            self.X_offset_ = X_offset_
            self.X_scale_ = X_scale_
            n_samples, n_features = X.shape
        else:
            X_offset_, y_offset_, X_scale_ = [None for v in range(N_alphas)],\
                [None for v in range(N_alphas)],\
                [None for v in range(N_alphas)]
            n_samples = [None for v in range(N_alphas)]
            for i in range(N_alphas):
                X[i], y[i] = utils.check_X_y(X[i], y[i], dtype=np.float64, y_numeric=True)
                X[i], y[i], X_offset_[i], y_offset_[i], X_scale_[i] = self._preprocess_data(
                    X[i], y[i], self.fit_intercept, self.normalize, self.copy_X)
                n_samples[i], n_features = X[i].shape
            self.X_offset_ = X_offset_
            self.X_scale_ = X_scale_
        # Initialization of the values of the parameters
        if not multiple_alphas:
            alpha_ = 1. / np.var(y) # alpha is the noise precision parameter (commonly beta)
        else:
            alpha_ = [1. / np.var(y[i]) for i in range(N_alphas)]
        lambda_ = 1. # lambda is the weights prior precision parameter (commonly alpha)
        verbose = self.verbose
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2
        self.scores_ = list()
        coef_old_ = None
        # --- cache SVDs / Gram matrices; reused by every iteration below.
        if not multiple_alphas:
            XT_y = np.dot(X.T, y)
            U, S, Vh = linalg.svd(X, full_matrices=False)
            eigen_vals_ = S ** 2
            XT_X = X.T.dot(X)
        else:
            XT_y = [None for v in range(N_alphas)]
            U = [None for v in range(N_alphas)]
            S = [None for v in range(N_alphas)]
            Vh = [None for v in range(N_alphas)]
            eigen_vals_ = [None for v in range(N_alphas)]
            XT_X = [None for v in range(N_alphas)]
            for i in range(N_alphas):
                XT_y[i] = np.dot(X[i].T, y[i])
                U[i], S[i], Vh[i] = linalg.svd(X[i], full_matrices=False)
                eigen_vals_[i] = S[i] ** 2
                XT_X[i] = X[i].T.dot(X[i])
        # Convergence loop of the bayesian ridge regression
        # N_g_M: "N greater than M", i.e. more samples than features.
        N_g_M = n_samples > n_features if not multiple_alphas else all([n_samples[i]>n_features for i in range(N_alphas)])
        for iter_ in range(self.n_iter):
            # Compute mu and sigma
            # sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
            # coef_ = sigma_^-1 * XT * y
            if not multiple_alphas:
                if self.kind == "sk":
                    if N_g_M:
                        coef_ = np.dot(Vh.T,
                                       Vh / (eigen_vals_ +
                                             lambda_ / alpha_)[:, np.newaxis])
                        coef_ = np.dot(coef_, XT_y)
                        if self.compute_score:
                            logdet_sigma_ = - np.sum(
                                np.log(lambda_ + alpha_ * eigen_vals_))
                    else:
                        # Underdetermined case: solve in sample space via U.
                        coef_ = np.dot(X.T, np.dot(
                            U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
                        coef_ = np.dot(coef_, y)
                        if self.compute_score:
                            logdet_sigma_ = lambda_ * np.ones(n_features)
                            logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
                            logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
                elif self.kind == "naive":
                    # Direct inversion of the posterior precision matrix.
                    sigma_inv_ = alpha_ * XT_X + lambda_ * np.eye(n_features)
                    sigma_ = np.linalg.inv(sigma_inv_)
                    coef_ = sigma_.dot(alpha_*XT_y)
                    if self.compute_score:
                        # NOTE(review): assigns 'logdet_sigma' (missing trailing
                        # underscore); the score below reads 'logdet_sigma_',
                        # which is stale or undefined here -- TODO fix the typo.
                        logdet_sigma = - np.sum(np.log(sigma_))
            else:
                if self.kind == "sk":
                    if N_g_M:
                        #coef_ = [np.dot(Vh[i].T,
                        #                Vh[i] / (alpha_[i]*eigen_vals_[i] +
                        #                lambda_ )[:, np.newaxis]) for i in range(N_alphas)]
                        coef_ = [np.dot(Vh[i].T, (alpha_[i]*eigen_vals_[i]) * Vh[i]) \
                                 for i in range(N_alphas)]
                        coef_ = np.linalg.inv(sum(coef_) + lambda_*np.eye(n_features))
                        XT_y_ = sum([XT_y[i]*alpha_[i] for i in range(N_alphas)])
                        coef_ = np.dot(coef_, XT_y_)
                        if self.compute_score:
                            logdet_sigma_ = - np.sum(
                                np.log(lambda_ + sum([alpha_[i] * eigen_vals_[i] for i in range(N_alphas)])))
                    else:
                        raise NotImplementedError
                        # NOTE(review): everything below the raise is unreachable
                        # dead code; it also uses sum(y) where the weighted 'y_'
                        # computed just above was presumably intended.
                        coef_ = [np.dot(X[i].T, np.dot(
                            U[i] / (eigen_vals_[i]*alpha_[i] + lambda_ )[None, :], U[i].T))
                            for i in range(N_alphas)]
                        y_ = [y[i]*alpha_[i] for i in range(N_alphas)]
                        coef_ = np.dot(sum(coef_), sum(y))
                        if self.compute_score:
                            logdet_sigma_ = lambda_ * np.ones(n_features)
                            logdet_sigma_[:n_samples] += sum(alpha_ * eigen_vals_)
                            logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
                elif self.kind == "naive":
                    # Shared coefficients: precision-weighted sum over datasets.
                    alpha_XT_X = sum([alpha_[i] * XT_X[i] for i in range(N_alphas)])
                    alpha_XT_y = sum([alpha_[i] * XT_y[i] for i in range(N_alphas)])
                    sigma_inv_ = alpha_XT_X + lambda_ * np.eye(n_features)
                    sigma_ = np.linalg.inv(sigma_inv_)
                    coef_ = sigma_.dot(alpha_XT_y)
                    if self.compute_score:
                        # NOTE(review): same 'logdet_sigma' vs 'logdet_sigma_'
                        # typo as in the single-dataset naive branch.
                        logdet_sigma = - np.sum(np.log(sigma_))
            if iscomplex(coef_):
                raise ValueError("Found complex model parameters! coef_ =",coef_)
            # Preserve the alpha and lambda values that were used to
            # calculate the final coefficients
            self.alpha_ = alpha_
            self.lambda_ = lambda_
            # Update alpha and lambda
            if not multiple_alphas:
                rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
                gamma_ = (np.sum((alpha_ * eigen_vals_) /
                                 (lambda_ + alpha_ * eigen_vals_)))
                lambda_ = ((gamma_ + 2 * lambda_1) /
                           (np.sum(coef_ ** 2) + 2 * lambda_2))
                alpha_ = ((n_samples - gamma_ + 2 * alpha_1) /
                          (rmse_ + 2 * alpha_2))
                # Compute the objective function
                if self.compute_score:
                    s = lambda_1 * log(lambda_) - lambda_2 * lambda_
                    s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                    s += 0.5 * (n_features * log(lambda_) +
                                n_samples * log(alpha_) -
                                alpha_ * rmse_ -
                                (lambda_ * np.sum(coef_ ** 2)) -
                                logdet_sigma_ -
                                n_samples * log(2 * np.pi))
                    self.scores_.append(s)
            else:
                rmse_ = [np.sum((y[i] - np.dot(X[i], coef_)) ** 2) for i in range(N_alphas)]
                sum_alpha_XT_X = sum([alpha_[i]*XT_X[i] for i in range(N_alphas)])
                #eig_val_sum_alpha_XT_X, eig_vec_sum_alpha_XT_X = np.linalg.eig(sum_alpha_XT_X)
                _u, _s, _v = linalg.svd(sum_alpha_XT_X)
                eig_val_sum_alpha_XT_X = _s
                gamma_ = (np.sum((eig_val_sum_alpha_XT_X) /
                                 (lambda_ + eig_val_sum_alpha_XT_X)))
                gamma_k = [(np.sum((alpha_[i] * eigen_vals_[i]) /
                                   (lambda_ + alpha_[i] * eigen_vals_[i]))) for i in range(N_alphas)]
                lambda_ = ((gamma_ + 2 * lambda_1) /
                           (np.sum(coef_ ** 2) + 2 * lambda_2))
                alpha_ = [((n_samples[i] - gamma_k[i] + 2 * alpha_1) /
                           (rmse_[i] + 2 * alpha_2)) for i in range(N_alphas)]
                # Compute the objective function
                if self.compute_score:
                    # NOTE(review): the 'not multiple_alphas' branch below is dead
                    # (multiple_alphas is True inside this else). In the live
                    # branch 'n_samples' is a list, so 'n_samples * log(2*np.pi)'
                    # raises TypeError -- TODO confirm and fix.
                    if not multiple_alphas:
                        s = lambda_1 * log(lambda_) - lambda_2 * lambda_
                        s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                        s += 0.5 * (n_features * log(lambda_) +
                                    n_samples * log(alpha_) -
                                    alpha_ * rmse_ -
                                    (lambda_ * np.sum(coef_ ** 2)) -
                                    logdet_sigma_ -
                                    n_samples * log(2 * np.pi))
                    else:
                        s = lambda_1 * log(lambda_) - lambda_2 * lambda_
                        for i in range(N_alphas):
                            s += alpha_1 * log(alpha_[i]) - alpha_2 * alpha_[i]
                        s += 0.5 * (n_features * log(lambda_) +
                                    sum([n_samples[i] * log(alpha_[i]) -
                                         alpha_[i] * rmse_[i] for i in range(N_alphas)]) -
                                    (lambda_ * np.sum(coef_ ** 2)) -
                                    logdet_sigma_ -
                                    n_samples * log(2 * np.pi))
                    self.scores_.append(s)
            # Check for convergence
            if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Convergence after ", str(iter_), " iterations")
                break
            coef_old_ = np.copy(coef_)
        self.coef_ = coef_
        # --- final posterior covariance of the weights.
        if not multiple_alphas:
            if self.kind == "sk":
                sigma_ = np.dot(Vh.T,
                                Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis])
                sigma_ /= alpha_
            elif self.kind == "naive":
                sigma_inv_ = alpha_ * XT_X + lambda_ * np.eye(n_features)
                sigma_ = np.linalg.inv(sigma_inv_)
            self.sigma_ = sigma_
        else:
            if self.kind == "sk":
                sigma_ = sum([np.dot(Vh[i].T,
                                     Vh[i] * (eigen_vals_[i]*alpha_[i])) for i in range(N_alphas)])
                sigma_ = np.linalg.inv(sigma_ + lambda_*np.eye(n_features))
            elif self.kind == "naive":
                sigma_inv_ = sum([alpha_[i] * XT_X[i] for i in range(N_alphas)])
                sigma_ = np.linalg.inv(sigma_inv_ + lambda_ * np.eye(n_features))
            self.sigma_ = sigma_
        self._set_intercept(X_offset_, y_offset_, X_scale_)
        return self
def _set_intercept(self, X_offset, y_offset, X_scale):
"""Set the intercept_
"""
if isinstance(self.alpha_,list):
if self.fit_intercept:
self.coef_ = self.coef_ #/ X_scale
self.intercept_ = [0 for v in range(len(self.alpha_))]
else:
self.intercept_ = 0.
else:
if self.fit_intercept:
self.coef_ = self.coef_ / X_scale
self.intercept_ = y_offset - np.dot(X_offset, self.coef_.T)
else:
self.intercept_ = 0.
def _decision_function(self, X):
utils.validation.check_is_fitted(self, "coef_")
if isinstance(self.alpha_,list):
N = len(self.alpha_)
X = [utils.check_array(X[i], accept_sparse=['csr', 'csc', 'coo']) for i in range(N)]
return [utils.extmath.safe_sparse_dot(X[i], self.coef_.T,
dense_output=True) for i in range(N)]
else:
X = utils.check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return utils.extmath.safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, also its
standard deviation can be returned.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
return_std : boolean, optional
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array, shape = (n_samples,)
Mean of predictive distribution of query points.
y_std : array, shape = (n_samples,)
Standard deviation of predictive distribution of query points.
"""
multiple_alphas = isinstance(self.alpha_,list)
if multiple_alphas:
N = len(self.alpha_)
assert (isinstance(X,list) and len(X)==len(self.alpha_)) and all([isinstance(_x,np.ndarray) for _x in X]),\
"'alpha_' is a list, hence X needs to be a list of the same length containing numpy arrays."
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self.normalize:
if not multiple_alphas:
X = (X - self.X_offset_) / self.X_scale_
if not multiple_alphas:
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
else:
sigmas_squared_data = [(np.dot(X[i], self.sigma_) * X[i]).sum(axis=1) \
for i in range(N)]
y_std = [np.sqrt(sigmas_squared_data[i] + (1. / self.alpha_[i])) \
for i in range(N)]
return y_mean, y_std
class LinearRegression(linear_model.base.LinearModel, sklearn.base.RegressorMixin):
    """
    Ordinary least squares Linear Regression.
    Parameters
    ----------
    fit_intercept : boolean, optional, default True
        whether to calculate the intercept for this model. If set
        to False, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit`` on
        an estimator with ``normalize=False``.
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    n_jobs : int, optional, default 1
        The number of jobs to use for the computation.
        If -1 all CPUs are used. This will only provide speedup for
        n_targets > 1 and sufficient large problems.
    kind : str, optional, default "svd"
        Dense solver used for the multi-dataset case: "svd" or "naive".
    Attributes
    ----------
    coef_ : array, shape (n_features, ) or (n_targets, n_features)
        Estimated coefficients for the linear regression problem.
        If multiple targets are passed during the fit (y 2D), this
        is a 2D array of shape (n_targets, n_features), while if only
        one target is passed, this is a 1D array of length n_features.
    intercept_ : array
        Independent term in the linear model.
    Notes
    -----
    From the implementation point of view, this is just plain Ordinary
    Least Squares (scipy.linalg.lstsq) wrapped as a predictor object.
    """
    def __init__(self, fit_intercept=True, normalize=False, copy_X=True,
                 n_jobs=1, kind="svd"):
        # Plain attribute storage; ``kind`` selects the dense solver used in
        # fit() ("svd" or "naive" normal equations).
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.n_jobs = n_jobs
        implemented_kinds = ["svd","naive"]
        assert kind in implemented_kinds, "Given 'kind' parameter (%s) not recognized! Implemented kinds: %s" % (kind,implemented_kinds)
        self.kind = kind
    def fit(self, X, y, sample_weight=None):
        """
        Fit linear model.
        Parameters
        ----------
        X : numpy array or sparse matrix of shape [n_samples,n_features]
            Training data; a list of arrays fits one shared coefficient
            vector across several datasets ("multiple alphas" mode).
        y : numpy array of shape [n_samples, n_targets]
            Target values. Will be cast to X's dtype if necessary
        sample_weight : numpy array of shape [n_samples]
            Individual weights for each sample
            .. versionadded:: 0.17
               parameter *sample_weight* support to LinearRegression.
        Returns
        -------
        self : returns an instance of self.
        """
        # --- input validation: one (X, y) pair, or matched lists of pairs.
        if all([isinstance(X,np.ndarray), isinstance(y,np.ndarray)]):
            multiple_alphas = False
        elif all([isinstance(X,list), isinstance(y,list)]):
            assert len(X) == len(y), "The length of X (%i) and y (%i) have to be identical!" % (len(X), len(y))
            assert len(set([_x.shape[1] for _x in X]))==1, "The number of features has to be the same for all X!"
            N_alphas = len(X)
            self.N_alphas = N_alphas
            if all([isinstance(_x, np.ndarray) for _x in X]) and all([isinstance(_y, np.ndarray) for _y in y]):
                for i in range(N_alphas):
                    # NOTE(review): four %i placeholders but only two arguments;
                    # a failing assertion raises TypeError instead of this message.
                    assert len(X[i])==len(y[i]), "The number of entries in X[%i] (%i) and y[%i] (%i) has to be equal!" % (len(X[i]), len(y[i]))
                multiple_alphas = True
            else:
                raise ValueError("X (%s) and y (%s) both have to contain only np.ndarrays!" %([type(_x) for _x in X], [type(_y) for _y in y]))
        else:
            raise ValueError("X (%s) and y (%s) both need to be either numpy arrays of lists or numpy arrays!" %(type(X),type(y)))
        self.multiple_alphas = multiple_alphas
        n_jobs_ = self.n_jobs
        # --- validation + centering/scaling, per dataset.
        if not multiple_alphas:
            X, y = utils.check_X_y(X, y, dtype=np.float64, y_numeric=True)
            X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
                X, y, self.fit_intercept, self.normalize, self.copy_X)
            self.X_offset_ = X_offset_
            self.X_scale_ = X_scale_
            n_samples, n_features = X.shape
        else:
            X_offset_, y_offset_, X_scale_ = [None for v in range(N_alphas)],\
                [None for v in range(N_alphas)],\
                [None for v in range(N_alphas)]
            n_samples = [None for v in range(N_alphas)]
            for i in range(N_alphas):
                X[i], y[i] = utils.check_X_y(X[i], y[i], dtype=np.float64, y_numeric=True)
                X[i], y[i], X_offset_[i], y_offset_[i], X_scale_[i] = self._preprocess_data(
                    X[i], y[i], self.fit_intercept, self.normalize, self.copy_X)
                n_samples[i], n_features = X[i].shape
            self.X_offset_ = X_offset_
            self.X_scale_ = X_scale_
        # X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
        #                  y_numeric=True, multi_output=True)
        if sample_weight is not None and np.atleast_1d(sample_weight).ndim > 1:
            raise ValueError("Sample weights must be 1D array or scalar")
        # X, y, X_offset, y_offset, X_scale = self._preprocess_data(
        #     X, y, fit_intercept=self.fit_intercept, normalize=self.normalize,
        #     copy=self.copy_X, sample_weight=sample_weight)
        # if sample_weight is not None:
        #     # Sample weight can be implemented via a simple rescaling.
        #     X, y = _rescale_data(X, y, sample_weight)
        import scipy.sparse as sp
        if not isinstance(X,list) and sp.issparse(X):
            if y.ndim < 2:
                out = sparse_lsqr(X, y)
                self.coef_ = out[0]
                self._residues = out[3]
            else:
                # sparse_lstsq cannot handle y with shape (M, K)
                outs = Parallel(n_jobs=n_jobs_)(
                    delayed(sparse_lsqr)(X, y[:, j].ravel())
                    for j in range(y.shape[1]))
                # NOTE(review): np.vstack is fed a generator here; newer NumPy
                # requires an explicit sequence -- TODO wrap in list(...).
                self.coef_ = np.vstack(out[0] for out in outs)
                self._residues = np.vstack(out[3] for out in outs)
        else:
            if multiple_alphas:
                # Per-dataset SVDs and Gram matrices for the shared solve.
                XT_y = [None for v in range(N_alphas)]
                U = [None for v in range(N_alphas)]
                S = [None for v in range(N_alphas)]
                Vh = [None for v in range(N_alphas)]
                eigen_vals_ = [None for v in range(N_alphas)]
                XT_X = [None for v in range(N_alphas)]
                for i in range(N_alphas):
                    XT_y[i] = np.dot(X[i].T, y[i])
                    U[i], S[i], Vh[i] = linalg.svd(X[i], full_matrices=False)
                    eigen_vals_[i] = S[i] ** 2
                    XT_X[i] = X[i].T.dot(X[i])
            # more samples than features?
            N_g_M = n_samples > n_features if not multiple_alphas else all([n_samples[i]>n_features for i in range(N_alphas)])
            # NOTE(review): both the "svd" and "naive" branches index the
            # per-dataset lists (Vh[i], XT_y, ...) that exist only when
            # multiple_alphas is True; a single dense (X, y) pair therefore
            # raises NameError, and the lstsq fallback below is unreachable
            # because __init__ asserts kind in {"svd", "naive"} -- TODO confirm.
            if self.kind == "svd":
                if N_g_M:
                    #coef_ = [np.dot(Vh[i].T,
                    #                Vh[i] / (alpha_[i]*eigen_vals_[i] +
                    #                lambda_ )[:, np.newaxis]) for i in range(N_alphas)]
                    coef_ = [np.dot(Vh[i].T, eigen_vals_[i] * Vh[i]) \
                             for i in range(N_alphas)]
                    coef_ = np.linalg.inv(sum(coef_))
                    XT_y_ = sum(XT_y)
                    coef_ = np.dot(coef_, XT_y_)
                    self._residues = [y[i]-X[i].dot(coef_) for i in range(N_alphas)]
                    self.rank_ = [np.linalg.matrix_rank(X[i]) for i in range(N_alphas)]
                    # NOTE(review): stored as 'singular' here but 'singular_' in
                    # the lstsq path -- inconsistent attribute naming.
                    self.singular = S
                else:
                    raise NotImplementedError
            elif self.kind == "naive":
                coef_ = [np.dot(X[i].T, X[i]) \
                         for i in range(N_alphas)]
                coef_ = np.linalg.inv(sum(coef_))
                XT_y_ = sum(XT_y)
                coef_ = np.dot(coef_, XT_y_)
                self._residues = [y[i]-X[i].dot(coef_) for i in range(N_alphas)]
                self.rank_ = [np.linalg.matrix_rank(X[i]) for i in range(N_alphas)]
                self.singular = S
            else:
                coef_, self._residues, self.rank_, self.singular_ = \
                    linalg.lstsq(X, y)
                coef_ = coef_.T
            self.coef_ = coef_
        if not isinstance(X,list) and y.ndim == 1:
            self.coef_ = np.ravel(self.coef_)
        self._set_intercept(X_offset_, y_offset_, X_scale_)
        return self
    def _set_intercept(self, X_offset, y_offset, X_scale):
        """Set the intercept_
        """
        if self.multiple_alphas:
            if self.fit_intercept:
                # Multi-dataset fit: coefficients stay on the raw scale and the
                # intercepts are fixed at zero.
                self.coef_ = self.coef_ #/ X_scale
                self.intercept_ = [0 for v in range(self.N_alphas)]
            else:
                self.intercept_ = 0.
        else:
            if self.fit_intercept:
                self.coef_ = self.coef_ / X_scale
                self.intercept_ = y_offset - np.dot(X_offset, self.coef_.T)
            else:
                self.intercept_ = 0.
    def _decision_function(self, X):
        # Linear predictor; in multi-dataset mode X is a list of arrays and a
        # list of predictions is returned (note: no intercept added there).
        utils.validation.check_is_fitted(self, "coef_")
        if self.multiple_alphas:
            N = self.N_alphas
            X = [utils.check_array(X[i], accept_sparse=['csr', 'csc', 'coo']) for i in range(N)]
            return [utils.extmath.safe_sparse_dot(X[i], self.coef_.T,
                dense_output=True) for i in range(N)]
        else:
            X = utils.check_array(X, accept_sparse=['csr', 'csc', 'coo'])
            return utils.extmath.safe_sparse_dot(X, self.coef_.T,
                dense_output=True) + self.intercept_
    def predict(self, X, return_std=False):
        """Predict using the linear model.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = (n_samples, n_features)
            Samples.
        return_std : boolean, optional
            Accepted for interface compatibility; this OLS implementation
            ignores it and always returns only the mean prediction.
        Returns
        -------
        y_mean : array, shape = (n_samples,)
            Mean of predictive distribution of query points.
        """
        if self.multiple_alphas:
            N = self.N_alphas
            assert (isinstance(X,list) and len(X)==N) and all([isinstance(_x,np.ndarray) for _x in X]),\
                "'alpha_' is a list, hence X needs to be a list of the same length containing numpy arrays."
        return self._decision_function(X)
def distribution_wrapper(dis, size=None, single=True):
    """Wrap a scipy.stats frozen distribution as a sampling callable.

    Parameters
    ----------
    dis : object exposing ``rvs``
        Source of random variates (e.g. a scipy.stats frozen distribution).
    size : int, optional
        How many samples to draw when ``single`` is False.
    single : boolean
        If True, the returned callable yields one scalar draw per call.
        Otherwise it draws ``size`` samples, or — when a design matrix ``X``
        is passed to the callable instead — one sample per column of ``X``.
    """
    def draw(X=None):
        # Scalar mode: always exactly one draw.
        if single:
            return dis.rvs(size=1)[0]
        # Vector mode: an explicit integer size wins, else infer the count
        # from the number of columns of the design matrix.
        if isinstance(size, int):
            return dis.rvs(size=size)
        if isinstance(X, np.ndarray):
            return dis.rvs(size=X.shape[1])
        raise ValueError("size is not properly specified")
    return draw
def repeated_regression(x,base_trafo,model_type,model_kwargs,t=None,tfun=None,
        epsilon=None,Nruns=100,return_coefs=False,return_models=False,base_trafo_1=None):
    """Fit the same model ``Nruns`` times and collect per-run wall times.

    Targets are either given directly via ``t`` or generated once as
    ``tfun(x) + epsilon.rvs(...)`` (requires both ``tfun`` and ``epsilon``).
    ``base_trafo`` maps the raw inputs to the design matrix; when
    ``base_trafo_1`` is also callable its design matrix is stacked below
    the first one.

    Parameters
    ----------
    x : np.ndarray
        Raw 1-D input values.
    base_trafo : callable
        Feature transform, e.g. an sklearn ``fit_transform`` method.
    model_type : class
        Regression class instantiated as ``model_type(**model_kwargs)``.
    model_kwargs : dict
        Keyword arguments for ``model_type``.

    Returns
    -------
    ``runtimes`` alone, or a tuple additionally containing the stacked
    weight vectors (``return_coefs``) and/or the fitted models
    (``return_models``).
    """
    design = base_trafo(x.reshape((-1,1)))
    if callable(base_trafo_1):
        design = np.vstack((design, base_trafo_1(x.reshape((-1,1)))))
    assert not t is None or not (tfun is None and epsilon is None), "Either 't' has to be given or 'tfun' and 'epsilon'!"
    if t is None:
        # Regenerate noisy targets once for this batch of runs.
        t = tfun(x) + epsilon.rvs(size=x.shape[0])
    runtimes = np.zeros(Nruns)
    collected_coefs = []
    collected_models = []
    for run in range(Nruns):
        started = time.time()
        model = model_type(**model_kwargs)
        model.fit(design, t)
        runtimes[run] = time.time() - started
        if return_coefs:
            collected_coefs.append(model.get_full_weights_vector())
        if return_models:
            collected_models.append(model)
    result = [runtimes]
    if return_coefs:
        result.append(np.array(collected_coefs))
    if return_models:
        result.append(collected_models)
    return tuple(result) if len(result) > 1 else runtimes
def print_run_stats(base_trafo, x, runtimes, coefs, Nruns, show_coefs=True):
    """Print a short report of repeated-regression timings and weights.

    ``runtimes`` is folded into mean +- 2*std; with ``show_coefs`` each
    weight column of ``coefs`` (shape: runs x weights) is summarised the
    same way.
    """
    print("\n================================================")
    header = "X = {} & Nruns = {}:".format(base_trafo(x.reshape((-1, 1))).shape, Nruns)
    print(header)
    print("-" * len(header))
    print("\ntime: estimate = {:.4f}s, 2*std = {:.4f}s".format(runtimes.mean(), 2 * np.std(runtimes, ddof=1)))
    if not show_coefs:
        return
    print("\ncoefs (estimate +- 2*std):")
    for col in range(coefs.shape[1]):
        column = coefs[:, col]
        print("    {}: {:.4f} +- {:.4f}".format(col, column.mean(axis=0),
                                                2 * np.std(column, axis=0, ddof=1)))
def plot_summary(models,noise,x,t,X,coefs,base_trafo,X_1=None):
    """Plot a four-panel summary of an ensemble of fitted RVM models plus a
    second figure with the per-model MSE curves.

    Panels: (1) mean prediction with 2-sigma band vs targets, (2) weight
    estimates with 2-sigma error bars, (3) model noise precision vs the true
    noise histogram, (4) empirical prediction-error distribution vs the true
    noise.  Calls ``plt.show()`` twice (blocking, side effects only).

    NOTE(review): the ``normed=`` histogram argument was removed in
    Matplotlib 3.x (use ``density=``), and ``"95\%"``-style labels contain
    invalid escape sequences -- TODO modernize.
    """
    N = X.shape[0]
    xlim = (x.min(),x.max())
    # Ensemble statistics of the predictions on the primary design matrix.
    ys = np.array([m.predict(X) for m in models])
    y = ys.mean(axis=0)
    yerr = 2*ys.std(axis=0,ddof=1)
    if not X_1 is None:
        ys_1 = np.array([m.predict(X_1) for m in models])
        y_1 = ys_1.mean(axis=0)
        yerr_1 = 2*ys_1.std(axis=0,ddof=1)
    fig = plt.figure(figsize=(5,7))
    # summarizing all predictions
    ax = fig.add_subplot(221)
    ax.fill_between(x,y-yerr,y+yerr,label="95\%",alpha=0.1,color="red")
    ax.plot(x,y,'-',label="estimate")
    if not X_1 is None:
        ax.fill_between(x,y_1-yerr_1,y_1+yerr_1,label="95\% X\_1",alpha=0.1,color="orange")
        ax.plot(x,y_1,'-',label="estimate X\_1")
    ax.plot(x,t[:N],'o',label="true",markerfacecolor="None",ms=2.,alpha=.75)
    if not X_1 is None:
        ax.plot(x,t[N:],'o',label="true X\_1",markerfacecolor="None",ms=2.,alpha=.75)
    ax.set_xlabel("input")
    ax.set_ylabel("output")
    ax.set_title("y vs t")
    plt.legend(loc=0)
    coef_est = coefs.mean(axis=0)
    coef_err = 2*coefs.std(ddof=1,axis=0)
    # summarizing variation of weights
    ax2 = fig.add_subplot(222)
    ax2.errorbar(np.arange(coef_est.shape[0]),y=coef_est,yerr=coef_err,fmt="o",
                 markerfacecolor="None",label="RVM w",capsize=3.)
    ax2.set_xlabel("weight index")
    ax2.set_ylabel("weights")
    ax2.set_title("Variation of weights")
    plt.legend(loc=0)
    # noise precision: model vs true noise
    # beta is a precision; the corresponding normal scale is sqrt(2/beta).
    beta2scale = lambda beta: np.sqrt(2./beta)
    noise2scale = lambda noise,axis: np.sqrt(2.)*np.std(noise,axis=axis,ddof=1)
    betas = np.array([m.beta_ for m in models])
    ax3 = fig.add_subplot(223)
    ax3.hist(noise,label="true noise",normed=True,bins=100,range=xlim)
    # NOTE(review): 'xlim' is rebound here from the data range to the axis
    # limits and reused below as a histogram range -- confirm this is intended.
    xlim = ax3.get_xlim()
    _xp = np.linspace(xlim[0],xlim[1],N)
    for model in models:
        norm_rvm = stats.norm(loc=0,scale=beta2scale(model.beta_))
        ax3.plot(_xp,norm_rvm.pdf(_xp),'-k',linewidth=.1)
    ax3.set_xlabel("noise")
    ax3.set_ylabel("frequency")
    ax3.set_title("Noise precision:\nmodel vs true noise")
    ax3.text(-5,.8,"true scale = {:.3f}".format(noise2scale(noise,0)))
    ax3.text(-5,.3,"est. scale = {:.3f}+-{:.3f}".format(beta2scale(betas).mean(),2.*beta2scale(betas).std(ddof=1)))
    # noise precision: error distribution vs true noise
    ax4 = fig.add_subplot(224)
    bins = 100
    ax4.hist(noise,label="true noise",normed=True,bins=bins,range=xlim)
    _xp = np.linspace(xlim[0],xlim[1],N)
    _X = base_trafo(_xp.reshape((-1,1)))
    pred_noise = []
    for model in models:
        n = model.predict(_X)-t[:N]
        pred_noise.append(n)
        ax4.hist(n,bins=bins,histtype="step",linewidth=.1,normed=True,range=xlim,color="k")
    pred_noise = np.array(pred_noise)
    ax4.set_xlabel("noise")
    ax4.set_ylabel("frequency")
    ax4.set_title("Noise precision:\nerr. dis. vs true noise")
    ax4.text(-5,1.,"true scale = {:.3f}".format(noise2scale(noise,0)))
    ax4.text(-5,.3,"pred scale = {:.3f}+-{:.3f}".format(noise2scale(pred_noise,1).mean(),noise2scale(pred_noise,1).std(ddof=1)*2))
    plt.tight_layout()
    plt.show()
    # Second figure: convergence (MSE per iteration) of every model.
    fig = plt.figure(figsize=(5,5))
    ax = fig.add_subplot(111)
    for m in models:
        ax.plot(m.mse_,'k-',alpha=.5,lw=.1)
    ax.set_xlabel("iteration")
    ax.set_ylabel("MSE")
    ax.set_yscale("log")
    ax.set_title("MSE curves of all regressions")
    plt.tight_layout()
    plt.show()
if __name__ == "__main__":
    # Demo driver: repeated RVM regressions on noisy sin(x)+cos(2x) data for
    # several Fourier-feature sizes M, printing timing/weight statistics.
    # NOTE(review): FourierFeatures and RelevanceVectorMachine are not defined
    # in this view -- presumably defined/imported earlier in this module.
    epsilon = stats.norm(loc=0,scale=0.01)
    tfun = lambda x: np.sin(x) + np.cos(2.*x)
    # Random initializers for the RVM's beta (scalar) and alphas (one per weight).
    init_beta = distribution_wrapper(stats.halfnorm(scale=1),size=1,single=True)
    init_alphas = distribution_wrapper(stats.halfnorm(scale=1),single=False)
    Nruns = 100
    N = 100
    Ms = [3,5,10,20,50]
    # NOTE(review): t_est/t_err are initialized but never filled -- TODO
    # either populate them or drop them.
    t_est, t_err = [], []
    for M in Ms:
        x = np.linspace(0,1,N)
        k = M
        trafo = FourierFeatures(k=k)
        base_trafo = trafo.fit_transform
        model_type = RelevanceVectorMachine
        model_kwargs = dict(n_iter=250,verbose=False,compute_score=True,init_beta=init_beta,
                            init_alphas=init_alphas)
        runtimes, coefs = repeated_regression(x,base_trafo,model_type,t=None,tfun=tfun,epsilon=epsilon,
                                              model_kwargs=model_kwargs,Nruns=Nruns,return_coefs=True)
        print_run_stats(base_trafo,x,runtimes,coefs,Nruns)
|
Hamstard/RVMs
|
linear_model.py
|
Python
|
mit
| 72,466
|
[
"Gaussian"
] |
7401469f91978f58482add6be0e5a71dbfaa9cc89166380a65485baeed16c156
|
import fileinput
import random
def weighted_choice(items):
    """Pick a key from ``items`` (``(key, count)`` pairs) with probability
    proportional to its count.

    Returns None (implicitly) only if ``items`` is malformed; for valid
    input the cumulative walk always terminates at some key.
    """
    total = sum(count for _, count in items)
    target = random.randint(0, total - 1)
    cumulative = 0
    for key, count in items:
        cumulative += count
        if target < cumulative:
            return key
class Node(object):
    """One position in a chess opening tree: children keyed by move string
    plus win/draw/loss tallies for the games passing through this node.

    NOTE(review): this class is Python 2 code (`print` statements,
    `raw_input`, in-place sort of ``dict.items()``); it will not run under
    Python 3 without porting.
    """
    def __init__(self):
        self.children = {}  # move string -> Node
        self.white = 0      # games won by White through this node
        self.black = 0      # games won by Black
        self.draw = 0       # drawn games
        self.total = 0      # all games through this node
    def add_result(self, result):
        # Tally one game result; result must be '1-0', '0-1' or '1/2-1/2'.
        self.total += 1
        if result == '1-0':
            self.white += 1
        elif result == '0-1':
            self.black += 1
        elif result == '1/2-1/2':
            self.draw += 1
        else:
            raise ValueError(result)
    def do_move(self, move):
        # Return the child for `move`, creating it on first use.
        if move not in self.children:
            self.children[move] = Node()
        return self.children[move]
    def output(self, depth=0):
        # Recursively print the tree, most-played moves first, skipping
        # moves seen fewer than twice.
        padding = '  ' * depth
        items = self.children.items()
        items.sort(key=lambda x: x[1].total, reverse=True)
        total = sum(x[1].total for x in items)
        for key, value in items:
            if value.total < 2:
                continue
            pct = 100.0 * value.total / total
            white = 100.0 * value.white / value.total
            draw = 100.0 * value.draw / value.total
            black = 100.0 * value.black / value.total
            print '%s%s W=%.1f%% D=%.1f%% B=%.1f%% [%.1f%% %d]' % (
                padding, key, white, draw, black, pct, value.total)
            value.output(depth + 1)
    def visit(self, wtm):
        # Interactive walk: print move statistics for this node, then prompt
        # for a move (wtm = "white to move") and recurse into it.
        items = self.children.items()
        items.sort(key=lambda x: x[1].total, reverse=True)
        total = sum(x[1].total for x in items)
        for key, value in items:
            pct = 100.0 * value.total / total
            white = 100.0 * value.white / value.total
            draw = 100.0 * value.draw / value.total
            black = 100.0 * value.black / value.total
            print '%8s [%3d%% %3d%% %3d%%] %3d%% %d' % (
                key, white, draw, black, pct, value.total)
        print
        if wtm:
            move = raw_input('WHITE > ')
        else:
            move = raw_input('BLACK > ')
        if move in self.children:
            self.children[move].visit(not wtm)
    def random(self):
        # Print a random popular line: only nodes with >= 10000 games and
        # moves played in at least 20% of them are considered.
        items = self.children.items()
        total = sum(x[1].total for x in items)
        if total < 10000:
            return
        pcts = []
        for key, value in items:
            pct = int(round(100.0 * value.total / total))
            if pct >= 20:
                pcts.append((key, pct))
        if not pcts:
            return
        move = weighted_choice(pcts)
        print move,
        if move in self.children:
            self.children[move].random()
def main():
    """Read PGN-ish game lines from stdin/files and build the opening tree.

    Only lines starting with '1.' are parsed; the result is taken after the
    closing '}' and the move list before the opening '{'.
    """
    results = {
        '1-0': 'W',
        '0-1': 'B',
        '1/2-1/2': 'D'
    }
    root = Node()
    for line in fileinput.input():
        line = line.strip()
        if not line.startswith('1.'):
            continue
        result = line[line.index('}')+1:].strip()
        moves = line[:line.index('{')].strip().split()
        # Drop every third token starting at index 0 -- presumably the move
        # numbers ('1.', '2.', ...) interleaved with the moves; verify format.
        del moves[::3]
        print results[result], ' '.join(moves)
        # NOTE(review): this 'continue' makes the tree-building code below
        # unreachable, so root stays empty and the final totals print zeros.
        # Looks like a debugging leftover -- TODO remove it (and probably the
        # print above) to restore the statistics gathering.
        continue
        node = root
        node.add_result(result)
        for move in moves[:10]:
            node = node.do_move(move)
            node.add_result(result)
    print root.total, root.white, root.draw, root.black
    # while True:
    #     root.visit(True)
    # root.random()
    # print
if __name__ == '__main__':
    main()
|
UIKit0/MisterQueen
|
scripts/parser.py
|
Python
|
mit
| 3,540
|
[
"VisIt"
] |
596b2c5cd923ec3cb697ac25551bf1c88b72ae06b509b1f436f7ec546e6daf56
|
import numpy as np
def makeGaussian(size, fwhm=3, center=None):
    """Return a square 2-D Gaussian kernel.

    Parameters
    ----------
    size : int
        Side length of the square output array.
    fwhm : number, optional
        Full-width-half-maximum; acts as an effective radius.
    center : (x0, y0) pair, optional
        Peak position; defaults to the array centre.
    """
    xs = np.arange(size, dtype=float)
    ys = xs[:, np.newaxis]
    if center is None:
        cx = cy = size // 2
    else:
        cx, cy = center[0], center[1]
    # exp(-4 ln2 r^2 / fwhm^2) gives value 0.5 at r = fwhm/2.
    sq_dist = (xs - cx) ** 2 + (ys - cy) ** 2
    return np.exp(-4 * np.log(2) * sq_dist / fwhm ** 2)
def moveGaussian(size, fwhm, center, timestep):
    """Stack ``timestep`` Gaussian kernels whose peaks follow ``center``.

    ``center`` is indexed as ``center[t, 0], center[t, 1]`` for each frame;
    the result has shape (timestep, size, size).
    """
    frames = np.zeros([timestep, size, size])
    for frame in range(timestep):
        frames[frame, :, :] = makeGaussian(size, fwhm,
                                           (center[frame, 0], center[frame, 1]))
    return frames
|
Josue-Martinez-Moreno/trackeddy
|
trackeddy/utils/gaussian_field_functions.py
|
Python
|
mit
| 691
|
[
"Gaussian"
] |
97960824dc7841060e6bac84d49980d9945c615486ee7b6d0befb4d0aa765143
|
import datetime
import json
import logging
import time
from typing import Tuple, Any, Set
import random
import requests
import yaml
# Configure root logging before the project imports so their loggers inherit it.
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
from src.session import Session
# Instagram web endpoints; entries containing %s are filled in with a media
# or user id before use.
URL = {
    'root': 'https://www.instagram.com/',
    'login': 'https://www.instagram.com/accounts/login/ajax/',
    'logout': 'https://www.instagram.com/accounts/logout/',
    'tag': 'https://www.instagram.com/explore/tags/',
    'photo': 'https://www.instagram.com/p/',
    'like': 'https://www.instagram.com/web/likes/%s/like/',
    'unlike': 'https://www.instagram.com/web/likes/%s/unlike/',
    'comment': 'https://www.instagram.com/web/comments/%s/add/',
    'follow': 'https://www.instagram.com/web/friendships/%s/follow/',
    'unfollow': 'https://www.instagram.com/web/friendships/%s/unfollow/',
}
# Silence urllib3's InsecureRequestWarning: the requests below use verify=False.
requests.packages.urllib3.disable_warnings()
WAIT_SERVER_RESPONSE_TIME = 10 # how long to wait for a server response // 10s a 30s
class InstaBot:
    """Instagram bot that likes photos discovered through hashtag searches.

    The constructor reads a YAML config, logs in, runs the liking loop until
    the (randomised) total-like target is reached, then logs out.
    """

    def __init__(self, config_path: str) -> None:
        """Run the whole bot lifecycle.

        :param config_path: path to a YAML file providing CREDENTIALS
            (USERNAME/PASSWORD), TAGS, TOTAL_LIKES and LIKES_PER_USER.
        """
        start_time = datetime.datetime.now()
        username, password, tags, total_likes, likes_per_user = self.get_credentials(config_path)
        logging.info('InstaBot v0.1 started at %s:' % (start_time.strftime("%d.%m.%Y %H:%M")))
        # Randomise the like target a little so runs are not identical:
        # gaussian distribution mu=0, sig=100, clamped to [0, total_likes/2].
        self.total_likes = total_likes + self.iround(min(total_likes / 2, max(0, random.gauss(0, 100))))
        logging.info('InstaBot v0.1 will like %s photos in total' % self.total_likes)
        self.likes_per_user = likes_per_user  # type: int
        # Photo ids already liked this run; prevents double-liking.
        self.liked_photos: Set = set()
        self.session = Session(username, password, logging)
        self.run(tags)
        self.session.logout()
        end_time = datetime.datetime.now()
        logging.info('InstaBot v0.1 stopped at %s:' % (end_time.strftime("%d.%m.%Y %H:%M")))
        logging.info('InstaBot v0.1 took ' + str(end_time - start_time) + ' in total')

    @staticmethod
    def get_credentials(config: str) -> Tuple:
        """Load (username, password, tags, total_likes, likes_per_user) from a YAML file."""
        config_data = yaml.safe_load(open(config, "r"))
        return (
            config_data['CREDENTIALS']['USERNAME'],
            config_data['CREDENTIALS']['PASSWORD'],
            config_data['TAGS'],
            config_data['TOTAL_LIKES'],
            config_data['LIKES_PER_USER']
        )

    @staticmethod
    def get_html(url: str) -> Any:
        """Fetch *url* and return its HTML text, sleeping 2-5s afterwards to throttle."""
        time_between_requests = random.randint(2, 5)
        logging.info('Fetching ' + url)
        response = requests.get(url, verify=False, timeout=WAIT_SERVER_RESPONSE_TIME)
        html = response.text
        time.sleep(time_between_requests)
        return html

    @staticmethod
    def get_data_from_html(html) -> Any:
        """Extract and parse the ``window._sharedData`` JSON blob from a page.

        Returns the decoded dict, or None if the blob cannot be parsed.
        """
        try:
            finder_start = '<script type="text/javascript">window._sharedData = '
            finder_end = ';</script>'
            data_start = html.find(finder_start)
            data_end = html.find(finder_end, data_start + 1)
            json_str = html[data_start + len(finder_start):data_end]
            data = json.loads(json_str)
            return data
        except Exception as e:
            logging.info('Error parsing json string: ' + str(e))

    def get_recent_tag_photos(self, tag: str) -> Any:
        """Return up to 10 candidate photo codes for *tag*.

        Recent posts are preferred; top posts are used to fill the list.
        Photos must fall inside the like/comment ranges and not have been
        liked already this run.
        """
        url = URL['tag'] + tag
        photos = list()
        # Engagement filters: avoid dead posts and viral ones.
        min_likes = 5
        max_likes = 500
        min_comments = 1
        max_comments = 50
        try:
            html = self.get_html(url)
            data = self.get_data_from_html(html)
            # get data from recent posts only
            photos_json = list(data['entry_data']['TagPage'][0]['tag']['media']['nodes'])
            for photo_json in photos_json:
                photo_id = photo_json['code']
                likes = photo_json['likes']['count']
                comments = photo_json['comments']['count']
                likes_in_range = (min_likes <= likes <= max_likes)
                comments_in_range = (min_comments <= comments <= max_comments)
                if photo_id not in self.liked_photos and likes_in_range and comments_in_range:
                    photos.append(photo_id)
                if len(photos) == 10:
                    break
            # fill up rest of photos list with top posts, until list has 10
            # potential people to be liked
            if len(photos) < 10:
                photos_json = list(data['entry_data']['TagPage'][0]['tag']['top_posts']['nodes'])
                for photo_json in photos_json:
                    photo_id = photo_json['code']
                    likes = photo_json['likes']['count']
                    comments = photo_json['comments']['count']
                    likes_in_range = (min_likes <= likes <= max_likes)
                    comments_in_range = (min_comments <= comments <= max_comments)
                    if photo_id not in self.liked_photos and likes_in_range and comments_in_range:
                        photos.append(photo_id)
                    if len(photos) == 10:
                        break
        except (KeyError, IndexError, TypeError) as e:
            logging.info('Error parsing url: ' + url + ' - ' + str(e))
            time.sleep(10)
        return photos

    def get_photo_owner(self, photo_id: str) -> Any:
        """Return the username that owns *photo_id*, or None on parse failure."""
        try:
            photo_url = URL['photo'] + photo_id
            html = self.get_html(photo_url)
            data = self.get_data_from_html(html)
            owner_name = data['entry_data']['PostPage'][0]['media']['owner']['username']
            return owner_name
        except (KeyError, IndexError, TypeError) as e:
            logging.info('Error parsing url:' + str(e))
            time.sleep(10)
            return None

    def get_recent_tag_owners(self, tag):
        """Return the owner usernames of the candidate photos for *tag*."""
        photos_ids = self.get_recent_tag_photos(tag)
        owners_names = list()
        for photo_id in photos_ids:
            owner_name = self.get_photo_owner(photo_id)
            owners_names.append(owner_name)
        return owners_names

    def get_owner_recent_photos(self, owner_name: str):
        """Return up to ``likes_per_user`` photo ids from *owner_name*'s profile,
        but only if the account meets the follower/following requirements."""
        photos = list()
        # Profile filters: skip tiny, huge, or spammy-looking accounts.
        min_followed_by = 300
        max_followed_by = 50000
        min_follows = 50
        max_follows = 7500  # instagram limit
        min_follow_ratio = 0.01
        max_follow_ratio = 8
        owner_url = URL['root'] + owner_name
        html = self.get_html(owner_url)
        try:
            data = self.get_data_from_html(html)
            follows = data['entry_data']['ProfilePage'][0]['user']['follows']['count']
            followed_by = data['entry_data']['ProfilePage'][0]['user']['followed_by']['count']
            if follows == 0:
                # Avoid division by zero when computing the ratio.
                follows = 1
            follow_ratio = followed_by / follows
            follows_in_range = (min_follows <= follows <= max_follows)
            if (follows_in_range and
                    followed_by >= min_followed_by and followed_by <= max_followed_by and
                    follow_ratio >= min_follow_ratio and follow_ratio <= max_follow_ratio):
                logging.info('Fetching user [' + owner_name + '] photo urls. (Follows: ' + str(
                    follows) + ', Followed By: ' + str(followed_by) + ', Ratio: ' + str(follow_ratio) + ')')
                photos_json = data['entry_data']['ProfilePage'][0]['user']['media']['nodes']
                log_str = 'Photo codes: '
                for i, photo_json in enumerate(photos_json):
                    if i == self.likes_per_user:
                        break
                    photo_id = photo_json['id']
                    photo_code = photo_json['code']
                    if photo_id not in self.liked_photos:
                        photos.append(photo_id)
                        log_str += photo_code + ' '
                logging.info(log_str)
                logging.info('Photo IDs: ' + str(photos))
            else:
                logging.info('User [' + owner_name + '] doesn\'t meet requirements. (Follows: ' + str(
                    follows) + ', Followed By: ' + str(followed_by) + ')')
        except (KeyError, IndexError, TypeError) as e:
            logging.info('Error parsing url: ' + owner_url + ' - ' + str(e))
            time.sleep(10)
        return photos

    def get_photos_to_like_from_tag(self, tag):
        """Collect likeable photo ids for one tag (via its photo owners)."""
        photos_to_like = list()
        recent_photos = self.get_recent_tag_photos(tag)
        for recent_photo in recent_photos:
            owner_name = self.get_photo_owner(recent_photo)
            if owner_name is not None:
                photos_to_like += self.get_owner_recent_photos(owner_name)
        return photos_to_like

    def get_photos_to_like(self, tags):
        """Collect likeable photo ids across all configured tags."""
        photos_to_like = list()
        for tag in tags:
            logging.info('Finding photos with tag: #' + tag)
            photos_to_like += self.get_photos_to_like_from_tag(tag)
            logging.info('There are ' + str(len(photos_to_like)) + ' photos in the like queue')
        return photos_to_like

    def like(self, photo_id):
        """POST a like for *photo_id*.

        Returns the HTTP status code, or 0 when not logged in or on error.
        """
        # BUG FIX: status was previously unbound (UnboundLocalError) when
        # the session was not logged in.
        status = 0
        if self.session.login_status:
            url = (URL['like'] % photo_id)
            try:
                status = self.session.post(url)
            except Exception as e:
                status = 0
                # BUG FIX: concatenating the exception object itself raised
                # TypeError inside the handler; convert it to str first.
                logging.info("Like failed: " + str(e) + " " + url)
        return status

    @staticmethod
    def iround(x):
        """Round *x* to the nearest integer (half away from zero for positives)."""
        return int(round(x) - .5) + (x > 0)

    def run(self, tags):
        """Main loop: keep gathering and liking photos until the target is hit."""
        likes = 0
        error_400 = 0
        error_400_to_ban = 3          # consecutive 400s before assuming a ban
        ban_sleep_time = 2 * 60 * 60  # back off for two hours when banned
        while True:
            like_queue = self.get_photos_to_like(tags)
            if not self.session.login_status:
                self.session.login()
            while len(like_queue) > 0:
                logging.info('There are ' + str(len(like_queue)) + ' photos in the like queue')
                # gaussian distribution: mu = 10, sig = 2, round to nearest int
                likes_per_cycle = self.iround(min(20, max(0, random.gauss(10, 2))))
                like_next, like_queue = like_queue[:likes_per_cycle], like_queue[likes_per_cycle:]
                for photo_id in like_next:
                    status = self.like(photo_id)
                    if status == 200:
                        self.liked_photos.add(photo_id)
                        likes += 1
                        if likes > self.total_likes:
                            logging.info('Success! Reached total number of likes. InstaBot is shutting down...')
                            return
                        error_400 = 0
                        logging.info('Total likes: ' + str(likes))
                    elif status == 400:
                        if error_400 < error_400_to_ban:
                            error_400 += 1
                            logging.info('Error 400 - # ' + str(error_400))
                        else:
                            logging.info('Error 400 - # ' + str(
                                error_400) + '- You might have been banned. InstaBot will sleep for 2 hours...')
                            time.sleep(ban_sleep_time)
                    # sleep after each like attempt (1s to 3s)
                    wait = random.randint(1, 3)
                    time.sleep(wait)
                # sleep after a liking cycle (10s to 20s)
                wait = random.randint(10, 20)
                logging.info('Finished liking cycle. Sleeping for ' + str(wait) + ' seconds...')
                time.sleep(wait)
            # sleep after processing all tags (1 to 5 minutes)
            wait = random.randint(1, 5) * 60
            logging.info('Finished liking tags. Sleeping for ' + str(int(wait / 60)) + ' minutes...')
            time.sleep(wait)
|
vinitkumar/instabote
|
src/instabot.py
|
Python
|
mit
| 11,863
|
[
"Gaussian"
] |
2c94396da0aec1aaef25d00fc4c821b58fc86c3c9d7c0a8b05d101ff6c68d001
|
# Nemubot is a smart and modulable IM bot.
# Copyright (C) 2012-2015 Mercier Pierre-Olivier
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class AbstractVisitor:
    """Base class implementing the visitor double-dispatch pattern.

    Subclasses provide ``visit_<ClassName>`` methods; :meth:`visit` routes
    each object to the handler matching its concrete class name.
    """

    def visit(self, obj):
        """Dispatch *obj* to ``self.visit_<type(obj).__name__>`` and return its result."""
        handler = getattr(self, f"visit_{obj.__class__.__name__}")
        return handler(obj)
|
nbr23/nemubot
|
nemubot/message/visitor.py
|
Python
|
agpl-3.0
| 958
|
[
"VisIt"
] |
1a5170dbd417c31137f51f85334e9466092113be426b4d96d7e4edbcd0910930
|
# Copyright (C) 2004-2008 Paul Cochrane
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
Class and functions associated with a pyvisi OffsetPlot objects
"""
# generic imports
from pyvisi.renderers.vtk.common import debugMsg
import copy
# module specific imports
from pyvisi.renderers.vtk.plot import Plot
__revision__ = '$Revision$'
class OffsetPlot(Plot):
    """
    Offset plot

    Renders several y-series against a common t-axis, with each curve offset
    vertically (or placed at explicit x positions) so they do not overlap.
    NOTE(review): this is Python 2 code (``raise E, msg`` / ``has_key``); it
    emits vtk commands as strings into the renderer's command stack.
    """
    def __init__(self, scene):
        """
        Initialisation of the OffsetPlot class

        @param scene: The Scene to render the plot in
        @type scene: Scene object
        """
        debugMsg("Called OffsetPlot.__init__()")
        Plot.__init__(self, scene)
        self.renderer = scene.renderer
        self.renderer.addToInitStack("# OffsetPlot.__init__()")
        self.renderer.addToInitStack("_plot = vtk.vtkXYPlotActor()")
        # axis/title text; rendered later in render() if set
        self.title = None
        self.xlabel = None
        self.ylabel = None
        # the extra separation between curves (user set)
        self.sep = None
        # the default values for shared info
        self.fname = None
        self.format = None
        self.scalars = None
        # add the plot to the scene
        scene.add(self)

    def setData(self, *dataList, **options):
        """
        Set data to the plot

        @param dataList: List of data to set to the plot
        @type dataList: tuple
        @param options: Dictionary of extra options
        @type options: dict
        @keyword fname: Filename of the input vtk file
        @type fname: string
        @keyword format: Format of the input vtk file ('vtk' or 'vtk-xml')
        @type format: string
        @keyword scalars: the name of the scalar data in the vtk file to use
        @type scalars: string
        """
        debugMsg("Called setData() in OffsetPlot()")
        self.renderer.runString("# OffsetPlot.setData()")
        # process the options, if any
        ## fname
        if options.has_key('fname'):
            fname = options['fname']
        else:
            fname = None
        ## format
        if options.has_key('format'):
            format = options['format']
        else:
            format = None
        ## scalars
        if options.has_key('scalars'):
            scalars = options['scalars']
        else:
            scalars = None
        # we want to pass this info around
        self.fname = fname
        self.format = format
        self.scalars = scalars
        # do some sanity checking on the inputs
        if len(dataList) == 0 and fname is None:
            raise ValueError, \
                "You must specify a data list or an input filename"
        if len(dataList) != 0 and fname is not None:
            raise ValueError, \
                "You cannot specify a data list as well as an input file"
        if fname is not None and scalars is None:
            debugMsg("No scalars specified; using default in vtk")
        if fname is not None and format is None:
            raise ValueError, "You must specify an input file format"
        if fname is None and format is not None:
            raise ValueError, "Format specified, but no input filename"
        # do some sanity checking on the data
        if len(dataList) > 3 or len(dataList) < 1:
            raise ValueError, \
                "Must have either one, two or three input arrays"
        # the data is y values located at different x positions, changing
        # over time, so the normal x-direction is t, the normal y direction
        # is both x and y; y basically being offset by the x values
        # therefore will refer to tData, xData and yData
        # compare the shapes of the input vectors.
        # assume that the first one is the t data, and that the first
        # dimension of the second one is the same length as the t data
        # length
        if len(dataList) == 1:
            yData = dataList[0]
        elif len(dataList) == 2:
            tData = dataList[0]
            yData = dataList[1]
            if tData.shape[0] != yData.shape[0]:
                raise ValueError, \
                    "Input arrays don't have the correct shape"
        elif len(dataList) == 3:
            tData = dataList[0]
            xData = dataList[1]
            yData = dataList[2]
            if tData.shape[0] != yData.shape[0]:
                raise ValueError, \
                    "First dim of third arg doesn't agree with first arg"
            if len(yData.shape) == 1:
                if xData.shape[0] != 1:
                    raise ValueError, \
                        "Second arg must be scalar when third arg is vector"
            elif len(yData.shape) == 2:
                if xData.shape[0] != yData.shape[1]:
                    raise ValueError, \
                        "Second dim of 3rd arg doesn't agree with 2nd arg"
        # if only have one array input, then autogenerate tData
        if len(dataList) == 1:
            tData = range(1, len(dataList[0])+1)
            if len(tData) != len(dataList[0]):
                errorString = "Autogenerated xData array length not "
                errorString += "equal to input array length"
                raise ValueError, errorString
            ## pass around the t data
            self.renderer.renderDict['_t'] = copy.deepcopy(tData)
        # if have two arrays to plot, the first one is the t data
        elif len(dataList) == 2:
            tData = dataList[0]
            ## pass around the t data
            self.renderer.renderDict['_t'] = copy.deepcopy(tData)
            # don't need the first element of the dataList, so get rid of it
            dataList = dataList[1:]
        elif len(dataList) == 3:
            ## pass around the t data
            self.renderer.renderDict['_t'] = copy.deepcopy(tData)
            ## pass around the x data
            self.renderer.renderDict['_x'] = copy.deepcopy(xData)
        else:
            # shouldn't get to here, but raise an error anyway
            raise ValueError, "Incorrect number of arguments"
        # set up the vtkDataArray object for the t data
        self.renderer.runString(
            "_tData = vtk.vtkDataArray.CreateDataArray(vtk.VTK_FLOAT)")
        self.renderer.runString(
            "_tData.SetNumberOfTuples(len(_t))")
        ## now to handle the y data
        if len(yData.shape) == 1:
            dataLen = 1
        elif len(yData.shape) == 2:
            dataLen = yData.shape[1]
        else:
            raise ValueError, \
                "The last setData argument has the incorrect shape"
        # share around the y data (one renderDict entry per column)
        for i in range(dataLen):
            yDataVar = "_y%d" % i
            if len(yData.shape) == 1:
                self.renderer.renderDict[yDataVar] = copy.deepcopy(yData)
            else:
                self.renderer.renderDict[yDataVar] = \
                    copy.deepcopy(yData[:, i])
            # check that the data here is a 1-D array
            if len(self.renderer.renderDict[yDataVar].shape) != 1:
                raise ValueError, "Can only handle 1D arrays at present"
        if fname is not None:
            # now handle the case when we have a file as input
            raise NotImplementedError, "Sorry, can't handle file input yet"
        # concatenate the data (used only to find global min/max below)
        evalString = "_yAll = concatenate(["
        for i in range(dataLen-1):
            evalString += "_y%d," % i
        evalString += "_y%d])" % int(dataLen-1)
        self.renderer.runString(evalString)
        # grab the min and max values
        self.renderer.runString("_yMax = max(_yAll)")
        self.renderer.runString("_yMin = min(_yAll)")
        # keep the data apart a bit
        if self.sep is None:
            self.renderer.runString("_const = 0.1*(_yMax - _yMin)")
        else:
            evalString = "_const = %f" % self.sep
            self.renderer.runString(evalString)
        # behave differently with the shift if we have xData as to not
        if len(dataList) == 3:
            # this is for when we have xData: scale each curve so it fits
            # within half the minimum x spacing, centred on its x position
            self.renderer.runString("_yMaxAbs = max(abs(_yAll))")
            # calculate the minimum delta x
            x1 = xData[:-1]
            x2 = xData[1:]
            minDeltax = min(x2 - x1)
            evalString = "_scale = %f/(2.0*_yMaxAbs)" % minDeltax
            self.renderer.runString(evalString)
            for i in range(dataLen):
                evalString = "_y%d = _scale*_y%d + _x[%d]" % (i, i, i)
                self.renderer.runString(evalString)
        else:
            # shift the data up: curve i is offset by i full data ranges
            self.renderer.runString("_shift = _yMax - _yMin + _const")
            for i in range(dataLen):
                evalString = "_y%d = _y%d + %f*_shift" % (i, i, i)
                self.renderer.runString(evalString)
        # set up the vtkDataArray objects
        for i in range(dataLen):
            evalString = \
                "_y%dData = vtk.vtkDataArray.CreateDataArray(vtk.VTK_FLOAT)\n" % i
            evalString += "_y%dData.SetNumberOfTuples(len(_y%d))" % (i, i)
            self.renderer.runString(evalString)
        ## t data
        # put the data into the data arrays
        self.renderer.runString("for _i in range(len(_t)):")
        # need to be careful here to remember to indent the code properly
        evalString = "    _tData.SetTuple1(_i,_t[_i])"
        self.renderer.runString(evalString)
        ## y data
        # put the data into the data arrays
        self.renderer.runString("for _i in range(len(_t)):")
        # need to be careful here to remember to indent the code properly
        for i in range(dataLen):
            evalString = "    _y%dData.SetTuple1(_i,_y%d[_i])" % (i, i)
            self.renderer.runString(evalString)
        for i in range(dataLen):
            # create the field data object
            evalString = "_fieldData%d = vtk.vtkFieldData()\n" % i
            evalString += "_fieldData%d.AllocateArrays(2)\n" % i
            evalString += "_fieldData%d.AddArray(_tData)\n" % i
            evalString += "_fieldData%d.AddArray(_y%dData)" % (i, i)
            self.renderer.runString(evalString)
        for i in range(dataLen):
            # now put the field data into a data object
            evalString = "_dataObject%d = vtk.vtkDataObject()\n" % i
            evalString += "_dataObject%d.SetFieldData(_fieldData%d)\n" % (i, i)
            # the actor should be set up, so add the data object to the actor
            evalString += "_plot.AddDataObjectInput(_dataObject%d)" % i
            self.renderer.runString(evalString)
        # tell the actor to use the x values for the x values (rather than
        # the index)
        self.renderer.runString("_plot.SetXValuesToValue()")
        # set which parts of the data object are to be used for which axis
        self.renderer.runString("_plot.SetDataObjectXComponent(0,0)")
        for i in range(dataLen):
            evalString = "_plot.SetDataObjectYComponent(%d,1)" % i
            self.renderer.runString(evalString)
        # note: am ignoring zlabels as vtk xyPlot doesn't support that
        # dimension for line plots (I'll have to do something a lot more
        # funky if I want that kind of functionality)
        return

    def render(self):
        """
        Does OffsetPlot object specific (pre)rendering stuff
        """
        debugMsg("Called OffsetPlot.render()")
        self.renderer.runString("# OffsetPlot.render()")
        self.renderer.runString("_renderer.AddActor2D(_plot)")
        # set the title if set
        if self.title is not None:
            evalString = "_plot.SetTitle(\'%s\')" % self.title
            self.renderer.runString(evalString)
        # if an xlabel is set, add it
        if self.xlabel is not None:
            evalString = "_plot.SetXTitle(\'%s\')" % self.xlabel
            self.renderer.runString(evalString)
        # if an ylabel is set, add it
        if self.ylabel is not None:
            evalString = "_plot.SetYTitle(\'%s\')" % self.ylabel
            self.renderer.runString(evalString)
        return
# vim: expandtab shiftwidth=4:
|
paultcochrane/pyvisi
|
pyvisi/renderers/vtk/offset_plot.py
|
Python
|
gpl-2.0
| 12,830
|
[
"VTK"
] |
77d625e9d4f858355dc61e83a12ed73cf1fa84d01b85391c1007c23c02865feb
|
from os import path
from collections import OrderedDict
from IPython.display import display, clear_output
from matplotlib import pyplot as plt
from matplotlib import transforms
from matplotlib import rcParams
from numpy import linspace
import ipywidgets as widgets
from pysces import ModelMap
from pysces import output_dir as psc_out_dir
import pysces
import gzip
import cPickle as pickle
from ..misc import *
from ...latextools import LatexExpr
from ... import modeltools
def save_data2d(data_2dobj, file_name):
    """
    Save a Data2D object to *file_name* as a gzipped pickle.

    The live model object is not reliably picklable, so it is temporarily
    replaced with its model-file name for the dump and restored afterwards.

    Parameters
    ----------
    data_2dobj : Data2D
        The object to serialise; its ``mod`` attribute must expose
        ``ModelFile``.
    file_name : str
        Destination path for the gzipped pickle.
    """
    mod = data_2dobj.mod
    data_2dobj.mod = data_2dobj.mod.ModelFile
    try:
        with gzip.open(file_name, 'wb') as f:
            pickle.dump(data_2dobj, f)
    finally:
        # BUG FIX: previously a failed dump left the object permanently
        # mutated (mod replaced by a string); always restore the live model.
        data_2dobj.mod = mod
def load_data2d(file_name, mod=None, ltxe=None):
    """
    Load a Data2D object from a gzipped pickle file.

    A live model may be supplied (useful when several saved datasets
    reference the same model); otherwise the pickled model-file name is
    re-instantiated through ``pysces.model``.  For the same reason an
    existing LatexExpr object can be passed in to be shared.

    Parameters
    ----------
    file_name : str
        Path to the gzipped pickle produced by ``save_data2d``.
    mod : PysMod, optional
        Model to attach to the loaded object instead of rebuilding one.
    ltxe : LatexExpr, optional
        Shared LatexExpr instance to attach to the loaded object.

    Returns
    -------
    Data2D
        The reconstructed object.
    """
    with gzip.open(file_name, 'rb') as handle:
        loaded = pickle.load(handle)
    if mod:
        loaded.mod = mod
    else:
        # The pickle stores only the model-file name; rebuild the model.
        loaded.mod = pysces.model(loaded.mod)
    if ltxe:
        del loaded._ltxe
        loaded._ltxe = ltxe
    return loaded
# matplotlib 1.5 removed the set_color_cycle functionality; from that
# version onward color cycles must be built with the ``cycler`` package.
from matplotlib import __version__ as mpl_version
use_cycler = False
from distutils.version import LooseVersion
if LooseVersion(mpl_version) >= LooseVersion('1.5.0'):
    from cycler import cycler
    use_cycler = True

# Silenced wrapper around PySCeS' labelled-array CSV exporter.
exportLAWH = silence_print(pysces.write.exportLabelledArrayWithHeader)
# NOTE(review): the string below is a stray no-op expression statement,
# not a docstring for anything — consider removing it.
"""
This whole module is fd in the a
"""
# Public API of this module.
__all__ = ['LineData',
           'ScanFig',
           'Data2D',
           'load_data2d',
           'save_data2d',
           'SimpleData2D']
def _add_legend_viewlim(ax, **kwargs):
    """ Reset the legend in ax to only display lines that are
    currently visible in plot area.

    Returns the new legend when any visible line overlaps the view limits;
    otherwise hides the existing (or a freshly created) legend and returns
    None implicitly.
    """
    # THIS FUNCTION COMES FROM
    # http://matplotlib.1069221.n5.nabble.com/
    # Re-Limit-legend-to-visible-data-td18335.html
    label_objs = []
    label_texts = []
    # print "viewLim:", ax.viewLim
    for line in ax.lines:
        line_label = line.get_label()
        # Skip invisible lines and matplotlib's convention of "_"-prefixed
        # labels (which are excluded from legends).
        cond = line.get_visible() and \
            line_label and not line_label.startswith("_")
        if cond:
            # Bounding box of the line's data, tested against the view.
            line_bbox = transforms.Bbox.unit()
            line_bbox.update_from_data_xy(line.get_xydata())
            if ax.viewLim.overlaps(line_bbox):
                # print line_label, line_bbox
                label_objs.append(line)
                label_texts.append(line_label)
    if label_objs:
        return ax.legend(label_objs, label_texts, **kwargs)
    elif ax.get_legend():
        # Nothing to show: hide the legend that already exists.
        ax.get_legend().set_visible(False)
    else:
        # No legend yet: create one just so it can be hidden.
        ax.legend().set_visible(False)
class LineData(object):
    """
    An object that contains data and metadata used by ``ScanFig`` to draw a
    ``matplotlib`` line with interactivity.

    This object is used to initialise a ``ScanFig`` object together with a
    ``Data2D`` object. Once a ``ScanFig`` instance is initialised, the
    ``LineData`` objects are saved in a list ``_raw_line_data``. Changing
    any values there will have no effect on the output of the ``ScanFig``
    instance. Actual x,y data, ``matplotlib`` line metadata, and ``ScanFig``
    category metadata is stored.

    Parameters
    ----------
    name : str
        The name of the line. Will be used as a label if none is specified.
    x_data : array_like
        The x data.
    y_data : array_like
        The y data.
    categories : list, optional
        A list of categories that a line falls into. This will be used by
        ScanFig to draw buttons that enable/disable the line.
    properties : dict, optional
        A dictionary of properties of the line to be drawn. This dictionary
        will be used by the generic ``set()`` function of
        ``matplotlib.Lines.Line2D`` to set the properties of the line.

    See Also
    --------
    ScanFig
    Data2D
    RateChar
    """

    def __init__(self, name, x_data, y_data, categories=None, properties=None):
        super(LineData, self).__init__()
        self.name = name
        self.x = x_data
        self.y = y_data
        if categories:
            self.categories = categories
        else:
            # A line with no explicit categories is its own category.
            self.categories = [self.name]
        if properties:
            self.properties = properties
        else:
            self.properties = {}
        self._update_attach_properties()

    def _update_attach_properties(self):
        """
        Attaches all properties in ``self.properties`` to the ``self``
        namespace.
        """
        # ``items()`` (rather than py2-only ``iteritems()``) keeps this
        # working on both Python 2 and Python 3.
        # TODO Figure out why the properties are (or need to be) attached in
        # this way. It seems unnecessary
        for k, v in self.properties.items():
            setattr(self, k, v)

    def add_property(self, key, value):
        """
        Adds a property to the ``properties`` dictionary of the
        ``LineData`` object.

        The ``properties`` dictionary of ``LineData`` will be used by the
        generic ``set()`` function of ``matplotlib.Lines.Line2D``
        to set the properties of the line.

        Parameters
        ----------
        key : str
            The name of the ``matplotlib.Lines.Line2D`` property to be set.
        value : sting, int, bool
            The value of the property to be set. The type depends on the
            property.
        """
        # BUG FIX: ``{key, value}`` built a *set*, which dict.update()
        # cannot consume as a mapping; ``{key: value}`` is the intended dict.
        self.properties.update({key: value})
        self._update_attach_properties()
class SimpleData2D(object):
    """
    Minimal wrapper around 2D scan results.

    Stores the scan input (first column) and outputs (remaining columns) of
    ``data_array`` in a ``DotDict`` and builds ``LineData`` objects so the
    results can be visualised with ``ScanFig``.
    NOTE(review): this is Python 2 code (``print`` statement in
    ``save_results``); DotDict/LatexExpr/ScanFig come from sibling modules.
    """

    def __init__(self, column_names, data_array, mod=None):
        # column_names: first entry is the scanned input, the rest are outputs.
        # data_array:   2D array arranged column-wise to match column_names.
        # mod:          optional PySCeS model used for LaTeX label generation.
        super(SimpleData2D, self).__init__()
        self.mod = mod
        if self.mod:
            self._ltxe = LatexExpr(mod)
        else:
            self._ltxe = None
        self.scan_results = DotDict()
        self.scan_results['scan_in'] = column_names[0]
        self.scan_results['scan_out'] = column_names[1:]
        self.scan_results['scan_range'] = data_array[:, 0]
        self.scan_results['scan_results'] = data_array[:, 1:]
        self.scan_results['scan_points'] = len(self.scan_results.scan_range)
        self._column_names = column_names
        self._scan_results = data_array
        self._setup_lines()

    def _setup_lines(self):
        """
        Sets up ``LineData`` objects that will be used to populate ``ScanFig``
        objects created by the ``plot`` method of ``Data2D``. These objects
        are stored in a list: ``self._lines``

        ``ScanFig`` takes a list of ``LineData`` objects as an argument and
        this method sets up that list. The ``self._column_categories``
        dictionary is used here.
        """
        lines = []
        for i, each in enumerate(self.scan_results.scan_out):
            # Prefer a LaTeX label when a model/LatexExpr is available.
            if self._ltxe:
                label = self._ltxe.expression_to_latex(each)
            else:
                label = each
            line = LineData(name=each,
                            x_data=self.scan_results.scan_range,
                            y_data=self.scan_results.scan_results[:, i],
                            categories=[each],
                            properties={'label': '$%s$' % (label),
                                        'linewidth': 1.6})
            lines.append(line)
        self._lines = lines

    def plot(self):
        """
        Creates a ``ScanFig`` object using the data stored in the current
        instance of ``Data2D``

        Returns
        -------
        ``ScanFig``
            A `ScanFig`` object that is used to visualise results.
        """
        base_name = 'scan_fig'
        scan_fig = ScanFig(self._lines,
                           base_name=base_name,
                           ax_properties={'xlabel':
                                          self.scan_results.scan_in})
        return scan_fig

    def save_results(self, file_name=None, separator=',', fmt='%f'):
        """
        Saves data stores in current instance of ``Data2D`` as a comma
        separated file.

        Parameters
        ----------
        file_name : str, Optional (Default : None)
            The file name, extension and path under which data should be saved.
            If None the name will default to scan_data.csv and will be saved
            either under the directory specified under the directory specified
            in ``folder``.
        separator : str, Optional (Default : ',')
            The symbol which should be used to separate values in the output
            file.
        format : str, Optional (Default : '%f')
            Format for the data.
        """
        file_name = modeltools.get_file_path(working_dir=None,
                                             internal_filename='scan_fig',
                                             fmt='csv',
                                             fixed=self.scan_results.scan_in,
                                             file_name=file_name)
        scan_results = self._scan_results
        column_names = self._column_names
        try:
            exportLAWH(scan_results,
                       names=None,
                       header=column_names,
                       fname=file_name,
                       sep=separator,
                       format=fmt)
        except IOError as e:
            print e.strerror
class Data2D(object):
"""
An object that wraps results from a PySCeS parameter scan.
Results from parameter scan of timecourse are used to initialise this
object which in turn is used to create a ``ScanFig`` object. Here results
can easily be accessed and saved to disk.
The ``Data2D`` is also responsible for setting up a ``ScanFig`` object from
analysis results and therefore contains optional parameters for setting
up this object.
Parameters
----------
mod : PysMod
The model for which the parameter scan was performed.
column_names : list of str
The names of each column in the data_array. Columns should be arranged
with the input values (scan_in, time) in the first column and the
output values (scan_out) in the columns that follow.
data_array : ndarray
An array containing results from a parameter scan or tome simulation.
Arranged as described above.
ltxe : LatexExpr, optional (Default : None)
A LatexExpr object that is used to convert PySCeS compatible
expressions to LaTeX math. If None is supplied a new LatexExpr object
will be instantiated. Sharing a single instance saves memory.
analysis_method : str, Optional (Default : None)
A string that indicates the name of the analysis method used to
generate the results that populate ``Data2D``. This will determine
where results are saved by ``Data2D`` as well as any ``ScanFig``
objects that are produced by it.
ax_properties : dict, Optional (Default : None)
A dictionary of properties that will be used by ``ScanFig`` to adjust
the appearance of plots. These properties should compatible with
``matplotlib.axes.AxesSubplot'' object in a way that its ``set``
method can be used to change its properties. If none, a default
``ScanFig`` object is produced by the ``plot`` method.
file_name : str, Optional (Default : None)
The name that should be prepended to files produced any ``ScanFig``
objects produced by ``Data2D``. If None, defaults to 'scan_fig'.
additional_cat_classes : dict, Optional (Default : None)
A dictionary containing additional line class categories for
``ScanFig`` construction. Each ``data_array`` column contains results
representing a specific category of result (elasticity, flux,
concentration) which in turn fall into a larger class of data types
(All Coefficients). This dictionary defines which line classes fall
into which class category. (k = category class; v = line categories)
additional_cats : dict, Optional (Default : None)
A dictionary that defines additional result categories as well as the
lines that fall into these categories. (k = line category, v =
lines in category).
num_of_groups : int, Optional (Default : None)
A number that defines the number of groups of lines. Used to ensure
that the lines that are closely related (e.g. elasticities for one
reaction) have colors assigned to them that are easily differentiable.
working_dir : str, Optional (Default : None)
This string sets the working directory directly and if provided
supersedes ``analysis_method``.
See Also
--------
ScanFig
Data2D
RateChar
"""
def __init__(self,
mod,
column_names,
data_array,
ltxe=None,
analysis_method=None,
ax_properties=None,
file_name=None,
additional_cat_classes=None,
additional_cats=None,
num_of_groups=None,
working_dir=None,
category_manifest=None,
axvline=True):
self.scan_results = DotDict()
self.scan_results['scan_in'] = column_names[0]
self.scan_results['scan_out'] = column_names[1:]
self.scan_results['scan_range'] = data_array[:, 0]
self.scan_results['scan_results'] = data_array[:, 1:]
self.scan_results['scan_points'] = len(self.scan_results.scan_range)
self._column_names = column_names
self._scan_results = data_array
if not category_manifest:
category_manifest = {}
self._category_manifest = category_manifest
self.mod = mod
scan_in = self.scan_results.scan_in
if not analysis_method:
if scan_in.lower() == 'time':
analysis_method = 'simulation'
elif hasattr(self.mod, scan_in):
analysis_method = 'parameter_scan'
else:
analysis_method = 'custom'
self._analysis_method = analysis_method
if scan_in.lower() != 'time':
try:
self.mod.doMcaRC()
except:
pass
if axvline:
self._vline_val = None
if scan_in.lower() != 'time' and hasattr(self.mod, scan_in):
self._vline_val = getattr(self.mod, scan_in)
if not ltxe:
ltxe = LatexExpr(mod)
self._ltxe = ltxe
#TODO check if this is even needed
self._fname_specified = False
if not file_name:
self._fname = 'scan_data'
else:
self._fname = file_name
self._fname_specified = True
#This is here specifically for the do_mca_scan method of pysces. If
if not working_dir:
working_dir = modeltools.make_path(mod=self.mod,
analysis_method=self._analysis_method)
self._working_dir = working_dir
self._ax_properties_ = ax_properties
# So in order for ScanFig to have all those nice buttons that are
# organised so well we need to set it up beforehand. Basically
# each different line has different categories of lines that it falls
# into. Then each each of these categories falls into a category class.
# Each ``_category_classes`` key represents a category class and the
# value is a list of categories that fall into a class.
#
# The dictionary ``_scan_types`` contains the different categories that
# a line can fall into (in addition to the category containing itself).
# Here a keys is a category and value is a list of lines in this
# category.
#
# Buttons will be arranged so that a category class is a label under
# which all the buttons that toggle a certain category is arranged
# under. For instance under the label'All Coefficients' will be the
# buttons 'Elasticity Coefficients', 'Control Coefficients',
# 'Response Coefficients etc.
#
# We also add _scan_types to the ``_category_classes`` so that each
# individual line has its own button.
# There will therefore be a button called 'Control Coefficients' that
# fall under the 'All Coefficients' category class label as well as a
# label for the category class called 'Control Coefficients' under
# which all the different control coefficient buttons will be
# arranged.
if not additional_cat_classes:
additional_cat_classes = {}
self._additional_cat_classes = additional_cat_classes
if not additional_cats:
additional_cats = {}
self._additional_cats = additional_cats
self._setup_lines()
if num_of_groups:
self._lines = group_sort(self._lines, num_of_groups)
@property
def _category_classes(self):
    """Mapping of button-group labels ("category classes") to the line
    categories arranged under each label in ``ScanFig``.

    Starts from the two built-in class labels, merges in any classes
    supplied via ``additional_cat_classes`` and finally adds every
    category from ``_scan_types`` so that each category also gets a
    label (and thus its own button) of its own.
    """
    category_classes = OrderedDict([('All Coefficients',
                                     ['Elasticity Coefficients',
                                      'Control Coefficients',
                                      'Response Coefficients',
                                      'Partial Response Coefficients',
                                      'Control Patterns']),
                                    ('All Fluxes/Reactions/Species/Parameters',
                                     ['Flux Rates',
                                      'Reaction Rates',
                                      'Species Concentrations',
                                      'Steady-State Species Concentrations',
                                      'Parameters'])])
    # ``items()`` (rather than the Python-2-only ``iteritems()``) keeps
    # this working under both Python 2 and Python 3.
    for k, cats in self._additional_cat_classes.items():
        if k in category_classes:
            # Merge user-supplied categories into the existing class,
            # dropping duplicates (ordering is not significant here).
            category_classes[k] = list(set(category_classes[k] + cats))
        else:
            category_classes[k] = cats
    # Each individual category also becomes a class of its own so that
    # every line gets its own toggle button in ScanFig.
    category_classes.update(self._scan_types)
    return category_classes
@property
def _scan_types(self):
    """Mapping of line-category names to the names of the lines that
    fall into each category (used to build ``ScanFig`` toggle buttons).

    Any categories supplied via ``additional_cats`` in ``__init__`` are
    merged into the built-in ones.
    """
    scan_types = OrderedDict([
        ('Flux Rates', ['J_' + reaction for reaction in self.mod.reactions]),
        ('Reaction Rates', [reaction for reaction in self.mod.reactions]),
        ('Species Concentrations', self.mod.species + self.mod.fixed_species),
        ('Steady-State Species Concentrations',
         [sp + '_ss' for sp in self.mod.species]),
        ('Elasticity Coefficients', ec_list(self.mod)),
        ('Control Coefficients', cc_list(self.mod)),
        ('Response Coefficients', rc_list(self.mod)),
        ('Partial Response Coefficients', prc_list(self.mod)),
        # ``{:03}`` zero-pads to width 3 directly; the original
        # ``'CP{:3}'.format(n).replace(' ', '0')`` achieved the same
        # result indirectly by replacing the space padding.
        ('Control Patterns', ['CP{:03}'.format(n)
                              for n in range(1, len(self._column_names))]),
        ('Parameters', self.mod.parameters)])
    # ``items()`` keeps this compatible with both Python 2 and 3
    # (``iteritems()`` is Python-2-only). Iterating an empty dict is a
    # no-op, so no ``if additional_cats`` guard is needed.
    for k, names in self._additional_cats.items():
        if k in scan_types:
            scan_types[k] = list(set(scan_types[k] + names))
        else:
            scan_types[k] = names
    return scan_types
@property
def _column_categories(self):
    """Mapping of each data column to the list of categories it belongs to.

    Each line falls into its own category as well as (at most) one
    further category depending on what type of data it represents. So
    'Species1' falls into the category 'Species1' as well as 'Species
    Concentrations'. Both of the corresponding ``ScanFig`` buttons
    therefore need to be toggled on for the line representing the
    parameter scan results of Species1 to be visible on the ``ScanFig``
    figure.
    """
    scan_types = self._scan_types
    column_categories = {}
    for column in self.scan_results.scan_out:
        # Every column is always a member of its own category...
        column_categories[column] = [column]
        # ...plus the first scan-type category that contains it.
        # ``items()`` instead of the Python-2-only ``iteritems()``.
        for category, members in scan_types.items():
            if column in members:
                column_categories[column].append(category)
                break
    return column_categories
def _setup_lines(self):
    """Build the ``LineData`` objects used to populate ``ScanFig``
    objects created by the ``plot`` method of ``Data2D``.

    ``ScanFig`` takes a list of ``LineData`` objects as an argument;
    this method constructs that list (one entry per output column of
    the scan results, categorised via ``self._column_categories``) and
    stores it in ``self._lines``.
    """
    categories = self._column_categories
    x_values = self.scan_results.scan_range
    new_lines = []
    for col_num, col_name in enumerate(self.scan_results.scan_out):
        # Label each curve with the LaTeX rendering of its expression.
        latex_label = '$%s$' % (self._ltxe.expression_to_latex(col_name))
        new_lines.append(
            LineData(name=col_name,
                     x_data=x_values,
                     y_data=self.scan_results.scan_results[:, col_num],
                     categories=categories[col_name],
                     properties={'label': latex_label,
                                 'linewidth': 1.6}))
    self._lines = new_lines
@property
def _ax_properties(self):
    """Axis properties handed on to ``ScanFig``.

    When no ``ax_properties`` argument was given to ``__init__`` this
    lazily falls back to a dictionary that only sets the x-axis label
    (derived from ``self.scan_in`` via ``self._x_name``).
    """
    if self._ax_properties_:
        return self._ax_properties_
    self._ax_properties_ = {'xlabel': self._x_name}
    return self._ax_properties_
@property
def _x_name(self):
    """Default x-axis label for the scanned/simulated input: 'Time' for
    time courses, '[name]' for species, the bare name for parameters
    and an empty string otherwise.
    """
    model_species = ModelMap(self.mod).hasSpecies()
    scanned = self.scan_results.scan_in
    # TODO Enable lower case "time" as well as well as making generic for minutes/hours
    if scanned.lower() == 'time':
        return 'Time'
    if scanned in model_species:
        return '[%s]' % scanned
    if scanned in self.mod.parameters:
        return scanned
    return ''
def plot(self):
    """
    Creates a ``ScanFig`` object using the data stored in the current
    instance of ``Data2D``.

    Returns
    -------
    ``ScanFig``
        A ``ScanFig`` object that is used to visualise results.
    """
    # Fall back to a generic base name when the user did not supply a
    # file name in ``__init__``.
    if self._fname_specified:
        base_name = self._fname
    else:
        base_name = 'scan_fig'
    scan_fig = ScanFig(self._lines,
                       category_classes=self._category_classes,
                       ax_properties=self._ax_properties,
                       working_dir=path.join(self._working_dir,
                                             self.scan_results.scan_in),
                       base_name=base_name)
    # ``items()`` keeps this compatible with Python 2 and 3
    # (``iteritems()`` is Python-2-only).
    for category, visible in self._category_manifest.items():
        scan_fig.toggle_category(category, visible)
    # Optionally mark a vertical reference line on the axes.
    if self._vline_val:
        scan_fig.ax.axvline(self._vline_val, ls=':', color='gray')
    return scan_fig
def save_results(self, file_name=None, separator=',', fmt='%f'):
    """
    Saves data stored in the current instance of ``Data2D`` as a comma
    separated file.

    Parameters
    ----------
    file_name : str, Optional (Default : None)
        The file name, extension and path under which data should be
        saved. If None the name will default to scan_data.csv and will
        be saved under the directory specified in ``folder``.
    separator : str, Optional (Default : ',')
        The symbol which should be used to separate values in the
        output file.
    fmt : str, Optional (Default : '%f')
        Format for the data.  (The original docstring documented this
        parameter under the wrong name ``format``.)
    """
    file_name = modeltools.get_file_path(working_dir=self._working_dir,
                                         internal_filename=self._fname,
                                         fmt='csv',
                                         fixed=self.scan_results.scan_in,
                                         file_name=file_name)
    scan_results = self._scan_results
    column_names = self._column_names
    try:
        exportLAWH(scan_results,
                   names=None,
                   header=column_names,
                   fname=file_name,
                   sep=separator,
                   format=fmt)
    except IOError as e:
        # ``print()`` as a function works under both Python 2 and 3;
        # the original ``print e.strerror`` statement was Python-2-only
        # syntax and is a SyntaxError under Python 3.
        print(e.strerror)
class ScanFig(object):
    """
    Uses data in the form of a list of LineData objects to display interactive
    plots.

    Interactive plots can be customised in terms of which data is visible at
    any one time by simply clicking a button to toggle a line. Matplotlib
    figures are used internally, therefore ScanFig figures can be altered
    by changing the properties of the internal figure.

    Parameters
    ----------
    line_data_list : list of LineData objects
        A LineData object contains the information needed to draw a single
        curve on a matplotlib figure. Here a list of these objects are used
        to populate the internal matplotlib figure with the various curves
        that represent the results of a parameter scan or simulation.
    category_classes : dict, Optional (Default : None)
        Each line on a ScanFig plot falls into a different category. Each of
        these categories in turn fall into a different class. Each category
        represents a button which toggles the lines which fall into the
        category while the button is arranged under a label which is
        represented by a category class. Each key in this dict is a category
        class and the value is a list of categories that fall into this class.
        If None all categories will fall into the same class.
    fig_properties : dict, Optional (Default : None)
        A dictionary of properties that will be used to adjust the appearance
        of the figure. These properties should be compatible with a
        ``matplotlib.figure.Figure`` object in a way that its ``set``
        method can be used to change its properties. If None, default
        matplotlib figure properties will be used.
    ax_properties : dict, Optional (Default : None)
        A dictionary of properties that will be used to adjust the appearance
        of plot axes. These properties should be compatible with a
        ``matplotlib.axes.AxesSubplot`` object in a way that its ``set``
        method can be used to change its properties. If None default matplotlib
        axes properties will be used.
    base_name : str, Optional (Default : None)
        Base name that will be used when an image is saved by ``ScanFig``. If
        None, then ``scan_fig`` will be used.
    working_dir : str, Optional (Default : None)
        The directory in which figure files will be saved. If None, then it
        will default to the directory specified in ``pysces.output_dir``.

    See Also
    --------
    LineData
    Data2D

    NOTE(review): this class uses ``iteritems``/``iterkeys`` and
    ``dict.keys().sort()`` throughout, which are Python-2-only idioms —
    porting to Python 3 would require switching to ``items()``/``keys()``
    and ``sorted()``.
    """

    def __init__(self, line_data_list,
                 category_classes=None,
                 fig_properties=None,
                 ax_properties=None,
                 base_name=None,
                 working_dir=None):
        super(ScanFig, self).__init__()
        rcParams.update({'font.size': 16})
        # Lazily-built caches backing the corresponding properties.
        self._categories_ = None
        self._categories_status = None
        self._lines_ = None
        self._widgets_ = None
        self._figure_widgets_ = None
        self._raw_line_data = line_data_list
        # figure setup (interactive mode off so the figure is only shown
        # when explicitly requested)
        plt.ioff()
        self.fig = plt.figure(figsize=(10, 5.72))
        if fig_properties:
            self.fig.set(**fig_properties)
        # axis setup
        self.ax = self.fig.add_subplot(111)
        if ax_properties:
            self.ax.set(**ax_properties)
        # colourmap_setup
        # at the moment this is very basic and could be expanded
        # it would be useful to set it up based on category somehow
        cmap = plt.get_cmap('Set1')(
            linspace(0, 1.0, len(line_data_list)))
        # ``set_color_cycle`` was removed in newer matplotlib; the cycler
        # path is used when available.
        if use_cycler:
            col_cycler = cycler('color',cmap)
            self.ax.set_prop_cycle(col_cycler)
        else:
            self.ax.set_color_cycle(cmap)
        # Keep only those user-supplied category classes that actually
        # contain at least one known category.
        if category_classes:
            new_cat_classes = OrderedDict()
            for k, v in category_classes.iteritems():
                for each in self._categories.iterkeys():
                    if each in v:
                        if not k in new_cat_classes:
                            new_cat_classes[k] = []
                        new_cat_classes[k].append(each)
            self._category_classes = new_cat_classes
        else:
            # No classes supplied: group every category under one
            # unlabelled class.
            self._category_classes = {'': [k for k in self._categories]}
        if base_name:
            self._base_name = base_name
        else:
            self._base_name = 'scan_fig'
        if working_dir:
            self._working_dir = working_dir
        else:
            self._working_dir = psc_out_dir
        self._save_counter = 0
        # Touch the ``_lines`` property so that all the curves are drawn
        # on the axes up front (initially invisible).
        self._lines
        # With an inline backend the figure would otherwise be displayed
        # immediately; close it so it is only shown via show()/interact().
        if 'backend_inline' in rcParams['backend']:
            plt.close()
        self._save_button_ = None

    @property
    def _save_button(self):
        """Lazily-built ipywidgets 'Save' button wired to ``self.save()``."""
        if not self._save_button_:
            def save(clicked):
                self.save()
            self._save_button_ = widgets.Button()
            self._save_button_.description = 'Save'
            self._save_button_.on_click(save)
        return self._save_button_

    def show(self):
        """
        Displays the figure.

        Depending on the matplotlib backend this function will either display
        the figure inline if running in an ``IPython`` notebook with the
        ``--pylab=inline`` switch or with the %matplotlib inline IPython line
        magic, alternately it will display the figure as determined by the
        ``rcParams['backend']`` option of ``matplotlib``. Either the inline or
        nbagg backends are recommended.

        See Also
        --------
        interact
        adjust_figure
        """
        # Rebuild the legend from the currently visible lines before
        # showing the figure.
        _add_legend_viewlim(
            self.ax,
            bbox_to_anchor=(0, -0.17),
            ncol=3,
            loc=2,
            borderaxespad=0.)
        if 'backend_inline' in rcParams['backend']:
            # Replace the previous inline output rather than stacking
            # a new figure under it.
            clear_output(wait=True)
            display(self.fig)
        else:
            self.fig.show()

    def save(self, file_name=None, dpi=None, fmt=None, include_legend=True):
        """
        Saves the figure in it's current configuration.

        Parameters
        ----------
        file_name : str, Optional (Default : None)
            The file name to be used. If None is provided the file will be saved
            to ``working_dir/base_name.fmt``
        dpi : int, Optional (Default : None)
            The dpi to use. Defaults to 180.
        fmt : str, Optional (Default : None)
            The image format to use. Defaults to ``svg``. If ``file_name``
            contains a valid extension it will supersede ``fmt``.
        include_legend : bool, Optional (Default : True)
            If True the legend is included (and allowed for in the bounding
            box) when saving; if False the legend is temporarily detached
            from the axes while saving and restored afterwards.
        """
        if not fmt:
            fmt = 'svg'
        if not dpi:
            dpi = 180
        file_name = modeltools.get_file_path(working_dir=self._working_dir,
                                             internal_filename=self._base_name,
                                             fmt=fmt,
                                             file_name=file_name)
        # An extension embedded in file_name supersedes ``fmt``.
        fmt = modeltools.get_fmt(file_name)
        if include_legend:
            self.fig.savefig(file_name,
                             format=fmt,
                             dpi=dpi,
                             bbox_extra_artists=(self.ax.get_legend(),),
                             bbox_inches='tight')
        else:
            # Temporarily strip the legend so it is not rendered, then
            # restore it so the on-screen figure is unchanged.
            leg = self.ax.legend_
            self.ax.legend_ = None
            self.fig.savefig(file_name,
                             format=fmt,
                             dpi=dpi,)
            self.ax.legend_ = leg

    @property
    def _widgets(self):
        """Lazily-built OrderedDict mapping each category-class label to an
        ``HBox`` of ToggleButtons (one button per category in that class).
        Toggling a button calls ``toggle_category`` and redraws the figure.
        """
        if not self._widgets_:
            # One horizontal container per category class.
            widget_classes = OrderedDict()
            for k in self._category_classes.iterkeys():
                box = widgets.HBox()
                box.layout.display = 'flex-flow'
                widget_classes[k] = box

            # Factory so each button's callback closes over its own
            # category name (avoids the late-binding closure pitfall).
            def oc(cat):
                def on_change(name, value):
                    self.toggle_category(cat, value)
                    self.show()
                return on_change

            width = self._find_button_width()
            for each in self._categories:
                w = widgets.ToggleButton()
                w.description = each
                w.width = width
                w.value = self.categories_status[each]
                on_change = oc(each)
                w.on_trait_change(on_change, 'value')
                # Attach the button to every class that contains its
                # category.
                for k, v in self._category_classes.iteritems():
                    if each in v:
                        widget_classes[k].children += (w),
            # this is needed to sort widgets according to alphabetical order
            for k, v in widget_classes.iteritems():
                children_list = list(v.children)
                names = [getattr(widg, 'description')
                         for widg in children_list]
                names.sort()
                new_children_list = []
                for name in names:
                    for child in children_list:
                        if child.description == name:
                            new_children_list.append(child)
                v.children = tuple(new_children_list)
            self._widgets_ = widget_classes
        return self._widgets_

    @property
    def _figure_widgets(self):
        """
        Instantiates the widgets that will be used to adjust the figure.
        At the moment widgets for manipulating the following parameters
        are available:

            minimum and maximum x values on the x axis
            minimum and maximum y values on the y axis
            the scale of the x and y axis i.e. log vs linear

        The following are possible TODOs:

            figure size
            y label
            x label
            figure title
        """
        def convert_scale(val):
            """
            Converts between str and bool for the strings 'log' and 'linear'

            The string 'log' returns True, while True returns 'log'.
            The string 'linear' returns False, while False returns 'linear'

            Parameters
            ----------
            val : str, bool
                The value to convert.

            Returns
            -------
            value : str, bool
                The conversion of the parameter ``val``

            Examples
            --------
            >>> convert_scale('log')
            True
            >>> convert_scale(False)
            'linear'
            """
            if type(val) == bool:
                if val is True:
                    return 'log'
                elif val is False:
                    return 'linear'
            else:
                if val == 'log':
                    return True
                elif val == 'linear':
                    return False

        def c_v(val):
            # Clamp non-positive axis limits to a small positive number
            # (log scales cannot display values <= 0).
            if val <= 0:
                return 0.001
            else:
                return val

        if not self._figure_widgets_:
            # x-limit inputs, pre-populated from the current axes.
            min_x = widgets.FloatText()
            max_x = widgets.FloatText()
            min_x.value, max_x.value = self.ax.get_xlim()
            min_x.description = 'min'
            max_x.description = 'max'
            # y-limit inputs.
            min_y = widgets.FloatText()
            max_y = widgets.FloatText()
            min_y.value, max_y.value = self.ax.get_ylim()
            min_y.description = 'min'
            max_y.description = 'max'
            # log/linear checkboxes, initialised from the current scales.
            log_x = widgets.Checkbox()
            log_y = widgets.Checkbox()
            log_x.value = convert_scale(self.ax.get_xscale())
            log_y.value = convert_scale(self.ax.get_yscale())
            log_x.description = 'x_log'
            log_y.description = 'y_log'
            apply_btn = widgets.Button()
            apply_btn.description = 'Apply'

            def set_values(clicked):
                # Push the widget values onto the axes and redraw.
                if log_x.value is True:
                    min_x.value = c_v(min_x.value)
                    max_x.value = c_v(max_x.value)
                self.ax.set_xlim([min_x.value, max_x.value])
                if log_y.value is True:
                    min_y.value = c_v(min_y.value)
                    max_y.value = c_v(max_y.value)
                self.ax.set_ylim([min_y.value, max_y.value])
                self.ax.set_xscale(convert_scale(log_x.value))
                self.ax.set_yscale(convert_scale(log_y.value))
                self.show()

            apply_btn.on_click(set_values)
            x_lims = widgets.HBox(children=[min_x, max_x])
            y_lims = widgets.HBox(children=[min_y, max_y])
            lin_log = widgets.HBox(children=[log_x, log_y])
            apply_con = widgets.HBox(children=[apply_btn])
            _figure_widgets_ = OrderedDict()
            _figure_widgets_['X axis limits'] = x_lims
            _figure_widgets_['Y axis limits'] = y_lims
            _figure_widgets_['Axis scale'] = lin_log
            _figure_widgets_[' '] = apply_con
            self._figure_widgets_ = _figure_widgets_
        return self._figure_widgets_

    @property
    def _categories(self):
        """Lazily-built dict mapping each category name to the list of
        matplotlib line objects belonging to that category."""
        if not self._categories_:
            main_cats = []
            cats = []
            # Collect the full (de-duplicated) set of category names.
            for each in self._raw_line_data:
                cats += each.categories
                main_cats.append(each.categories[0])
            cats = list(set(cats))
            cat_dict = {}
            for each in cats:
                cat_dict[each] = []
            # Register every drawn line under each of its categories.
            for each in self._raw_line_data:
                line = self._lines[each.name]
                for cat in each.categories:
                    cat_dict[cat].append(line)
            self._categories_ = cat_dict
        return self._categories_

    @property
    def category_names(self):
        """The names of all line categories known to this figure."""
        return self._categories.keys()

    @property
    def categories_status(self):
        """Dict mapping each category name to its current visibility
        status (all categories start out hidden i.e. False)."""
        if not self._categories_status:
            cat_stat_dict = {}
            for each in self._categories:
                cat_stat_dict[each] = False
            self._categories_status = cat_stat_dict
        return self._categories_status

    @property
    def _lines(self):
        """Lazily draws all curves on the axes (initially invisible) and
        returns a dict mapping each line's name to its matplotlib line
        object."""
        if not self._lines_:
            lines = {}
            for i, each in enumerate(self._raw_line_data):
                line, = self.ax.plot(each.x, each.y)
                # set width to a default width of 2
                # bc the default value of one is too low
                line.set_linewidth(2)
                if each.properties:
                    line.set(**each.properties)
                else:
                    line.set_label(each.name)
                line.set_visible(False)
                lines[each.name] = line
            self._lines_ = lines
        return self._lines_

    @property
    def line_names(self):
        """Sorted list of the names of all lines on this figure."""
        # NOTE(review): Python-2-only — in Python 3 ``dict.keys()``
        # returns a view with no ``sort`` method; would need ``sorted()``.
        lines = self._lines.keys()
        lines.sort()
        return lines

    def toggle_line(self, name, value):
        """
        Changes the visibility of a certain line.

        When used a specific line's visibility is changed according to the
        ``value`` provided.

        Parameters
        ----------
        name: str
            The name of the line to change.
        value: bool
            The visibility status to change the line to (True for visible,
            False for invisible).

        See Also
        --------
        toggle_category
        """
        self._lines[name].set_visible(value)

    def toggle_category(self, cat, value):
        """
        Changes the visibility of all the lines in a certain line category.

        When used all lines in the provided category's visibility is changed
        according to the ``value`` provided.

        A line is only shown when *every* category it belongs to is
        toggled on; toggling one of its categories off hides it.

        Parameters
        ----------
        cat: str
            The name of the category to change.
        value: bool
            The visibility status to change the lines to (True for visible,
            False for invisible).

        See Also
        --------
        toggle_line
        """
        # get the visibility status of the category eg. True/False
        self.categories_status[cat] = value
        # get all the other categories
        # NOTE(review): Python-2-only — ``keys()`` is a view in Python 3
        # and has no ``pop``/``index``; would need ``list(...)``.
        other_cats = self._categories.keys()
        other_cats.pop(other_cats.index(cat))
        # self.categories is a dict with categories as keys
        # and list of lines that fall within a category
        # as a value. So for each line that falls in a cat
        for line in self._categories[cat]:
            # The visibility for a line has not changed at the start of
            # the loop
            in_other_cats = False
            # A line can also fall within another category
            other_cat_stats = []
            for each in other_cats:
                if line in self._categories[each]:
                    other_cat_stats.append(self.categories_status[each])
                    in_other_cats = True
            # If a line is never in any other categories
            # just set its visibility as it is dictated by
            # its category status.
            if in_other_cats:
                # Visible only if this category AND all its other
                # categories are toggled on.
                visibility = all([value] + other_cat_stats)
                line.set_visible(visibility)
            else:
                line.set_visible(value)

    def interact(self):
        """
        Displays the figure in a IPython/Jupyter notebook together with buttons
        to toggle the visibility of certain lines.

        See Also
        --------
        show
        adjust_figure
        """
        self.show()
        # Render each category-class label followed by its row of
        # toggle buttons.
        for k, v in self._widgets.iteritems():
            if len(v.children) > 0:
                head = widgets.Label(value=k)
                display(head)
                display(v)
                v._css = [(None, 'flex-wrap', 'wrap'), ]
                # v.remove_class('vbox')
                # v.add_class('hbox')
                # v.set_css({'flex-wrap': 'wrap'})
        display(widgets.Label(value='$~$'))
        display(self._save_button)
        # Sync each button with the current visibility status of its
        # category (visibility may have been changed programmatically).
        for boxes in self._widgets.itervalues():
            for button in boxes.children:
                button.value = self.categories_status[button.description]
        # self._save_button.remove_class('vbox')
        # self._save_button.add_class('hbox')

    def adjust_figure(self):
        """
        Provides widgets to set the limits and scale (log/linear) of the figure.

        As with ``interact``, the plot is displayed in the notebook. Here
        no widgets are provided the change the visibility of the data
        displayed on the plot, rather controls to set the limits and scale are
        provided.

        See Also
        --------
        show
        interact
        """
        self.show()
        for k, v in self._figure_widgets.iteritems():
            if len(v.children) > 0:
                head = widgets.Label(value=k)
                display(head)
                display(v)
                # v.remove_class('vbox')
                # v.add_class('hbox')
                v._css = [(None, 'flex-wrap', 'wrap'), ]
        display(widgets.Label(value='$~$'))
        display(self._save_button)
        # self._save_button.remove_class('vbox')
        # self._save_button.add_class('hbox')

    def _find_button_width(self):
        """Return a CSS width string for the toggle buttons, scaled so the
        longest category name fits (minimum width 145px)."""
        longest = sorted([len(each) for each in self._categories])[-1]
        if longest > 14:
            # 5px per character beyond the 14 that fit in the base width.
            width_px = (longest - 14) * 5 + 145
            width = str(width_px) + 'px'
        else:
            width = '145px'
        return width
|
exe0cdc/PyscesToolbox
|
psctb/utils/plotting/_plotting.py
|
Python
|
bsd-3-clause
| 44,476
|
[
"PySCeS"
] |
5a90b8fd880a4ec4b60a73d31a9b83fb901661df19aca1914f8b99c09e446e44
|
"""
Course Outline page in Studio.
"""
import datetime
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from .course_page import CoursePage
from .container import ContainerPage
from .utils import set_input_value_and_save, set_input_value, click_css, confirm_prompt
class CourseOutlineItem(object):
    """
    A mixin class for any :class:`PageObject` shown in a course outline.
    """
    BODY_SELECTOR = None
    EDIT_BUTTON_SELECTOR = '.xblock-field-value-edit'
    NAME_SELECTOR = '.item-title'
    NAME_INPUT_SELECTOR = '.xblock-field-input'
    NAME_FIELD_WRAPPER_SELECTOR = '.xblock-title .wrapper-xblock-field'
    STATUS_MESSAGE_SELECTOR = '> div[class$="status"] .status-message'
    CONFIGURATION_BUTTON_SELECTOR = '.action-item .configure-button'

    def __repr__(self):
        # CourseOutlineItem is also used as a mixin for CourseOutlinePage, which doesn't have a locator
        # Check for the existence of a locator so that errors when navigating to the course outline page don't show up
        # as errors in the repr method instead.
        try:
            return "{}(<browser>, {!r})".format(self.__class__.__name__, self.locator)
        except AttributeError:
            return "{}(<browser>)".format(self.__class__.__name__)

    def _bounded_selector(self, selector):
        """
        Returns `selector`, but limited to this particular `CourseOutlineItem` context
        """
        # If the item doesn't have a body selector or locator, then it can't be bounded
        # This happens in the context of the CourseOutlinePage
        if self.BODY_SELECTOR and hasattr(self, 'locator'):
            return '{}[data-locator="{}"] {}'.format(
                self.BODY_SELECTOR,
                self.locator,
                selector
            )
        else:
            return selector

    @property
    def name(self):
        """
        Returns the display name of this object, or None when no title
        element is present.
        """
        name_element = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).first
        if name_element:
            return name_element.text[0]
        else:
            return None

    @property
    def has_status_message(self):
        """
        Returns True if the item has a status message, False otherwise.
        """
        return self.q(css=self._bounded_selector(self.STATUS_MESSAGE_SELECTOR)).first.visible

    @property
    def status_message(self):
        """
        Returns the status message of this item.
        """
        return self.q(css=self._bounded_selector(self.STATUS_MESSAGE_SELECTOR)).text[0]

    @property
    def has_staff_lock_warning(self):
        """ Returns True if the 'Contains staff only content' message is visible """
        return self.status_message == 'Contains staff only content' if self.has_status_message else False

    @property
    def is_staff_only(self):
        """ Returns True if the visibility state of this item is staff only (has a black sidebar) """
        return "is-staff-only" in self.q(css=self._bounded_selector(''))[0].get_attribute("class")

    def edit_name(self):
        """
        Puts the item's name into editable form.
        """
        self.q(css=self._bounded_selector(self.EDIT_BUTTON_SELECTOR)).first.click()

    def enter_name(self, new_name):
        """
        Enters new_name as the item's display name.
        """
        set_input_value(self, self._bounded_selector(self.NAME_INPUT_SELECTOR), new_name)

    def change_name(self, new_name):
        """
        Changes the container's name.
        """
        self.edit_name()
        set_input_value_and_save(self, self._bounded_selector(self.NAME_INPUT_SELECTOR), new_name)
        self.wait_for_ajax()

    def finalize_name(self):
        """
        Presses ENTER, saving the value of the display name for this item.
        """
        self.q(css=self._bounded_selector(self.NAME_INPUT_SELECTOR)).results[0].send_keys(Keys.ENTER)
        self.wait_for_ajax()

    def set_staff_lock(self, is_locked):
        """
        Sets the explicit staff lock of item on the container page to is_locked.
        """
        modal = self.edit()
        modal.is_explicitly_locked = is_locked
        modal.save()

    def in_editable_form(self):
        """
        Return whether this outline item's display name is in its editable form.
        """
        return "is-editing" in self.q(
            css=self._bounded_selector(self.NAME_FIELD_WRAPPER_SELECTOR)
        )[0].get_attribute("class")

    def edit(self):
        """
        Opens the edit modal for this item by clicking its configure button,
        waits for the modal to be shown and returns it.
        """
        self.q(css=self._bounded_selector(self.CONFIGURATION_BUTTON_SELECTOR)).first.click()
        modal = CourseOutlineModal(self)
        # Bug fix: a bok_choy EmptyPromise performs no waiting unless
        # fulfill() is called; the original code created the promise and
        # discarded it, so it never actually waited for the modal.
        EmptyPromise(lambda: modal.is_shown(), 'Modal is shown.').fulfill()
        return modal

    @property
    def release_date(self):
        """
        Returns the release date text of this item, or None when absent.
        """
        element = self.q(css=self._bounded_selector(".status-release-value"))
        return element.first.text[0] if element.present else None

    @property
    def due_date(self):
        """
        Returns the due date text of this item, or None when absent.
        """
        element = self.q(css=self._bounded_selector(".status-grading-date"))
        return element.first.text[0] if element.present else None

    @property
    def policy(self):
        """
        Returns the grading policy text of this item, or None when absent.
        """
        element = self.q(css=self._bounded_selector(".status-grading-value"))
        return element.first.text[0] if element.present else None

    def publish(self):
        """
        Publish the unit.
        """
        click_css(self, self._bounded_selector('.action-publish'), require_notification=False)
        modal = CourseOutlineModal(self)
        # Same fix as in edit(): fulfill() is required for the promise to
        # actually wait for the modal before publishing through it.
        EmptyPromise(lambda: modal.is_shown(), 'Modal is shown.').fulfill()
        modal.publish()

    @property
    def publish_action(self):
        """
        Returns the link for publishing a unit.
        """
        return self.q(css=self._bounded_selector('.action-publish')).first
class CourseOutlineContainer(CourseOutlineItem):
    """
    A mixin to a CourseOutline page object that adds the ability to load
    a child page object by title or by index.

    CHILD_CLASS must be a :class:`CourseOutlineChild` subclass.
    """
    CHILD_CLASS = None
    ADD_BUTTON_SELECTOR = '> .outline-content > .add-item a.button-new'

    def child(self, title, child_class=None):
        """
        Returns the child page object (of class ``child_class``, defaulting
        to ``CHILD_CLASS``) whose displayed title matches ``title``.
        """
        if not child_class:
            child_class = self.CHILD_CLASS
        # Locate the DOM node whose title element contains ``title`` and
        # build a page object from its data-locator attribute.
        return child_class(
            self.browser,
            self.q(css=child_class.BODY_SELECTOR).filter(
                lambda el: title in [inner.text for inner in
                                     el.find_elements_by_css_selector(child_class.NAME_SELECTOR)]
            ).attrs('data-locator')[0]
        )

    def children(self, child_class=None):
        """
        Returns all the children page objects of class child_class.
        """
        if not child_class:
            child_class = self.CHILD_CLASS
        return self.q(css=self._bounded_selector(child_class.BODY_SELECTOR)).map(
            lambda el: child_class(self.browser, el.get_attribute('data-locator'))).results

    def child_at(self, index, child_class=None):
        """
        Returns the child at the specified index.
        """
        if not child_class:
            child_class = self.CHILD_CLASS
        return self.children(child_class)[index]

    def add_child(self, require_notification=True):
        """
        Adds a child to this xblock, waiting for notifications.
        """
        click_css(
            self,
            self._bounded_selector(self.ADD_BUTTON_SELECTOR),
            require_notification=require_notification,
        )

    def toggle_expand(self):
        """
        Toggle the expansion of this subsection.
        """
        # Disable jQuery animations so the expansion state change takes
        # effect immediately instead of animating.
        self.browser.execute_script("jQuery.fx.off = true;")

        def subsection_expanded():
            # The "add" button is only displayed while the container is
            # expanded, so its visibility doubles as the expansion state.
            add_button = self.q(css=self._bounded_selector(self.ADD_BUTTON_SELECTOR)).first.results
            return add_button and add_button[0].is_displayed()

        currently_expanded = subsection_expanded()
        self.q(css=self._bounded_selector('.ui-toggle-expansion i')).first.click()
        # Wait until the expansion state has actually flipped.
        EmptyPromise(
            lambda: subsection_expanded() != currently_expanded,
            "Check that the container {} has been toggled".format(self.locator)
        ).fulfill()
        return self

    @property
    def is_collapsed(self):
        """
        Return whether this outline item is currently collapsed.
        """
        return "is-collapsed" in self.q(css=self._bounded_selector('')).first.attrs("class")[0]
class CourseOutlineChild(PageObject, CourseOutlineItem):
    """
    A page object that will be used as a child of :class:`CourseOutlineContainer`.
    """
    url = None
    BODY_SELECTOR = '.outline-item'

    def __init__(self, browser, locator):
        super(CourseOutlineChild, self).__init__(browser)
        # The unique xblock locator identifying this item in the DOM
        # (matched against the data-locator attribute).
        self.locator = locator

    def is_browser_on_page(self):
        """
        Returns True when the DOM node for this specific locator is present.
        """
        return self.q(css='{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)).present

    def delete(self, cancel=False):
        """
        Clicks the delete button, then cancels at the confirmation prompt if cancel is True.
        """
        click_css(self, self._bounded_selector('.delete-button'), require_notification=False)
        confirm_prompt(self, cancel)

    def _bounded_selector(self, selector):
        """
        Return `selector`, but limited to this particular `CourseOutlineChild` context
        """
        # Unlike the CourseOutlineItem version, a child always has both a
        # BODY_SELECTOR and a locator, so no fallback branch is needed.
        return '{}[data-locator="{}"] {}'.format(
            self.BODY_SELECTOR,
            self.locator,
            selector
        )

    @property
    def name(self):
        """
        The display name of this child, or None when no title is present.
        """
        titles = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).text
        if titles:
            return titles[0]
        else:
            return None

    @property
    def children(self):
        """
        Will return any first-generation descendant items of this item.
        """
        descendants = self.q(css=self._bounded_selector(self.BODY_SELECTOR)).map(
            lambda el: CourseOutlineChild(self.browser, el.get_attribute('data-locator'))).results

        # Now remove any non-direct descendants.
        grandkids = []
        for descendant in descendants:
            grandkids.extend(descendant.children)
        grand_locators = [grandkid.locator for grandkid in grandkids]
        # Keep only the items that are not a descendant of another match.
        return [descendant for descendant in descendants if descendant.locator not in grand_locators]
class CourseOutlineUnit(CourseOutlineChild):
    """
    PageObject that wraps a unit link on the Studio Course Outline page.
    """
    url = None
    BODY_SELECTOR = '.outline-unit'
    NAME_SELECTOR = '.unit-title a'

    def go_to(self):
        """
        Open the container page linked to by this unit link, and return
        an initialized :class:`.ContainerPage` for that unit.
        """
        return ContainerPage(self.browser, self.locator).visit()

    def is_browser_on_page(self):
        # Unlike the parent class this does not bind to self.locator —
        # any outline unit on the page satisfies the check.
        return self.q(css=self.BODY_SELECTOR).present

    def children(self):
        # NOTE(review): this *method* shadows the inherited ``children``
        # *property* of CourseOutlineChild, so callers must invoke it as
        # ``unit.children()`` here — confirm this asymmetry is intended.
        return self.q(css=self._bounded_selector(self.BODY_SELECTOR)).map(
            lambda el: CourseOutlineUnit(self.browser, el.get_attribute('data-locator'))).results
class CourseOutlineSubsection(CourseOutlineContainer, CourseOutlineChild):
    """
    :class`.PageObject` that wraps a subsection block on the Studio Course Outline page.
    """
    url = None
    BODY_SELECTOR = '.outline-subsection'
    NAME_SELECTOR = '.subsection-title'
    NAME_FIELD_WRAPPER_SELECTOR = '.subsection-header .wrapper-xblock-field'
    CHILD_CLASS = CourseOutlineUnit

    def unit(self, title):
        """
        Return the :class:`.CourseOutlineUnit` with the title `title`.
        """
        return self.child(title)

    def units(self):
        """
        Returns the units in this subsection.
        """
        return self.children()

    def unit_at(self, index):
        """
        Returns the CourseOutlineUnit at the specified index.
        """
        return self.child_at(index)

    def add_unit(self):
        """
        Adds a unit to this subsection
        """
        # Clicks the add button directly — unlike add_child()/click_css
        # this does not wait for a notification afterwards.
        self.q(css=self._bounded_selector(self.ADD_BUTTON_SELECTOR)).click()
class CourseOutlineSection(CourseOutlineContainer, CourseOutlineChild):
    """
    :class`.PageObject` that wraps a section block on the Studio Course Outline page.
    """
    url = None
    BODY_SELECTOR = '.outline-section'
    NAME_SELECTOR = '.section-title'
    NAME_FIELD_WRAPPER_SELECTOR = '.section-header .wrapper-xblock-field'
    CHILD_CLASS = CourseOutlineSubsection

    def subsection(self, title):
        """
        Return the :class:`.CourseOutlineSubsection` with the title `title`.
        """
        return self.child(title)

    def subsections(self):
        """
        Returns a list of the CourseOutlineSubsections of this section
        """
        return self.children()

    def subsection_at(self, index):
        """
        Returns the CourseOutlineSubsection at the specified index.
        """
        return self.child_at(index)

    def add_subsection(self):
        """
        Adds a subsection to this section
        """
        # Delegates to CourseOutlineContainer.add_child, which waits for
        # the resulting notification.
        self.add_child()
class ExpandCollapseLinkState:
    """
    Represents the three states that the expand/collapse link can be in
    """
    MISSING = 0   # the link is not rendered at all
    COLLAPSE = 1  # clicking the link will collapse sections
    EXPAND = 2    # clicking the link will expand sections
class CourseOutlinePage(CoursePage, CourseOutlineContainer):
"""
Course Outline page in Studio.
"""
url_path = "course"
CHILD_CLASS = CourseOutlineSection
EXPAND_COLLAPSE_CSS = '.button-toggle-expand-collapse'
BOTTOM_ADD_SECTION_BUTTON = '.outline > .add-section .button-new'
def is_browser_on_page(self):
    # The outline body must be present AND the loading indicator must be
    # hidden before the page counts as fully loaded.
    return self.q(css='body.view-outline').present and self.q(css='div.ui-loading.is-hidden').present
def view_live(self):
    """
    Clicks the "View Live" link and switches to the new tab
    """
    click_css(self, '.view-live-button', require_notification=False)
    # The LMS view opens in a new window; switch to the most recent handle.
    self.browser.switch_to_window(self.browser.window_handles[-1])
def section(self, title):
    """
    Return the :class:`.CourseOutlineSection` with the title `title`.
    """
    # Delegates to CourseOutlineContainer.child with CHILD_CLASS.
    return self.child(title)
def section_at(self, index):
    """
    Returns the :class:`.CourseOutlineSection` at the specified index.
    """
    # Delegates to CourseOutlineContainer.child_at with CHILD_CLASS.
    return self.child_at(index)
def click_section_name(self, parent_css=''):
    """
    Find and click on first section name in course outline

    ``parent_css`` optionally scopes the search to a parent selector.
    """
    self.q(css='{} .section-name'.format(parent_css)).first.click()
def get_section_name(self, parent_css='', page_refresh=False):
    """
    Get the list of names of all sections present

    ``parent_css`` optionally scopes the search; when ``page_refresh``
    is True the browser is reloaded first.
    """
    if page_refresh:
        self.browser.refresh()
    return self.q(css='{} .section-name'.format(parent_css)).text
def section_name_edit_form_present(self, parent_css=''):
"""
Check that section name edit form present
"""
return self.q(css='{} .section-name input'.format(parent_css)).present
def change_section_name(self, new_name, parent_css=''):
"""
Change section name of first section present in course outline
"""
self.click_section_name(parent_css)
self.q(css='{} .section-name input'.format(parent_css)).first.fill(new_name)
self.q(css='{} .section-name .save-button'.format(parent_css)).first.click()
self.wait_for_ajax()
def click_release_date(self):
"""
Open release date edit modal of first section in course outline
"""
self.q(css='div.section-published-date a.edit-release-date').first.click()
def sections(self):
"""
Returns the sections of this course outline page.
"""
return self.children()
def add_section_from_top_button(self):
"""
Clicks the button for adding a section which resides at the top of the screen.
"""
click_css(self, '.wrapper-mast nav.nav-actions .button-new')
def add_section_from_bottom_button(self, click_child_icon=False):
"""
Clicks the button for adding a section which resides at the bottom of the screen.
"""
element_css = self.BOTTOM_ADD_SECTION_BUTTON
if click_child_icon:
element_css += " .fa-plus"
click_css(self, element_css)
def toggle_expand_collapse(self):
"""
Toggles whether all sections are expanded or collapsed
"""
self.q(css=self.EXPAND_COLLAPSE_CSS).click()
@property
def bottom_add_section_button(self):
"""
Returns the query representing the bottom add section button.
"""
return self.q(css=self.BOTTOM_ADD_SECTION_BUTTON).first
@property
def has_no_content_message(self):
"""
Returns true if a message informing the user that the course has no content is visible
"""
return self.q(css='.outline .no-content').is_present()
@property
def has_rerun_notification(self):
"""
Returns true iff the rerun notification is present on the page.
"""
return self.q(css='.wrapper-alert.is-shown').is_present()
def dismiss_rerun_notification(self):
"""
Clicks the dismiss button in the rerun notification.
"""
self.q(css='.dismiss-button').click()
@property
def expand_collapse_link_state(self):
"""
Returns the current state of the expand/collapse link
"""
link = self.q(css=self.EXPAND_COLLAPSE_CSS)[0]
if not link.is_displayed():
return ExpandCollapseLinkState.MISSING
elif "collapse-all" in link.get_attribute("class"):
return ExpandCollapseLinkState.COLLAPSE
else:
return ExpandCollapseLinkState.EXPAND
def expand_all_subsections(self):
"""
Expands all the subsections in this course.
"""
for section in self.sections():
if section.is_collapsed:
section.toggle_expand()
for subsection in section.subsections():
if subsection.is_collapsed:
subsection.toggle_expand()
@property
def xblocks(self):
"""
Return a list of xblocks loaded on the outline page.
"""
return self.children(CourseOutlineChild)
class CourseOutlineModal(object):
    """
    Page object for the settings modal (release date, due date, grading
    policy, staff lock) opened from the course outline.
    """
    MODAL_SELECTOR = ".wrapper-modal-window"

    def __init__(self, page):
        # The parent page object; all element queries are routed through it.
        self.page = page

    def _bounded_selector(self, selector):
        """
        Returns `selector`, but limited to this particular `CourseOutlineModal` context.
        """
        return " ".join([self.MODAL_SELECTOR, selector])

    def is_shown(self):
        """Return True if the modal window is present in the DOM."""
        return self.page.q(css=self.MODAL_SELECTOR).present

    def find_css(self, selector):
        """Query `selector`, scoped to this modal."""
        return self.page.q(css=self._bounded_selector(selector))

    def click(self, selector, index=0):
        """Click the `index`-th element matching `selector` inside the modal."""
        self.find_css(selector).nth(index).click()

    def save(self):
        """Click "Save" and wait for the AJAX round-trip."""
        self.click(".action-save")
        self.page.wait_for_ajax()

    def publish(self):
        """Click "Publish" and wait for the AJAX round-trip."""
        self.click(".action-publish")
        self.page.wait_for_ajax()

    def cancel(self):
        """Dismiss the modal without saving."""
        self.click(".action-cancel")

    def has_release_date(self):
        """Return True if the release-date input is shown."""
        return self.find_css("#start_date").present

    def has_due_date(self):
        """Return True if the due-date input is shown."""
        return self.find_css("#due_date").present

    def has_policy(self):
        """Return True if the grading-type dropdown is shown."""
        return self.find_css("#grading_type").present

    def set_date(self, property_name, input_selector, date):
        """
        Set `date` value to input pointed by `selector` and `property_name`.

        `date` is an "mm/dd/yyyy" string; the value is set by driving the
        jQuery-UI datepicker widget, not by typing into the input.
        """
        month, day, year = map(int, date.split('/'))
        self.click(input_selector)
        if getattr(self, property_name):
            # NOTE(review): split('/')[1:] on "mm/dd/yyyy" yields
            # [dd, yyyy], so current_month is actually the *day* component.
            # This looks like it should read indexes 0 and 2 -- confirm
            # against the stored value format before changing.
            current_month, current_year = map(int, getattr(self, property_name).split('/')[1:])
        else:  # Use default timepicker values, which are current month and year.
            current_month, current_year = datetime.datetime.today().month, datetime.datetime.today().year
        # Number of month-steps between the displayed month and the target;
        # the sign selects the datepicker's next/prev arrow.
        date_diff = 12 * (year - current_year) + month - current_month
        selector = "a.ui-datepicker-{}".format('next' if date_diff > 0 else 'prev')
        # xrange: this module targets Python 2.
        for i in xrange(abs(date_diff)):
            self.page.q(css=selector).click()
        self.page.q(css="a.ui-state-default").nth(day - 1).click()  # set day
        self.page.wait_for_element_invisibility("#ui-datepicker-div", "datepicker should be closed")
        EmptyPromise(
            lambda: getattr(self, property_name) == u'{m}/{d}/{y}'.format(m=month, d=day, y=year),
            "{} is updated in modal.".format(property_name)
        ).fulfill()

    @property
    def release_date(self):
        """The release-date input's current "mm/dd/yyyy" value."""
        return self.find_css("#start_date").first.attrs('value')[0]

    @release_date.setter
    def release_date(self, date):
        """
        Date is "mm/dd/yyyy" string.
        """
        self.set_date('release_date', "#start_date", date)

    @property
    def due_date(self):
        """The due-date input's current "mm/dd/yyyy" value."""
        return self.find_css("#due_date").first.attrs('value')[0]

    @due_date.setter
    def due_date(self, date):
        """
        Date is "mm/dd/yyyy" string.
        """
        self.set_date('due_date', "#due_date", date)

    @property
    def policy(self):
        """
        The visible text of the currently selected grading format.
        """
        element = self.find_css('#grading_type')[0]
        return self.get_selected_option_text(element)

    @policy.setter
    def policy(self, grading_label):
        """
        Select the grading format with `value` in the drop-down list.
        """
        element = self.find_css('#grading_type')[0]
        select = Select(element)
        select.select_by_visible_text(grading_label)

        EmptyPromise(
            lambda: self.policy == grading_label,
            "Grading label is updated.",
        ).fulfill()

    @property
    def is_explicitly_locked(self):
        """
        Returns true if the explict staff lock checkbox is checked, false otherwise.
        """
        return self.find_css('#staff_lock')[0].is_selected()

    @is_explicitly_locked.setter
    def is_explicitly_locked(self, value):
        """
        Checks the explicit staff lock box if value is true, otherwise unchecks the box.
        """
        # Click the label (not the checkbox) only when the state must change.
        if value != self.is_explicitly_locked:
            self.find_css('label[for="staff_lock"]').click()
        EmptyPromise(lambda: value == self.is_explicitly_locked, "Explicit staff lock is updated").fulfill()

    def shows_staff_lock_warning(self):
        """
        Returns true iff the staff lock warning is visible.
        """
        return self.find_css('.staff-lock .tip-warning').visible

    def get_selected_option_text(self, element):
        """
        Returns the text of the first selected option for the element,
        or None when `element` is falsy.
        """
        if element:
            select = Select(element)
            return select.first_selected_option.text
        else:
            return None
|
olexiim/edx-platform
|
common/test/acceptance/pages/studio/overview.py
|
Python
|
agpl-3.0
| 23,236
|
[
"VisIt"
] |
c4106d421deb2c2d29f6f09837a40177cc4ebcc41570a9cd628843a3a06331d4
|
"""Tests for distutils.dist."""
import os
import io
import sys
import unittest
import warnings
import textwrap
from unittest import mock
from distutils.dist import Distribution, fix_help_options
from distutils.cmd import Command
from test.support import (
captured_stdout, captured_stderr, run_unittest
)
from .py38compat import TESTFN
from distutils.tests import support
from distutils import log
class test_dist(Command):
    """Minimal distutils extension command used as a fixture by the tests."""

    # One fake option so option parsing ("-Ssometext") can be exercised.
    user_options = [("sample-option=", "S", "help text")]

    def initialize_options(self):
        # No value until the option machinery fills it in from argv.
        self.sample_option = None
class TestDistribution(Distribution):
    """Distribution subclass that skips the default config-file search.

    Set the ``_config_files`` attribute before calling
    ``parse_config_files()``.
    """

    def find_config_files(self):
        # Return exactly what the test injected instead of probing the
        # filesystem for system/user/local config files.
        return self._config_files
class DistributionTestCase(support.LoggingSilencer,
                           support.TempdirManager,
                           support.EnvironGuard,
                           unittest.TestCase):
    """Tests for Distribution's command-line and config-file handling."""

    def setUp(self):
        super(DistributionTestCase, self).setUp()
        # Save (original argv object, copy of its contents) so tearDown can
        # restore both the binding and the contents.
        self.argv = sys.argv, sys.argv[:]
        del sys.argv[1:]

    def tearDown(self):
        sys.argv = self.argv[0]
        sys.argv[:] = self.argv[1]
        super(DistributionTestCase, self).tearDown()

    def create_distribution(self, configfiles=()):
        """Build a TestDistribution that parses `configfiles` and sys.argv."""
        d = TestDistribution()
        d._config_files = configfiles
        d.parse_config_files()
        d.parse_command_line()
        return d

    def test_command_packages_unspecified(self):
        # With no option given, only the default package is searched.
        sys.argv.append("build")
        d = self.create_distribution()
        self.assertEqual(d.get_command_packages(), ["distutils.command"])

    def test_command_packages_cmdline(self):
        from distutils.tests.test_dist import test_dist
        sys.argv.extend(["--command-packages",
                         "foo.bar,distutils.tests",
                         "test_dist",
                         "-Ssometext",
                         ])
        d = self.create_distribution()
        # let's actually try to load our test command:
        self.assertEqual(d.get_command_packages(),
                         ["distutils.command", "foo.bar", "distutils.tests"])
        cmd = d.get_command_obj("test_dist")
        self.assertIsInstance(cmd, test_dist)
        self.assertEqual(cmd.sample_option, "sometext")

    @unittest.skipIf(
        'distutils' not in Distribution.parse_config_files.__module__,
        'Cannot test when virtualenv has monkey-patched Distribution.',
    )
    def test_venv_install_options(self):
        sys.argv.append("install")
        self.addCleanup(os.unlink, TESTFN)

        fakepath = '/somedir'

        # Write a config file that sets every install directory option.
        with open(TESTFN, "w") as f:
            print(("[install]\n"
                   "install-base = {0}\n"
                   "install-platbase = {0}\n"
                   "install-lib = {0}\n"
                   "install-platlib = {0}\n"
                   "install-purelib = {0}\n"
                   "install-headers = {0}\n"
                   "install-scripts = {0}\n"
                   "install-data = {0}\n"
                   "prefix = {0}\n"
                   "exec-prefix = {0}\n"
                   "home = {0}\n"
                   "user = {0}\n"
                   "root = {0}").format(fakepath), file=f)

        # Base case: Not in a Virtual Environment
        with mock.patch.multiple(sys, prefix='/a', base_prefix='/a') as values:
            d = self.create_distribution([TESTFN])

        # Each option should have been recorded as (origin-file, value).
        option_tuple = (TESTFN, fakepath)

        result_dict = {
            'install_base': option_tuple,
            'install_platbase': option_tuple,
            'install_lib': option_tuple,
            'install_platlib': option_tuple,
            'install_purelib': option_tuple,
            'install_headers': option_tuple,
            'install_scripts': option_tuple,
            'install_data': option_tuple,
            'prefix': option_tuple,
            'exec_prefix': option_tuple,
            'home': option_tuple,
            'user': option_tuple,
            'root': option_tuple,
        }

        self.assertEqual(
            sorted(d.command_options.get('install').keys()),
            sorted(result_dict.keys()))

        for (key, value) in d.command_options.get('install').items():
            self.assertEqual(value, result_dict[key])

        # Test case: In a Virtual Environment.  prefix != base_prefix, so
        # the install directory options must be ignored.
        with mock.patch.multiple(sys, prefix='/a', base_prefix='/b') as values:
            d = self.create_distribution([TESTFN])

        for key in result_dict.keys():
            self.assertNotIn(key, d.command_options.get('install', {}))

    def test_command_packages_configfile(self):
        sys.argv.append("build")
        self.addCleanup(os.unlink, TESTFN)
        f = open(TESTFN, "w")
        try:
            print("[global]", file=f)
            print("command_packages = foo.bar, splat", file=f)
        finally:
            f.close()

        d = self.create_distribution([TESTFN])
        self.assertEqual(d.get_command_packages(),
                         ["distutils.command", "foo.bar", "splat"])

        # ensure command line overrides config:
        sys.argv[1:] = ["--command-packages", "spork", "build"]
        d = self.create_distribution([TESTFN])
        self.assertEqual(d.get_command_packages(),
                         ["distutils.command", "spork"])

        # Setting --command-packages to '' should cause the default to
        # be used even if a config file specified something else:
        sys.argv[1:] = ["--command-packages", "", "build"]
        d = self.create_distribution([TESTFN])
        self.assertEqual(d.get_command_packages(), ["distutils.command"])

    def test_empty_options(self):
        # an empty options dictionary should not stay in the
        # list of attributes

        # catching warnings
        warns = []

        def _warn(msg):
            warns.append(msg)

        self.addCleanup(setattr, warnings, 'warn', warnings.warn)
        warnings.warn = _warn
        dist = Distribution(attrs={'author': 'xxx', 'name': 'xxx',
                                   'version': 'xxx', 'url': 'xxxx',
                                   'options': {}})

        self.assertEqual(len(warns), 0)
        self.assertNotIn('options', dir(dist))

    def test_finalize_options(self):
        attrs = {'keywords': 'one,two',
                 'platforms': 'one,two'}

        dist = Distribution(attrs=attrs)
        dist.finalize_options()

        # finalize_option splits platforms and keywords
        self.assertEqual(dist.metadata.platforms, ['one', 'two'])
        self.assertEqual(dist.metadata.keywords, ['one', 'two'])

        # Values without commas are left as a single entry.
        attrs = {'keywords': 'foo bar',
                 'platforms': 'foo bar'}
        dist = Distribution(attrs=attrs)
        dist.finalize_options()
        self.assertEqual(dist.metadata.platforms, ['foo bar'])
        self.assertEqual(dist.metadata.keywords, ['foo bar'])

    def test_get_command_packages(self):
        dist = Distribution()
        self.assertEqual(dist.command_packages, None)
        # get_command_packages() also caches the parsed list on the instance.
        cmds = dist.get_command_packages()
        self.assertEqual(cmds, ['distutils.command'])
        self.assertEqual(dist.command_packages,
                         ['distutils.command'])

        dist.command_packages = 'one,two'
        cmds = dist.get_command_packages()
        self.assertEqual(cmds, ['distutils.command', 'one', 'two'])

    def test_announce(self):
        # make sure the level is known
        dist = Distribution()
        args = ('ok',)
        kwargs = {'level': 'ok2'}
        self.assertRaises(ValueError, dist.announce, args, kwargs)

    def test_find_config_files_disable(self):
        # Ticket #1180: Allow user to disable their home config file.
        temp_home = self.mkdtemp()
        if os.name == 'posix':
            user_filename = os.path.join(temp_home, ".pydistutils.cfg")
        else:
            user_filename = os.path.join(temp_home, "pydistutils.cfg")

        with open(user_filename, 'w') as f:
            f.write('[distutils]\n')

        def _expander(path):
            return temp_home

        # Point expanduser at the temp dir so the fake user cfg is found.
        old_expander = os.path.expanduser
        os.path.expanduser = _expander
        try:
            d = Distribution()
            all_files = d.find_config_files()

            d = Distribution(attrs={'script_args': ['--no-user-cfg']})
            files = d.find_config_files()
        finally:
            os.path.expanduser = old_expander

        # make sure --no-user-cfg disables the user cfg file
        self.assertEqual(len(all_files)-1, len(files))
class MetadataTestCase(support.TempdirManager, support.EnvironGuard,
                       unittest.TestCase):
    """Tests for metadata generation (PKG-INFO) and round-tripping."""

    def setUp(self):
        super(MetadataTestCase, self).setUp()
        # Save (original argv object, copy of its contents) for tearDown.
        self.argv = sys.argv, sys.argv[:]

    def tearDown(self):
        sys.argv = self.argv[0]
        sys.argv[:] = self.argv[1]
        super(MetadataTestCase, self).tearDown()

    def format_metadata(self, dist):
        """Render `dist`'s metadata as a PKG-INFO style string."""
        sio = io.StringIO()
        dist.metadata.write_pkg_file(sio)
        return sio.getvalue()

    def test_simple_metadata(self):
        attrs = {"name": "package",
                 "version": "1.0"}
        dist = Distribution(attrs)
        meta = self.format_metadata(dist)
        # No 1.1-only fields used, so version 1.0 is emitted.
        self.assertIn("Metadata-Version: 1.0", meta)
        self.assertNotIn("provides:", meta.lower())
        self.assertNotIn("requires:", meta.lower())
        self.assertNotIn("obsoletes:", meta.lower())

    def test_provides(self):
        attrs = {"name": "package",
                 "version": "1.0",
                 "provides": ["package", "package.sub"]}
        dist = Distribution(attrs)
        self.assertEqual(dist.metadata.get_provides(),
                         ["package", "package.sub"])
        self.assertEqual(dist.get_provides(),
                         ["package", "package.sub"])
        meta = self.format_metadata(dist)
        # "provides" bumps the metadata version to 1.1.
        self.assertIn("Metadata-Version: 1.1", meta)
        self.assertNotIn("requires:", meta.lower())
        self.assertNotIn("obsoletes:", meta.lower())

    def test_provides_illegal(self):
        # Version restrictions are not allowed in "provides".
        self.assertRaises(ValueError, Distribution,
                          {"name": "package",
                           "version": "1.0",
                           "provides": ["my.pkg (splat)"]})

    def test_requires(self):
        attrs = {"name": "package",
                 "version": "1.0",
                 "requires": ["other", "another (==1.0)"]}
        dist = Distribution(attrs)
        self.assertEqual(dist.metadata.get_requires(),
                         ["other", "another (==1.0)"])
        self.assertEqual(dist.get_requires(),
                         ["other", "another (==1.0)"])
        meta = self.format_metadata(dist)
        self.assertIn("Metadata-Version: 1.1", meta)
        self.assertNotIn("provides:", meta.lower())
        self.assertIn("Requires: other", meta)
        self.assertIn("Requires: another (==1.0)", meta)
        self.assertNotIn("obsoletes:", meta.lower())

    def test_requires_illegal(self):
        # Malformed version restriction raises at construction time.
        self.assertRaises(ValueError, Distribution,
                          {"name": "package",
                           "version": "1.0",
                           "requires": ["my.pkg (splat)"]})

    def test_requires_to_list(self):
        # An arbitrary iterable is materialized into a list.
        attrs = {"name": "package",
                 "requires": iter(["other"])}
        dist = Distribution(attrs)
        self.assertIsInstance(dist.metadata.requires, list)

    def test_obsoletes(self):
        attrs = {"name": "package",
                 "version": "1.0",
                 "obsoletes": ["other", "another (<1.0)"]}
        dist = Distribution(attrs)
        self.assertEqual(dist.metadata.get_obsoletes(),
                         ["other", "another (<1.0)"])
        self.assertEqual(dist.get_obsoletes(),
                         ["other", "another (<1.0)"])
        meta = self.format_metadata(dist)
        self.assertIn("Metadata-Version: 1.1", meta)
        self.assertNotIn("provides:", meta.lower())
        self.assertNotIn("requires:", meta.lower())
        self.assertIn("Obsoletes: other", meta)
        self.assertIn("Obsoletes: another (<1.0)", meta)

    def test_obsoletes_illegal(self):
        self.assertRaises(ValueError, Distribution,
                          {"name": "package",
                           "version": "1.0",
                           "obsoletes": ["my.pkg (splat)"]})

    def test_obsoletes_to_list(self):
        attrs = {"name": "package",
                 "obsoletes": iter(["other"])}
        dist = Distribution(attrs)
        self.assertIsInstance(dist.metadata.obsoletes, list)

    def test_classifier(self):
        attrs = {'name': 'Boa', 'version': '3.0',
                 'classifiers': ['Programming Language :: Python :: 3']}
        dist = Distribution(attrs)
        self.assertEqual(dist.get_classifiers(),
                         ['Programming Language :: Python :: 3'])
        meta = self.format_metadata(dist)
        self.assertIn('Metadata-Version: 1.1', meta)

    def test_classifier_invalid_type(self):
        # Passing a tuple instead of a list warns but is accepted.
        attrs = {'name': 'Boa', 'version': '3.0',
                 'classifiers': ('Programming Language :: Python :: 3',)}
        with captured_stderr() as error:
            d = Distribution(attrs)
        # should have warning about passing a non-list
        self.assertIn('should be a list', error.getvalue())
        # should be converted to a list
        self.assertIsInstance(d.metadata.classifiers, list)
        self.assertEqual(d.metadata.classifiers,
                         list(attrs['classifiers']))

    def test_keywords(self):
        attrs = {'name': 'Monty', 'version': '1.0',
                 'keywords': ['spam', 'eggs', 'life of brian']}
        dist = Distribution(attrs)
        self.assertEqual(dist.get_keywords(),
                         ['spam', 'eggs', 'life of brian'])

    def test_keywords_invalid_type(self):
        attrs = {'name': 'Monty', 'version': '1.0',
                 'keywords': ('spam', 'eggs', 'life of brian')}
        with captured_stderr() as error:
            d = Distribution(attrs)
        # should have warning about passing a non-list
        self.assertIn('should be a list', error.getvalue())
        # should be converted to a list
        self.assertIsInstance(d.metadata.keywords, list)
        self.assertEqual(d.metadata.keywords, list(attrs['keywords']))

    def test_platforms(self):
        attrs = {'name': 'Monty', 'version': '1.0',
                 'platforms': ['GNU/Linux', 'Some Evil Platform']}
        dist = Distribution(attrs)
        self.assertEqual(dist.get_platforms(),
                         ['GNU/Linux', 'Some Evil Platform'])

    def test_platforms_invalid_types(self):
        attrs = {'name': 'Monty', 'version': '1.0',
                 'platforms': ('GNU/Linux', 'Some Evil Platform')}
        with captured_stderr() as error:
            d = Distribution(attrs)
        # should have warning about passing a non-list
        self.assertIn('should be a list', error.getvalue())
        # should be converted to a list
        self.assertIsInstance(d.metadata.platforms, list)
        self.assertEqual(d.metadata.platforms, list(attrs['platforms']))

    def test_download_url(self):
        attrs = {'name': 'Boa', 'version': '3.0',
                 'download_url': 'http://example.org/boa'}
        dist = Distribution(attrs)
        meta = self.format_metadata(dist)
        self.assertIn('Metadata-Version: 1.1', meta)

    def test_long_description(self):
        long_desc = textwrap.dedent("""\
        example::
            We start here
            and continue here
            and end here.""")
        attrs = {"name": "package",
                 "version": "1.0",
                 "long_description": long_desc}

        dist = Distribution(attrs)
        meta = self.format_metadata(dist)
        # Undo the 8-space continuation-line indent used in PKG-INFO.
        meta = meta.replace('\n' + 8 * ' ', '\n')
        self.assertIn(long_desc, meta)

    def test_custom_pydistutils(self):
        # fixes #2166
        # make sure pydistutils.cfg is found
        if os.name == 'posix':
            user_filename = ".pydistutils.cfg"
        else:
            user_filename = "pydistutils.cfg"

        temp_dir = self.mkdtemp()
        user_filename = os.path.join(temp_dir, user_filename)
        f = open(user_filename, 'w')
        try:
            f.write('.')
        finally:
            f.close()

        try:
            dist = Distribution()

            # linux-style
            if sys.platform in ('linux', 'darwin'):
                os.environ['HOME'] = temp_dir
                files = dist.find_config_files()
                self.assertIn(user_filename, files)

            # win32-style
            if sys.platform == 'win32':
                # home drive should be found
                os.environ['USERPROFILE'] = temp_dir
                files = dist.find_config_files()
                self.assertIn(user_filename, files,
                              '%r not found in %r' % (user_filename, files))
        finally:
            os.remove(user_filename)

    def test_fix_help_options(self):
        # fix_help_options trims each 4-tuple down to a 3-tuple.
        help_tuples = [('a', 'b', 'c', 'd'), (1, 2, 3, 4)]
        fancy_options = fix_help_options(help_tuples)
        self.assertEqual(fancy_options[0], ('a', 'b', 'c'))
        self.assertEqual(fancy_options[1], (1, 2, 3))

    def test_show_help(self):
        # smoke test, just makes sure some help is displayed
        self.addCleanup(log.set_threshold, log._global_log.threshold)
        dist = Distribution()
        sys.argv = []
        dist.help = 1
        dist.script_name = 'setup.py'
        with captured_stdout() as s:
            dist.parse_command_line()

        output = [line for line in s.getvalue().split('\n')
                  if line.strip() != '']
        self.assertTrue(output)

    def test_read_metadata(self):
        attrs = {"name": "package",
                 "version": "1.0",
                 "long_description": "desc",
                 "description": "xxx",
                 "download_url": "http://example.com",
                 "keywords": ['one', 'two'],
                 "requires": ['foo']}

        dist = Distribution(attrs)
        metadata = dist.metadata

        # write it then reloads it
        PKG_INFO = io.StringIO()
        metadata.write_pkg_file(PKG_INFO)
        PKG_INFO.seek(0)
        metadata.read_pkg_file(PKG_INFO)

        self.assertEqual(metadata.name, "package")
        self.assertEqual(metadata.version, "1.0")
        self.assertEqual(metadata.description, "xxx")
        self.assertEqual(metadata.download_url, 'http://example.com')
        self.assertEqual(metadata.keywords, ['one', 'two'])
        self.assertEqual(metadata.platforms, ['UNKNOWN'])
        self.assertEqual(metadata.obsoletes, None)
        self.assertEqual(metadata.requires, ['foo'])
def test_suite():
    """Build and return this module's test suite.

    Returns
    -------
    unittest.TestSuite
        A suite containing every test method of DistributionTestCase and
        MetadataTestCase.
    """
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    # unittest.makeSuite() is deprecated since Python 3.11 and removed in
    # 3.13; TestLoader.loadTestsFromTestCase() collects the same tests.
    suite.addTest(loader.loadTestsFromTestCase(DistributionTestCase))
    suite.addTest(loader.loadTestsFromTestCase(MetadataTestCase))
    return suite
# Allow running this test module directly: python test_dist.py
if __name__ == "__main__":
    run_unittest(test_suite())
|
iproduct/course-social-robotics
|
11-dnn-keras/venv/Lib/site-packages/setuptools/_distutils/tests/test_dist.py
|
Python
|
gpl-2.0
| 19,274
|
[
"Brian"
] |
a9c41f278eba480f65cbc0ebed009dccbf6054915aca3c8fe1d0e74cbc41854f
|
"""Synthesize a MIDI file, chiptunes-style! (using pure numpy and scipy)
Includes functions for synthesizing different drum types, bass instruments, and
all other instruments. Also for auto-arpeggiating chords.
"""
import numpy as np
import scipy.signal
import scipy.io.wavfile
import pretty_midi
import argparse
import sys
def tonal(fs, length, frequency, nonlinearity=1.):
    """Synthesize a single tonal (pitched) drum hit.

    Parameters
    ----------
    fs : int
        Sampling frequency, in Hz.
    length : int
        Length of the drum sound, in samples.
    frequency : float
        Fundamental frequency of the drum, in Hz.
    nonlinearity : float
        Gain applied before the tanh waveshaper, default 1.

    Returns
    -------
    np.ndarray
        Synthesized drum waveform, `length` samples long.
    """
    # Amplitude: exponential decay over 10 time constants.
    amplitude = np.exp(np.linspace(0, -10, length))
    # Pitch: a slow 1% linear downward glide...
    pitch_glide = np.linspace(1.0, .99, length)
    # ...plus a very fast exponential drop at the onset for a "click".
    pitch_glide *= 100*np.exp(np.linspace(0, -100*frequency, length)) + 1
    # Render the sine tone under both envelopes.
    hit = amplitude*np.sin(
        2*np.pi*frequency*pitch_glide*np.arange(length)/float(fs))
    # Leaky integrator (one-pole low-pass) with its -3 dB point near the
    # drum's fundamental.
    alpha = 1 - np.exp(-2*np.pi*(frequency)/float(fs))
    hit = scipy.signal.lfilter([alpha], [1, alpha - 1], hit)
    # Soft-clip through tanh for the requested amount of distortion.
    return np.tanh(nonlinearity*hit)
def noise(length):
    """Synthesize an unpitched (noise-based) drum hit.

    Parameters
    ----------
    length : int
        Number of samples to synthesize.

    Returns
    -------
    np.ndarray
        Gaussian white noise shaped by a decaying exponential envelope
        (same 10-time-constant decay as tonal()).
    """
    envelope = np.exp(np.linspace(0, -10, length))
    samples = np.random.randn(length)
    return envelope*samples
def synthesize_drum_instrument(instrument, fs=44100):
    '''
    Synthesize a pretty_midi.Instrument object with drum sounds.

    Parameters
    ----------
    instrument : pretty_midi.Instrument
        Drum-track instrument to synthesize.
    fs : int
        Sampling frequency, default 44100.

    Returns
    -------
    synthesized : np.ndarray
        Audio data of the instrument synthesized
    '''
    # Allocate output with one extra second of tail room for the last hit.
    synthesized = np.zeros(int((instrument.get_end_time() + 1)*fs))
    for note in instrument.notes:
        # Map the MIDI pitch to its General MIDI drum name ('' if none).
        drum_name = pretty_midi.note_number_to_drum_name(note.pitch)
        # Based on the drum name, synthesize using the tonal or noise
        # functions.  Durations are in samples; use floor division so they
        # stay integers on Python 3 (plain / was Python-2 int division).
        if drum_name in ['Acoustic Bass Drum', 'Bass Drum 1']:
            d = tonal(fs, fs//2, 80, 8.)
        elif drum_name in ['Side Stick']:
            d = tonal(fs, fs//20, 400, 8.)
        elif drum_name in ['Acoustic Snare', 'Electric Snare']:
            d = .4*tonal(fs, fs//10, 200, 20.) + .6*noise(fs//10)
        elif drum_name in ['Hand Clap', 'Vibraslap']:
            d = .1*tonal(fs, fs//10, 400, 8.) + .9*noise(fs//10)
        elif drum_name in ['Low Floor Tom', 'Low Tom', 'Low Bongo',
                           'Low Conga', 'Low Timbale']:
            d = tonal(fs, fs//4, 120, 8.)
        elif drum_name in ['Closed Hi Hat', 'Cabasa', 'Maracas',
                           'Short Guiro']:
            d = noise(fs//20)
        elif drum_name in ['High Floor Tom', 'High Tom', 'Hi Bongo',
                           'Open Hi Conga', 'High Timbale']:
            d = tonal(fs, fs//4, 480, 4.)
        elif drum_name in ['Pedal Hi Hat', 'Open Hi Hat', 'Crash Cymbal 1',
                           'Ride Cymbal 1', 'Chinese Cymbal', 'Crash Cymbal 2',
                           'Ride Cymbal 2', 'Tambourine', 'Long Guiro',
                           'Splash Cymbal']:
            d = .8*noise(fs)
        elif drum_name in ['Low-Mid Tom']:
            d = tonal(fs, fs//4, 240, 4.)
        elif drum_name in ['Hi-Mid Tom']:
            d = tonal(fs, fs//4, 360, 4.)
        elif drum_name in ['Mute Hi Conga', 'Mute Cuica', 'Cowbell',
                           'Low Agogo', 'Low Wood Block']:
            d = tonal(fs, fs//10, 480, 4.)
        elif drum_name in ['Ride Bell', 'High Agogo', 'Claves',
                           'Hi Wood Block']:
            d = tonal(fs, fs//20, 960, 4.)
        elif drum_name in ['Short Whistle']:
            d = tonal(fs, fs//4, 480, 1.)
        elif drum_name in ['Long Whistle']:
            d = tonal(fs, fs, 480, 1.)
        elif drum_name in ['Mute Triangle']:
            d = tonal(fs, fs//10, 1960, 1.)
        elif drum_name in ['Open Triangle']:
            d = tonal(fs, fs, 1960, 1.)
        else:
            # Compare with != -- the original used "is not ''", which tests
            # object identity, not string equality, and is unreliable.
            if drum_name != '':
                # This should never happen
                print('Unexpected drum {}'.format(drum_name))
            continue
        # Mix the synthesized hit into the output at the note's start time,
        # scaled by MIDI velocity.
        start = int(note.start*fs)
        synthesized[start:start+d.size] += d*note.velocity
    return synthesized
def arpeggiate_instrument(instrument, arpeggio_time):
    '''
    Arpeggiate the notes of an instrument.

    Parameters
    ----------
    instrument : pretty_midi.Instrument
        Instrument object.  NOTE: the start times of this instrument's
        notes may be modified in place while chords are being collected.
    arpeggio_time : float
        Time, in seconds, of each note in the arpeggio

    Returns
    -------
    inst_arpeggiated : pretty_midi.Instrument
        Instrument with the notes arpeggiated.
    '''
    # Make a copy of the instrument (same program/drum flag, no notes yet)
    inst_arpeggiated = pretty_midi.Instrument(program=instrument.program,
                                              is_drum=instrument.is_drum)
    # Pitch bends are carried over unchanged.
    for bend in instrument.pitch_bends:
        inst_arpeggiated.pitch_bends.append(bend)
    n = 0
    while n < len(instrument.notes):
        # Collect notes which are in this chord
        chord_notes = [(instrument.notes[n].pitch,
                        instrument.notes[n].velocity)]
        m = n + 1
        while m < len(instrument.notes):
            # It's in the chord if it starts before the current note ends
            if instrument.notes[m].start < instrument.notes[n].end:
                # Add in the pitch and velocity
                chord_notes.append((instrument.notes[m].pitch,
                                    instrument.notes[m].velocity))
                # Move the start time of the note up so it gets used next time
                if instrument.notes[m].end > instrument.notes[n].end:
                    instrument.notes[m].start = instrument.notes[n].end
            m += 1
        # Arpeggiate the collected notes
        time = instrument.notes[n].start
        pitch_index = 0
        if len(chord_notes) > 2:
            # Three or more simultaneous notes: cycle through them at
            # arpeggio_time intervals until the chord's span is covered.
            while time < instrument.notes[n].end:
                # Get the pitch and velocity of this note, but mod the index
                # to circulate
                pitch, velocity = chord_notes[pitch_index % len(chord_notes)]
                # Add this note to the new instrument
                inst_arpeggiated.notes.append(
                    pretty_midi.Note(velocity, pitch, time,
                                     time + arpeggio_time))
                # Next pitch next time
                pitch_index += 1
                # Move forward by the supplied amount
                time += arpeggio_time
        else:
            # One or two notes is not a chord: keep the note as-is.
            inst_arpeggiated.notes.append(instrument.notes[n])
            time = instrument.notes[n].end
        n += 1
        # Find the next chord: skip notes already consumed by this one.
        while (n < len(instrument.notes) and
               instrument.notes[n].start + arpeggio_time <= time):
            n += 1
    return inst_arpeggiated
def chiptunes_synthesize(midi, fs=44100):
    '''
    Synthesize a pretty_midi.PrettyMIDI object chiptunes style.

    Parameters
    ----------
    midi : pretty_midi.PrettyMIDI
        PrettyMIDI object to synthesize
    fs : int
        Sampling rate of the synthesized audio signal, default 44100

    Returns
    -------
    synthesized : np.ndarray
        Waveform of the MIDI data, synthesized at fs
    '''
    # If there are no instruments, return an empty array
    if len(midi.instruments) == 0:
        return np.array([])
    # Get synthesized waveform for each instrument
    waveforms = []
    for inst in midi.instruments:
        # Synthesize as drum
        if inst.is_drum:
            waveforms.append(synthesize_drum_instrument(inst, fs=fs))
        else:
            # Call it a bass instrument when none of THIS instrument's notes
            # are over 48 (~130 Hz) or the program's name has "bass" in it.
            # (The original scanned every instrument's notes, so the pitch
            # test gave the same verdict for all instruments, and it crashed
            # on instruments with no notes.)
            pitches = [note.pitch for note in inst.notes]
            is_bass = (
                (len(pitches) > 0 and max(pitches) < 48) or
                'Bass' in pretty_midi.program_to_instrument_name(inst.program))
            if is_bass:
                # Synthesize as a sine wave (should be triangle!)
                audio = inst.synthesize(fs=fs, wave=np.sin)
                # Quantize to 5-bit (32 levels) for a chip flavor.
                # NOTE(review): the bin edges start at -audio.min(), a
                # *positive* value for signed audio; this probably should be
                # np.linspace(audio.min(), audio.max(), 32) -- confirm
                # intent before changing, as it alters the sound.
                audio = np.digitize(
                    audio, np.linspace(-audio.min(), audio.max(), 32))
                waveforms.append(audio)
            else:
                # Otherwise, it's a harmony/lead instrument, so arpeggiate it
                # Arpeggio time of 30ms seems to work well
                inst_arpeggiated = arpeggiate_instrument(inst, .03)
                # These instruments sound louder because they're square,
                # so scale down
                waveforms.append(.5*inst_arpeggiated.synthesize(
                    fs=fs, wave=scipy.signal.square))
    # Allocate output waveform, with #sample = max length of all waveforms
    synthesized = np.zeros(np.max([w.shape[0] for w in waveforms]))
    # Sum all waveforms in
    for waveform in waveforms:
        synthesized[:waveform.shape[0]] += waveform
    # Normalize to peak amplitude 1
    synthesized /= np.abs(synthesized).max()
    return synthesized
if __name__ == '__main__':
    # Set up command-line argument parsing
    parser = argparse.ArgumentParser(
        description='Synthesize a MIDI file, chiptunes style.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('midi_file', action='store',
                        help='Path to the MIDI file to synthesize')
    parser.add_argument('output_file', action='store',
                        help='Path where the synthesized wav will be written')
    parser.add_argument('--fs', default=44100, type=int, action='store',
                        help='Output sampling rate to use')
    # Parse command line arguments
    parameters = vars(parser.parse_args(sys.argv[1:]))
    # Use print() calls: a single parenthesized argument works on both
    # Python 2 and 3 (the bare print statement is a SyntaxError on 3).
    print("Synthesizing {} ...".format(parameters['midi_file']))
    # Load in MIDI data and synthesize using chiptunes_synthesize
    midi_object = pretty_midi.PrettyMIDI(parameters['midi_file'])
    synthesized = chiptunes_synthesize(midi_object, parameters['fs'])
    print("Writing {} ...".format(parameters['output_file']))
    # Write out the normalized float waveform as a wav file
    scipy.io.wavfile.write(
        parameters['output_file'], parameters['fs'], synthesized)
|
douglaseck/pretty-midi
|
examples/chiptunes.py
|
Python
|
mit
| 11,000
|
[
"Gaussian"
] |
6015bedeaa9f8fadeeae2afba8d0001473a4a48e5a8ef6ff9fbb7daf7d944c7d
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Pipeline, the top-level Beam object.
A pipeline holds a DAG of data transforms. Conceptually the nodes of the DAG
are transforms (:class:`~apache_beam.transforms.ptransform.PTransform` objects)
and the edges are values (mostly :class:`~apache_beam.pvalue.PCollection`
objects). The transforms take as inputs one or more PValues and output one or
more :class:`~apache_beam.pvalue.PValue` s.
The pipeline offers functionality to traverse the graph. The actual operation
to be executed for each node visited is specified through a runner object.
Typical usage::
# Create a pipeline object using a local runner for execution.
with beam.Pipeline('DirectRunner') as p:
# Add to the pipeline a "Create" transform. When executed this
# transform will produce a PCollection object with the specified values.
pcoll = p | 'Create' >> beam.Create([1, 2, 3])
# Another transform could be applied to pcoll, e.g., writing to a text file.
# For other transforms, refer to transforms/ directory.
pcoll | 'Write' >> beam.io.WriteToText('./output')
# run() will execute the DAG stored in the pipeline. The execution of the
# nodes visited is done using the specified local runner.
"""
from __future__ import absolute_import
import abc
import logging
import os
import re
import shutil
import tempfile
from builtins import object
from builtins import zip
from future.utils import with_metaclass
from apache_beam import pvalue
from apache_beam.internal import pickler
from apache_beam.io.filesystems import FileSystems
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import TypeOptions
from apache_beam.options.pipeline_options_validator import PipelineOptionsValidator
from apache_beam.portability import common_urns
from apache_beam.pvalue import PCollection
from apache_beam.pvalue import PDone
from apache_beam.runners import PipelineRunner
from apache_beam.runners import create_runner
from apache_beam.transforms import ptransform
#from apache_beam.transforms import external
from apache_beam.typehints import TypeCheckError
from apache_beam.typehints import typehints
from apache_beam.utils.annotations import deprecated
__all__ = ['Pipeline', 'PTransformOverride']
class Pipeline(object):
  """A pipeline object that manages a DAG of
  :class:`~apache_beam.pvalue.PValue` s and their
  :class:`~apache_beam.transforms.ptransform.PTransform` s.
  Conceptually the :class:`~apache_beam.pvalue.PValue` s are the DAG's nodes and
  the :class:`~apache_beam.transforms.ptransform.PTransform` s computing
  the :class:`~apache_beam.pvalue.PValue` s are the edges.
  All the transforms applied to the pipeline must have distinct full labels.
  If same transform instance needs to be applied then the right shift operator
  should be used to designate new names
  (e.g. ``input | "label" >> my_transform``).
  """
  def __init__(self, runner=None, options=None, argv=None):
    """Initialize a pipeline object.
    Args:
      runner (~apache_beam.runners.runner.PipelineRunner): An object of
        type :class:`~apache_beam.runners.runner.PipelineRunner` that will be
        used to execute the pipeline. For registered runners, the runner name
        can be specified, otherwise a runner object must be supplied.
      options (~apache_beam.options.pipeline_options.PipelineOptions):
        A configured
        :class:`~apache_beam.options.pipeline_options.PipelineOptions` object
        containing arguments that should be used for running the Beam job.
      argv (List[str]): a list of arguments (such as :data:`sys.argv`)
        to be used for building a
        :class:`~apache_beam.options.pipeline_options.PipelineOptions` object.
        This will only be used if argument **options** is :data:`None`.
    Raises:
      ~exceptions.ValueError: if either the runner or options argument is not
        of the expected type.
    """
    # Precedence for options: explicit PipelineOptions > argv list > empty.
    if options is not None:
      if isinstance(options, PipelineOptions):
        self._options = options
      else:
        raise ValueError(
            'Parameter options, if specified, must be of type PipelineOptions. '
            'Received : %r' % options)
    elif argv is not None:
      if isinstance(argv, list):
        self._options = PipelineOptions(argv)
      else:
        raise ValueError(
            'Parameter argv, if specified, must be a list. Received : %r'
            % argv)
    else:
      self._options = PipelineOptions([])
    FileSystems.set_options(self._options)
    # Runner resolution: explicit argument > --runner option > default.
    if runner is None:
      runner = self._options.view_as(StandardOptions).runner
      if runner is None:
        runner = StandardOptions.DEFAULT_RUNNER
        logging.info(('Missing pipeline option (runner). Executing pipeline '
                      'using the default runner: %s.'), runner)
    if isinstance(runner, str):
      runner = create_runner(runner)
    elif not isinstance(runner, PipelineRunner):
      raise TypeError('Runner %s is not a PipelineRunner object or the '
                      'name of a registered runner.' % runner)
    # Validate pipeline options
    errors = PipelineOptionsValidator(self._options, runner).validate()
    if errors:
      raise ValueError(
          'Pipeline has validations errors: \n' + '\n'.join(errors))
    # set default experiments for portable runner
    # (needs to occur prior to pipeline construction)
    portable_runners = ['PortableRunner', 'FlinkRunner']
    if self._options.view_as(StandardOptions).runner in portable_runners:
      experiments = (self._options.view_as(DebugOptions).experiments or [])
      if 'beam_fn_api' not in experiments:
        experiments.append('beam_fn_api')
        self._options.view_as(DebugOptions).experiments = experiments
    # Default runner to be used.
    self.runner = runner
    # Stack of transforms generated by nested apply() calls. The stack will
    # contain a root node as an enclosing (parent) node for top transforms.
    self.transforms_stack = [AppliedPTransform(None, None, '', None)]
    # Set of transform labels (full labels) applied to the pipeline.
    # If a transform is applied and the full label is already in the set
    # then the transform will have to be cloned with a new label.
    self.applied_labels = set()
  @property
  @deprecated(since='First stable release',
              extra_message='References to <pipeline>.options'
              ' will not be supported')
  def options(self):
    return self._options
  def _current_transform(self):
    """Returns the transform currently on the top of the stack."""
    return self.transforms_stack[-1]
  def _root_transform(self):
    """Returns the root transform of the transform stack."""
    return self.transforms_stack[0]
  def _remove_labels_recursively(self, applied_transform):
    # Forget labels of all descendants so a replacement transform's expansion
    # may legally reuse them.
    for part in applied_transform.parts:
      if part.full_label in self.applied_labels:
        self.applied_labels.remove(part.full_label)
        self._remove_labels_recursively(part)
  def _replace(self, override):
    """Replaces every AppliedPTransform matched by ``override`` in place.

    Runs two visitor passes: one that swaps matched transform nodes for
    their replacements, and one that records which inputs/outputs/side
    inputs of *other* nodes must be re-pointed at the new PCollections
    (the re-pointing happens after visiting, since mutating during a
    visit triggers validation errors).
    """
    assert isinstance(override, PTransformOverride)
    # Maps original output PCollections to their replacements.
    output_map = {}
    output_replacements = {}
    input_replacements = {}
    side_input_replacements = {}
    class TransformUpdater(PipelineVisitor): # pylint: disable=used-before-assignment
      """A visitor that replaces the matching PTransforms."""
      def __init__(self, pipeline):
        self.pipeline = pipeline
      def _replace_if_needed(self, original_transform_node):
        if override.matches(original_transform_node):
          assert isinstance(original_transform_node, AppliedPTransform)
          replacement_transform = override.get_replacement_transform(
              original_transform_node.transform)
          if replacement_transform is original_transform_node.transform:
            # Override chose to keep the transform; nothing to do.
            return
          replacement_transform_node = AppliedPTransform(
              original_transform_node.parent, replacement_transform,
              original_transform_node.full_label,
              original_transform_node.inputs)
          # Transform execution could depend on order in which nodes are
          # considered. Hence we insert the replacement transform node to same
          # index as the original transform node. Note that this operation
          # removes the original transform node.
          if original_transform_node.parent:
            assert isinstance(original_transform_node.parent, AppliedPTransform)
            parent_parts = original_transform_node.parent.parts
            parent_parts[parent_parts.index(original_transform_node)] = (
                replacement_transform_node)
          else:
            # Original transform has to be a root.
            roots = self.pipeline.transforms_stack[0].parts
            assert original_transform_node in roots
            roots[roots.index(original_transform_node)] = (
                replacement_transform_node)
          inputs = replacement_transform_node.inputs
          # TODO: Support replacing PTransforms with multiple inputs.
          if len(inputs) > 1:
            # Fixed: format arguments must be a tuple; the original code
            # applied '%' to a single argument with two format specifiers,
            # which raised a TypeError instead of this error.
            raise NotImplementedError(
                'PTransform overriding is only supported for PTransforms that '
                'have a single input. Tried to replace input of '
                'AppliedPTransform %r that has %d inputs'
                % (original_transform_node, len(inputs)))
          elif len(inputs) == 1:
            input_node = inputs[0]
          elif len(inputs) == 0:
            # Fixed: PBegin expects the Pipeline, not this visitor instance.
            input_node = pvalue.PBegin(self.pipeline)
          # We have to add the new AppliedTransform to the stack before expand()
          # and pop it out later to make sure that parts get added correctly.
          self.pipeline.transforms_stack.append(replacement_transform_node)
          # Keeping the same label for the replaced node but recursively
          # removing labels of child transforms of original transform since they
          # will be replaced during the expand below. This is needed in case
          # the replacement contains children that have labels that conflicts
          # with labels of the children of the original.
          self.pipeline._remove_labels_recursively(original_transform_node)
          new_output = replacement_transform.expand(input_node)
          new_output.element_type = None
          self.pipeline._infer_result_type(replacement_transform, inputs,
                                           new_output)
          replacement_transform_node.add_output(new_output)
          if not new_output.producer:
            new_output.producer = replacement_transform_node
          # We only support replacing transforms with a single output with
          # another transform that produces a single output.
          # TODO: Support replacing PTransforms with multiple outputs.
          if (len(original_transform_node.outputs) > 1 or
              not isinstance(original_transform_node.outputs[None],
                             (PCollection, PDone)) or
              not isinstance(new_output, (PCollection, PDone))):
            raise NotImplementedError(
                'PTransform overriding is only supported for PTransforms that '
                'have a single output. Tried to replace output of '
                'AppliedPTransform %r with %r.'
                % (original_transform_node, new_output))
          # Recording updated outputs. This cannot be done in the same visitor
          # since if we dynamically update output type here, we'll run into
          # errors when visiting child nodes.
          output_map[original_transform_node.outputs[None]] = new_output
          self.pipeline.transforms_stack.pop()
      def enter_composite_transform(self, transform_node):
        self._replace_if_needed(transform_node)
      def visit_transform(self, transform_node):
        self._replace_if_needed(transform_node)
    self.visit(TransformUpdater(self))
    # Adjusting inputs and outputs
    class InputOutputUpdater(PipelineVisitor): # pylint: disable=used-before-assignment
      """A visitor that records input and output values to be replaced.
      Input and output values that should be updated are recorded in maps
      input_replacements and output_replacements respectively.
      We cannot update input and output values while visiting since that results
      in validation errors.
      """
      def __init__(self, pipeline):
        self.pipeline = pipeline
      def enter_composite_transform(self, transform_node):
        self.visit_transform(transform_node)
      def visit_transform(self, transform_node):
        if (None in transform_node.outputs and
            transform_node.outputs[None] in output_map):
          output_replacements[transform_node] = (
              output_map[transform_node.outputs[None]])
        replace_input = False
        for input in transform_node.inputs:
          if input in output_map:
            replace_input = True
            break
        replace_side_inputs = False
        for side_input in transform_node.side_inputs:
          if side_input.pvalue in output_map:
            replace_side_inputs = True
            break
        if replace_input:
          new_input = [
              input if input not in output_map else output_map[input]
              for input in transform_node.inputs]
          input_replacements[transform_node] = new_input
        if replace_side_inputs:
          new_side_inputs = []
          for side_input in transform_node.side_inputs:
            if side_input.pvalue in output_map:
              side_input.pvalue = output_map[side_input.pvalue]
              new_side_inputs.append(side_input)
            else:
              new_side_inputs.append(side_input)
          side_input_replacements[transform_node] = new_side_inputs
    self.visit(InputOutputUpdater(self))
    # Apply the recorded re-wirings now that visiting is done.
    for transform in output_replacements:
      transform.replace_output(output_replacements[transform])
    for transform in input_replacements:
      transform.inputs = input_replacements[transform]
    for transform in side_input_replacements:
      transform.side_inputs = side_input_replacements[transform]
  def _check_replacement(self, override):
    # Verify no node still matches the override after replacement.
    class ReplacementValidator(PipelineVisitor):
      def visit_transform(self, transform_node):
        if override.matches(transform_node):
          raise RuntimeError('Transform node %r was not replaced as expected.'
                             % transform_node)
    self.visit(ReplacementValidator())
  def replace_all(self, replacements):
    """ Dynamically replaces PTransforms in the currently populated hierarchy.
    Currently this only works for replacements where input and output types
    are exactly the same.
    TODO: Update this to also work for transform overrides where input and
    output types are different.
    Args:
      replacements (List[~apache_beam.pipeline.PTransformOverride]): a list of
        :class:`~apache_beam.pipeline.PTransformOverride` objects.
    """
    for override in replacements:
      assert isinstance(override, PTransformOverride)
      self._replace(override)
    # Checking if the PTransforms have been successfully replaced. This will
    # result in a failure if a PTransform that was replaced in a given override
    # gets re-added in a subsequent override. This is not allowed and ordering
    # of PTransformOverride objects in 'replacements' is important.
    for override in replacements:
      self._check_replacement(override)
  def run(self, test_runner_api=True):
    """Runs the pipeline. Returns whatever our runner returns after running."""
    # When possible, invoke a round trip through the runner API.
    if test_runner_api and self._verify_runner_api_compatible():
      return Pipeline.from_runner_api(
          self.to_runner_api(use_fake_coders=True),
          self.runner,
          self._options).run(False)
    if self._options.view_as(TypeOptions).runtime_type_check:
      from apache_beam.typehints import typecheck
      self.visit(typecheck.TypeCheckVisitor())
    if self._options.view_as(SetupOptions).save_main_session:
      # If this option is chosen, verify we can pickle the main session early.
      tmpdir = tempfile.mkdtemp()
      try:
        pickler.dump_session(os.path.join(tmpdir, 'main_session.pickle'))
      finally:
        shutil.rmtree(tmpdir)
    return self.runner.run_pipeline(self, self._options)
  def __enter__(self):
    return self
  def __exit__(self, exc_type, exc_val, exc_tb):
    # Only auto-run the pipeline when the `with` body finished cleanly.
    if not exc_type:
      self.run().wait_until_finish()
  def visit(self, visitor):
    """Visits depth-first every node of a pipeline's DAG.
    Runner-internal implementation detail; no backwards-compatibility guarantees
    Args:
      visitor (~apache_beam.pipeline.PipelineVisitor):
        :class:`~apache_beam.pipeline.PipelineVisitor` object whose callbacks
        will be called for each node visited. See
        :class:`~apache_beam.pipeline.PipelineVisitor` comments.
    Raises:
      ~exceptions.TypeError: if node is specified and is not a
        :class:`~apache_beam.pvalue.PValue`.
      ~apache_beam.error.PipelineError: if node is specified and does not
        belong to this pipeline instance.
    """
    visited = set()
    self._root_transform().visit(visitor, self, visited)
  def apply(self, transform, pvalueish=None, label=None):
    """Applies a custom transform using the pvalueish specified.
    Args:
      transform (~apache_beam.transforms.ptransform.PTransform): the
        :class:`~apache_beam.transforms.ptransform.PTransform` to apply.
      pvalueish (~apache_beam.pvalue.PCollection): the input for the
        :class:`~apache_beam.transforms.ptransform.PTransform` (typically a
        :class:`~apache_beam.pvalue.PCollection`).
      label (str): label of the
        :class:`~apache_beam.transforms.ptransform.PTransform`.
    Raises:
      ~exceptions.TypeError: if the transform object extracted from the
        argument list is not a
        :class:`~apache_beam.transforms.ptransform.PTransform`.
      ~exceptions.RuntimeError: if the transform object was already applied to
        this pipeline and needs to be cloned in order to apply again.
    """
    if isinstance(transform, ptransform._NamedPTransform):
      return self.apply(transform.transform, pvalueish,
                        label or transform.label)
    if not isinstance(transform, ptransform.PTransform):
      raise TypeError("Expected a PTransform object, got %s" % transform)
    if label:
      # Fix self.label as it is inspected by some PTransform operations
      # (e.g. to produce error messages for type hint violations).
      try:
        old_label, transform.label = transform.label, label
        return self.apply(transform, pvalueish)
      finally:
        transform.label = old_label
    full_label = '/'.join([self._current_transform().full_label,
                           label or transform.label]).lstrip('/')
    if full_label in self.applied_labels:
      raise RuntimeError(
          'Transform "%s" does not have a stable unique label. '
          'This will prevent updating of pipelines. '
          'To apply a transform with a specified label write '
          'pvalue | "label" >> transform'
          % full_label)
    self.applied_labels.add(full_label)
    pvalueish, inputs = transform._extract_input_pvalues(pvalueish)
    try:
      inputs = tuple(inputs)
      for leaf_input in inputs:
        if not isinstance(leaf_input, pvalue.PValue):
          raise TypeError
    except TypeError:
      raise NotImplementedError(
          'Unable to extract PValue inputs from %s; either %s does not accept '
          'inputs of this format, or it does not properly override '
          '_extract_input_pvalues' % (pvalueish, transform))
    current = AppliedPTransform(
        self._current_transform(), transform, full_label, inputs)
    self._current_transform().add_part(current)
    self.transforms_stack.append(current)
    type_options = self._options.view_as(TypeOptions)
    if type_options.pipeline_type_check:
      transform.type_check_inputs(pvalueish)
    pvalueish_result = self.runner.apply(transform, pvalueish, self._options)
    if type_options is not None and type_options.pipeline_type_check:
      transform.type_check_outputs(pvalueish_result)
    for result in ptransform.get_nested_pvalues(pvalueish_result):
      assert isinstance(result, (pvalue.PValue, pvalue.DoOutputsTuple))
      # Make sure we set the producer only for a leaf node in the transform DAG.
      # This way we preserve the last transform of a composite transform as
      # being the real producer of the result.
      if result.producer is None:
        result.producer = current
      self._infer_result_type(transform, inputs, result)
      assert isinstance(result.producer.inputs, tuple)
      current.add_output(result)
    if (type_options is not None and
        type_options.type_check_strictness == 'ALL_REQUIRED' and
        transform.get_type_hints().output_types is None):
      ptransform_name = '%s(%s)' % (transform.__class__.__name__, full_label)
      raise TypeCheckError('Pipeline type checking is enabled, however no '
                           'output type-hint was found for the '
                           'PTransform %s' % ptransform_name)
    self.transforms_stack.pop()
    return pvalueish_result
  def _infer_result_type(self, transform, inputs, result_pcollection):
    # Fill in result_pcollection.element_type from the transform's type
    # hints (binding type variables against the input element type) or,
    # failing that, from the transform's own output-type inference.
    # TODO(robertwb): Multi-input, multi-output inference.
    type_options = self._options.view_as(TypeOptions)
    if (type_options is not None and type_options.pipeline_type_check
        and isinstance(result_pcollection, pvalue.PCollection)
        and (not result_pcollection.element_type
             # TODO(robertwb): Ideally we'd do intersection here.
             or result_pcollection.element_type == typehints.Any)):
      input_element_type = (
          inputs[0].element_type
          if len(inputs) == 1
          else typehints.Any)
      type_hints = transform.get_type_hints()
      declared_output_type = type_hints.simple_output_type(transform.label)
      if declared_output_type:
        input_types = type_hints.input_types
        if input_types and input_types[0]:
          declared_input_type = input_types[0][0]
          result_pcollection.element_type = typehints.bind_type_variables(
              declared_output_type,
              typehints.match_type_variables(declared_input_type,
                                             input_element_type))
        else:
          result_pcollection.element_type = declared_output_type
      else:
        result_pcollection.element_type = transform.infer_output_type(
            input_element_type)
  def __reduce__(self):
    # Some transforms contain a reference to their enclosing pipeline,
    # which in turn reference all other transforms (resulting in quadratic
    # time/space to pickle each transform individually). As we don't
    # require pickled pipelines to be executable, break the chain here.
    return str, ('Pickled pipeline stub.',)
  def _verify_runner_api_compatible(self):
    # Returns True iff the pipeline can round-trip through the runner API
    # proto representation (all transforms picklable, no PDone values).
    if self._options.view_as(TypeOptions).runtime_type_check:
      # This option is incompatible with the runner API as it requires
      # the runner to inspect non-serialized hints on the transform
      # itself.
      return False
    class Visitor(PipelineVisitor):  # pylint: disable=used-before-assignment
      ok = True  # Really a nonlocal.
      def enter_composite_transform(self, transform_node):
        pass
      def visit_transform(self, transform_node):
        try:
          # Transforms must be picklable.
          pickler.loads(pickler.dumps(transform_node.transform,
                                      enable_trace=False),
                        enable_trace=False)
        except Exception:
          Visitor.ok = False
      def visit_value(self, value, _):
        if isinstance(value, pvalue.PDone):
          Visitor.ok = False
    self.visit(Visitor())
    return Visitor.ok
  def to_runner_api(
      self, return_context=False, context=None, use_fake_coders=False,
      default_environment=None):
    """For internal use only; no backwards-compatibility guarantees."""
    from apache_beam.runners import pipeline_context
    from apache_beam.portability.api import beam_runner_api_pb2
    if context is None:
      context = pipeline_context.PipelineContext(
          use_fake_coders=use_fake_coders,
          default_environment=default_environment)
    elif default_environment is not None:
      raise ValueError(
          'Only one of context or default_environment may be specified.')
    # The RunnerAPI spec requires certain transforms and side-inputs to have KV
    # inputs (and corresponding outputs).
    # Currently we only upgrade to KV pairs. If there is a need for more
    # general shapes, potential conflicts will have to be resolved.
    # We also only handle single-input, and (for fixing the output) single
    # output, which is sufficient.
    class ForceKvInputTypes(PipelineVisitor):
      def enter_composite_transform(self, transform_node):
        self.visit_transform(transform_node)
      def visit_transform(self, transform_node):
        if not transform_node.transform:
          return
        if transform_node.transform.runner_api_requires_keyed_input():
          pcoll = transform_node.inputs[0]
          pcoll.element_type = typehints.coerce_to_kv_type(
              pcoll.element_type, transform_node.full_label)
          if len(transform_node.outputs) == 1:
            # The runner often has expectations about the output types as well.
            output, = transform_node.outputs.values()
            if not output.element_type:
              output.element_type = transform_node.transform.infer_output_type(
                  pcoll.element_type
              )
        for side_input in transform_node.transform.side_inputs:
          if side_input.requires_keyed_input():
            side_input.pvalue.element_type = typehints.coerce_to_kv_type(
                side_input.pvalue.element_type, transform_node.full_label,
                side_input_producer=side_input.pvalue.producer.full_label)
    self.visit(ForceKvInputTypes())
    # Mutates context; placing inline would force dependence on
    # argument evaluation order.
    root_transform_id = context.transforms.get_id(self._root_transform())
    proto = beam_runner_api_pb2.Pipeline(
        root_transform_ids=[root_transform_id],
        components=context.to_runner_api())
    proto.components.transforms[root_transform_id].unique_name = (
        root_transform_id)
    if return_context:
      return proto, context
    else:
      return proto
  @staticmethod
  def from_runner_api(proto, runner, options, return_context=False,
                      allow_proto_holders=False):
    """For internal use only; no backwards-compatibility guarantees."""
    p = Pipeline(runner=runner, options=options)
    from apache_beam.runners import pipeline_context
    context = pipeline_context.PipelineContext(
        proto.components, allow_proto_holders=allow_proto_holders)
    root_transform_id, = proto.root_transform_ids
    p.transforms_stack = [
        context.transforms.get_by_id(root_transform_id)]
    # TODO(robertwb): These are only needed to continue construction. Omit?
    p.applied_labels = set([
        t.unique_name for t in proto.components.transforms.values()])
    for id in proto.components.pcollections:
      pcollection = context.pcollections.get_by_id(id)
      pcollection.pipeline = p
      if not pcollection.producer:
        raise ValueError('No producer for %s' % id)
    # Inject PBegin input where necessary.
    from apache_beam.io.iobase import Read
    from apache_beam.transforms.core import Create
    has_pbegin = [Read, Create]
    for id in proto.components.transforms:
      transform = context.transforms.get_by_id(id)
      if not transform.inputs and transform.transform.__class__ in has_pbegin:
        transform.inputs = (pvalue.PBegin(p),)
    if return_context:
      return p, context
    else:
      return p
class PipelineVisitor(object):
  """For internal use only; no backwards-compatibility guarantees.
  Visitor pattern class used to traverse a DAG of transforms
  (used internally by Pipeline for bookkeeping purposes).

  Subclasses override the callbacks they care about; the defaults are
  all no-ops, so a visitor only needs to implement what it uses.
  Traversal is driven by AppliedPTransform.visit().
  """
  def visit_value(self, value, producer_node):
    """Callback for visiting a PValue in the pipeline DAG.
    Args:
      value: PValue visited (typically a PCollection instance).
      producer_node: AppliedPTransform object whose transform produced the
        pvalue.
    """
    pass
  def visit_transform(self, transform_node):
    """Callback for visiting a transform leaf node in the pipeline DAG."""
    pass
  def enter_composite_transform(self, transform_node):
    """Callback for entering traversal of a composite transform node."""
    pass
  def leave_composite_transform(self, transform_node):
    """Callback for leaving traversal of a composite transform node."""
    pass
class AppliedPTransform(object):
  """For internal use only; no backwards-compatibility guarantees.
  A transform node representing an instance of applying a PTransform
  (used internally by Pipeline for bookkeeping purposes).
  """
  def __init__(self, parent, transform, full_label, inputs):
    # parent: enclosing AppliedPTransform (None for the root node).
    self.parent = parent
    # transform: the PTransform instance applied (None for the root node).
    self.transform = transform
    # Note that we want the PipelineVisitor classes to use the full_label,
    # inputs, side_inputs, and outputs fields from this instance instead of the
    # ones of the PTransform instance associated with it. Doing this permits
    # reusing PTransform instances in different contexts (apply() calls) without
    # any interference. This is particularly useful for composite transforms.
    self.full_label = full_label
    self.inputs = inputs or ()
    self.side_inputs = () if transform is None else tuple(transform.side_inputs)
    # outputs: maps output tag (None for the main output) to PValue.
    self.outputs = {}
    # parts: child AppliedPTransforms for a composite transform.
    self.parts = []
  def __repr__(self):
    return "%s(%s, %s)" % (self.__class__.__name__, self.full_label,
                           type(self.transform).__name__)
  def replace_output(self, output, tag=None):
    """Replaces the output defined by the given tag with the given output.
    Args:
      output: replacement output
      tag: tag of the output to be replaced.
    """
    if isinstance(output, pvalue.DoOutputsTuple):
      # Unwrap a multi-output tuple to its main output before replacing.
      self.replace_output(output[output._main_tag])
    elif isinstance(output, pvalue.PValue):
      self.outputs[tag] = output
    else:
      raise TypeError("Unexpected output type: %s" % output)
  def add_output(self, output, tag=None):
    # Registers an output PValue under the given tag; a DoOutputsTuple is
    # unwrapped to its main output first.
    if isinstance(output, pvalue.DoOutputsTuple):
      self.add_output(output[output._main_tag])
    elif isinstance(output, pvalue.PValue):
      # TODO(BEAM-1833): Require tags when calling this method.
      if tag is None and None in self.outputs:
        # The untagged slot is taken; synthesize a numeric tag instead.
        tag = len(self.outputs)
      assert tag not in self.outputs
      self.outputs[tag] = output
    else:
      raise TypeError("Unexpected output type: %s" % output)
  def add_part(self, part):
    # Appends a child node; only AppliedPTransforms may be parts.
    assert isinstance(part, AppliedPTransform)
    self.parts.append(part)
  def is_composite(self):
    """Returns whether this is a composite transform.
    A composite transform has parts (inner transforms) or isn't the
    producer for any of its outputs. (An example of a transform that
    is not a producer is one that returns its inputs instead.)
    """
    return bool(self.parts) or all(
        pval.producer is not self for pval in self.outputs.values())
  def visit(self, visitor, pipeline, visited):
    """Visits all nodes reachable from the current node."""
    # First ensure producers of our (main) inputs have been visited, so the
    # traversal reaches this node in topological order.
    for pval in self.inputs:
      if pval not in visited and not isinstance(pval, pvalue.PBegin):
        if pval.producer is not None:
          pval.producer.visit(visitor, pipeline, visited)
          # The value should be visited now since we visit outputs too.
          assert pval in visited, pval
    # Visit side inputs.
    for pval in self.side_inputs:
      if isinstance(pval, pvalue.AsSideInput) and pval.pvalue not in visited:
        pval = pval.pvalue  # Unpack marker-object-wrapped pvalue.
        if pval.producer is not None:
          pval.producer.visit(visitor, pipeline, visited)
          # The value should be visited now since we visit outputs too.
          assert pval in visited
        # TODO(silviuc): Is there a way to signal that we are visiting a side
        # value? The issue is that the same PValue can be reachable through
        # multiple paths and therefore it is not guaranteed that the value
        # will be visited as a side value.
    # Visit a composite or primitive transform.
    if self.is_composite():
      visitor.enter_composite_transform(self)
      for part in self.parts:
        part.visit(visitor, pipeline, visited)
      visitor.leave_composite_transform(self)
    else:
      visitor.visit_transform(self)
    # Visit the outputs (one or more). It is essential to mark as visited the
    # tagged PCollections of the DoOutputsTuple object. A tagged PCollection is
    # connected directly with its producer (a multi-output ParDo), but the
    # output of such a transform is the containing DoOutputsTuple, not the
    # PCollection inside it. Without the code below a tagged PCollection will
    # not be marked as visited while visiting its producer.
    for pval in self.outputs.values():
      if isinstance(pval, pvalue.DoOutputsTuple):
        pvals = (v for v in pval)
      else:
        pvals = (pval,)
      for v in pvals:
        if v not in visited:
          visited.add(v)
          visitor.visit_value(v, self)
  def named_inputs(self):
    # Returns {name: PCollection} covering main inputs (named by index) and
    # side inputs (named 'side0', 'side1', ...); see is_side_input() below.
    # TODO(BEAM-1833): Push names up into the sdk construction.
    main_inputs = {str(ix): input
                   for ix, input in enumerate(self.inputs)
                   if isinstance(input, pvalue.PCollection)}
    side_inputs = {'side%s' % ix: si.pvalue
                   for ix, si in enumerate(self.side_inputs)}
    return dict(main_inputs, **side_inputs)
  def named_outputs(self):
    # Returns {tag-as-string: PCollection}; non-PCollection outputs (e.g.
    # PDone) are omitted.
    return {str(tag): output for tag, output in self.outputs.items()
            if isinstance(output, pvalue.PCollection)}
  def to_runner_api(self, context):
    # Converts this node (and, via context ids, its subtree) to the runner
    # API PTransform proto.
    # External transforms require more splicing than just setting the spec.
    from apache_beam.transforms import external
    if isinstance(self.transform, external.ExternalTransform):
      return self.transform.to_runner_api_transform(context, self.full_label)
    from apache_beam.portability.api import beam_runner_api_pb2
    def transform_to_runner_api(transform, context):
      if transform is None:
        return None
      else:
        return transform.to_runner_api(context, has_parts=bool(self.parts))
    # Iterate over inputs and outputs by sorted key order, so that ids are
    # consistently generated for multiple runs of the same pipeline.
    return beam_runner_api_pb2.PTransform(
        unique_name=self.full_label,
        spec=transform_to_runner_api(self.transform, context),
        subtransforms=[context.transforms.get_id(part, label=part.full_label)
                       for part in self.parts],
        inputs={tag: context.pcollections.get_id(pc)
                for tag, pc in sorted(self.named_inputs().items())},
        outputs={str(tag): context.pcollections.get_id(out)
                 for tag, out in sorted(self.named_outputs().items())},
        # TODO(BEAM-115): display_data
        display_data=None)
  @staticmethod
  def from_runner_api(proto, context):
    # Reconstructs an AppliedPTransform (and re-links parents/parts/outputs)
    # from its runner API proto.
    def is_side_input(tag):
      # As per named_inputs() above.
      return tag.startswith('side')
    main_inputs = [context.pcollections.get_by_id(id)
                   for tag, id in proto.inputs.items()
                   if not is_side_input(tag)]
    # Ordering is important here.
    indexed_side_inputs = [(int(re.match('side([0-9]+)(-.*)?$', tag).group(1)),
                            context.pcollections.get_by_id(id))
                           for tag, id in proto.inputs.items()
                           if is_side_input(tag)]
    side_inputs = [si for _, si in sorted(indexed_side_inputs)]
    result = AppliedPTransform(
        parent=None,
        transform=ptransform.PTransform.from_runner_api(proto.spec, context),
        full_label=proto.unique_name,
        inputs=main_inputs)
    if result.transform and result.transform.side_inputs:
      # Re-bind the deserialized side-input markers to their PCollections.
      for si, pcoll in zip(result.transform.side_inputs, side_inputs):
        si.pvalue = pcoll
      result.side_inputs = tuple(result.transform.side_inputs)
    result.parts = []
    for transform_id in proto.subtransforms:
      part = context.transforms.get_by_id(transform_id)
      part.parent = result
      result.parts.append(part)
    result.outputs = {
        None if tag == 'None' else tag: context.pcollections.get_by_id(id)
        for tag, id in proto.outputs.items()}
    # This annotation is expected by some runners.
    if proto.spec.urn == common_urns.primitives.PAR_DO.urn:
      result.transform.output_tags = set(proto.outputs.keys()).difference(
          {'None'})
    if not result.parts:
      # Leaf transform: mark this node as the producer of its outputs,
      # except for pass-through outputs that are also inputs.
      for tag, pcoll_id in proto.outputs.items():
        if pcoll_id not in proto.inputs.values():
          pc = context.pcollections.get_by_id(pcoll_id)
          pc.producer = result
          pc.tag = None if tag == 'None' else tag
    return result
class PTransformOverride(with_metaclass(abc.ABCMeta, object)):
  """For internal use only; no backwards-compatibility guarantees.
  Gives a matcher and replacements for matching PTransforms.
  Used by Pipeline.replace_all() to let runners swap in runner-specific
  implementations of particular transforms.
  TODO: Update this to support cases where input and/or output types are
  different.
  """
  @abc.abstractmethod
  def matches(self, applied_ptransform):
    """Determines whether the given AppliedPTransform matches.
    Note that the matching will happen *after* Runner API proto translation.
    If matching is done via type checks, to/from_runner_api[_parameter] methods
    must be implemented to preserve the type (and other data) through proto
    serialization.
    Consider URN-based translation instead.
    Args:
      applied_ptransform: AppliedPTransform to be matched.
    Returns:
      a bool indicating whether the given AppliedPTransform is a match.
    """
    raise NotImplementedError
  @abc.abstractmethod
  def get_replacement_transform(self, ptransform):
    """Provides a runner specific override for a given PTransform.
    Args:
      ptransform: PTransform to be replaced.
    Returns:
      A PTransform that will be the replacement for the PTransform given as an
      argument.
    """
    # Returns a PTransformReplacement
    raise NotImplementedError
|
markflyhigh/incubator-beam
|
sdks/python/apache_beam/pipeline.py
|
Python
|
apache-2.0
| 39,747
|
[
"VisIt"
] |
7934395c3910852d0184e718056e3732004dea73265610f79eef3ba594c1db23
|
"""
# Notes:
- This simulation seeks to emulate the CUBA benchmark simulations of (Brette
et al. 2007) using the Brian2 simulator for speed benchmark comparison to
DynaSim. However, this simulation does NOT include synapses, for better
comparison to Figure 5 of (Goodman and Brette, 2008).
- The time taken to simulate will be indicated in the stdout log file
'~/batchdirs/brian_benchmark_CUBA_nosyn_1000/pbsout/brian_benchmark_CUBA_nosyn_1000.out'
- Note that this code has been slightly modified from the original (Brette et
al. 2007) benchmarking code, available here on ModelDB:
https://senselab.med.yale.edu/modeldb/showModel.cshtml?model=83319
in order to work with version 2 of the Brian simulator (aka Brian2), and also
modified to change the model being benchmarked, etc.
# References:
- Brette R, Rudolph M, Carnevale T, Hines M, Beeman D, Bower JM, et al.
Simulation of networks of spiking neurons: A review of tools and strategies.
Journal of Computational Neuroscience 2007;23:349–98.
doi:10.1007/s10827-007-0038-6.
- Goodman D, Brette R. Brian: a simulator for spiking neural networks in Python.
Frontiers in Neuroinformatics 2008;2. doi:10.3389/neuro.11.005.2008.
"""
from brian2 import *
# Parameters
cells = 1000
defaultclock.dt = 0.01*ms
taum=20*ms
Vt = -50*mV
Vr = -60*mV
El = -49*mV
# The model
eqs = Equations('''
dv/dt = ((v-El))/taum : volt
''')
P = NeuronGroup(cells, model=eqs,threshold="v>Vt",reset="v=Vr",refractory=5*ms,
method='euler')
proportion=int(0.8*cells)
Pe = P[:proportion]
Pi = P[proportion:]
# Initialization
P.v = Vr
# Record a few traces
trace = StateMonitor(P, 'v', record=[1, 10, 100])
totaldata = StateMonitor(P, 'v', record=True)
run(0.5 * second, report='text')
# plot(trace.t/ms, trace[1].v/mV)
# plot(trace.t/ms, trace[10].v/mV)
# plot(trace.t/ms, trace[100].v/mV)
# xlabel('t (ms)')
# ylabel('v (mV)')
# show()
# print("Saving TC cell voltages!")
# numpy.savetxt("foo_totaldata.csv", totaldata.v/mV, delimiter=",")
|
asoplata/dynasim-benchmark-brette-2007
|
output/Brian2/brian2_benchmark_CUBA_nosyn_1000/brian2_benchmark_CUBA_nosyn_1000.py
|
Python
|
gpl-3.0
| 2,006
|
[
"Brian"
] |
92085f37842bd4fd31ec6bfe69e1e7e96d75e52c7990d8261435b102ec293af4
|
#!/usr/bin/env python
"""Convert a relaxed-Phylip alignment to FASTA using Biopython.

Matt Gitzendanner, University of Florida.
"""
import argparse
import sys

from Bio import AlignIO
from Bio.Alphabet import IUPAC, Gapped

parser = argparse.ArgumentParser()
parser.add_argument("-i", help="input Phylip formatted file")
parser.add_argument("-o", help="output filename")
parser.add_argument("-a", help="Alphabet: dna or aa, default=dna", default="dna")
args = parser.parse_args()

infile = args.i
outfile = args.o
alphabet = args.a

# Map the -a flag to a Biopython alphabet up front; fail fast on anything
# else (the original silently produced no output for unknown values).
if alphabet == "dna":
    seq_alphabet = Gapped(IUPAC.ambiguous_dna)
elif alphabet == "aa":
    seq_alphabet = Gapped(IUPAC.protein)
else:
    sys.exit("Unknown alphabet %r: expected 'dna' or 'aa'" % alphabet)

# Exit on open failure; the original only printed a warning and then
# crashed with a NameError when the missing handle was used.
try:
    IN = open(infile, 'r')
except IOError as e:
    sys.exit("Can't open file %s (%s)" % (infile, e))
try:
    # Append mode preserved from the original, so repeated runs accumulate.
    OUT = open(outfile, 'a')
except IOError as e:
    sys.exit("Can't open file %s (%s)" % (outfile, e))

alignment = AlignIO.read(IN, "phylip-relaxed", alphabet=seq_alphabet)
AlignIO.write([alignment], OUT, "fasta")

# Close handles explicitly; the original leaked both.
IN.close()
OUT.close()
magitz/ToolBox
|
converters/phy_to_fasta.py
|
Python
|
mit
| 982
|
[
"Biopython"
] |
f9563d2aada6651caea3954a6a472245709709f6bb58bb0b26e666961b956145
|
###############################################################################
# lazyflow: data flow based lazy parallel computation framework
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
import os
import h5py
import numpy
import vigra
from lazyflow.graph import Graph
from lazyflow.operators import OpTrainRandomForestBlocked, OpPixelFeaturesPresmoothed
from lazyflow.operators.opBlockedSparseLabelArray import OpBlockedSparseLabelArray
import logging
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.INFO)
class TestOpTrainRandomForest(object):
    """Regression test: train a random forest from blocked sparse labels.

    NOTE(review): located under tests/broken/ -- per the inline comments it
    usually fails for certain feature/sigma selections (NaN/inf features).
    """

    def setUp(self):
        # Re-assert the log level in case another test changed it.
        rootLogger.setLevel(logging.INFO)
        pass

    def tearDown(self):
        pass

    def test(self):
        graph = Graph()
        testVolumePath = 'tinyfib_volume.h5'

        # Unzip the data if necessary
        if not os.path.exists(testVolumePath):
            zippedTestVolumePath = testVolumePath + ".gz"
            assert os.path.exists(zippedTestVolumePath)
            os.system("gzip -d " + zippedTestVolumePath)
            assert os.path.exists(testVolumePath)

        # Load the 5D (txyzc) volume plus its single-channel label image.
        f = h5py.File(testVolumePath, 'r')
        data = f['data'][...]
        data = data.view(vigra.VigraArray)
        data.axistags = vigra.defaultAxistags('txyzc')
        labels = f['labels'][...]
        assert data.shape[:-1] == labels.shape[:-1]
        assert labels.shape[-1] == 1
        assert len(data.shape) == 5
        f.close()

        scales = [0.3, 0.7, 1, 1.6, 3.5, 5.0, 10.0]
        featureIds = OpPixelFeaturesPresmoothed.DefaultFeatureIds

        # The following conditions cause this test to *usually* fail, but *sometimes* pass:
        # When using Structure Tensor EVs at sigma >= 3.5 (NaNs in feature matrix)
        # When using Gaussian Gradient Mag at sigma >= 3.5 (inf in feature matrix)
        # When using *any feature* at sigma == 10.0 (NaNs in feature matrix)
        # Rows correspond to featureIds, columns to the sigmas in `scales`:
        #                  sigma:    0.3    0.7    1.0    1.6    3.5    5.0   10.0
        selections = numpy.array( [[False, False, False, False, False, False, False],
                                   [False, False, False, False, False, False, False],
                                   [False, False, False, False, True, False, False], # ST EVs
                                   [False, False, False, False, False, False, False],
                                   [False, False, False, False, False, False, False], # GGM
                                   [False, False, False, False, False, False, False]] )

        # Feature computation operator, fed by the raw volume.
        opFeatures = OpPixelFeaturesPresmoothed(graph=graph)
        opFeatures.Input.setValue(data)
        opFeatures.Scales.setValue(scales)
        opFeatures.FeatureIds.setValue(featureIds)
        opFeatures.Matrix.setValue(selections)

        # Training operator, wired to the feature output.
        opTrain = OpTrainRandomForestBlocked(graph=graph)
        opTrain.Images.resize(1)
        opTrain.Images[0].connect(opFeatures.Output)
        opTrain.Labels.resize(1)
        opTrain.nonzeroLabelBlocks.resize(1)

        # This test only fails when this flag is True.
        use_sparse_label_storage = True
        if use_sparse_label_storage:
            opLabelArray = OpBlockedSparseLabelArray(graph=graph)
            opLabelArray.inputs["shape"].setValue(labels.shape)
            opLabelArray.inputs["blockShape"].setValue((1, 32, 32, 32, 1))
            # Label value that erases previously stored labels.
            opLabelArray.inputs["eraser"].setValue(100)
            opTrain.nonzeroLabelBlocks[0].connect(opLabelArray.nonzeroBlocks)

            # Slice the label data into the sparse array storage
            opLabelArray.Input[...] = labels[...]
            opTrain.Labels[0].connect(opLabelArray.Output)
        else:
            # Skip the sparse storage operator and provide labels as one big block
            opTrain.Labels[0].setValue(labels)
            # One big block
            opTrain.nonzeroLabelBlocks.resize(1)
            opTrain.nonzeroLabelBlocks[0].setValue( [[slice(None, None, None)] * 5] )

        # Sanity check: Make sure we configured the training operator correctly.
        readySlots = [ slot.ready() for slot in opTrain.inputs.values() ]
        assert all(readySlots)

        # Generate the classifier (pulling the value triggers the training).
        classifier = opTrain.Classifier.value
if __name__ == "__main__":
import sys
import nose
sys.argv.append("--nocapture") # Don't steal stdout. Show it on the console as usual.
sys.argv.append("--nologcapture") # Don't set the logging level to DEBUG. Leave it alone.
ret = nose.run(defaultTest=__file__)
if not ret: sys.exit(1)
|
stuarteberg/lazyflow
|
tests/broken/testOpTrainRandomForest.py
|
Python
|
lgpl-3.0
| 5,487
|
[
"Gaussian"
] |
5762510fb618c92b3304ce11647be2e3c260177764ca63211948d83c013c8452
|
#!/usr/bin/env python
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Bio.Wise contains modules for running and processing the output of
# some of the models in the Wise2 package by Ewan Birney available from:
# ftp://ftp.ebi.ac.uk/pub/software/unix/wise2/
# http://www.ebi.ac.uk/Wise2/
#
# Bio.Wise.psw is for protein Smith-Waterman alignments
# Bio.Wise.dnal is for Smith-Waterman DNA alignments
__version__ = "$Revision: 1.12 $"
import commands
import itertools
import os
import re
from Bio import Wise
_SCORE_MATCH = 4
_SCORE_MISMATCH = -1
_SCORE_GAP_START = -5
_SCORE_GAP_EXTENSION = -1
_CMDLINE_DNAL = ["dnal", "-alb", "-nopretty"]
def _build_dnal_cmdline(match, mismatch, gap, extension):
res = _CMDLINE_DNAL[:]
res.extend(["-match", str(match)])
res.extend(["-mis", str(mismatch)])
res.extend(["-gap", str(-gap)]) # negative: convert score to penalty
res.extend(["-ext", str(-extension)]) # negative: convert score to penalty
return res
# Shell template: count lines in <file> that contain the literal <pattern>.
_CMDLINE_FGREP_COUNT = "fgrep -c '%s' %s"


def _fgrep_count(pattern, file):
    # Count occurrences of *pattern* in *file* by shelling out to fgrep.
    # NOTE(review): the `commands` module is Python-2 only (removed in
    # Python 3; subprocess is its replacement), and pattern/file are
    # interpolated into a shell string unescaped -- a shell-injection hazard
    # if either value ever comes from untrusted input.
    return int(commands.getoutput(_CMDLINE_FGREP_COUNT % (pattern, file)))
_re_alb_line2coords = re.compile(r"^\[([^:]+):[^\[]+\[([^:]+):")
def _alb_line2coords(line):
return tuple([int(coord)+1 # one-based -> zero-based
for coord
in _re_alb_line2coords.match(line).groups()])
def _get_coords(filename):
    # Scan an ALB report for the first and last coordinate lines (those
    # starting with "[") and return the paired start/end coordinates.
    # NOTE(review): `file()` is the Python-2-only builtin (use open() on
    # Python 3), the handle is never closed, and under Python 2 zip()
    # returns a list as the comment below expects.
    alb = file(filename)
    start_line = None
    end_line = None

    for line in alb:
        if line.startswith("["):
            if not start_line:
                start_line = line  # rstrip not needed
            else:
                end_line = line

    if end_line is None:  # sequence is too short
        return [(0, 0), (0, 0)]

    return zip(*map(_alb_line2coords, [start_line, end_line]))  # returns [(start0, end0), (start1, end1)]
def _any(seq, pred=bool):
    "Returns True if pred(x) is True for at least one element in the iterable"
    # Python-2 helper; itertools.imap was removed in Python 3, where the
    # builtin any() with a generator expression replaces this.
    return True in itertools.imap(pred, seq)
class Statistics(object):
    """
    Calculate statistics from an ALB report
    """

    def __init__(self, filename, match, mismatch, gap, extension):
        """Count alignment events in *filename* and compute the score.

        match/mismatch/gap/extension are the scoring parameters passed to
        dnal; event counts are recovered by grepping the ALB report for the
        corresponding score values.
        """
        self.matches = _fgrep_count('"SEQUENCE" %s' % match, filename)
        self.mismatches = _fgrep_count('"SEQUENCE" %s' % mismatch, filename)
        self.gaps = _fgrep_count('"INSERT" %s' % gap, filename)

        # When the gap and extension scores coincide, the two events are
        # indistinguishable in the report, so extensions are folded into gaps.
        if gap == extension:
            self.extensions = 0
        else:
            self.extensions = _fgrep_count('"INSERT" %s' % extension, filename)

        self.score = (match * self.matches +
                      mismatch * self.mismatches +
                      gap * self.gaps +
                      extension * self.extensions)

        # Coordinates are only meaningful when at least one event occurred.
        if _any([self.matches, self.mismatches, self.gaps, self.extensions]):
            self.coords = _get_coords(filename)
        else:
            self.coords = [(0, 0), (0, 0)]

    def identity_fraction(self):
        """Fraction of aligned columns that are matches.

        Uses true division; the original expression truncated to 0 or 1
        under Python 2's integer division.

        Raises ZeroDivisionError when there are no matches or mismatches.
        """
        return float(self.matches) / (self.matches + self.mismatches)

    header = "identity_fraction\tmatches\tmismatches\tgaps\textensions"

    def __str__(self):
        # Tab-separated row matching the `header` attribute above.
        return "\t".join([str(x) for x in (self.identity_fraction(), self.matches, self.mismatches, self.gaps, self.extensions)])
def align(pair, match=_SCORE_MATCH, mismatch=_SCORE_MISMATCH, gap=_SCORE_GAP_START, extension=_SCORE_GAP_EXTENSION, **keywds):
    # Run dnal on a *pair* of sequence files and return a Statistics object
    # built from the resulting ALB report. Extra keyword arguments are
    # forwarded to Wise.align (e.g. dry_run).
    cmdline = _build_dnal_cmdline(match, mismatch, gap, extension)
    temp_file = Wise.align(cmdline, pair, **keywds)
    try:
        return Statistics(temp_file.name, match, mismatch, gap, extension)
    except AttributeError:
        # NOTE(review): presumably Wise.align returns None on a dry run, so
        # temp_file.name raises AttributeError. If 'dry_run' was passed we
        # return None; otherwise the keywds lookup raises KeyError, which is
        # re-raised here (masking the original AttributeError) -- confirm
        # that is the intended failure signal.
        try:
            keywds['dry_run']
            return None
        except KeyError:
            raise
def main():
    # Command-line entry point: align the two sequence files given as the
    # first two positional arguments and print the resulting statistics.
    # NOTE: Python-2 print statements; this module predates Python 3.
    import sys
    stats = align(sys.argv[1:3])
    print "\n".join(["%s: %s" % (attr, getattr(stats, attr))
                     for attr in
                     ("matches", "mismatches", "gaps", "extensions")])
    print "identity_fraction: %s" % stats.identity_fraction()
    print "coords: %s" % stats.coords
def _test(*args, **keywds):
import doctest, sys
doctest.testmod(sys.modules[__name__], *args, **keywds)
if __name__ == "__main__":
if __debug__:
_test()
main()
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/Wise/dnal.py
|
Python
|
gpl-2.0
| 4,323
|
[
"Biopython"
] |
8e18e52ff1e2b869a137f9760f08c32d1196e16cc4094f18a389a66b65b9f04c
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles directives.
This converter removes the directive functions from the code and moves the
information they specify into AST annotations. It is a specialized form of
static analysis, one that is specific to AutoGraph.
Note that this requires that the actual directive functions are static - that
is, they do not change at runtime. So if you do something like this:
tf.autograph.set_loop_options = <new function>
Then the directive may no longer be recognized. Furthermore, if the
converted function is cached, such an action may be irreversible.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.lang import directives
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.util import tf_inspect
ENCLOSING_LOOP = 'enclosing_loop'
STATIC_VALUE = 'static_value'
"""Used for AST annotations, see visit_Name."""
def _map_args(call_node, function):
  """Maps AST call nodes to the actual function's arguments.

  Args:
    call_node: ast.Call
    function: Callable[..., Any], the actual function matching call_node

  Returns:
    Dict[Text, ast.AST], mapping each of the function's argument names to
    the respective AST node.

  Raises:
    ValueError: if the default arguments are not correctly set
  """
  args = call_node.args
  kwds = {kwd.arg: kwd.value for kwd in call_node.keywords}
  call_args = tf_inspect.getcallargs(function, *args, **kwds)

  # Keyword arguments not specified in kwds will be mapped to their defaults,
  # which are Python values. Since we don't currently have a way to transform
  # those into AST references, we simply remove them. By convention, directives
  # use UNSPECIFIED as default value for optional arguments. No other
  # defaults should be present.
  unexpected_defaults = []
  for k in call_args:
    if (k not in kwds
        and call_args[k] not in args
        and call_args[k] is not directives.UNSPECIFIED):
      unexpected_defaults.append(k)
  if unexpected_defaults:
    # Materialize the pairs into a list so the message shows the offending
    # (name, value) tuples under Python 3 rather than a '<zip object ...>'
    # repr (zip is lazy in Python 3).
    raise ValueError('Unexpected keyword argument values, %s, for function %s'
                     % (list(zip(unexpected_defaults,
                                 [call_args[k] for k in unexpected_defaults])),
                        function))
  return {k: v for k, v in call_args.items() if v is not directives.UNSPECIFIED}
class DirectivesTransformer(converter.Base):
  """Parses compiler directives and converts them into AST annotations."""

  def _process_symbol_directive(self, call_node, directive):
    # Symbol-targeted directives (e.g. set_element_type) attach their mapped
    # arguments to each original definition of the target symbol.
    if len(call_node.args) < 1:
      raise ValueError('"%s" requires a positional first argument'
                       ' as the target' % directive.__name__)
    target = call_node.args[0]
    defs = anno.getanno(target, anno.Static.ORIG_DEFINITIONS)
    for def_ in defs:
      def_.directives[directive] = _map_args(call_node, directive)
    return call_node

  def _process_statement_directive(self, call_node, directive):
    # Statement-level directives (e.g. set_loop_options) annotate the
    # innermost enclosing loop, which is tracked via the local scope stack.
    if self.local_scope_level < 1:
      raise ValueError(
          '"%s" must be used inside a statement' % directive.__name__)
    target = self.get_local(ENCLOSING_LOOP)
    node_anno = anno.getanno(target, converter.AgAnno.DIRECTIVES, {})
    node_anno[directive] = _map_args(call_node, directive)
    anno.setanno(target, converter.AgAnno.DIRECTIVES, node_anno)
    return call_node

  def visit_Name(self, node):
    node = self.generic_visit(node)
    if isinstance(node.ctx, gast.Load):
      defs = anno.getanno(node, anno.Static.DEFINITIONS, ())
      is_defined = bool(defs)
      # Names with no local definition are resolved against the function's
      # namespace; their static Python value is recorded so directive calls
      # can be recognized later in visit_Expr.
      if not is_defined and node.id in self.ctx.info.namespace:
        anno.setanno(node, STATIC_VALUE, self.ctx.info.namespace[node.id])
    return node

  def visit_Attribute(self, node):
    node = self.generic_visit(node)
    # Propagate static values through attribute chains, e.g. resolving
    # `tf.autograph.set_loop_options` one attribute at a time.
    parent_val = anno.getanno(node.value, STATIC_VALUE, default=None)
    if parent_val is not None and hasattr(parent_val, node.attr):
      anno.setanno(node, STATIC_VALUE, getattr(parent_val, node.attr))
    return node

  def visit_Expr(self, node):
    node = self.generic_visit(node)
    if isinstance(node.value, gast.Call):
      call_node = node.value
      static_val = anno.getanno(call_node.func, STATIC_VALUE, default=None)
      if static_val is not None:
        # Note: directive calls are not output in the generated code, hence
        # the removal from the code by returning None.
        if static_val is directives.set_element_type:
          self._process_symbol_directive(call_node, static_val)
          return None
        elif static_val is directives.set_loop_options:
          self._process_statement_directive(call_node, static_val)
          return None
    return node

  # TODO(mdan): This will be insufficient for other control flow.
  # That means that if we ever have a directive that affects things other than
  # loops, we'll need support for parallel scopes, or have multiple converters.
  def _track_and_visit_loop(self, node):
    # Record the loop node in a fresh local scope so nested statement
    # directives can look up their enclosing loop.
    self.enter_local_scope()
    self.set_local(ENCLOSING_LOOP, node)
    node = self.generic_visit(node)
    self.exit_local_scope()
    return node

  def visit_While(self, node):
    return self._track_and_visit_loop(node)

  def visit_For(self, node):
    return self._track_and_visit_loop(node)
def transform(node, ctx):
  """Rewrite directive calls in `node` into AST annotations."""
  transformer = DirectivesTransformer(ctx)
  return transformer.visit(node)
|
kevin-coder/tensorflow-fork
|
tensorflow/python/autograph/converters/directives.py
|
Python
|
apache-2.0
| 6,115
|
[
"VisIt"
] |
fcf7783091e429d3efc2617f285bf242fd380d098cf35830d066446d79395e8f
|
from .gaussian import *
from .voigt import *
|
adrn/GaiaPairsFollowup
|
comoving_rv/longslit/fitting/__init__.py
|
Python
|
mit
| 45
|
[
"Gaussian"
] |
b4e058bc5f1f943fbd1b797d3f564e1f99bf3ea1e687d71977128b807b59f7f5
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements functions to perform various useful operations on
entries, such as grouping entries by structure.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Feb 24, 2012"
import logging
import json
import datetime
import collections
import itertools
import csv
import re
from typing import List, Union, Iterable, Set
from pymatgen.core.periodic_table import Element
from pymatgen.core.composition import Composition
from pymatgen.analysis.phase_diagram import PDEntry
from pymatgen.entries.computed_entries import ComputedEntry, ComputedStructureEntry
from monty.json import MontyEncoder, MontyDecoder, MSONable
from monty.string import unicode2str
from pymatgen.analysis.structure_matcher import StructureMatcher, \
SpeciesComparator
logger = logging.getLogger(__name__)
def _get_host(structure, species_to_remove):
if species_to_remove:
s = structure.copy()
s.remove_species(species_to_remove)
return s
else:
return structure
def _perform_grouping(args):
    # Worker for group_entries_by_structure: group one bucket of entries by
    # structural similarity. Entries and hosts arrive JSON-serialized so this
    # function can run under multiprocessing (only primitives cross process
    # boundaries); matched groups are appended to `groups` (a plain list or a
    # multiprocessing.Manager list proxy) as JSON strings.
    (entries_json, hosts_json, ltol, stol, angle_tol,
     primitive_cell, scale, comparator, groups) = args

    entries = json.loads(entries_json, cls=MontyDecoder)
    hosts = json.loads(hosts_json, cls=MontyDecoder)
    unmatched = list(zip(entries, hosts))
    # Greedy grouping: take the first unmatched entry as reference, collect
    # everything that fits it, remove the group, repeat.
    while len(unmatched) > 0:
        ref_host = unmatched[0][1]
        logger.info(
            "Reference tid = {}, formula = {}".format(unmatched[0][0].entry_id,
                                                      ref_host.formula)
        )
        ref_formula = ref_host.composition.reduced_formula
        logger.info("Reference host = {}".format(ref_formula))
        matches = [unmatched[0]]
        for i in range(1, len(unmatched)):
            test_host = unmatched[i][1]
            logger.info("Testing tid = {}, formula = {}"
                        .format(unmatched[i][0].entry_id, test_host.formula))
            test_formula = test_host.composition.reduced_formula
            logger.info("Test host = {}".format(test_formula))
            m = StructureMatcher(ltol=ltol, stol=stol, angle_tol=angle_tol,
                                 primitive_cell=primitive_cell, scale=scale,
                                 comparator=comparator)
            if m.fit(ref_host, test_host):
                logger.info("Fit found")
                matches.append(unmatched[i])
        # NOTE(review): `m` is reused here as the loop variable, shadowing the
        # StructureMatcher above -- harmless but confusing.
        groups.append(json.dumps([m[0] for m in matches], cls=MontyEncoder))
        unmatched = list(filter(lambda x: x not in matches, unmatched))
        logger.info("{} unmatched remaining".format(len(unmatched)))
def group_entries_by_structure(entries, species_to_remove=None,
                               ltol=0.2, stol=.4, angle_tol=5,
                               primitive_cell=True, scale=True,
                               comparator=SpeciesComparator(),
                               ncpus=None):
    """
    Given a sequence of ComputedStructureEntries, use structure fitter to group
    them by structural similarity.

    Args:
        entries: Sequence of ComputedStructureEntries.
        species_to_remove: Sometimes you want to compare a host framework
            (e.g., in Li-ion battery analysis). This allows you to specify
            species to remove before structural comparison.
        ltol (float): Fractional length tolerance. Default is 0.2.
        stol (float): Site tolerance in Angstrom. Default is 0.4 Angstrom.
        angle_tol (float): Angle tolerance in degrees. Default is 5 degrees.
        primitive_cell (bool): If true: input structures will be reduced to
            primitive cells prior to matching. Defaults to True.
        scale: Input structures are scaled to equivalent volume if true;
            For exact matching, set to False.
        comparator: A comparator object implementing an equals method that
            declares equivalency of sites. Default is SpeciesComparator,
            which implies rigid species mapping.
        ncpus: Number of cpus to use. Use of multiple cpus can greatly improve
            fitting speed. Default of None means serial processing.

    Returns:
        Sequence of sequence of entries by structural similarity. e.g,
        [[ entry1, entry2], [entry3, entry4, entry5]]
    """
    # NOTE(review): the SpeciesComparator() default is created once at import
    # time and shared across calls -- fine only if comparators are stateless.
    start = datetime.datetime.now()
    logger.info("Started at {}".format(start))
    entries_host = [(entry, _get_host(entry.structure, species_to_remove))
                    for entry in entries]
    if ncpus:
        # Pre-bucket entries by structure hash so only plausibly-matching
        # entries are compared, and each bucket can run in its own process.
        symm_entries = collections.defaultdict(list)
        for entry, host in entries_host:
            symm_entries[comparator.get_structure_hash(host)].append((entry,
                                                                      host))
        import multiprocessing as mp
        logging.info("Using {} cpus".format(ncpus))
        manager = mp.Manager()
        groups = manager.list()
        p = mp.Pool(ncpus)
        # Parallel processing only supports Python primitives and not objects.
        p.map(_perform_grouping,
              [(json.dumps([e[0] for e in eh], cls=MontyEncoder),
                json.dumps([e[1] for e in eh], cls=MontyEncoder),
                ltol, stol, angle_tol, primitive_cell, scale,
                comparator, groups)
               for eh in symm_entries.values()])
    else:
        # Serial path: one grouping pass over all entries.
        groups = []
        hosts = [host for entry, host in entries_host]
        _perform_grouping((json.dumps(entries, cls=MontyEncoder),
                           json.dumps(hosts, cls=MontyEncoder),
                           ltol, stol, angle_tol, primitive_cell, scale,
                           comparator, groups))
    # Deserialize each JSON-encoded group back into entry objects.
    entry_groups = []
    for g in groups:
        entry_groups.append(json.loads(g, cls=MontyDecoder))
    logging.info("Finished at {}".format(datetime.datetime.now()))
    logging.info("Took {}".format(datetime.datetime.now() - start))
    return entry_groups
class EntrySet(collections.abc.MutableSet, MSONable):
    """
    A convenient container for manipulating entries. Allows for generating
    subsets, dumping into files, etc.
    """

    def __init__(self, entries: Iterable[Union[PDEntry, ComputedEntry, ComputedStructureEntry]]):
        """
        Args:
            entries: All the entries.
        """
        # Stored as a set, so entries must be hashable and duplicates collapse.
        self.entries = set(entries)

    def __contains__(self, item):
        return item in self.entries

    def __iter__(self):
        return self.entries.__iter__()

    def __len__(self):
        return len(self.entries)

    def add(self, element):
        """
        Add an entry.

        :param element: Entry
        """
        self.entries.add(element)

    def discard(self, element):
        """
        Discard an entry.

        :param element: Entry
        """
        self.entries.discard(element)

    @property
    def chemsys(self) -> set:
        """
        Returns:
            set representing the chemical system, e.g., {"Li", "Fe", "P", "O"}
        """
        chemsys = set()
        for e in self.entries:
            chemsys.update([el.symbol for el in e.composition.keys()])
        return chemsys

    def remove_non_ground_states(self):
        """
        Removes all non-ground state entries, i.e., only keep the lowest energy
        per atom entry at each composition.
        """
        # groupby requires its input sorted by the same key function.
        entries = sorted(self.entries, key=lambda e: e.composition.reduced_formula)
        ground_states = set()
        for _, g in itertools.groupby(entries, key=lambda e: e.composition.reduced_formula):
            ground_states.add(min(g, key=lambda e: e.energy_per_atom))
        self.entries = ground_states

    def get_subset_in_chemsys(self, chemsys: List[str]):
        """
        Returns an EntrySet containing only the set of entries belonging to
        a particular chemical system (in this definition, it includes all sub
        systems). For example, if the entries are from the
        Li-Fe-P-O system, and chemsys=["Li", "O"], only the Li, O,
        and Li-O entries are returned.

        Args:
            chemsys: Chemical system specified as list of elements. E.g.,
                ["Li", "O"]

        Returns:
            EntrySet
        """
        chem_sys = set(chemsys)
        if not chem_sys.issubset(self.chemsys):
            raise ValueError("%s is not a subset of %s" % (chem_sys,
                                                           self.chemsys))
        subset = set()
        for e in self.entries:
            elements = [sp.symbol for sp in e.composition.keys()]
            if chem_sys.issuperset(elements):
                subset.add(e)
        return EntrySet(subset)

    def as_dict(self):
        """
        :return: MSONable dict
        """
        # NOTE(review): no @module/@class keys are included, unlike the usual
        # MSONable convention -- confirm round-trip deserialization is not
        # expected here.
        return {
            "entries": list(self.entries)
        }

    def to_csv(self, filename: str, latexify_names: bool = False):
        """
        Exports PDEntries to a csv

        Args:
            filename: Filename to write to.
            entries: PDEntries to export.
            latexify_names: Format entry names to be LaTex compatible,
                e.g., Li_{2}O
        """
        els = set()  # type: Set[Element]
        for entry in self.entries:
            els.update(entry.composition.elements)
        # Columns ordered by electronegativity (Element.X).
        elements = sorted(list(els), key=lambda a: a.X)
        # NOTE(review): the file handle opened inline here is never explicitly
        # closed; a `with open(...)` block would be safer.
        writer = csv.writer(open(filename, "w"), delimiter=unicode2str(","),
                            quotechar=unicode2str("\""),
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerow(["Name"] + [el.symbol for el in elements] + ["Energy"])
        for entry in self.entries:
            row = [entry.name if not latexify_names
                   else re.sub(r"([0-9]+)", r"_{\1}", entry.name)]
            row.extend([entry.composition[el] for el in elements])
            row.append(str(entry.energy))
            writer.writerow(row)

    @classmethod
    def from_csv(cls, filename: str):
        """
        Imports PDEntries from a csv.

        Args:
            filename: Filename to import from.

        Returns:
            List of Elements, List of PDEntries
        """
        with open(filename, "r", encoding="utf-8") as f:
            reader = csv.reader(f, delimiter=unicode2str(","),
                                quotechar=unicode2str("\""),
                                quoting=csv.QUOTE_MINIMAL)
            entries = list()
            header_read = False
            elements = []  # type: List[str]
            for row in reader:
                if not header_read:
                    # Header row: "Name", element symbols ..., "Energy".
                    elements = row[1:(len(row) - 1)]
                    header_read = True
                else:
                    name = row[0]
                    energy = float(row[-1])
                    comp = dict()
                    for ind in range(1, len(row) - 1):
                        if float(row[ind]) > 0:
                            comp[Element(elements[ind - 1])] = float(row[ind])
                    entries.append(PDEntry(Composition(comp), energy, name))
        return cls(entries)
|
gVallverdu/pymatgen
|
pymatgen/entries/entry_tools.py
|
Python
|
mit
| 11,247
|
[
"pymatgen"
] |
b97fd253449ba3e522788c697150374f45f3a57b957d791c2e3b016ca8e2db2b
|
# -*- coding: utf-8 -*-
# Copyright (c) 2006-2011, 2013-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2011-2014 Google, Inc.
# Copyright (c) 2012 Tim Hatch <tim@timhatch.com>
# Copyright (c) 2013-2018 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Rene Zhang <rz99@cornell.edu>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Steven Myint <hg@stevenmyint.com>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016 Erik <erik.eriksson@yahoo.com>
# Copyright (c) 2016 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2017 Martin von Gagern <gagern@google.com>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Alexander Todorov <atodorov@otb.bg>
# Copyright (c) 2018 Ville Skyttä <ville.skytta@upcloud.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Checks for various exception related errors."""
import builtins
import inspect
import sys
import typing
import astroid
from pylint import checkers
from pylint.checkers import utils
from pylint import interfaces
def _builtin_exceptions():
def predicate(obj):
return isinstance(obj, type) and issubclass(obj, BaseException)
members = inspect.getmembers(builtins, predicate)
return {exc.__name__ for (_, exc) in members}
def _annotated_unpack_infer(stmt, context=None):
    """Yield ``(original node, inferred node)`` pairs for *stmt*.

    When *stmt* is a literal list or tuple, each element is inferred
    individually; otherwise the statement itself is inferred. Nodes whose
    inference fails (``Uninferable``) are skipped.
    """
    if isinstance(stmt, (astroid.List, astroid.Tuple)):
        for element in stmt.elts:
            value = utils.safe_infer(element)
            if value and value is not astroid.Uninferable:
                yield element, value
    else:
        for value in stmt.infer(context):
            if value is not astroid.Uninferable:
                yield stmt, value
def _is_raising(body: typing.List) -> bool:
"""Return true if the given statement node raise an exception"""
for node in body:
if isinstance(node, astroid.Raise):
return True
return False
PY3K = sys.version_info >= (3, 0)
OVERGENERAL_EXCEPTIONS = ("Exception",)
BUILTINS_NAME = builtins.__name__
MSGS = {
"E0701": (
"Bad except clauses order (%s)",
"bad-except-order",
"Used when except clauses are not in the correct order (from the "
"more specific to the more generic). If you don't fix the order, "
"some exceptions may not be caught by the most specific handler.",
),
"E0702": (
"Raising %s while only classes or instances are allowed",
"raising-bad-type",
"Used when something which is neither a class, an instance or a "
"string is raised (i.e. a `TypeError` will be raised).",
),
"E0703": (
"Exception context set to something which is not an " "exception, nor None",
"bad-exception-context",
'Used when using the syntax "raise ... from ...", '
"where the exception context is not an exception, "
"nor None.",
),
"E0704": (
"The raise statement is not inside an except clause",
"misplaced-bare-raise",
"Used when a bare raise is not used inside an except clause. "
"This generates an error, since there are no active exceptions "
"to be reraised. An exception to this rule is represented by "
"a bare raise inside a finally clause, which might work, as long "
"as an exception is raised inside the try block, but it is "
"nevertheless a code smell that must not be relied upon.",
),
"E0710": (
"Raising a new style class which doesn't inherit from BaseException",
"raising-non-exception",
"Used when a new style class which doesn't inherit from "
"BaseException is raised.",
),
"E0711": (
"NotImplemented raised - should raise NotImplementedError",
"notimplemented-raised",
"Used when NotImplemented is raised instead of " "NotImplementedError",
),
"E0712": (
"Catching an exception which doesn't inherit from Exception: %s",
"catching-non-exception",
"Used when a class which doesn't inherit from "
"Exception is used as an exception in an except clause.",
),
"W0702": (
"No exception type(s) specified",
"bare-except",
"Used when an except clause doesn't specify exceptions type to " "catch.",
),
"W0703": (
"Catching too general exception %s",
"broad-except",
"Used when an except catches a too general exception, "
"possibly burying unrelated errors.",
),
"W0705": (
"Catching previously caught exception type %s",
"duplicate-except",
"Used when an except catches a type that was already caught by "
"a previous handler.",
),
"W0706": (
"The except handler raises immediately",
"try-except-raise",
"Used when an except handler uses raise as its first or only "
"operator. This is useless because it raises back the exception "
"immediately. Remove the raise operator or the entire "
"try-except-raise block!",
),
"W0711": (
'Exception to catch is the result of a binary "%s" operation',
"binary-op-exception",
"Used when the exception to catch is of the form "
'"except A or B:". If intending to catch multiple, '
'rewrite as "except (A, B):"',
),
"W0715": (
"Exception arguments suggest string formatting might be intended",
"raising-format-tuple",
"Used when passing multiple arguments to an exception "
"constructor, the first of them a string literal containing what "
"appears to be placeholders intended for formatting",
),
}
class BaseVisitor:
    """Base class for visitors defined in this module.

    ``visit`` dispatches each node to a ``visit_<nodeclassname>`` method
    when one exists, falling back to :meth:`visit_default` otherwise.
    """

    def __init__(self, checker, node):
        self._checker = checker
        self._node = node

    def visit(self, node):
        """Dispatch *node* to the matching ``visit_*`` handler."""
        handler_name = "visit_" + node.__class__.__name__.lower()
        handler = getattr(self, handler_name, self.visit_default)
        handler(node)

    def visit_default(self, node):  # pylint: disable=unused-argument
        """Default implementation for all the nodes."""
class ExceptionRaiseRefVisitor(BaseVisitor):
    """Visit references (anything that is not an AST leaf)."""

    def visit_name(self, name):
        """Flag ``raise NotImplemented`` (should be NotImplementedError)."""
        if name.name == "NotImplemented":
            self._checker.add_message("notimplemented-raised", node=self._node)

    def visit_call(self, call):
        """Check a call expression used as the raised value."""
        if isinstance(call.func, astroid.Name):
            self.visit_name(call.func)
        if len(call.args) <= 1:
            return
        first_arg = call.args[0]
        if not isinstance(first_arg, astroid.Const):
            return
        if not isinstance(first_arg.value, str):
            return
        # Several positional args with a string first argument that looks
        # like a format template suggests the caller meant to format it.
        template = first_arg.value
        if "%" in template or ("{" in template and "}" in template):
            self._checker.add_message("raising-format-tuple", node=self._node)
class ExceptionRaiseLeafVisitor(BaseVisitor):
    """Visitor for handling leaf kinds of a raise value."""

    def visit_const(self, const):
        # Raising a bare constant: anything other than a string is always
        # a raising-bad-type error.
        if not isinstance(const.value, str):
            # raising-string will be emitted from python3 porting checker.
            self._checker.add_message(
                "raising-bad-type", node=self._node, args=const.value.__class__.__name__
            )

    def visit_instance(self, instance):
        # An instance is checked via the class it is a proxy for.
        # pylint: disable=protected-access
        cls = instance._proxied
        self.visit_classdef(cls)

    # Exception instances have a particular class type
    visit_exceptioninstance = visit_instance

    def visit_classdef(self, cls):
        # Only emit when the class hierarchy is fully known; an unknown
        # base could still be BaseException.
        if not utils.inherit_from_std_ex(cls) and utils.has_known_bases(cls):
            if cls.newstyle:
                self._checker.add_message("raising-non-exception", node=self._node)
            else:
                self._checker.add_message("nonstandard-exception", node=self._node)

    def visit_tuple(self, tuple_node):
        # Raising a tuple is always an error on Python 3; on Python 2 only
        # an empty tuple is.
        if PY3K or not tuple_node.elts:
            self._checker.add_message("raising-bad-type", node=self._node, args="tuple")
            return

        # On Python 2, using the following is not an error:
        # raise (ZeroDivisionError, None)
        # raise (ZeroDivisionError, )
        # What's left to do is to check that the first
        # argument is indeed an exception. Verifying the other arguments
        # is not the scope of this check.
        first = tuple_node.elts[0]
        inferred = utils.safe_infer(first)
        if not inferred or inferred is astroid.Uninferable:
            return

        if (
            isinstance(inferred, astroid.Instance)
            and inferred.__class__.__name__ != "Instance"
        ):
            # TODO: explain why
            self.visit_default(tuple_node)
        else:
            self.visit(inferred)

    def visit_default(self, node):
        # Anything not handled above is not a valid thing to raise.
        name = getattr(node, "name", node.__class__.__name__)
        self._checker.add_message("raising-bad-type", node=self._node, args=name)
class ExceptionsChecker(checkers.BaseChecker):
    """Exception related checks."""

    __implements__ = interfaces.IAstroidChecker

    name = "exceptions"
    msgs = MSGS
    priority = -4
    options = (
        (
            "overgeneral-exceptions",
            {
                "default": OVERGENERAL_EXCEPTIONS,
                "type": "csv",
                "metavar": "<comma-separated class names>",
                "help": "Exceptions that will emit a warning "
                'when being caught. Defaults to "%s".'
                % (", ".join(OVERGENERAL_EXCEPTIONS),),
            },
        ),
    )

    def open(self):
        # Cache builtin exception names once per run; used by
        # _check_catching_non_exception.
        self._builtin_exceptions = _builtin_exceptions()
        super(ExceptionsChecker, self).open()

    @utils.check_messages(
        "nonstandard-exception",
        "misplaced-bare-raise",
        "raising-bad-type",
        "raising-non-exception",
        "notimplemented-raised",
        "bad-exception-context",
        "raising-format-tuple",
    )
    def visit_raise(self, node):
        """Check a ``raise`` statement for the various raising-* messages."""
        if node.exc is None:
            # Bare ``raise``: only legal inside an except handler/__exit__.
            self._check_misplaced_bare_raise(node)
            return

        if PY3K and node.cause:
            # ``raise X from cause`` exists on Python 3 only.
            self._check_bad_exception_context(node)

        expr = node.exc
        # First inspect the raw expression (names/calls)...
        ExceptionRaiseRefVisitor(self, node).visit(expr)

        try:
            inferred_value = expr.inferred()[-1]
        except astroid.InferenceError:
            pass
        else:
            if inferred_value:
                # ...then inspect what that expression is inferred to be.
                ExceptionRaiseLeafVisitor(self, node).visit(inferred_value)

    def _check_misplaced_bare_raise(self, node):
        """Emit misplaced-bare-raise unless the bare raise is inside an
        except handler or an ``__exit__`` method."""
        # Filter out if it's present in __exit__.
        scope = node.scope()
        if (
            isinstance(scope, astroid.FunctionDef)
            and scope.is_method()
            and scope.name == "__exit__"
        ):
            return

        current = node
        # Stop when a new scope is generated or when the raise
        # statement is found inside a TryFinally.
        ignores = (astroid.ExceptHandler, astroid.FunctionDef)
        while current and not isinstance(current.parent, ignores):
            current = current.parent

        expected = (astroid.ExceptHandler,)
        if not current or not isinstance(current.parent, expected):
            self.add_message("misplaced-bare-raise", node=node)

    def _check_bad_exception_context(self, node):
        """Verify that the exception context is properly set.

        An exception context can be only `None` or an exception.
        """
        cause = utils.safe_infer(node.cause)
        if cause in (astroid.Uninferable, None):
            return

        if isinstance(cause, astroid.Const):
            # Only the constant None is an acceptable context.
            if cause.value is not None:
                self.add_message("bad-exception-context", node=node)
        elif not isinstance(cause, astroid.ClassDef) and not utils.inherit_from_std_ex(
            cause
        ):
            self.add_message("bad-exception-context", node=node)

    def _check_catching_non_exception(self, handler, exc, part):
        """Emit catching-non-exception when an except clause names something
        that is not (known to be) an exception class."""
        if isinstance(exc, astroid.Tuple):
            # Check if it is a tuple of exceptions.
            inferred = [utils.safe_infer(elt) for elt in exc.elts]
            if any(node is astroid.Uninferable for node in inferred):
                # Don't emit if we don't know every component.
                return
            if all(
                node
                and (utils.inherit_from_std_ex(node) or not utils.has_known_bases(node))
                for node in inferred
            ):
                return

        if not isinstance(exc, astroid.ClassDef):
            # Don't emit the warning if the infered stmt
            # is None, but the exception handler is something else,
            # maybe it was redefined.
            if isinstance(exc, astroid.Const) and exc.value is None:
                if (
                    isinstance(handler.type, astroid.Const)
                    and handler.type.value is None
                ) or handler.type.parent_of(exc):
                    # If the exception handler catches None or
                    # the exception component, which is None, is
                    # defined by the entire exception handler, then
                    # emit a warning.
                    self.add_message(
                        "catching-non-exception",
                        node=handler.type,
                        args=(part.as_string(),),
                    )
            else:
                self.add_message(
                    "catching-non-exception",
                    node=handler.type,
                    args=(part.as_string(),),
                )
            return

        if (
            not utils.inherit_from_std_ex(exc)
            and exc.name not in self._builtin_exceptions
        ):
            if utils.has_known_bases(exc):
                self.add_message(
                    "catching-non-exception", node=handler.type, args=(exc.name,)
                )

    def _check_try_except_raise(self, node):
        """Emit try-except-raise for handlers that immediately (re-)raise."""

        def gather_exceptions_from_handler(handler):
            # Collect the exception node(s) an except clause catches;
            # [] means a bare ``except:``.
            exceptions = []
            if handler.type:
                exceptions_in_handler = utils.safe_infer(handler.type)
                if isinstance(exceptions_in_handler, astroid.Tuple):
                    exceptions = {
                        exception
                        for exception in exceptions_in_handler.elts
                        if isinstance(exception, astroid.Name)
                    }
                elif exceptions_in_handler:
                    exceptions = [exceptions_in_handler]
            return exceptions

        bare_raise = False
        handler_having_bare_raise = None
        excs_in_bare_handler = []
        for handler in node.handlers:
            if bare_raise:
                # check that subsequent handler is not parent of handler which had bare raise.
                # since utils.safe_infer can fail for bare except, check it before.
                # also break early if bare except is followed by bare except.
                excs_in_current_handler = gather_exceptions_from_handler(handler)

                if not excs_in_current_handler:
                    bare_raise = False
                    break
                for exc_in_current_handler in excs_in_current_handler:
                    inferred_current = utils.safe_infer(exc_in_current_handler)
                    if any(
                        utils.is_subclass_of(
                            utils.safe_infer(exc_in_bare_handler), inferred_current
                        )
                        for exc_in_bare_handler in excs_in_bare_handler
                    ):
                        bare_raise = False
                        break

            # `raise` as the first operator inside the except handler
            if _is_raising([handler.body[0]]):
                # flags when there is a bare raise
                if handler.body[0].exc is None:
                    bare_raise = True
                    handler_having_bare_raise = handler
                    excs_in_bare_handler = gather_exceptions_from_handler(handler)

        if bare_raise:
            self.add_message("try-except-raise", node=handler_having_bare_raise)

    @utils.check_messages(
        "bare-except",
        "broad-except",
        "try-except-raise",
        "binary-op-exception",
        "bad-except-order",
        "catching-non-exception",
        "duplicate-except",
    )
    def visit_tryexcept(self, node):
        """check for empty except"""
        self._check_try_except_raise(node)
        exceptions_classes = []
        nb_handlers = len(node.handlers)
        for index, handler in enumerate(node.handlers):
            if handler.type is None:
                # Bare ``except:`` is fine only when it re-raises.
                if not _is_raising(handler.body):
                    self.add_message("bare-except", node=handler)
                # check if an "except:" is followed by some other
                # except
                if index < (nb_handlers - 1):
                    msg = "empty except clause should always appear last"
                    self.add_message("bad-except-order", node=node, args=msg)

            elif isinstance(handler.type, astroid.BoolOp):
                # ``except A or B:`` catches neither properly.
                self.add_message(
                    "binary-op-exception", node=handler, args=handler.type.op
                )
            else:
                try:
                    excs = list(_annotated_unpack_infer(handler.type))
                except astroid.InferenceError:
                    continue

                for part, exc in excs:
                    if exc is astroid.Uninferable:
                        continue
                    if isinstance(exc, astroid.Instance) and utils.inherit_from_std_ex(
                        exc
                    ):
                        # Normalize an exception instance to its class.
                        # pylint: disable=protected-access
                        exc = exc._proxied

                    self._check_catching_non_exception(handler, exc, part)

                    if not isinstance(exc, astroid.ClassDef):
                        continue

                    exc_ancestors = [
                        anc
                        for anc in exc.ancestors()
                        if isinstance(anc, astroid.ClassDef)
                    ]

                    # A handler for an ancestor class placed earlier makes
                    # this handler unreachable.
                    for previous_exc in exceptions_classes:
                        if previous_exc in exc_ancestors:
                            msg = "%s is an ancestor class of %s" % (
                                previous_exc.name,
                                exc.name,
                            )
                            self.add_message(
                                "bad-except-order", node=handler.type, args=msg
                            )
                    if (
                        exc.name in self.config.overgeneral_exceptions
                        and exc.root().name == utils.EXCEPTIONS_MODULE
                        and not _is_raising(handler.body)
                    ):
                        self.add_message(
                            "broad-except", args=exc.name, node=handler.type
                        )

                    if exc in exceptions_classes:
                        self.add_message(
                            "duplicate-except", args=exc.name, node=handler.type
                        )

                exceptions_classes += [exc for _, exc in excs]
def register(linter):
    """required method to auto register this checker"""
    checker = ExceptionsChecker(linter)
    linter.register_checker(checker)
|
kczapla/pylint
|
pylint/checkers/exceptions.py
|
Python
|
gpl-2.0
| 20,193
|
[
"VisIt"
] |
7bb88d814ad0e0ea4939508b6a958daa18fc4f0adffc1fd450fae75f25db7e9f
|
#
# Copyright (C) 2018-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Testmodule for the actor base class.
"""
import unittest as ut
import espressomd.actors
import espressomd.highlander
class TestActor(espressomd.actors.Actor):
    """Minimal concrete Actor used to probe the Actor base-class hooks."""

    def __init__(self, *args, **kwargs):
        # Flags recording which base-class hooks were invoked; they must
        # exist before super().__init__ runs, since it calls the hooks.
        self._core_args = None
        self._activated = False
        self._deactivated = False
        self._validated = False
        super().__init__(*args, **kwargs)

    def _get_params_from_es_core(self):
        return self._core_args

    def _set_params_in_es_core(self):
        self._core_args = self._params

    def valid_keys(self):
        return set("abc")

    def required_keys(self):
        return {"a", "c"}

    def default_params(self):
        return dict.fromkeys(("a", "b", "c"), False)

    def _activate_method(self):
        self._activated = True

    def _deactivate_method(self):
        self._deactivated = True

    def validate_params(self):
        self._validated = True
class ActorTest(ut.TestCase):
    """Tests for the single-actor interface of the Actor base class."""

    def test_ctor(self):
        """A freshly constructed actor is inactive and holds defaults."""
        a = TestActor(a=False, c=False)
        self.assertFalse(a.is_active())
        self.assertEqual(a.get_params(), a.default_params())
        self.assertEqual(a.system, None)

    def test_params_non_active(self):
        """set_params() on an inactive actor does not write to the core."""
        a = TestActor(a=True, c=True)

        a.set_params(a=False, b=True, c=False)
        params = a.get_params()
        self.assertEqual(params["a"], False)
        self.assertEqual(params["b"], True)
        self.assertEqual(params["c"], False)
        # Core untouched because the actor was never activated.
        self.assertEqual(a._core_args, None)

    def test_params_active(self):
        """set_params() on an active actor propagates to the core."""
        a = TestActor(a=True, c=True)
        a._activate()

        a.set_params(a=False, b=True, c=False)
        params = a.get_params()
        self.assertEqual(params["a"], False)
        self.assertEqual(params["b"], True)
        self.assertEqual(params["c"], False)
        self.assertEqual(a._core_args, params)

    def test_activation(self):
        """_activate() marks the actor as active."""
        a = TestActor(a=True, c=True)
        a._activate()
        self.assertTrue(a.is_active())

    def test_deactivation(self):
        """_deactivate() marks the actor inactive and keeps its params."""
        a = TestActor(a=True, c=True)
        a._activate()
        self.assertTrue(a.is_active())
        a._deactivate()
        self.assertFalse(a.is_active())

        params = a.get_params()
        self.assertEqual(params["a"], True)
        self.assertEqual(params["b"], False)
        self.assertEqual(params["c"], True)

    def test_exception(self):
        """Unknown or missing keyword arguments raise ValueError."""
        error_msg_valid = (r"Only the following keys can be given as keyword arguments: "
                           r"\['a', 'b', 'c'\], got \['a', 'c', 'd'\] \(unknown \['d'\]\)")
        error_msg_required = (r"The following keys have to be given as keyword arguments: "
                              r"\['a', 'c'\], got \['a'\] \(missing \['c'\]\)")
        with self.assertRaisesRegex(ValueError, error_msg_valid):
            TestActor(a=True, c=True, d=True)
        with self.assertRaisesRegex(ValueError, error_msg_required):
            TestActor(a=True)
        # The same validation applies after construction.
        valid_actor = TestActor(a=True, c=True)
        with self.assertRaisesRegex(ValueError, error_msg_valid):
            valid_actor.set_params(a=True, c=True, d=True)
        with self.assertRaisesRegex(ValueError, error_msg_required):
            valid_actor.set_params(a=True)
class ActorsTest(ut.TestCase):
    """Tests for the Actors container."""

    # Shared container; emptied after every test by tearDown().
    actors = espressomd.actors.Actors()

    def tearDown(self):
        self.actors.clear()

    def test_clear(self):
        """Clearing the container removes every actor it holds."""
        # clearing the list of actors removes all of them
        for actors_size in range(10):
            for _ in range(actors_size):
                actor = TestActor(a=False, c=False)
                self.actors.add(actor)
            self.assertEqual(len(self.actors), actors_size)
            self.actors.clear()
            self.assertEqual(len(self.actors), 0)

    def test_deactivation(self):
        """Adding/removing an actor toggles its active state."""
        actor = TestActor(a=False, c=False)
        self.assertFalse(actor.is_active())
        # adding an actor activates it
        self.actors.add(actor)
        self.assertTrue(actor.is_active())
        # removing an actor deactivates it
        self.actors.clear()
        self.assertFalse(actor.is_active())
        # re-adding an actor re-activates it
        self.actors.add(actor)
        self.assertTrue(actor.is_active())
        # removing an actor deactivates it
        del self.actors[0]
        self.assertFalse(actor.is_active())

    def test_unique(self):
        """Each actor can be added and removed exactly once."""
        # an actor can only be added once
        actor = TestActor(a=False, c=False)
        self.actors.add(actor)
        with self.assertRaises(espressomd.highlander.ThereCanOnlyBeOne):
            self.actors.add(actor)
        # an actor can only be removed once
        self.actors.remove(actor)
        with self.assertRaises(Exception):
            self.actors.remove(actor)
# Run the test suite when executed as a script.
if __name__ == "__main__":
    ut.main()
|
espressomd/espresso
|
testsuite/python/actor.py
|
Python
|
gpl-3.0
| 5,507
|
[
"ESPResSo"
] |
7d7b3faa70a324708a7ee1f4e104971cf2c557bce0c792d5579252d2f3c8e8e3
|
# (C) British Crown Copyright 2013 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.fileformats.netcdf.Saver` class."""
from __future__ import (absolute_import, division, print_function)
from six.moves import zip
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import mock
import netCDF4 as nc
import numpy as np
import iris
from iris.coord_systems import GeogCS, TransverseMercator, RotatedGeogCS
from iris.coords import DimCoord
from iris.cube import Cube
from iris.fileformats.netcdf import Saver
import iris.tests.stock as stock
class Test_write(tests.IrisTest):
    """Tests for `Saver.write`: coordinate systems, endianness, compression
    options, unlimited dimensions and reserved attribute names."""

    def _transverse_mercator_cube(self, ellipsoid=None):
        # Build a minimal 3x4 cube on a transverse Mercator grid.
        data = np.arange(12).reshape(3, 4)
        cube = Cube(data, 'air_pressure_anomaly')
        trans_merc = TransverseMercator(49.0, -2.0, -400000.0, 100000.0,
                                        0.9996012717, ellipsoid)
        coord = DimCoord(np.arange(3), 'projection_y_coordinate', units='m',
                         coord_system=trans_merc)
        cube.add_dim_coord(coord, 0)
        coord = DimCoord(np.arange(4), 'projection_x_coordinate', units='m',
                         coord_system=trans_merc)
        cube.add_dim_coord(coord, 1)
        return cube

    def test_transverse_mercator(self):
        # Create a Cube with a transverse Mercator coordinate system.
        ellipsoid = GeogCS(6377563.396, 6356256.909)
        cube = self._transverse_mercator_cube(ellipsoid)
        with self.temp_filename('.nc') as nc_path:
            with Saver(nc_path, 'NETCDF4') as saver:
                saver.write(cube)
            self.assertCDL(nc_path)

    def test_transverse_mercator_no_ellipsoid(self):
        # Create a Cube with a transverse Mercator coordinate system.
        cube = self._transverse_mercator_cube()
        with self.temp_filename('.nc') as nc_path:
            with Saver(nc_path, 'NETCDF4') as saver:
                saver.write(cube)
            self.assertCDL(nc_path)

    def _simple_cube(self, dtype):
        # 3x4 cube with a single bounded dim coord, in the given dtype
        # (used with '<f4'/'>f4' to exercise endianness handling).
        data = np.arange(12, dtype=dtype).reshape(3, 4)
        points = np.arange(3, dtype=dtype)
        bounds = np.arange(6, dtype=dtype).reshape(3, 2)
        cube = Cube(data, 'air_pressure_anomaly')
        coord = DimCoord(points, bounds=bounds)
        cube.add_dim_coord(coord, 0)
        return cube

    def test_little_endian(self):
        # Create a Cube with little-endian data.
        cube = self._simple_cube('<f4')
        with self.temp_filename('.nc') as nc_path:
            with Saver(nc_path, 'NETCDF4') as saver:
                saver.write(cube)
            result_path = self.result_path('endian', 'cdl')
            self.assertCDL(nc_path, result_path, flags='')

    def test_big_endian(self):
        # Create a Cube with big-endian data.
        cube = self._simple_cube('>f4')
        with self.temp_filename('.nc') as nc_path:
            with Saver(nc_path, 'NETCDF4') as saver:
                saver.write(cube)
            result_path = self.result_path('endian', 'cdl')
            self.assertCDL(nc_path, result_path, flags='')

    def test_zlib(self):
        # zlib=True must be forwarded to netCDF4's createVariable.
        cube = self._simple_cube('>f4')
        with mock.patch('iris.fileformats.netcdf.netCDF4') as api:
            with Saver('/dummy/path', 'NETCDF4') as saver:
                saver.write(cube, zlib=True)
        dataset = api.Dataset.return_value
        create_var_calls = mock.call.createVariable(
            'air_pressure_anomaly', np.dtype('float32'), ['dim0', 'dim1'],
            fill_value=None, shuffle=True, least_significant_digit=None,
            contiguous=False, zlib=True, fletcher32=False,
            endian='native', complevel=4, chunksizes=None).call_list()
        dataset.assert_has_calls(create_var_calls)

    def test_least_significant_digit(self):
        cube = Cube(np.array([1.23, 4.56, 7.89]),
                    standard_name='surface_temperature', long_name=None,
                    var_name='temp', units='K')
        with self.temp_filename('.nc') as nc_path:
            with Saver(nc_path, 'NETCDF4') as saver:
                saver.write(cube, least_significant_digit=1)
            cube_saved = iris.load_cube(nc_path)
            self.assertEqual(
                cube_saved.attributes['least_significant_digit'], 1)
            # Quantisation loses precision, but only beyond 1 decimal digit.
            self.assertFalse(np.all(cube.data == cube_saved.data))
            self.assertArrayAllClose(cube.data, cube_saved.data, 0.1)

    def test_default_unlimited_dimensions(self):
        # By default, only the first dimension is unlimited.
        cube = self._simple_cube('>f4')
        with self.temp_filename('.nc') as nc_path:
            with Saver(nc_path, 'NETCDF4') as saver:
                saver.write(cube)
            ds = nc.Dataset(nc_path)
            self.assertTrue(ds.dimensions['dim0'].isunlimited())
            self.assertFalse(ds.dimensions['dim1'].isunlimited())
            ds.close()

    def test_no_unlimited_dimensions(self):
        cube = self._simple_cube('>f4')
        with self.temp_filename('.nc') as nc_path:
            with Saver(nc_path, 'NETCDF4') as saver:
                saver.write(cube, unlimited_dimensions=[])
            ds = nc.Dataset(nc_path)
            # .values() rather than the Python 2-only .itervalues(), so the
            # test also runs under Python 3.
            for dim in ds.dimensions.values():
                self.assertFalse(dim.isunlimited())
            ds.close()

    def test_invalid_unlimited_dimensions(self):
        cube = self._simple_cube('>f4')
        with self.temp_filename('.nc') as nc_path:
            with Saver(nc_path, 'NETCDF4') as saver:
                # should not raise an exception
                saver.write(cube, unlimited_dimensions=['not_found'])

    def test_custom_unlimited_dimensions(self):
        cube = self._transverse_mercator_cube()
        unlimited_dimensions = ['projection_y_coordinate',
                                'projection_x_coordinate']
        # test coordinates by name
        with self.temp_filename('.nc') as nc_path:
            with Saver(nc_path, 'NETCDF4') as saver:
                saver.write(cube, unlimited_dimensions=unlimited_dimensions)
            ds = nc.Dataset(nc_path)
            for dim in unlimited_dimensions:
                self.assertTrue(ds.dimensions[dim].isunlimited())
            ds.close()
        # test coordinate arguments
        with self.temp_filename('.nc') as nc_path:
            coords = [cube.coord(dim) for dim in unlimited_dimensions]
            with Saver(nc_path, 'NETCDF4') as saver:
                saver.write(cube, unlimited_dimensions=coords)
            ds = nc.Dataset(nc_path)
            for dim in unlimited_dimensions:
                self.assertTrue(ds.dimensions[dim].isunlimited())
            ds.close()

    def test_reserved_attributes(self):
        # Attribute names that clash with netCDF internals must round-trip.
        cube = self._simple_cube('>f4')
        cube.attributes['dimensions'] = 'something something_else'
        with self.temp_filename('.nc') as nc_path:
            with Saver(nc_path, 'NETCDF4') as saver:
                saver.write(cube)
            ds = nc.Dataset(nc_path)
            res = ds.getncattr('dimensions')
            ds.close()
            self.assertEqual(res, 'something something_else')
class TestCoordSystems(tests.IrisTest):
    """Tests for the CF grid-mapping variables created by the saver."""

    def cube_with_cs(self, coord_system,
                     names=['grid_longitude', 'grid_latitude']):
        """Return a stock lat/lon cube with its coords renamed and given
        *coord_system*."""
        cube = stock.lat_lon_cube()
        x, y = cube.coord('longitude'), cube.coord('latitude')
        x.coord_system = y.coord_system = coord_system
        for coord, name in zip([x, y], names):
            coord.rename(name)
        return cube

    def construct_cf_grid_mapping_variable(self, cube):
        # Calls the actual NetCDF saver with appropriate mocking, returning
        # the grid variable that gets created.
        grid_variable = mock.Mock(name='NetCDFVariable')
        create_var_fn = mock.Mock(side_effect=[grid_variable])
        dataset = mock.Mock(variables=[],
                            createVariable=create_var_fn)
        saver = mock.Mock(spec=Saver, _coord_systems=[],
                          _dataset=dataset)
        variable = mock.Mock()
        Saver._create_cf_grid_mapping(saver, cube, variable)
        self.assertEqual(create_var_fn.call_count, 1)
        self.assertEqual(variable.grid_mapping,
                         grid_variable.grid_mapping_name)
        return grid_variable

    def variable_attributes(self, mocked_variable):
        """Get the attributes dictionary from a mocked NetCDF variable."""
        # Get the attributes defined on the mock object.
        attributes = [name for name in sorted(mocked_variable.__dict__.keys())
                      if not name.startswith('_')]
        attributes.remove('method_calls')
        return {key: getattr(mocked_variable, key) for key in attributes}

    def test_rotated_geog_cs(self):
        coord_system = RotatedGeogCS(37.5, 177.5, ellipsoid=GeogCS(6371229.0))
        cube = self.cube_with_cs(coord_system)
        expected = {'grid_mapping_name': 'rotated_latitude_longitude',
                    'north_pole_grid_longitude': 0.0,
                    'grid_north_pole_longitude': 177.5,
                    'grid_north_pole_latitude': 37.5,
                    'longitude_of_prime_meridian': 0.0,
                    'earth_radius': 6371229.0,
                    }
        grid_variable = self.construct_cf_grid_mapping_variable(cube)
        actual = self.variable_attributes(grid_variable)
        # To see obvious differences, check that they keys are the same.
        self.assertEqual(sorted(actual.keys()), sorted(expected.keys()))
        # Now check that the values are equivalent.
        self.assertEqual(actual, expected)

    def test_spherical_geog_cs(self):
        coord_system = GeogCS(6371229.0)
        cube = self.cube_with_cs(coord_system)
        expected = {'grid_mapping_name': 'latitude_longitude',
                    'longitude_of_prime_meridian': 0.0,
                    'earth_radius': 6371229.0
                    }
        grid_variable = self.construct_cf_grid_mapping_variable(cube)
        actual = self.variable_attributes(grid_variable)
        # To see obvious differences, check that they keys are the same.
        self.assertEqual(sorted(actual.keys()), sorted(expected.keys()))
        # Now check that the values are equivalent.
        self.assertEqual(actual, expected)

    def test_elliptic_geog_cs(self):
        coord_system = GeogCS(637, 600)
        cube = self.cube_with_cs(coord_system)
        expected = {'grid_mapping_name': 'latitude_longitude',
                    'longitude_of_prime_meridian': 0.0,
                    'semi_minor_axis': 600.0,
                    'semi_major_axis': 637.0,
                    }
        grid_variable = self.construct_cf_grid_mapping_variable(cube)
        actual = self.variable_attributes(grid_variable)
        # To see obvious differences, check that they keys are the same.
        self.assertEqual(sorted(actual.keys()), sorted(expected.keys()))
        # Now check that the values are equivalent.
        self.assertEqual(actual, expected)
# Run the test suite when executed as a script.
if __name__ == "__main__":
    tests.main()
|
Jozhogg/iris
|
lib/iris/tests/unit/fileformats/netcdf/test_Saver.py
|
Python
|
lgpl-3.0
| 11,728
|
[
"NetCDF"
] |
c8933e8c52a463825c9b31c59c276d34d7b30f0e2169e7669f64d5090b9eaa78
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Normal (Gaussian) distribution class.
@@Gaussian
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util # pylint: disable=line-too-long
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# TODO(ebrevdo): Use asserts contrib module when ready
def _assert_all_positive(x):
  """Return an Assert op that fails unless every element of `x` is > 0."""
  return logging_ops.Assert(
      math_ops.reduce_all(x > 0),
      ["Tensor %s should contain only positive values: " % x.name, x])
class Gaussian(object):
  """The scalar Gaussian distribution with mean and stddev parameters mu, sigma.

  The PDF of this distribution is:

  ```f(x) = sqrt(1/(2*pi*sigma^2)) exp(-(x-mu)^2/(2*sigma^2))```
  """

  def __init__(self, mu, sigma, name=None):
    """Construct Gaussian distributions with mean and stddev `mu` and `sigma`.

    The parameters `mu` and `sigma` must be shaped in a way that supports
    broadcasting (e.g. `mu + sigma` is a valid operation).

    Args:
      mu: `float` or `double` tensor, the means of the distribution(s).
      sigma: `float` or `double` tensor, the stddevs of the distribution(s).
        sigma must contain only positive values.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: if mu and sigma are different dtypes.
    """
    with ops.op_scope([mu, sigma], name, "Gaussian"):
      mu = ops.convert_to_tensor(mu)
      sigma = ops.convert_to_tensor(sigma)
      # The positivity assertion runs whenever these tensors are used.
      with ops.control_dependencies([_assert_all_positive(sigma)]):
        self._mu = mu
        self._sigma = sigma
        contrib_tensor_util.assert_same_float_dtype((mu, sigma))

  @property
  def dtype(self):
    return self._mu.dtype

  @property
  def mu(self):
    return self._mu

  @property
  def sigma(self):
    return self._sigma

  @property
  def mean(self):
    # Broadcast mu up to the full (mu + sigma) shape.
    return self._mu * array_ops.ones_like(self._sigma)

  def log_pdf(self, x, name=None):
    """Log pdf of observations in `x` under these Gaussian distribution(s).

    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
      name: The name to give this op.

    Returns:
      log_pdf: tensor of dtype `dtype`, the log-PDFs of `x`.
    """
    with ops.op_scope([self._mu, self._sigma, x], name, "GaussianLogPdf"):
      x = ops.convert_to_tensor(x)
      if x.dtype != self.dtype:
        raise TypeError("Input x dtype does not match dtype: %s vs. %s"
                        % (x.dtype, self.dtype))
      log_2_pi = constant_op.constant(math.log(2 * math.pi), dtype=self.dtype)
      # log N(x; mu, sigma) = -0.5*log(2*pi) - log(sigma)
      #                       - 0.5*((x - mu)/sigma)^2
      return (-0.5*log_2_pi - math_ops.log(self._sigma)
              -0.5*math_ops.square((x - self._mu) / self._sigma))

  def cdf(self, x, name=None):
    """CDF of observations in `x` under these Gaussian distribution(s).

    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
      name: The name to give this op.

    Returns:
      cdf: tensor of dtype `dtype`, the CDFs of `x`.
    """
    with ops.op_scope([self._mu, self._sigma, x], name, "GaussianCdf"):
      x = ops.convert_to_tensor(x)
      if x.dtype != self.dtype:
        raise TypeError("Input x dtype does not match dtype: %s vs. %s"
                        % (x.dtype, self.dtype))
      # Phi(x) = 0.5*(1 + erf((x - mu)/(sqrt(2)*sigma)))
      return (0.5 + 0.5*math_ops.erf(
          1.0/(math.sqrt(2.0) * self._sigma)*(x - self._mu)))

  def log_cdf(self, x, name=None):
    """Log CDF of observations `x` under these Gaussian distribution(s).

    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
      name: The name to give this op.

    Returns:
      log_cdf: tensor of dtype `dtype`, the log-CDFs of `x`.
    """
    with ops.op_scope([self._mu, self._sigma, x], name, "GaussianLogCdf"):
      # NOTE(review): computed as log(cdf(x)), which underflows for x far
      # below the mean; kept as-is to preserve behavior.
      return math_ops.log(self.cdf(x))

  def pdf(self, x, name=None):
    """The PDF of observations in `x` under these Gaussian distribution(s).

    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
      name: The name to give this op.

    Returns:
      pdf: tensor of dtype `dtype`, the pdf values of `x`.
    """
    with ops.op_scope([self._mu, self._sigma, x], name, "GaussianPdf"):
      return math_ops.exp(self.log_pdf(x))

  def entropy(self, name=None):
    """The entropy of Gaussian distribution(s).

    Args:
      name: The name to give this op.

    Returns:
      entropy: tensor of dtype `dtype`, the entropy.
    """
    with ops.op_scope([self._mu, self._sigma], name, "GaussianEntropy"):
      two_pi_e1 = constant_op.constant(
          2 * math.pi * math.exp(1), dtype=self.dtype)
      # Use broadcasting rules to calculate the full broadcast sigma.
      sigma = self._sigma * array_ops.ones_like(self._mu)
      # H = 0.5 * log(2*pi*e*sigma^2)
      return 0.5 * math_ops.log(two_pi_e1 * math_ops.square(sigma))

  def sample(self, n, seed=None, name=None):
    """Sample `n` observations from the Gaussian Distributions.

    Args:
      n: `Scalar`, type int32, the number of observations to sample.
      seed: Python integer, the random seed.
      name: The name to give this op.

    Returns:
      samples: `[n, ...]`, a `Tensor` of `n` samples for each
        of the distributions determined by broadcasting the hyperparameters.
    """
    with ops.op_scope([self._mu, self._sigma, n], name, "GaussianSample"):
      broadcast_shape = (self._mu + self._sigma).get_shape()
      n = ops.convert_to_tensor(n)
      shape = array_ops.concat(
          0, [array_ops.pack([n]), array_ops.shape(self.mean)])
      # Sample standard normals, then shift/scale (reparameterization).
      sampled = random_ops.random_normal(
          shape=shape, mean=0, stddev=1, dtype=self._mu.dtype, seed=seed)

      # Provide some hints to shape inference
      n_val = tensor_util.constant_value(n)
      final_shape = tensor_shape.vector(n_val).concatenate(broadcast_shape)
      sampled.set_shape(final_shape)

      return sampled * self._sigma + self._mu
|
shishaochen/TensorFlow-0.8-Win
|
tensorflow/contrib/distributions/python/ops/gaussian.py
|
Python
|
apache-2.0
| 6,920
|
[
"Gaussian"
] |
76020ea999cd54d1b51d8a14c1c7841e7078a300f5755ef5af7d9217651f38a5
|
"""
Gaussian Model of functional data
moduleauthor:: Derek Tucker <dtucker@stat.fsu.edu>
"""
import numpy as np
import fdasrsf.utility_functions as uf
import collections
def gauss_model(fn, time, qn, gam, n=1, sort_samples=False):
    """
    This function models the functional data using a Gaussian model
    extracted from the principal components of the srvfs

    :param fn: numpy ndarray of shape (M,N) of N aligned functions with
     M samples
    :param time: vector of size M describing the sample points
    :param qn: numpy ndarray of shape (M,N) of N aligned srvfs with M samples
    :param gam: warping functions
    :param n: number of random samples
    :param sort_samples: sort samples (default = False)
    :type n: integer
    :type sort_samples: bool
    :type fn: np.ndarray
    :type qn: np.ndarray
    :type gam: np.ndarray
    :type time: np.ndarray

    :rtype: tuple of numpy array
    :return fs: random aligned samples
    :return gams: random warping functions
    :return ft: random samples
    """
    # Parameters
    eps = np.finfo(np.double).eps
    binsize = np.diff(time).mean()
    M = time.size

    # compute mean and covariance in q-domain
    mq_new = qn.mean(axis=1)
    # int() cast: np.round returns a float, which is not a valid array index.
    mididx = int(np.round(time.shape[0] / 2))
    m_new = np.sign(fn[mididx, :]) * np.sqrt(np.abs(fn[mididx, :]))
    mqn = np.append(mq_new, m_new.mean())
    qn2 = np.vstack((qn, m_new))
    C = np.cov(qn2)

    # draw n random srvf coefficient vectors from the fitted Gaussian
    q_s = np.random.multivariate_normal(mqn, C, n)
    q_s = q_s.transpose()

    # compute the correspondence to the original function domain
    fs = np.zeros((M, n))
    for k in range(0, n):
        fs[:, k] = uf.cumtrapzmid(time, q_s[0:M, k] * np.abs(q_s[0:M, k]),
                                  np.sign(q_s[M, k]) * (q_s[M, k] ** 2))

    # random warping generation
    rgam = uf.randomGamma(gam, n)
    gams = np.zeros((M, n))
    for k in range(0, n):
        gams[:, k] = uf.invertGamma(rgam[:, k])

    # common interpolation grid on [0, 1]
    time_grid = np.arange(0, M) / np.double(M - 1)

    if sort_samples:
        # sort functions and warping
        mx = fs.max(axis=0)
        seq1 = mx.argsort()

        # compute the psi-function to order warpings by arc length
        # NOTE(review): np.gradient on a 2-D array returns a list of arrays in
        # modern NumPy; this expression assumes an array result — verify.
        fy = np.gradient(rgam, binsize)
        psi = fy / np.sqrt(abs(fy) + eps)
        ip = np.zeros(n)
        arc_len = np.zeros(n)  # renamed from `len`: do not shadow the builtin
        for i in range(0, n):
            tmp = np.ones(M)
            ip[i] = tmp.dot(psi[:, i] / M)
            # np.arccos: `np.acos` does not exist in NumPy (<2.0)
            arc_len[i] = np.arccos(tmp.dot(psi[:, i] / M))
        seq2 = arc_len.argsort()

        # combine x-variability and y-variability
        ft = np.zeros((M, n))
        for k in range(0, n):
            ft[:, k] = np.interp(gams[:, seq2[k]], time_grid, fs[:, seq1[k]])
            # Re-evaluate the NaN test every pass: the original tested a mask
            # computed once before the loop, which spun forever on any NaN.
            while np.isnan(ft[:, k]).any():
                rgam2 = uf.randomGamma(gam, 1)
                # NOTE(review): resamples from invertGamma(rgam2) rather than
                # fs, as in the original — preserved; verify intent upstream.
                ft[:, k] = np.interp(gams[:, seq2[k]], time_grid,
                                     uf.invertGamma(rgam2))
    else:
        # combine x-variability and y-variability
        ft = np.zeros((M, n))
        for k in range(0, n):
            ft[:, k] = np.interp(gams[:, k], time_grid, fs[:, k])
            # See note above: recompute the NaN mask each iteration.
            while np.isnan(ft[:, k]).any():
                rgam2 = uf.randomGamma(gam, 1)
                ft[:, k] = np.interp(gams[:, k], time_grid,
                                     uf.invertGamma(rgam2))

    samples = collections.namedtuple('samples', ['fs', 'gams', 'ft'])
    return samples(fs, rgam, ft)
|
glemaitre/fdasrsf
|
fdasrsf/gauss_model.py
|
Python
|
gpl-3.0
| 3,546
|
[
"Gaussian"
] |
75c8854073c5a74070033cbf9b314c7dbdf850ba1e05717994729ed603380073
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# TAMkin is a post-processing toolkit for normal mode analysis, thermochemistry
# and reaction kinetics.
# Copyright (C) 2008-2012 Toon Verstraelen <Toon.Verstraelen@UGent.be>, An Ghysels
# <An.Ghysels@UGent.be> and Matthias Vandichel <Matthias.Vandichel@UGent.be>
# Center for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all
# rights reserved unless otherwise stated.
#
# This file is part of TAMkin.
#
# TAMkin is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# In addition to the regulations of the GNU General Public License,
# publications and communications based in parts on this program or on
# parts of this program are required to cite the following article:
#
# "TAMkin: A Versatile Package for Vibrational Analysis and Chemical Kinetics",
# An Ghysels, Toon Verstraelen, Karen Hemelsoet, Michel Waroquier and Veronique
# Van Speybroeck, Journal of Chemical Information and Modeling, 2010, 50,
# 1736-1750W
# http://dx.doi.org/10.1021/ci100099g
#
# TAMkin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
# this tiny program cuts away all unused lines from a gaussian log file of
# a torsional scan computation. All torsional scan files in TAMkin are reduced
# in size with this program.
from __future__ import print_function
import sys
# Countdown of geometry-table separator lines still to echo after a match;
# starts at 3 so the file header is kept.
remaining = 3
for raw in sys.stdin:
    # strip the trailing newline
    raw = raw[:-1]
    if raw == " Input orientation: ":
        # a new geometry block starts: echo until three separators pass
        remaining = 3
    keep = remaining > 0
    keep = keep or raw.startswith(" SCF Done:")
    keep = keep or raw.startswith(" -- Stationary point found.")
    if keep:
        print(raw)
    if raw == " ---------------------------------------------------------------------":
        remaining -= 1
|
molmod/tamkin
|
tamkin/examples/009_ethyl_ethene/cutter.py
|
Python
|
gpl-3.0
| 2,220
|
[
"Gaussian"
] |
6b0547af82aa8a4616ea6bcf4edab70a2015b8e5ccad89db4ac25249a89fc4a0
|
#
# This source file is part of appleseed.
# Visit https://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2019 Jonathan Dent, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import bpy
from ..utils import util
class ASRENDER_PT_base(object):
    """Mixin that places panels in the render-properties window and
    restricts them to the appleseed render engine."""
    bl_context = "render"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"

    @classmethod
    def poll(cls, context):
        # Only show the panel while appleseed is the active engine.
        return context.scene.render.engine == 'APPLESEED_RENDER'
class ASRENDER_PT_export(bpy.types.Panel, ASRENDER_PT_base):
    """Panel choosing between rendering and exporting the scene to disk."""
    bl_label = "Rendering Mode"
    COMPAT_ENGINES = {'APPLESEED_RENDER'}

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        scene = context.scene
        asr_scene_props = scene.appleseed
        layout.prop(asr_scene_props, "scene_export_mode", text="")
        # layout.operator("appleseed.export_scene", text="Export")
        if asr_scene_props.scene_export_mode == 'export_only':
            # Path/selection options only apply when exporting to disk.
            layout.prop(asr_scene_props, "export_path", text="Export Path")
            layout.prop(asr_scene_props, "export_selected", text="Only Export Selected Objects")
class ASRENDER_PT_settings(bpy.types.Panel, ASRENDER_PT_base):
    """System settings: threads, noise seed, logging, texture cache and
    experimental features."""
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_label = "System"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        scene = context.scene
        asr_scene_props = scene.appleseed
        col = layout.column(align=True)
        row = col.row(align=True)
        # Manual thread count is only editable when auto-threads is off.
        row.enabled = not asr_scene_props.threads_auto
        row.prop(asr_scene_props, "threads", text="Threads")
        col.prop(asr_scene_props, "threads_auto", text="Auto Threads")
        layout.separator()
        col = layout.column(align=True)
        col.prop(asr_scene_props, "noise_seed", text="Noise Seed")
        col.prop(asr_scene_props, "per_frame_noise", text="Vary per Frame")
        layout.separator()
        layout.prop(asr_scene_props, "log_level", text="Render Log")
        layout.separator()
        layout.prop(asr_scene_props, "tex_cache", text="Tex Cache")
        # Here be dragons
        box = layout.box()
        box.label(text="Experimental Features")
        box.prop(asr_scene_props, "use_embree", text="Use Embree")
class ASRENDER_PT_shading_override(bpy.types.Panel, ASRENDER_PT_base):
    """Panel toggling diagnostic shading-override modes."""
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_label = "Shading Override"
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        scene = context.scene
        asr_scene_props = scene.appleseed
        col = layout.column(align=True)
        col.prop(asr_scene_props, "shading_override", text="Override Shading")
        row = col.row(align=True)
        # Mode selector is only editable while the override is enabled.
        row.enabled = asr_scene_props.shading_override
        row.prop(asr_scene_props, "override_mode", text="Mode")
class ASRENDER_PT_denoise(bpy.types.Panel, ASRENDER_PT_base):
    """Denoising settings panel for the appleseed render engine."""
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_label = "Denoising"
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        asr_scene_props = context.scene.appleseed
        layout.prop(asr_scene_props, "denoise_mode", text="Mode")
        if asr_scene_props.denoise_mode == 'write_outputs':
            # Destination only matters when writing denoiser outputs to disk.
            layout.prop(asr_scene_props, "denoise_output_dir", text="Output Directory")
        col = layout.column(align=True)
        # Detail controls are greyed out while denoising is off.
        col.active = asr_scene_props.denoise_mode != 'off'
        col.prop(asr_scene_props, "spike_threshold", text="Spike Threshold")
        col.prop(asr_scene_props, "patch_distance_threshold", text="Patch Distance")
        col.prop(asr_scene_props, "denoise_scales", text="Denoise Scales")
        # Fixed UI label typo: "Random Pixe Order" -> "Random Pixel Order".
        col.prop(asr_scene_props, "random_pixel_order", text="Random Pixel Order")
        col.prop(asr_scene_props, "skip_denoised", text="Skip Denoised Pixels")
        col.prop(asr_scene_props, "prefilter_spikes", text="Prefilter Spikes")
        col.prop(asr_scene_props, "mark_invalid_pixels", text="Mark Invalid Pixels")
class ASRENDER_PT_sampling(bpy.types.Panel, ASRENDER_PT_base):
    """Container panel for the image-sampling sub-panels; draws nothing itself."""
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_label = "Image Sampling"

    def draw(self, context):
        # Intentionally empty: content lives in the child panels
        # (sampler, interactive, filter) parented via bl_parent_id.
        pass
class ASRENDER_PT_sampling_sampler(bpy.types.Panel, ASRENDER_PT_base):
    """Pixel-sampler settings; options shown depend on the chosen sampler."""
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_label = "Sampler"
    bl_parent_id = "ASRENDER_PT_sampling"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        scene = context.scene
        asr_scene_props = scene.appleseed
        layout.prop(asr_scene_props, "pixel_sampler", text="Sampler")
        col = layout.column(align=True)
        col.prop(asr_scene_props, "renderer_passes", text="Passes")
        if asr_scene_props.pixel_sampler == 'adaptive':
            col = layout.column(align=True)
            col.prop(asr_scene_props, "adaptive_batch_size", text="Batch Size")
            col.prop(asr_scene_props, "adaptive_max_samples", text="Max Samples")
            col = layout.column(align=True)
            col.prop(asr_scene_props, "adaptive_noise_threshold", text="Noise Threshold")
            col.prop(asr_scene_props, "adaptive_min_samples", text="Min Samples")
        elif asr_scene_props.pixel_sampler == 'uniform':
            col.prop(asr_scene_props, "samples", text="Samples")
        else:
            # Remaining sampler mode: texture-driven sampling.
            col = layout.column(align=True)
            col.prop(asr_scene_props, "texture_sampler_filepath", text="Texture Path")
            col = layout.column(align=True)
            col.prop(asr_scene_props, "adaptive_min_samples", text="Min Samples")
            col.prop(asr_scene_props, "adaptive_max_samples", text="Max Samples")
class ASRENDER_PT_sampling_interactive(bpy.types.Panel, ASRENDER_PT_base):
    """Limits for interactive (viewport) rendering: FPS, samples, time."""
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_label = "Interactive Rendering"
    bl_parent_id = "ASRENDER_PT_sampling"
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        scene = context.scene
        asr_scene_props = scene.appleseed
        col = layout.column(align=True)
        col.prop(asr_scene_props, "interactive_max_fps", text="FPS")
        col.prop(asr_scene_props, "interactive_max_samples", text="Max Samples")
        col.prop(asr_scene_props, "interactive_max_time", text="Max Time in Seconds")
class ASRENDER_PT_sampling_filter(bpy.types.Panel, ASRENDER_PT_base):
    """Tile ordering/size and the reconstruction pixel filter."""
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_label = "Filter"
    bl_parent_id = "ASRENDER_PT_sampling"
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        scene = context.scene
        asr_scene_props = scene.appleseed
        col = layout.column(align=True)
        col.prop(asr_scene_props, "tile_ordering", text="Tile Order")
        col.prop(asr_scene_props, "tile_size", text="Size")
        layout.separator()
        col = layout.column(align=True)
        col.prop(asr_scene_props, "pixel_filter", text="Pixel Filter")
        col.prop(asr_scene_props, "pixel_filter_size", text="Size")
class ASRENDER_PT_lighting(bpy.types.Panel, ASRENDER_PT_base):
    """Top-level lighting panel selecting the lighting engine (PT or SPPM)."""
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_label = "Lighting"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        scene = context.scene
        asr_scene_props = scene.appleseed
        layout.prop(asr_scene_props, "lighting_engine", text="Engine")
class ASRENDER_PT_lighting_pt(bpy.types.Panel, ASRENDER_PT_base):
    """Path-tracing options; only visible while the PT engine is selected."""
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_label = "Path Tracing"
    bl_parent_id = "ASRENDER_PT_lighting"

    @classmethod
    def poll(cls, context):
        # Narrow the mixin's poll: also require the PT lighting engine.
        renderer = context.scene.render
        return renderer.engine == 'APPLESEED_RENDER' and context.scene.appleseed.lighting_engine == 'pt'

    def draw(self, context):
        layout = self.layout
        scene = context.scene
        asr_scene_props = scene.appleseed
        layout.use_property_split = True
        col = layout.column()
        col.prop(asr_scene_props, "record_light_paths", text="Record Light Paths")
        layout.separator()
        col = layout.column(align=True)
        col.prop(asr_scene_props, "enable_dl", text="Directly Sample Lights")
        col = col.column(align=True)
        # Sub-options only editable while direct lighting is on.
        col.enabled = asr_scene_props.enable_dl
        col.prop(asr_scene_props, "enable_light_importance_sampling", text="Light Importance Sampling")
        col.prop(asr_scene_props, "dl_light_samples", text="Samples")
        col.prop(asr_scene_props, "dl_low_light_threshold", text="Low Light Threshold")
        layout.separator()
        col = layout.column(align=True)
        col.enabled = asr_scene_props.next_event_estimation
        col.prop(asr_scene_props, "enable_ibl", text="Environment Emits Light")
        col = col.column(align=True)
        col.enabled = asr_scene_props.enable_ibl
        col.prop(asr_scene_props, "ibl_env_samples", text="Samples")
        layout.separator()
        col = layout.column()
        col.prop(asr_scene_props, "enable_caustics", text="Caustics")
        col.prop(asr_scene_props, "enable_clamp_roughness", text="Clamp Roughness")
        col = layout.column(align=True)
        col.prop(asr_scene_props, "max_ray_intensity_unlimited", text="Max Ray Intensity Unlimited")
        col = col.column(align=True)
        # Clamp value only editable when the unlimited toggle is off.
        col.enabled = not asr_scene_props.max_ray_intensity_unlimited
        col.prop(asr_scene_props, "max_ray_intensity", text="Max Ray Intensity")
class ASRENDER_PT_lighting_bounces(bpy.types.Panel, ASRENDER_PT_base):
    """Per-type bounce limits for path tracing; each limit is editable only
    when its 'unlimited' toggle is off."""
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_label = "Bounces"
    bl_parent_id = "ASRENDER_PT_lighting_pt"

    def draw(self, context):
        layout = self.layout
        scene = context.scene
        asr_scene_props = scene.appleseed
        layout.use_property_split = True
        col = layout.column(align=True)
        col.prop(asr_scene_props, "max_bounces_unlimited", text="Max Bounce Unlimited")
        col.prop(asr_scene_props, "max_diffuse_bounces_unlimited", text="Diffuse Bounce Unlimited")
        col.prop(asr_scene_props, "max_glossy_brdf_bounces_unlimited", text="Glossy Bounce Unlimited")
        col.prop(asr_scene_props, "max_specular_bounces_unlimited", text="Specular Bounce Unlimited")
        col.prop(asr_scene_props, "max_volume_bounces_unlimited", text="Volume Bounce Unlimited")
        col = layout.column(align=True)
        col.enabled = not asr_scene_props.max_bounces_unlimited
        col.prop(asr_scene_props, "max_bounces", text="Max Bounces")
        col = layout.column(align=True)
        col.enabled = not asr_scene_props.max_diffuse_bounces_unlimited
        col.prop(asr_scene_props, "max_diffuse_bounces", text="Diffuse Bounces")
        col = layout.column(align=True)
        col.enabled = not asr_scene_props.max_glossy_brdf_bounces_unlimited
        col.prop(asr_scene_props, "max_glossy_brdf_bounces", text="Glossy Bounces")
        col = layout.column(align=True)
        col.enabled = not asr_scene_props.max_specular_bounces_unlimited
        col.prop(asr_scene_props, "max_specular_bounces", text="Specular Bounces")
        col = layout.column(align=True)
        col.enabled = not asr_scene_props.max_volume_bounces_unlimited
        col.prop(asr_scene_props, "max_volume_bounces", text="Volume Bounces")
class ASRENDER_PT_lighting_sppm(bpy.types.Panel, ASRENDER_PT_base):
    """SPPM options; only visible while the SPPM engine is selected."""
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_label = "SPPM"
    bl_parent_id = "ASRENDER_PT_lighting"

    @classmethod
    def poll(cls, context):
        # Narrow the mixin's poll: also require the SPPM lighting engine.
        renderer = context.scene.render
        return renderer.engine == 'APPLESEED_RENDER' and context.scene.appleseed.lighting_engine == 'sppm'

    def draw(self, context):
        layout = self.layout
        scene = context.scene
        asr_scene_props = scene.appleseed
        layout.use_property_split = True
        layout.prop(asr_scene_props, "sppm_dl_mode", text="Direct Lighting")
        col = layout.column(align=True)
        col.enabled = asr_scene_props.next_event_estimation
        col.prop(asr_scene_props, "enable_ibl", text="Environment Emits Light")
        col = col.column(align=True)
        col.enabled = asr_scene_props.enable_ibl
        col.prop(asr_scene_props, "ibl_env_samples", text="Samples")
        layout.prop(asr_scene_props, "enable_caustics", text="Caustics")
class ASRENDER_PT_lighting_sppm_tracing(bpy.types.Panel, ASRENDER_PT_base):
    """Photon-tracing pass settings for SPPM."""
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_label = "Photon Tracing"
    bl_parent_id = "ASRENDER_PT_lighting_sppm"

    def draw(self, context):
        layout = self.layout
        scene = context.scene
        asr_scene_props = scene.appleseed
        layout.use_property_split = True
        col = layout.column(align=True)
        row = col.row()
        row.prop(asr_scene_props, "sppm_enable_importons", text="Enable Importons")
        row = col.row()
        # Lookup radius only editable while importons are enabled.
        row.enabled = asr_scene_props.sppm_enable_importons
        row.prop(asr_scene_props, "sppm_importon_lookup_radius", text="Importon Lookup Radius")
        col = layout.column(align=True)
        col.prop(asr_scene_props, "sppm_photon_max_length", text="Max Bounces")
        col.prop(asr_scene_props, "sppm_photon_rr_start", text="Russian Roulette Start Bounce")
        col.prop(asr_scene_props, "sppm_light_photons", text="Light Photons")
        col.prop(asr_scene_props, "sppm_env_photons", text="Environment Photons")
class ASRENDER_PT_lighting_sppm_radiance(bpy.types.Panel, ASRENDER_PT_base):
    """Radiance-estimation pass settings for SPPM."""
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_label = "Radiance Estimation"
    bl_parent_id = "ASRENDER_PT_lighting_sppm"

    def draw(self, context):
        layout = self.layout
        scene = context.scene
        asr_scene_props = scene.appleseed
        layout.use_property_split = True
        col = layout.column()
        col.prop(asr_scene_props, "sppm_pt_max_ray_intensity_unlimited", text="Max Ray Intensity Unlimited")
        row = col.row()
        # Clamp value only editable when the unlimited toggle is off.
        row.enabled = not asr_scene_props.sppm_pt_max_ray_intensity_unlimited
        row.prop(asr_scene_props, "sppm_pt_max_ray_intensity", text="Max Ray Intensity")
        col = layout.column(align=True)
        col.prop(asr_scene_props, "sppm_pt_max_length", text="Max Bounces")
        col.prop(asr_scene_props, "sppm_pt_rr_start", text="Russian Roulette Start Bounce")
        col.prop(asr_scene_props, "sppm_initial_radius", text="Initial Radius")
        col.prop(asr_scene_props, "sppm_max_per_estimate", text="Max Photons")
        col.prop(asr_scene_props, "sppm_alpha", text="Alpha")
class ASRENDER_PT_lighting_advanced(bpy.types.Panel, ASRENDER_PT_base):
    """Advanced lighting settings: Russian roulette and volume sampling."""
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_label = "Advanced"
    bl_parent_id = "ASRENDER_PT_lighting"
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        scene = context.scene
        asr_scene_props = scene.appleseed
        layout.prop(asr_scene_props, "rr_start", text="Russian Roulette Start Bounce")
        col = layout.column(align=True)
        col.prop(asr_scene_props, "volume_distance_samples", text="Volume Distance Samples")
        col.prop(asr_scene_props, "optimize_for_lights_outside_volumes", text="Optimize for Lights Outside Volumes")
class ASRENDER_UL_post_processing(bpy.types.UIList):
    """UIList row renderer for the post-processing stage list."""

    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
        stage = item.name
        # Only the default list layout is supported; compact/grid draw nothing.
        if 'DEFAULT' in self.layout_type:
            layout.label(text=stage, translate=False, icon_value=icon)
class ASRENDER_PT_post_process_stages(bpy.types.Panel, ASRENDER_PT_base):
    """Panel managing appleseed post-process stages (render stamp or
    color map) and their per-model options."""
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_label = "appleseed Post Process"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        asr_scene_props = context.scene.appleseed
        pp_stages = asr_scene_props.post_processing_stages
        row = layout.row()
        row.template_list("ASRENDER_UL_post_processing", "", asr_scene_props,
                          "post_processing_stages", asr_scene_props, "post_processing_stages_index",
                          rows=1, maxrows=16, type="DEFAULT")
        row = layout.row(align=True)
        row.operator("appleseed.add_pp_stage", text="Add Stage", icon="ADD")
        row.operator("appleseed.remove_pp_stage", text="Remove Stage", icon="REMOVE")
        if pp_stages:
            # Show options for the currently selected stage only.
            current_stage = pp_stages[asr_scene_props.post_processing_stages_index]
            layout.prop(current_stage, "model", text="Model")
            if current_stage.model == 'render_stamp_post_processing_stage':
                layout.prop(current_stage, "render_stamp", text="Stamp")
                layout.prop(current_stage, "render_stamp_patterns", text="Add Stamp")
            else:
                # Color-map stage options.
                layout.prop(current_stage, "color_map", text="Mode")
                row = layout.row()
                row.enabled = current_stage.color_map == 'custom'
                row.prop(current_stage, "color_map_file_path", text="Custom Map")
                col = layout.column(align=True)
                col.prop(current_stage, "add_legend_bar", text="Add Legends Bar")
                row = col.row(align=True)
                row.enabled = current_stage.add_legend_bar
                row.prop(current_stage, "legend_bar_ticks", text="Ticks")
                col = layout.column(align=True)
                col.prop(current_stage, "auto_range", text="Auto Range")
                row = col.row(align=True)
                row.enabled = not current_stage.auto_range
                row.prop(current_stage, "range_min", text="Min Range")
                row = col.row(align=True)
                row.enabled = not current_stage.auto_range
                row.prop(current_stage, "range_max", text="Max Range")
                col = layout.column(align=True)
                col.prop(current_stage, "render_isolines", text="Render Isolines")
                row = col.row(align=True)
                row.enabled = current_stage.render_isolines
                row.prop(current_stage, "line_thickness", text="Line Thickness")
class ASRENDER_PT_motion_blur(bpy.types.Panel, ASRENDER_PT_base):
    """Motion blur: shutter interval plus camera/object/deformation blur."""
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_label = "Motion Blur"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        asr_scene_props = context.scene.appleseed
        col = layout.column(align=True)
        col.prop(asr_scene_props, "shutter_open", text="Shutter Open Begin")
        col.prop(asr_scene_props, "shutter_open_end_time", text="Shutter Open End")
        col = layout.column(align=True)
        col.prop(asr_scene_props, "shutter_close_begin_time", text="Shutter Close Begin")
        col.prop(asr_scene_props, "shutter_close", text="Shutter Close End")
        col = layout.column(align=True)
        col.prop(asr_scene_props, "enable_camera_blur", text="Camera Blur")
        col.prop(asr_scene_props, "camera_blur_samples", text="Samples")
        col = layout.column(align=True)
        col.prop(asr_scene_props, "enable_object_blur", text="Object Blur")
        col.prop(asr_scene_props, "object_blur_samples", text="Samples")
        col = layout.column(align=True)
        col.prop(asr_scene_props, "enable_deformation_blur", text="Deformation Blur")
        col.prop(asr_scene_props, "deformation_blur_samples", text="Samples")
class ASRENDER_PT_b_post_processing(bpy.types.Panel, ASRENDER_PT_base):
    """Exposes Blender's own compositing/sequencer post-processing toggles."""
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_label = "Blender Post Processing"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        rd = context.scene.render
        col = layout.column()
        col.prop(rd, "use_compositing")
        col.prop(rd, "use_sequencer")
# All UI classes registered with Blender; ASRENDER_PT_base is a plain mixin
# and is deliberately not listed here.
classes = (
    ASRENDER_PT_export,
    ASRENDER_PT_settings,
    ASRENDER_PT_shading_override,
    ASRENDER_PT_denoise,
    ASRENDER_PT_sampling,
    ASRENDER_PT_sampling_sampler,
    ASRENDER_PT_sampling_interactive,
    ASRENDER_PT_sampling_filter,
    ASRENDER_PT_lighting,
    ASRENDER_PT_lighting_pt,
    ASRENDER_PT_lighting_bounces,
    ASRENDER_PT_lighting_sppm,
    ASRENDER_PT_lighting_sppm_tracing,
    ASRENDER_PT_lighting_sppm_radiance,
    ASRENDER_PT_lighting_advanced,
    ASRENDER_UL_post_processing,
    ASRENDER_PT_post_process_stages,
    ASRENDER_PT_motion_blur,
    ASRENDER_PT_b_post_processing
)


def register():
    """Register every appleseed render-UI class with Blender."""
    for ui_class in classes:
        util.safe_register_class(ui_class)


def unregister():
    """Unregister the UI classes in reverse registration order."""
    for ui_class in reversed(classes):
        util.safe_unregister_class(ui_class)
|
dictoon/blenderseed
|
ui/render.py
|
Python
|
mit
| 21,990
|
[
"VisIt"
] |
cf6464ab26355b77d2247c2e4ba8721472e32dc072703e0cb237287efb3093a0
|
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""KD tree data structure for searching N-dimensional vectors.
The KD tree data structure can be used for all kinds of searches that
involve N-dimensional vectors. For example, neighbor searches (find all points
within a radius of a given point) or finding all point pairs in a set
that are within a certain radius of each other. See "Computational Geometry:
Algorithms and Applications" (Mark de Berg, Marc van Kreveld, Mark Overmars,
Otfried Schwarzkopf).
"""
# Re-export the KDTree class as the package's public interface.
from .KDTree import KDTree

# Docstring markup format declaration (restructuredtext) for doc tools.
__docformat__ = "restructuredtext en"
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/KDTree/__init__.py
|
Python
|
gpl-2.0
| 701
|
[
"Biopython"
] |
4908d96328e005c4c2e408d9e5241c4f0956e72efb808d083bf69a9a86d732ca
|
#!/usr/bin/python
"""
Copyright 2012 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import Cookie
import dbSession
import dbShared
import cgi
import MySQLdb
#
form = cgi.FieldStorage()
galaxy = form.getfirst('galaxy', '')
statID = form.getfirst('statID', '')
# escape input to prevent sql injection
galaxy = dbShared.dbInsertSafe(galaxy)
# Main program
rowCount = 0
print 'Content-type: text/html\r\n\r\n'
conn = dbShared.ghConn()
cursor = conn.cursor()
if (cursor):
if statID == 'currentSpawns':
sqlStr = 'SELECT Count(spawnID) FROM tResources WHERE galaxy=' + galaxy + ' AND unavailable IS NULL;'
elif statID == 'totalSpawns':
sqlStr = 'SELECT Count(spawnID) FROM tResources WHERE galaxy=' + galaxy + ';'
elif statID == 'currentWaypoints':
sqlStr = 'SELECT Count(*) FROM tWaypoint INNER JOIN tResources ON tWaypoint.spawnID = tResources.spawnID WHERE galaxy=' + galaxy + ' AND tWaypoint.unavailable IS NULL AND tResources.unavailable IS NULL;'
else:
sqlStr = 'SELECT \'Unknown statID\';'
cursor.execute(sqlStr)
row = cursor.fetchone()
if (row != None):
print str(row[0])
cursor.close()
conn.close()
|
clreinki/GalaxyHarvester
|
ghStats.py
|
Python
|
agpl-3.0
| 1,848
|
[
"Galaxy"
] |
246fa680bfbb378556ba9eabf9a3f7c5cc2efc391e68357d00f6836796e16c64
|
# -*- coding: utf-8 -*-
from itertools import chain
import pytest
from ..util.testing import requires
from ..util.parsing import parsing_library
from ..units import default_units, units_library, allclose
from ..chemistry import Substance, Reaction
from ..reactionsystem import ReactionSystem
@requires(parsing_library, "numpy")
def test_ReactionSystem():
    """Exercise ReactionSystem construction checks, substance indexing,
    per_substance_varied broadcasting, name lookup and addition with an
    empty system."""
    import numpy as np

    kw = dict(substance_factory=Substance.from_formula)
    r1 = Reaction.from_string("H2O -> H+ + OH-", "H2O H+ OH-", name="r1")
    rs = ReactionSystem([r1], "H2O H+ OH-", **kw)
    r2 = Reaction.from_string("H2O -> 2 H+ + OH-", "H2O H+ OH-", name="r2")
    # unbalanced reaction is rejected
    with pytest.raises(ValueError):
        ReactionSystem([r2], "H2O H+ OH-", **kw)
    # duplicate reactions are rejected
    with pytest.raises(ValueError):
        ReactionSystem([r1, r1], "H2O H+ OH-", **kw)
    assert rs.as_substance_index("H2O") == 0
    assert rs.as_substance_index(0) == 0
    varied, varied_keys = rs.per_substance_varied(
        {"H2O": 55.4, "H+": 1e-7, "OH-": 1e-7},
        {"H+": [1e-8, 1e-9, 1e-10, 1e-11], "OH-": [1e-3, 1e-2]},
    )
    assert varied_keys == ("H+", "OH-")
    # grid shape: (4 H+ values, 2 OH- values, 3 substances)
    assert len(varied.shape) == 3
    assert varied.shape[:-1] == (4, 2)
    assert varied.shape[-1] == 3
    assert np.all(varied[..., 0] == 55.4)
    assert np.all(varied[:, 1, 2] == 1e-2)
    # lookup by reaction name
    assert rs["r1"] is r1
    rs.rxns.append(r2)
    assert rs["r2"] is r2
    with pytest.raises(KeyError):
        rs["r3"]
    # ambiguous lookup (two reactions named "r2") raises
    rs.rxns.append(Reaction({}, {}, 0, name="r2", checks=()))
    with pytest.raises(ValueError):
        rs["r2"]
    empty_rs = ReactionSystem([])
    rs2 = empty_rs + rs
    assert rs2 == rs
    rs3 = rs + empty_rs
    assert rs3 == rs
@requires(parsing_library)
def test_ReactionSystem__missing_substances_from_keys():
    """missing_substances_from_keys=True builds absent substances via the factory."""
    rxn = Reaction({"H2O"}, {"H+", "OH-"})
    # Without the flag, reaction keys absent from `substances` are an error.
    with pytest.raises(ValueError):
        ReactionSystem([rxn], substances={"H2O": Substance.from_formula("H2O")})
    rsys = ReactionSystem(
        [rxn],
        substances={"H2O": Substance.from_formula("H2O")},
        missing_substances_from_keys=True,
        substance_factory=Substance.from_formula,
    )
    # OH- was created from its key: charge -1, one H, one O.
    assert rsys.substances["OH-"].composition == {0: -1, 1: 1, 8: 1}
@requires(parsing_library)
def test_ReactionSystem__check_balance():
    """check_balance(strict=True) holds for elemental reactions but not for
    substances lacking composition information."""
    rs1 = ReactionSystem.from_string(
        "\n".join(["2 NH3 -> N2 + 3 H2", "N2H4 -> N2 + 2 H2"])
    )
    assert rs1.check_balance(strict=True)
    # plain Substance instances carry no composition -> cannot balance
    rs2 = ReactionSystem.from_string(
        "\n".join(["2 A -> B", "B -> 2A"]), substance_factory=Substance
    )
    assert not rs2.check_balance(strict=True)
    assert rs2.composition_balance_vectors() == ([], [])
def test_ReactionSystem__per_reaction_effect_on_substance():
    """Net stoichiometric effect per reaction for 2 H2 + O2 -> 2 H2O."""
    rsys = ReactionSystem([Reaction({"H2": 2, "O2": 1}, {"H2O": 2})])
    expected = {"H2": {0: -2}, "O2": {0: -1}, "H2O": {0: 2}}
    for species, effect in expected.items():
        assert rsys.per_reaction_effect_on_substance(species) == effect
def test_ReactionSystem__rates():
    """Rates of H2O -> H+ + OH- with rate constant 11 at [H2O] = 3."""
    rsys = ReactionSystem([Reaction({"H2O"}, {"H+", "OH-"}, 11)])
    rate = 11 * 3  # mass-action rate of the single reaction
    assert rsys.rates({"H2O": 3, "H+": 5, "OH-": 7}) == {
        "H2O": -rate,
        "H+": rate,
        "OH-": rate,
    }
def test_ReactionSystem__rates__cstr():
    """Rates in a CSTR include feed (fr*fc) and outflow (fr*c0) terms."""
    k = 11
    rs = ReactionSystem([Reaction({"H2O2": 2}, {"O2": 1, "H2O": 2}, k)])
    c0 = {"H2O2": 3, "O2": 5, "H2O": 53}
    fr = 7  # feed rate
    fc = {"H2O2": 13, "O2": 17, "H2O": 23}  # feed concentrations
    r = k * c0["H2O2"] ** 2  # mass-action rate of the single reaction
    ref = {
        "H2O2": -2 * r + fr * fc["H2O2"] - fr * c0["H2O2"],
        "O2": r + fr * fc["O2"] - fr * c0["O2"],
        "H2O": 2 * r + fr * fc["H2O"] - fr * c0["H2O"],
    }
    variables = dict(
        chain(c0.items(), [("fc_" + key, val) for key, val in fc.items()], [("fr", fr)])
    )
    assert (
        rs.rates(variables, cstr_fr_fc=("fr", {sk: "fc_" + sk for sk in rs.substances}))
        == ref
    )
@requires("numpy")
def test_ReactionSystem__html_tables():
    """uni-/bimolecular HTML tables list reactions by molecularity and
    report the reaction indices excluded from each table."""
    r1 = Reaction({"A": 2}, {"A"}, name="R1")
    r2 = Reaction({"A"}, {"A": 2}, name="R2")
    rs = ReactionSystem([r1, r2])
    ut, unc = rs.unimolecular_html_table()
    assert unc == {0}  # R1 (bimolecular) excluded from unimolecular table
    from chempy.printing import html

    assert (
        html(ut, with_name=False)
        == u'<table><tr><td>A</td><td ><a title="1: A → 2 A">R2</a></td></tr></table>'
    )
    bt, bnc = rs.bimolecular_html_table()
    assert bnc == {1}  # R2 (unimolecular) excluded from bimolecular table
    assert html(bt, with_name=False) == (
        u'<table><th></th><th>A</th>\n<tr><td>A</td><td ><a title="0: 2 A → A">R1</a></td></tr></table>'
    )
@requires(parsing_library, "numpy")
def test_ReactionSystem__substance_factory():
    """substance_factory=Substance.from_formula yields composition/charge data."""
    r1 = Reaction.from_string("H2O -> H+ + OH-", "H2O H+ OH-")
    rs = ReactionSystem([r1], "H2O H+ OH-", substance_factory=Substance.from_formula)
    assert rs.net_stoichs(["H2O"]) == [-1]
    assert rs.net_stoichs(["H+"]) == [1]
    assert rs.net_stoichs(["OH-"]) == [1]
    assert rs.substances["H2O"].composition[8] == 1  # one oxygen atom
    assert rs.substances["OH-"].composition[0] == -1  # net charge -1
    assert rs.substances["H+"].charge == 1
@requires(units_library)
def test_ReactionSystem__as_per_substance_array_dict():
    """Round-trip between per-substance dicts and arrays, with units."""
    mol = default_units.mol
    m = default_units.metre
    M = default_units.molar
    rs = ReactionSystem([], [Substance("H2O")])
    c = rs.as_per_substance_array({"H2O": 1 * M}, unit=M)
    assert c.dimensionality == M.dimensionality
    assert abs(c[0] / (1000 * mol / m ** 3) - 1) < 1e-16  # 1 M == 1000 mol/m^3
    c = rs.as_per_substance_array({"H2O": 1})
    # unknown substance key raises
    with pytest.raises(KeyError):
        c = rs.as_per_substance_array({"H": 1})
    assert rs.as_per_substance_dict([42]) == {"H2O": 42}
@requires(parsing_library)
def test_ReactionSystem__add():
    """Adding reaction systems (binary + and in-place +=) merges reactions
    and substances; substance metadata from the left operand wins."""
    rs1 = ReactionSystem.from_string(
        "\n".join(["2 H2O2 -> O2 + 2 H2O", "H2 + O2 -> H2O2"])
    )
    rs2 = ReactionSystem.from_string("\n".join(["2 NH3 -> N2 + 3 H2"]))
    rs3 = rs1 + rs2
    assert rs1 == rs1
    assert rs1 != rs2
    assert rs3 != rs1
    assert len(rs1.rxns) == 2 and len(rs2.rxns) == 1 and len(rs3.rxns) == 3
    for k in "H2O2 O2 H2O H2 NH3 N2".split():
        assert k in rs3.substances
    rs1 += rs2
    assert len(rs1.rxns) == 3 and len(rs2.rxns) == 1
    assert rs1 == rs3
    # adding an iterable of Reaction instances also works
    rs4 = ReactionSystem.from_string("H2O -> H+ + OH-; 1e-4")
    rs4 += [Reaction({"H+", "OH-"}, {"H2O"}, 1e10)]
    assert len(rs4.rxns) == 2
    assert rs4.rxns[0].reac == {"H2O": 1}
    assert rs4.rxns[1].reac == {"H+": 1, "OH-": 1}
    # at these concentrations forward and backward rates cancel
    res = rs4.rates({"H2O": 1, "H+": 1e-7, "OH-": 1e-7})
    for k in "H2O H+ OH-".split():
        assert abs(res[k]) < 1e-16
    rs5 = ReactionSystem.from_string("H3O+ -> H+ + H2O")
    rs6 = rs4 + rs5
    rs7 = rs6 + (Reaction.from_string("H+ + H2O -> H3O+"),)
    assert len(rs7.rxns) == 4
    # mixing systems and bare tuples of systems is rejected
    with pytest.raises(ValueError):
        rs5 += (rs1, rs2)
    with pytest.raises(ValueError):
        rs5 + (rs1, rs2)
    rs1 = ReactionSystem.from_string("O2 + H2 -> H2O2")
    rs1.substances["H2O2"].data["D"] = 123
    rs2 = ReactionSystem.from_string("H2O2 -> 2 OH")
    rs2.substances["H2O2"].data["D"] = 456
    rs2.substances["OH"].data["D"] = 789
    rs3 = rs2 + rs1
    # left operand's substance data takes precedence on conflict
    assert (
        rs3.substances["H2O2"].data["D"] == 123
        and rs3.substances["OH"].data["D"] == 789
    )
    assert rs3.rxns[0].reac == {"H2O2": 1}
    assert rs3.rxns[1].reac == {"O2": 1, "H2": 1}
    assert len(rs3.rxns) == 2
    rs2 += rs1
    assert (
        rs2.substances["H2O2"].data["D"] == 123
        and rs2.substances["OH"].data["D"] == 789
    )
    assert rs2.rxns[0].reac == {"H2O2": 1}
    assert rs2.rxns[1].reac == {"O2": 1, "H2": 1}
    assert len(rs2.rxns) == 2
@requires(parsing_library, units_library)
def test_ReactionSystem__from_string():
    """Parsing of reaction strings: radiolytic source terms, repeated species,
    comments, and inline rate expressions with a ``ref`` keyword."""
    # checks=() presumably disables sanity checks (reactant-less reactions
    # would otherwise be rejected) -- TODO confirm against ReactionSystem docs.
    rs = ReactionSystem.from_string("-> H + OH; Radiolytic(2.1e-7)", checks=())
    assert rs.rxns[0].reac == {}
    assert rs.rxns[0].prod == {"H": 1, "OH": 1}
    assert rs.rxns[0].param.args == [2.1e-7]
    ref = 2.1e-7 * 0.15 * 998
    assert rs.rates({"doserate": 0.15, "density": 998}) == {"H": ref, "OH": ref}
    # Species repeated on both sides keep their full stoichiometric counts.
    (r2,) = ReactionSystem.from_string("H2O + H2O + H+ -> H3O+ + H2O").rxns
    assert r2.reac == {"H2O": 2, "H+": 1}
    assert r2.prod == {"H2O": 1, "H3O+": 1}
    # '#' starts a comment outside quotes; quoted '#' (in ref=...) survives.
    rs2 = ReactionSystem.from_string(
        """
    # H2O -> OH + H
    H+ + OH- -> H2O; 4*pi*(4e-9*m**2/s + 2e-9*m**2/s)*0.44*nm*Avogadro_constant; ref='made up #hashtag'  # comment
    #H+ + OH- -> H2O
    """
    )
    assert len(rs2.rxns) == 1
    assert sorted(rs2.substances.keys()) == sorted("H2O H+ OH-".split())
    assert allclose(
        rs2.rxns[0].param, 1.99786e10 / default_units.M / default_units.s, rtol=1e-5
    )
    assert rs2.rxns[0].ref == "made up #hashtag"
@requires(parsing_library, "numpy")
def test_ReactionSystem__from_string__symbolics():
    """Quoted rate constants stay symbolic and support arithmetic composition."""
    rs3 = ReactionSystem.from_string(
        """
        A -> B; 'kA'
        B -> C; 0
        """,
        substance_factory=Substance,
    )
    # Replace the zero rate of B -> C by twice the symbolic kA.
    rs3.rxns[1].param = 2 * rs3.rxns[0].param
    # With kA=42: r1 = 42*29, r2 = 2*42*31.
    assert rs3.rates(dict(A=29, B=31, kA=42)) == {
        "A": -29 * 42,
        "B": 29 * 42 - 2 * 31 * 42,
        "C": 2 * 31 * 42,
    }
@requires(parsing_library, units_library)
def test_ReactionSystem__from_string__units():
    """Units in rate expressions are parsed and compared with full dimension info."""
    # Parenthesized species are inactive reactants (do not enter the rate law).
    (r3,) = ReactionSystem.from_string(
        "(H2O) -> e-(aq) + H+ + OH; Radiolytic(2.1e-7*mol/J)"
    ).rxns
    assert len(r3.reac) == 0 and r3.inact_reac == {"H2O": 1}
    assert r3.prod == {"e-(aq)": 1, "H+": 1, "OH": 1}
    from chempy.kinetics.rates import Radiolytic
    mol, J = default_units.mol, default_units.J
    assert r3.param == Radiolytic(2.1e-7 * mol / J)
    # Equality is sensitive to both magnitude and presence of units.
    assert r3.param != Radiolytic(2.0e-7 * mol / J)
    assert r3.param != Radiolytic(2.1e-7)
    assert r3.order() == 0
    # A repr()'d quantity embedded in the reaction string round-trips.
    k = 1e-4 / default_units.second
    rs = ReactionSystem.from_string(
        """
        H2O -> H+ + OH-; {}
        """.format(
            repr(k)
        )
    )
    assert allclose(rs.rxns[0].param, k)
@requires(parsing_library, "numpy")
def test_ReactionSystem__from_string___special_naming():
    """Names with a trailing '*' (excited species) parse as distinct substances."""
    rs = ReactionSystem.from_string(
        """
        H2O* + H2O -> 2 H2O
        H2O* -> OH + H
        """
    )  # excited water
    for sk in "H2O* H2O OH H".split():
        assert sk in rs.substances
    # Composition ignores the '*': keys are atomic numbers H(1)=2, O(8)=1.
    assert rs.substances["H2O*"].composition == {1: 2, 8: 1}
    assert rs.categorize_substances() == dict(
        accumulated={"OH", "H", "H2O"},
        depleted={"H2O*"},
        unaffected=set(),
        nonparticipating=set(),
    )
@requires(parsing_library)
def test_ReactionSystem__from_string__string_rate_const():
    """A quoted rate-constant name survives a parse/format round trip."""
    rsys = ReactionSystem.from_string("H+ + OH- -> H2O; 'kf'")
    (r2,) = rsys.rxns
    assert r2.reac == {"OH-": 1, "H+": 1}
    assert r2.prod == {"H2O": 1}
    r2str = r2.string(rsys.substances, with_param=True)
    assert r2str.endswith("; 'kf'")
@requires("numpy")
def test_ReactionSystem__upper_conc_bounds():
    """Upper concentration bounds follow from elemental conservation."""
    rs = ReactionSystem.from_string(
        "\n".join(["2 NH3 -> N2 + 3 H2", "N2H4 -> N2 + 2 H2"])
    )
    c0 = {"NH3": 5, "N2": 7, "H2": 11, "N2H4": 2}
    # Total N atoms: 5*1 (NH3) + 7*2 (N2) + 2*2 (N2H4)
    _N = 5 + 14 + 4
    # Total H atoms: 5*3 (NH3) + 11*2 (H2) + 2*4 (N2H4)
    _H = 15 + 22 + 8
    # Each bound is the tightest element budget divided by that species'
    # stoichiometric demand for the element.
    ref = {
        "NH3": min(_N, _H / 3),
        "N2": _N / 2,
        "H2": _H / 2,
        "N2H4": min(_N / 2, _H / 4),
    }
    res = rs.as_per_substance_dict(rs.upper_conc_bounds(c0))
    assert res == ref
@requires("numpy")
def test_ReactionSystem__upper_conc_bounds__a_substance_no_composition():
    """Species without composition info (here e-(aq)) get an infinite bound."""
    rs = ReactionSystem.from_string(
        """
        H2O -> e-(aq) + H2O+
        H2O+ + e-(aq) -> H2O
        """
    )
    c0 = {"H2O": 55.0, "e-(aq)": 2e-3, "H2O+": 3e-3}
    # Element budgets over all species containing O and H respectively.
    _O = 55 + 3e-3
    _H = 2 * 55 + 2 * 3e-3
    ref = {
        "H2O": min(_O, _H / 2),
        "e-(aq)": float("inf"),
        "H2O+": min(_O, _H / 2),
    }
    res = rs.as_per_substance_dict(rs.upper_conc_bounds(c0))
    assert res == ref
@requires(parsing_library)
def test_ReactionSystem__identify_equilibria():
    """Pairs of mutually inverse reactions are reported as (fw_idx, bw_idx)."""
    rsys = ReactionSystem.from_string(
        """
        2 H2 + O2 -> 2 H2O     ; 1e-3
        H2O -> H+ + OH-        ; 1e-4/55.35
        H+ + OH- -> H2O        ; 1e10
        2 H2O -> 2 H2 + O2
        """
    )
    # Reaction 3 reverses reaction 0; reaction 2 reverses reaction 1.
    assert rsys.identify_equilibria() == [(0, 3), (1, 2)]
@requires(parsing_library, "numpy")
def test_ReactionSystem__categorize_substances():
    """Substances are sorted into accumulated/depleted/unaffected/nonparticipating."""
    # A fully reversible network: no substance is monotonically produced/consumed.
    rsys1 = ReactionSystem.from_string(
        """
        2 H2 + O2 -> 2 H2O     ; 1e-3
        H2O -> H+ + OH-        ; 1e-4/55.35
        H+ + OH- -> H2O        ; 1e10
        2 H2O -> 2 H2 + O2
        """
    )
    assert all(not s for s in rsys1.categorize_substances().values())
    # Irreversible decompositions: products accumulate, reactants deplete.
    rsys2 = ReactionSystem.from_string(
        "\n".join(["2 NH3 -> N2 + 3 H2", "N2H4 -> N2 + 2 H2"])
    )
    assert rsys2.categorize_substances() == dict(
        accumulated={"N2", "H2"},
        depleted={"NH3", "N2H4"},
        unaffected=set(),
        nonparticipating=set(),
    )
    rsys3 = ReactionSystem.from_string("H+ + OH- -> H2O; 'kf'")
    assert rsys3.categorize_substances() == dict(
        accumulated={"H2O"},
        depleted={"H+", "OH-"},
        unaffected=set(),
        nonparticipating=set(),
    )
    # Substances declared but never appearing in any reaction are 'nonparticipating'.
    rsys4 = ReactionSystem(
        [Reaction({"H2": 2, "O2": 1}, {"H2O": 2})], "H2 O2 H2O N2 Ar"
    )
    assert rsys4.categorize_substances() == dict(
        accumulated={"H2O"},
        depleted={"H2", "O2"},
        unaffected=set(),
        nonparticipating={"N2", "Ar"},
    )
    # With symbolic MassAction rates, only C grows monotonically here.
    rsys5 = ReactionSystem.from_string(
        """
        A -> B; MassAction(unique_keys=('k1',))
        B + C -> A + C; MassAction(unique_keys=('k2',))
        2 B -> B + C; MassAction(unique_keys=('k3',))
        """,
        substance_factory=lambda formula: Substance(formula),
    )
    assert rsys5.categorize_substances() == dict(
        accumulated={"C"}, depleted=set(), unaffected=set(), nonparticipating=set()
    )
    # A catalyst (same amount on both sides) counts as 'unaffected'.
    rsys6 = ReactionSystem.from_string("""H2O2 + Fe+3 + (H2O2) -> 2 H2O + O2 + Fe+3""")
    assert rsys6.rxns[0].order() == 2  # the additional H2O2 within parenthesis
    assert rsys6.categorize_substances() == dict(
        accumulated={"H2O", "O2"},
        depleted={"H2O2"},
        unaffected={"Fe+3"},
        nonparticipating=set(),
    )
@requires(parsing_library, "numpy")
def test_ReactionSystem__split():
    """split() partitions a system into independent (uncoupled) sub-systems."""
    a = """
    2 H2 + O2 -> 2 H2O     ; 1e-3
    H2O -> H+ + OH-        ; 1e-4/55.35
    H+ + OH- -> H2O        ; 1e10
    2 H2O -> 2 H2 + O2"""
    b = """
    2 N -> N2"""
    c = """
    2 ClBr -> Cl2 + Br2
    """
    rsys1 = ReactionSystem.from_string(a + b + c)
    res = rsys1.split()
    ref = list(map(ReactionSystem.from_string, [a, b, c]))
    # Normalize substance ordering so equality is order-insensitive.
    for rs in chain(res, ref):
        rs.sort_substances_inplace()
    res1a, res1b, res1c = res
    ref1a, ref1b, ref1c = ref
    assert res1a == ref1a
    assert res1b == ref1b
    assert res1c == ref1c
    assert res1c != ref1a
    assert rsys1.categorize_substances() == dict(
        accumulated={"N2", "Cl2", "Br2"},
        depleted={"N", "ClBr"},
        unaffected=set(),
        nonparticipating=set(),
    )
def test_ReactionSystem__subset():
    """subset() splits into (matching, non-matching) systems by a predicate."""
    r1 = Reaction({"NH3": 2}, {"N2": 1, "H2": 3})
    r2 = Reaction({"N2H4": 1}, {"N2": 1, "H2": 2})
    rs1 = ReactionSystem([r1, r2])
    rs2, rs3 = rs1.subset(lambda r: "N2H4" in r.keys())
    # Original system is left untouched.
    assert len(rs1.rxns) == 2 and len(rs2.rxns) == 1
    assert rs2 == ReactionSystem([r2])
    assert rs3 == ReactionSystem([r1])
@requires(parsing_library)
def test_ReactionSystem__concatenate():
    """concatenate() merges systems and reports duplicate reactions it skipped."""
    rs1 = ReactionSystem.from_string(
        """
        H + OH -> H2O; 1e10; name='rs1a'
        2 H2O2 -> 2 H2O + O2; 1e-7; name='rs1b'
        """
    )
    rs2 = ReactionSystem.from_string(
        """
        H + OH -> H2O; 1e11; name='rs2a'
        H2O2 -> H2 + O2; 1e-9; name='rs2b'
        """
    )
    rs, skipped = ReactionSystem.concatenate([rs1, rs2])
    # 'rs2a' duplicates the stoichiometry of 'rs1a' and is therefore skipped.
    (sr,) = skipped.rxns
    assert sr.name == "rs2a"
    assert rs.rxns[-1].name == "rs2b"
|
bjodah/aqchem
|
chempy/tests/test_reactionsystem.py
|
Python
|
bsd-2-clause
| 15,621
|
[
"ChemPy"
] |
7e99b4f6028ee04499d9f473a06189e1cefdbc4e93ce39fc07cf73cb0f452b93
|
# Jython Database Specification API 2.0
#
# Copyright (c) 2001 brian zimmer <bzimmer@ziclix.com>
"""
To run the tests, simply invoke this script from the commandline:
jython runner.py <xml config file> [vendor, ...]
If no vendors are given, then all vendors will be tested. If a
vendor is given, then only that vendor will be tested.
"""
import unittest, os
import xmllib, __builtin__, re
def __imp__(module, attr=None):
    """Dynamic import helper (Python 2 / Jython).

    With *attr*: import *module* and return the attribute named *attr*
    looked up on the returned (top-level) module object.
    Without *attr*: return the module itself; passing the last dotted
    component as the fromlist makes __import__ return the leaf module
    rather than the top-level package.
    """
    if attr:
        j = __import__(module, globals(), locals())
        return getattr(j, attr)
    else:
        last = module.split(".")[-1]
        return __import__(module, globals(), locals(), last)
class Factory:
    """Describes how to obtain a DB connection: a class and a factory method
    on it, plus positional ``arguments`` and ``keywords`` parsed from XML."""
    def __init__(self, classname, method):
        self.classname = classname  # fully qualified class to import
        self.method = method        # name of the factory method to invoke
        self.arguments = []         # list of (name, value) pairs
        self.keywords = {}          # keyword arguments by name
class Testcase:
    """A test case reference: module (``frm``), symbol to import (``impt``)
    and a list of test method names to ignore."""
    def __init__(self, frm, impt):
        self.frm = frm
        self.impt = impt
        self.ignore = []  # test-method names excluded from the suite
class Test:
    """A named group of test cases, restricted to an os.name value and
    sharing one connection Factory."""
    def __init__(self, name, os):
        self.name = name
        self.os = os          # os.name this group runs on ('' == any)
        self.factory = None   # Factory set by the config parser
        self.tests = []       # list of Testcase
class Vendor:
    """A database vendor entry from the config: its tests, optional data
    handler module and named table definitions."""
    def __init__(self, name, datahandler=None):
        self.name = name
        self.scroll = None              # optional scroll attribute from XML
        self.datahandler = datahandler  # module name of a DataHandler, if any
        self.tests = []                 # list of Test groups
        self.tables = {}                # ref -> (table name, DDL text)
class ConfigParser(xmllib.XMLParser):
    """
    A simple XML parser for the config file.

    xmllib dispatches <tag> elements to start_<tag>/end_<tag> methods, so
    each handler below corresponds to one element of the config schema.
    The accessor methods (vendor/test/factory/testcase) return the most
    recently opened element of that kind, relying on document order.
    """
    def __init__(self, **kw):
        # Python 2 style base-class init (apply is fine under Jython 2.x).
        apply(xmllib.XMLParser.__init__, (self,), kw)
        self.vendors = []      # all parsed Vendor objects, in document order
        self.table_stack = []  # open <table> elements: (ref, name) pairs
        self.re_var = re.compile(r"\${(.*?)}")  # ${prop} substitution syntax
    def vendor(self):
        """Return the vendor currently being populated (the last one seen)."""
        assert len(self.vendors) > 0, "no vendors"
        return self.vendors[-1]
    def test(self):
        """Return the current vendor's most recent test group."""
        v = self.vendor()
        assert len(v.tests) > 0, "no tests"
        return v.tests[-1]
    def factory(self):
        """Return the current test group's connection factory."""
        c = self.test()
        assert c.factory, "no factory"
        return c.factory
    def testcase(self):
        """Return the current test group's most recent testcase."""
        s = self.test()
        assert len(s.tests) > 0, "no testcases"
        return s.tests[-1]
    def value(self, value):
        """Expand ${name} references using Java system properties."""
        def repl(sub):
            from java.lang import System
            # Fall back to the property name itself when undefined.
            return System.getProperty(sub.group(1), sub.group(1))
        value = self.re_var.sub(repl, value)
        return value
    def start_vendor(self, attrs):
        if attrs.has_key('datahandler'):
            v = Vendor(attrs['name'], attrs['datahandler'])
        else:
            v = Vendor(attrs['name'])
        if attrs.has_key('scroll'):
            v.scroll = attrs['scroll']
        self.vendors.append(v)
    def start_test(self, attrs):
        v = self.vendor()
        c = Test(attrs['name'], attrs['os'])
        v.tests.append(c)
    def start_factory(self, attrs):
        c = self.test()
        f = Factory(attrs['class'], attrs['method'])
        c.factory = f
    def start_argument(self, attrs):
        # Optional 'type' attribute names a builtin used to coerce the value.
        f = self.factory()
        if attrs.has_key('type'):
            f.arguments.append((attrs['name'], getattr(__builtin__, attrs['type'])(self.value(attrs['value']))))
        else:
            f.arguments.append((attrs['name'], self.value(attrs['value'])))
    def start_keyword(self, attrs):
        f = self.factory()
        if attrs.has_key('type'):
            f.keywords[attrs['name']] = getattr(__builtin__, attrs['type'])(self.value(attrs['value']))
        else:
            f.keywords[attrs['name']] = self.value(attrs['value'])
    def start_ignore(self, attrs):
        t = self.testcase()
        t.ignore.append(attrs['name'])
    def start_testcase(self, attrs):
        c = self.test()
        c.tests.append(Testcase(attrs['from'], attrs['import']))
    def start_table(self, attrs):
        self.table_stack.append((attrs['ref'], attrs['name']))
    def end_table(self):
        del self.table_stack[-1]
    def handle_data(self, data):
        # Character data is only meaningful inside an open <table> element,
        # where it carries the table's DDL text.
        if len(self.table_stack):
            ref, tabname = self.table_stack[-1]
            self.vendor().tables[ref] = (tabname, data.strip())
class SQLTestCase(unittest.TestCase):
    """
    Base testing class. It contains the list of table and factory information
    to run any tests.
    """
    def __init__(self, name, vendor, factory):
        unittest.TestCase.__init__(self, name)
        self.vendor = vendor
        self.factory = factory
        # Import the vendor-specific DataHandler module, when configured.
        if self.vendor.datahandler:
            self.datahandler = __imp__(self.vendor.datahandler)
    def table(self, name):
        """Return the (table name, DDL) pair registered under *name*."""
        return self.vendor.tables[name]
    def has_table(self, name):
        """Return whether a table was configured under *name*."""
        return self.vendor.tables.has_key(name)
def make_suite(vendor, testcase, factory, mask=None):
    """Build a TestSuite for one configured testcase.

    Imports the test class, collects its test* method names minus the
    configured ignores, optionally keeps only the name equal to *mask*,
    and instantiates each with (name, vendor, factory) -- the SQLTestCase
    constructor signature.
    """
    clz = __imp__(testcase.frm, testcase.impt)
    caseNames = filter(lambda x, i=testcase.ignore: x not in i, unittest.getTestCaseNames(clz, "test"))
    if mask is not None:
        caseNames = filter(lambda x, mask=mask: x == mask, caseNames)
    tests = [clz(caseName, vendor, factory) for caseName in caseNames]
    return unittest.TestSuite(tests)
def test(vendors, include=None, mask=None):
    """Run the suites of every vendor (or only those named in *include*),
    skipping test groups bound to a different os.name."""
    for vendor in vendors:
        if not include or vendor.name in include:
            print
            print "testing [%s]" % (vendor.name)
            for test in vendor.tests:
                # Empty test.os means "any platform".
                if not test.os or test.os == os.name:
                    for testcase in test.tests:
                        suite = make_suite(vendor, testcase, test.factory, mask)
                        unittest.TextTestRunner().run(suite)
        else:
            print
            print "skipping [%s]" % (vendor.name)
if __name__ == '__main__':
import sys, getopt
try:
opts, args = getopt.getopt(sys.argv[1:], "t:", [])
except getopt.error, msg:
print "%s -t [testmask] <vendor>[,<vendor>]"
sys.exit(0)
mask = None
for a in opts:
opt, arg = a
if opt == '-t':
mask = arg
configParser = ConfigParser()
fp = open(args[0], "r")
configParser.feed(fp.read())
fp.close()
test(configParser.vendors, args[1:], mask=mask)
sys.exit(0)
|
ofermend/medicare-demo
|
socialite/jython/Lib/test/zxjdbc/runner.py
|
Python
|
apache-2.0
| 5,996
|
[
"Brian"
] |
240e5f3d665fcf6e5b5103b72b762944b5aa539fb0f2633860ef697b42228985
|
# -*- coding: utf-8 -*-
# Copyright (c) 2006-2016 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2018 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Nick Bastin <nick.bastin@gmail.com>
# Copyright (c) 2015 Michael Kefeder <oss@multiwave.ch>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Stephane Wirtel <stephane@wirtel.be>
# Copyright (c) 2015 Cosmin Poieana <cmin@ropython.org>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016, 2018 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Copyright (c) 2016 Elias Dorneles <eliasdorneles@gmail.com>
# Copyright (c) 2016 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016 Yannack <yannack@users.noreply.github.com>
# Copyright (c) 2016 Alex Jurkiewicz <alex@jurkiewi.cz>
# Copyright (c) 2017 Jacques Kvam <jwkvam@gmail.com>
# Copyright (c) 2017 ttenhoeve-aa <ttenhoeve@appannie.com>
# Copyright (c) 2017 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2018 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2018 Steven M. Vascellaro <svascellaro@gmail.com>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 Chris Lamb <chris@chris-lamb.co.uk>
# Copyright (c) 2018 glmdgrielson <32415403+glmdgrielson@users.noreply.github.com>
# Copyright (c) 2018 Ville Skyttä <ville.skytta@upcloud.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""basic checker for Python code"""
import builtins
import collections
import itertools
import sys
import re
from typing import Pattern
import astroid
import astroid.bases
import astroid.scoped_nodes
from pylint import checkers
from pylint import exceptions
from pylint import interfaces
from pylint.checkers import utils
from pylint import reporters
from pylint.checkers.utils import get_node_last_lineno
from pylint.reporters.ureports import nodes as reporter_nodes
import pylint.utils as lint_utils
class NamingStyle:
    """Base class for naming conventions; subclasses fill in the regexes.

    It may seem counterintuitive that single naming style
    has multiple "accepted" forms of regular expressions,
    but we need to special-case stuff like dunder names
    in method names.
    """
    CLASS_NAME_RGX = None  # type: Pattern[str]
    MOD_NAME_RGX = None  # type: Pattern[str]
    CONST_NAME_RGX = None  # type: Pattern[str]
    COMP_VAR_RGX = None  # type: Pattern[str]
    DEFAULT_NAME_RGX = None  # type: Pattern[str]
    CLASS_ATTRIBUTE_RGX = None  # type: Pattern[str]
    @classmethod
    def get_regex(cls, name_type):
        """Return the compiled regex validating names of *name_type*.

        Raises KeyError for an unknown name type.
        """
        return {
            "module": cls.MOD_NAME_RGX,
            "const": cls.CONST_NAME_RGX,
            "class": cls.CLASS_NAME_RGX,
            "function": cls.DEFAULT_NAME_RGX,
            "method": cls.DEFAULT_NAME_RGX,
            "attr": cls.DEFAULT_NAME_RGX,
            "argument": cls.DEFAULT_NAME_RGX,
            "variable": cls.DEFAULT_NAME_RGX,
            "class_attribute": cls.CLASS_ATTRIBUTE_RGX,
            "inlinevar": cls.COMP_VAR_RGX,
        }[name_type]
class SnakeCaseStyle(NamingStyle):
    """Regex rules for snake_case naming style."""
    CLASS_NAME_RGX = re.compile("[a-z_][a-z0-9_]+$")
    MOD_NAME_RGX = re.compile("([a-z_][a-z0-9_]*)$")
    # (__.*__) accepts dunder constants such as __version__.
    CONST_NAME_RGX = re.compile("(([a-z_][a-z0-9_]*)|(__.*__))$")
    COMP_VAR_RGX = re.compile("[a-z_][a-z0-9_]*$")
    # {2,} enforces a minimum length of 3 for ordinary names; the other
    # alternatives allow _private/sunder and dunder method names.
    DEFAULT_NAME_RGX = re.compile(
        "(([a-z_][a-z0-9_]{2,})|(_[a-z0-9_]*)|(__[a-z][a-z0-9_]+__))$"
    )
    CLASS_ATTRIBUTE_RGX = re.compile(r"(([a-z_][a-z0-9_]{2,}|(__.*__)))$")
class CamelCaseStyle(NamingStyle):
    """Regex rules for camelCase naming style (lowercase first letter)."""
    CLASS_NAME_RGX = re.compile("[a-z_][a-zA-Z0-9]+$")
    MOD_NAME_RGX = re.compile("([a-z_][a-zA-Z0-9]*)$")
    CONST_NAME_RGX = re.compile("(([a-z_][A-Za-z0-9]*)|(__.*__))$")
    COMP_VAR_RGX = re.compile("[a-z_][A-Za-z0-9]*$")
    # {2,} enforces a minimum length of 3; dunders remain acceptable.
    DEFAULT_NAME_RGX = re.compile("(([a-z_][a-zA-Z0-9]{2,})|(__[a-z][a-zA-Z0-9_]+__))$")
    CLASS_ATTRIBUTE_RGX = re.compile(r"([a-z_][A-Za-z0-9]{2,}|(__.*__))$")
class PascalCaseStyle(NamingStyle):
    """Regex rules for PascalCase naming style (uppercase first letter)."""
    CLASS_NAME_RGX = re.compile("[A-Z_][a-zA-Z0-9]+$")
    MOD_NAME_RGX = re.compile("[A-Z_][a-zA-Z0-9]+$")
    CONST_NAME_RGX = re.compile("(([A-Z_][A-Za-z0-9]*)|(__.*__))$")
    COMP_VAR_RGX = re.compile("[A-Z_][a-zA-Z0-9]+$")
    DEFAULT_NAME_RGX = re.compile("[A-Z_][a-zA-Z0-9]{2,}$|(__[a-z][a-zA-Z0-9_]+__)$")
    CLASS_ATTRIBUTE_RGX = re.compile("[A-Z_][a-zA-Z0-9]{2,}$")
class UpperCaseStyle(NamingStyle):
    """Regex rules for UPPER_CASE naming style."""
    CLASS_NAME_RGX = re.compile("[A-Z_][A-Z0-9_]+$")
    MOD_NAME_RGX = re.compile("[A-Z_][A-Z0-9_]+$")
    CONST_NAME_RGX = re.compile("(([A-Z_][A-Z0-9_]*)|(__.*__))$")
    COMP_VAR_RGX = re.compile("[A-Z_][A-Z0-9_]+$")
    # Lowercase dunder methods are still allowed under this style.
    DEFAULT_NAME_RGX = re.compile("([A-Z_][A-Z0-9_]{2,})|(__[a-z][a-zA-Z0-9_]+__)$")
    CLASS_ATTRIBUTE_RGX = re.compile("[A-Z_][A-Z0-9_]{2,}$")
class AnyStyle(NamingStyle):
    """Permissive style: every name matches, regardless of its type."""
    @classmethod
    def get_regex(cls, name_type):
        return re.compile(".*")
# Map from the user-facing style option value to its NamingStyle class.
NAMING_STYLES = {
    "snake_case": SnakeCaseStyle,
    "camelCase": CamelCaseStyle,
    "PascalCase": PascalCaseStyle,
    "UPPER_CASE": UpperCaseStyle,
    "any": AnyStyle,
}
# do not require a doc string on private/system methods
NO_REQUIRED_DOC_RGX = re.compile("^_")
REVERSED_PROTOCOL_METHOD = "__reversed__"
SEQUENCE_PROTOCOL_METHODS = ("__getitem__", "__len__")
# Either the sequence protocol pair or __reversed__ makes reversed() work.
REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS, (REVERSED_PROTOCOL_METHOD,))
TYPECHECK_COMPARISON_OPERATORS = frozenset(("is", "is not", "==", "!=", "in", "not in"))
LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set)
UNITTEST_CASE = "unittest.case"
BUILTINS = builtins.__name__
TYPE_QNAME = "%s.type" % BUILTINS
PY33 = sys.version_info >= (3, 3)
PY3K = sys.version_info >= (3, 0)
PY35 = sys.version_info >= (3, 5)
ABC_METACLASSES = {"_py_abc.ABCMeta", "abc.ABCMeta"}  # Python 3.7+,
# Name categories that are always consistent with all naming conventions.
EXEMPT_NAME_CATEGORIES = {"exempt", "ignore"}
# A mapping from builtin-qname -> symbol, to be used when generating messages
# about dangerous default values as arguments
DEFAULT_ARGUMENT_SYMBOLS = dict(
    zip(
        [".".join([BUILTINS, x]) for x in ("set", "dict", "list")],
        ["set()", "{}", "[]"],
    )
)
# Mirror image of each ordering operator, used when flipping comparisons.
REVERSED_COMPS = {"<": ">", "<=": ">=", ">": "<", ">=": "<="}
COMPARISON_OPERATORS = frozenset(("==", "!=", "<", ">", "<=", ">="))
# List of methods which can be redefined
REDEFINABLE_METHODS = frozenset(("__module__",))
def _redefines_import(node):
    """ Detect that the given node (AssignName) is inside an
    exception handler and redefines an import from the tryexcept body.
    Returns True if the node redefines an import, False otherwise.
    """
    # Climb until the direct parent is the ExceptHandler itself.
    current = node
    while current and not isinstance(current.parent, astroid.ExceptHandler):
        current = current.parent
    # Only handlers catching ImportError qualify for this pattern.
    if not current or not utils.error_of_type(current.parent, ImportError):
        return False
    # The handler's parent is the try/except; scan its imports for a name
    # (or alias) matching the assigned one.
    try_block = current.parent.parent
    for import_node in try_block.nodes_of_class((astroid.ImportFrom, astroid.Import)):
        for name, alias in import_node.names:
            if alias:
                # 'import x as y' binds y, so compare against the alias only.
                if alias == node.name:
                    return True
            elif name == node.name:
                return True
    return False
def in_loop(node):
    """Return True when *node* has a for-loop or comprehension ancestor."""
    looping_constructs = (
        astroid.For,
        astroid.ListComp,
        astroid.SetComp,
        astroid.DictComp,
        astroid.GeneratorExp,
    )
    ancestor = node.parent
    while ancestor is not None:
        if isinstance(ancestor, looping_constructs):
            return True
        ancestor = ancestor.parent
    return False
def in_nested_list(nested_list, obj):
    """Return true when *obj* equals an element of *nested_list* or of any
    list/tuple nested (recursively) inside it.
    """
    return any(
        in_nested_list(element, obj)
        if isinstance(element, (list, tuple))
        else element == obj
        for element in nested_list
    )
def _get_break_loop_node(break_node):
    """
    Returns the loop node that holds the break node in arguments.
    Args:
        break_node (astroid.Break): the break node of interest.
    Returns:
        astroid.For or astroid.While: the loop node holding the break node,
        or None if no enclosing loop is found.
    """
    loop_nodes = (astroid.For, astroid.While)
    parent = break_node.parent
    # Walk up past loops whose 'orelse' contains the break: a break inside
    # a loop's else-clause belongs to an *outer* loop, not that one.
    while not isinstance(parent, loop_nodes) or break_node in getattr(
        parent, "orelse", []
    ):
        parent = parent.parent
        # Ran out of ancestors: the break has no enclosing loop here.
        if parent is None:
            break
    return parent
def _loop_exits_early(loop):
    """
    Returns true if a loop may end up in a break statement.
    Args:
        loop (astroid.For, astroid.While): the loop node inspected.
    Returns:
        bool: True if the loop may end up in a break statement, False otherwise.
    """
    loop_nodes = (astroid.For, astroid.While)
    definition_nodes = (astroid.FunctionDef, astroid.ClassDef)
    # Loops nested inside this one (but not inside nested functions/classes):
    # breaks belonging to them do not exit *this* loop.
    inner_loop_nodes = [
        _node
        for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
        if _node != loop
    ]
    # A break counts only when its owning loop is this one (i.e. not one of
    # the inner loops collected above).
    return any(
        _node
        for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes)
        if _get_break_loop_node(_node) not in inner_loop_nodes
    )
def _is_multi_naming_match(match, node_type, confidence):
    """Return True when a multi-style name regex matched a non-exempt group.

    Method names matched only through failed inference are not trusted.
    """
    if match is None or match.lastgroup is None:
        return False
    if match.lastgroup in EXEMPT_NAME_CATEGORIES:
        return False
    return node_type != "method" or confidence != interfaces.INFERENCE_FAILURE
BUILTIN_PROPERTY = "builtins.property"
def _get_properties(config):
"""Returns a tuple of property classes and names.
Property classes are fully qualified, such as 'abc.abstractproperty' and
property names are the actual names, such as 'abstract_property'.
"""
property_classes = {BUILTIN_PROPERTY}
property_names = set() # Not returning 'property', it has its own check.
if config is not None:
property_classes.update(config.property_classes)
property_names.update(
(prop.rsplit(".", 1)[-1] for prop in config.property_classes)
)
return property_classes, property_names
def _determine_function_name_type(node, config=None):
    """Determine the name type whose regex a function's name should match.
    :param node: A function node.
    :type node: astroid.node_classes.NodeNG
    :param config: Configuration from which to pull additional property classes.
    :type config: :class:`optparse.Values`
    :returns: One of ('function', 'method', 'attr')
    :rtype: str
    """
    property_classes, property_names = _get_properties(config)
    if not node.is_method():
        return "function"
    if node.decorators:
        decorators = node.decorators.nodes
    else:
        decorators = []
    for decorator in decorators:
        # If the function is a property (decorated with @property
        # or @abc.abstractproperty), the name type is 'attr'.
        if isinstance(decorator, astroid.Name) or (
            isinstance(decorator, astroid.Attribute)
            and decorator.attrname in property_names
        ):
            # Inference confirms the decorator really is a known property class
            # (a bare Name could be anything).
            infered = utils.safe_infer(decorator)
            if infered and infered.qname() in property_classes:
                return "attr"
        # If the function is decorated using the prop_method.{setter,getter}
        # form, treat it like an attribute as well.
        elif isinstance(decorator, astroid.Attribute) and decorator.attrname in (
            "setter",
            "deleter",
        ):
            return "attr"
    return "method"
def _has_abstract_methods(node):
    """
    Determine if the given `node` has abstract methods.
    The methods should be made abstract by decorating them
    with `abc` decorators.
    """
    unimplemented = utils.unimplemented_abstract_methods(node)
    return len(unimplemented) > 0
def report_by_type_stats(sect, stats, old_stats):
    """make a report of
    * percentage of different types documented
    * percentage of different types with a bad name

    :param sect: report section that receives the generated table
    :param stats: current run statistics (counts keyed by node type,
        plus 'undocumented_<type>' / 'badname_<type>' entries)
    :param old_stats: statistics of the previous run, for the diff column
    :raises exceptions.EmptyReportError: when a node-type count is missing
    """
    # percentage of different types documented and/or with a bad name
    nice_stats = {}
    for node_type in ("module", "class", "method", "function"):
        try:
            total = stats[node_type]
        except KeyError:
            raise exceptions.EmptyReportError()
        nice_stats[node_type] = {}
        if total != 0:
            # Missing 'undocumented_'/'badname_' keys yield "NC" (not computed).
            try:
                documented = total - stats["undocumented_" + node_type]
                percent = (documented * 100.) / total
                nice_stats[node_type]["percent_documented"] = "%.2f" % percent
            except KeyError:
                nice_stats[node_type]["percent_documented"] = "NC"
            try:
                percent = (stats["badname_" + node_type] * 100.) / total
                nice_stats[node_type]["percent_badname"] = "%.2f" % percent
            except KeyError:
                nice_stats[node_type]["percent_badname"] = "NC"
    # Flat tuple of cells, row by row, starting with the header row;
    # the Table node below slices it into rows of 6 columns.
    lines = ("type", "number", "old number", "difference", "%documented", "%badname")
    for node_type in ("module", "class", "method", "function"):
        new = stats[node_type]
        old = old_stats.get(node_type, None)
        if old is not None:
            diff_str = reporters.diff_string(old, new)
        else:
            old, diff_str = "NC", "NC"
        lines += (
            node_type,
            str(new),
            str(old),
            diff_str,
            nice_stats[node_type].get("percent_documented", "0"),
            nice_stats[node_type].get("percent_badname", "0"),
        )
    sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
    """Return True if the object is a method redefined via decorator.
    For example:
        @property
        def x(self): return self._x
        @x.setter
        def x(self, value): self._x = value
    """
    if not node.decorators:
        return False
    # A decorator of the form <node.name>.<anything> (e.g. x.setter on a
    # function named x) marks an intentional redefinition.
    return any(
        isinstance(decorator, astroid.Attribute)
        and getattr(decorator.expr, "name", None) == node.name
        for decorator in node.decorators.nodes
    )
class _BasicChecker(checkers.BaseChecker):
    """Shared base for the 'basic' checker family: common name and interface."""
    __implements__ = interfaces.IAstroidChecker
    name = "basic"
class BasicErrorChecker(_BasicChecker):
msgs = {
"E0100": (
"__init__ method is a generator",
"init-is-generator",
"Used when the special class method __init__ is turned into a "
"generator by a yield in its body.",
),
"E0101": (
"Explicit return in __init__",
"return-in-init",
"Used when the special class method __init__ has an explicit "
"return value.",
),
"E0102": (
"%s already defined line %s",
"function-redefined",
"Used when a function / class / method is redefined.",
),
"E0103": (
"%r not properly in loop",
"not-in-loop",
"Used when break or continue keywords are used outside a loop.",
),
"E0104": (
"Return outside function",
"return-outside-function",
'Used when a "return" statement is found outside a function or ' "method.",
),
"E0105": (
"Yield outside function",
"yield-outside-function",
'Used when a "yield" statement is found outside a function or ' "method.",
),
"E0106": (
"Return with argument inside generator",
"return-arg-in-generator",
'Used when a "return" statement with an argument is found '
"outside in a generator function or method (e.g. with some "
'"yield" statements).',
{"maxversion": (3, 3)},
),
"E0107": (
"Use of the non-existent %s operator",
"nonexistent-operator",
"Used when you attempt to use the C-style pre-increment or "
"pre-decrement operator -- and ++, which doesn't exist in Python.",
),
"E0108": (
"Duplicate argument name %s in function definition",
"duplicate-argument-name",
"Duplicate argument names in function definitions are syntax" " errors.",
),
"E0110": (
"Abstract class %r with abstract methods instantiated",
"abstract-class-instantiated",
"Used when an abstract class with `abc.ABCMeta` as metaclass "
"has abstract methods and is instantiated.",
),
"W0120": (
"Else clause on loop without a break statement",
"useless-else-on-loop",
"Loops should only have an else clause if they can exit early "
"with a break statement, otherwise the statements under else "
"should be on the same scope as the loop itself.",
),
"E0112": (
"More than one starred expression in assignment",
"too-many-star-expressions",
"Emitted when there are more than one starred "
"expressions (`*x`) in an assignment. This is a SyntaxError.",
),
"E0113": (
"Starred assignment target must be in a list or tuple",
"invalid-star-assignment-target",
"Emitted when a star expression is used as a starred " "assignment target.",
),
"E0114": (
"Can use starred expression only in assignment target",
"star-needs-assignment-target",
"Emitted when a star expression is not used in an " "assignment target.",
),
"E0115": (
"Name %r is nonlocal and global",
"nonlocal-and-global",
"Emitted when a name is both nonlocal and global.",
),
"E0116": (
"'continue' not supported inside 'finally' clause",
"continue-in-finally",
"Emitted when the `continue` keyword is found "
"inside a finally clause, which is a SyntaxError.",
),
"E0117": (
"nonlocal name %s found without binding",
"nonlocal-without-binding",
"Emitted when a nonlocal variable does not have an attached "
"name somewhere in the parent scopes",
),
"E0118": (
"Name %r is used prior to global declaration",
"used-prior-global-declaration",
"Emitted when a name is used prior a global declaration, "
"which results in an error since Python 3.6.",
{"minversion": (3, 6)},
),
}
@utils.check_messages("function-redefined")
def visit_classdef(self, node):
self._check_redefinition("class", node)
@utils.check_messages("too-many-star-expressions", "invalid-star-assignment-target")
def visit_assign(self, node):
target = node.targets[0]
if isinstance(target, astroid.Tuple):
itered_elements = target.itered()
starred_count = 0
for elem in itered_elements:
if isinstance(elem, astroid.Starred):
starred_count += 1
elif sum(1 for _ in elem.nodes_of_class(astroid.Starred)) > 1:
self.add_message("too-many-star-expressions", node=node)
if starred_count > 1:
self.add_message("too-many-star-expressions", node=node)
# Check *a = b
if isinstance(target, astroid.Starred):
self.add_message("invalid-star-assignment-target", node=node)
@utils.check_messages("star-needs-assignment-target")
def visit_starred(self, node):
"""Check that a Starred expression is used in an assignment target."""
if isinstance(node.parent, astroid.Call):
# f(*args) is converted to Call(args=[Starred]), so ignore
# them for this check.
return
if PY35 and isinstance(
node.parent, (astroid.List, astroid.Tuple, astroid.Set, astroid.Dict)
):
# PEP 448 unpacking.
return
stmt = node.statement()
if not isinstance(stmt, astroid.Assign):
return
if stmt.value is node or stmt.value.parent_of(node):
self.add_message("star-needs-assignment-target", node=node)
    @utils.check_messages(
        "init-is-generator",
        "return-in-init",
        "function-redefined",
        "return-arg-in-generator",
        "duplicate-argument-name",
        "nonlocal-and-global",
        "used-prior-global-declaration",
    )
    def visit_functiondef(self, node):
        """Check a function definition for definition-time errors.

        Covers nonlocal/global conflicts, names used before their ``global``
        declaration, redefinition of an existing name, generator/``return``
        misuse in ``__init__`` and generators, and duplicated argument names.
        """
        self._check_nonlocal_and_global(node)
        self._check_name_used_prior_global(node)
        if not redefined_by_decorator(
            node
        ) and not utils.is_registered_in_singledispatch_function(node):
            self._check_redefinition(node.is_method() and "method" or "function", node)
        # checks for max returns, branch, return in __init__
        returns = node.nodes_of_class(
            astroid.Return, skip_klass=(astroid.FunctionDef, astroid.ClassDef)
        )
        if node.is_method() and node.name == "__init__":
            if node.is_generator():
                self.add_message("init-is-generator", node=node)
            else:
                values = [r.value for r in returns]
                # Are we returning anything but None from constructors
                if any(v for v in values if not utils.is_none(v)):
                    self.add_message("return-in-init", node=node)
        elif node.is_generator():
            # make sure we don't mix non-None returns and yields
            if not PY33:
                for retnode in returns:
                    if (
                        isinstance(retnode.value, astroid.Const)
                        and retnode.value.value is not None
                    ):
                        self.add_message(
                            "return-arg-in-generator",
                            node=node,
                            line=retnode.fromlineno,
                        )
        # Check for duplicate names by clustering args with same name for detailed report
        arg_clusters = collections.defaultdict(list)
        arguments = filter(None, [node.args.args, node.args.kwonlyargs])
        for arg in itertools.chain.from_iterable(arguments):
            arg_clusters[arg.name].append(arg)
        # provide detailed report about each repeated argument
        for argument_duplicates in arg_clusters.values():
            if len(argument_duplicates) != 1:
                for argument in argument_duplicates:
                    self.add_message(
                        "duplicate-argument-name",
                        line=argument.lineno,
                        node=argument,
                        args=(argument.name,),
                    )
    # Async functions get the same definition-time checks.
    visit_asyncfunctiondef = visit_functiondef
def _check_name_used_prior_global(self, node):
scope_globals = {
name: child
for child in node.nodes_of_class(astroid.Global)
for name in child.names
if child.scope() is node
}
if not scope_globals:
return
for node_name in node.nodes_of_class(astroid.Name):
if node_name.scope() is not node:
continue
name = node_name.name
corresponding_global = scope_globals.get(name)
if not corresponding_global:
continue
global_lineno = corresponding_global.fromlineno
if global_lineno and global_lineno > node_name.fromlineno:
self.add_message(
"used-prior-global-declaration", node=node_name, args=(name,)
)
def _check_nonlocal_and_global(self, node):
"""Check that a name is both nonlocal and global."""
def same_scope(current):
return current.scope() is node
from_iter = itertools.chain.from_iterable
nonlocals = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Nonlocal)
if same_scope(child)
)
)
if not nonlocals:
return
global_vars = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Global)
if same_scope(child)
)
)
for name in nonlocals.intersection(global_vars):
self.add_message("nonlocal-and-global", args=(name,), node=node)
    @utils.check_messages("return-outside-function")
    def visit_return(self, node):
        """Flag ``return`` used outside of a function body."""
        if not isinstance(node.frame(), astroid.FunctionDef):
            self.add_message("return-outside-function", node=node)
    @utils.check_messages("yield-outside-function")
    def visit_yield(self, node):
        """Flag ``yield`` used outside of a function body."""
        self._check_yield_outside_func(node)
    @utils.check_messages("yield-outside-function")
    def visit_yieldfrom(self, node):
        """Flag ``yield from`` used outside of a function body."""
        self._check_yield_outside_func(node)
    @utils.check_messages("not-in-loop", "continue-in-finally")
    def visit_continue(self, node):
        """Flag ``continue`` outside a loop, or inside a finally clause."""
        self._check_in_loop(node, "continue")
    @utils.check_messages("not-in-loop")
    def visit_break(self, node):
        """Flag ``break`` used outside of a loop."""
        self._check_in_loop(node, "break")
    @utils.check_messages("useless-else-on-loop")
    def visit_for(self, node):
        """Flag a ``for`` loop whose ``else`` clause always executes."""
        self._check_else_on_loop(node)
    @utils.check_messages("useless-else-on-loop")
    def visit_while(self, node):
        """Flag a ``while`` loop whose ``else`` clause always executes."""
        self._check_else_on_loop(node)
@utils.check_messages("nonexistent-operator")
def visit_unaryop(self, node):
"""check use of the non-existent ++ and -- operator operator"""
if (
(node.op in "+-")
and isinstance(node.operand, astroid.UnaryOp)
and (node.operand.op == node.op)
):
self.add_message("nonexistent-operator", node=node, args=node.op * 2)
    def _check_nonlocal_without_binding(self, node, name):
        """Walk enclosing scopes to verify that *name* has a binding that the
        ``nonlocal`` statement at *node* can attach to."""
        current_scope = node.scope()
        while True:
            if current_scope.parent is None:
                # Reached the top of the scope chain without finding a binding.
                break
            if not isinstance(current_scope, (astroid.ClassDef, astroid.FunctionDef)):
                self.add_message("nonlocal-without-binding", args=(name,), node=node)
                return
            if name not in current_scope.locals:
                current_scope = current_scope.parent.scope()
                continue
            # Okay, found it.
            return
        if not isinstance(current_scope, astroid.FunctionDef):
            self.add_message("nonlocal-without-binding", args=(name,), node=node)
    @utils.check_messages("nonlocal-without-binding")
    def visit_nonlocal(self, node):
        """Check every name of a ``nonlocal`` statement for an outer binding."""
        for name in node.names:
            self._check_nonlocal_without_binding(node, name)
    @utils.check_messages("abstract-class-instantiated")
    def visit_call(self, node):
        """ Check instantiating abstract class with
        abc.ABCMeta as metaclass.
        """
        try:
            # Inference may yield several candidates for the called object;
            # check each one.
            for inferred in node.func.infer():
                self._check_inferred_class_is_abstract(inferred, node)
        except astroid.InferenceError:
            return
    def _check_inferred_class_is_abstract(self, infered, node):
        """Emit abstract-class-instantiated when the Call at *node* builds an
        instance of the abstract class *infered*."""
        if not isinstance(infered, astroid.ClassDef):
            return
        klass = utils.node_frame_class(node)
        if klass is infered:
            # Don't emit the warning if the class is instantiated
            # in its own body or if the call is not an instance
            # creation. If the class is instantiated into its own
            # body, we're expecting that it knows what it is doing.
            return
        # __init__ was called
        abstract_methods = _has_abstract_methods(infered)
        if not abstract_methods:
            return
        metaclass = infered.metaclass()
        if metaclass is None:
            # Python 3.4 has `abc.ABC`, which won't be detected
            # by ClassNode.metaclass()
            for ancestor in infered.ancestors():
                if ancestor.qname() == "abc.ABC":
                    self.add_message(
                        "abstract-class-instantiated", args=(infered.name,), node=node
                    )
                    break
            return
        if metaclass.qname() in ABC_METACLASSES:
            self.add_message(
                "abstract-class-instantiated", args=(infered.name,), node=node
            )
    def _check_yield_outside_func(self, node):
        """Emit yield-outside-function unless the frame is a function/lambda."""
        if not isinstance(node.frame(), (astroid.FunctionDef, astroid.Lambda)):
            self.add_message("yield-outside-function", node=node)
    def _check_else_on_loop(self, node):
        """Check that any loop with an else clause has a break statement.

        Without a ``break``, the ``else`` clause always runs, making it
        indistinguishable from code placed after the loop.
        """
        if node.orelse and not _loop_exits_early(node):
            self.add_message(
                "useless-else-on-loop",
                node=node,
                # This is not optimal, but the line previous
                # to the first statement in the else clause
                # will usually be the one that contains the else:.
                line=node.orelse[0].lineno - 1,
            )
    def _check_in_loop(self, node, node_name):
        """check that a node is inside a for or while loop

        *node_name* ("break"/"continue") is interpolated into the message.
        A node inside a loop's ``else`` clause does NOT count as in the loop.
        """
        _node = node.parent
        while _node:
            if isinstance(_node, (astroid.For, astroid.While)):
                if node not in _node.orelse:
                    return
            if isinstance(_node, (astroid.ClassDef, astroid.FunctionDef)):
                # A function/class boundary stops the search.
                break
            if (
                isinstance(_node, astroid.TryFinally)
                and node in _node.finalbody
                and isinstance(node, astroid.Continue)
            ):
                self.add_message("continue-in-finally", node=node)
            _node = _node.parent
        self.add_message("not-in-loop", node=node, args=node_name)
    def _check_redefinition(self, redeftype, node):
        """check for redefinition of a function / method / class name"""
        parent_frame = node.parent.frame()
        defined_self = parent_frame[node.name]
        if defined_self is not node and not astroid.are_exclusive(node, defined_self):
            # Additional checks for methods which are not considered
            # redefined, since they are already part of the base API.
            if (
                isinstance(parent_frame, astroid.ClassDef)
                and node.name in REDEFINABLE_METHODS
            ):
                return
            # Names matching the dummy pattern (e.g. ``_``) may be rebound freely.
            dummy_variables_rgx = lint_utils.get_global_option(
                self, "dummy-variables-rgx", default=None
            )
            if dummy_variables_rgx and dummy_variables_rgx.match(node.name):
                return
            self.add_message(
                "function-redefined",
                node=node,
                args=(redeftype, defined_self.fromlineno),
            )
class BasicChecker(_BasicChecker):
    """checks for :
    * doc strings
    * number of arguments, local variables, branches, returns and statements in
    functions, methods
    * required module attributes
    * dangerous default values as arguments
    * redefinition of function / method / class
    * uses of the global statement
    """
    __implements__ = interfaces.IAstroidChecker
    name = "basic"
    # Message definitions emitted by this checker, keyed by message id.
    msgs = {
        "W0101": (
            "Unreachable code",
            "unreachable",
            'Used when there is some code behind a "return" or "raise" '
            "statement, which will never be accessed.",
        ),
        "W0102": (
            "Dangerous default value %s as argument",
            "dangerous-default-value",
            "Used when a mutable value as list or dictionary is detected in "
            "a default value for an argument.",
        ),
        "W0104": (
            "Statement seems to have no effect",
            "pointless-statement",
            "Used when a statement doesn't have (or at least seems to) " "any effect.",
        ),
        "W0105": (
            "String statement has no effect",
            "pointless-string-statement",
            "Used when a string is used as a statement (which of course "
            "has no effect). This is a particular case of W0104 with its "
            "own message so you can easily disable it if you're using "
            "those strings as documentation, instead of comments.",
        ),
        "W0106": (
            'Expression "%s" is assigned to nothing',
            "expression-not-assigned",
            "Used when an expression that is not a function call is assigned "
            "to nothing. Probably something else was intended.",
        ),
        "W0108": (
            "Lambda may not be necessary",
            "unnecessary-lambda",
            "Used when the body of a lambda expression is a function call "
            "on the same argument list as the lambda itself; such lambda "
            "expressions are in all but a few cases replaceable with the "
            "function being called in the body of the lambda.",
        ),
        "W0109": (
            "Duplicate key %r in dictionary",
            "duplicate-key",
            "Used when a dictionary expression binds the same key multiple " "times.",
        ),
        "W0122": (
            "Use of exec",
            "exec-used",
            'Used when you use the "exec" statement (function for Python '
            "3), to discourage its usage. That doesn't "
            "mean you cannot use it !",
        ),
        "W0123": (
            "Use of eval",
            "eval-used",
            'Used when you use the "eval" function, to discourage its '
            "usage. Consider using `ast.literal_eval` for safely evaluating "
            "strings containing Python expressions "
            "from untrusted sources. ",
        ),
        "W0150": (
            "%s statement in finally block may swallow exception",
            "lost-exception",
            "Used when a break or a return statement is found inside the "
            "finally clause of a try...finally block: the exceptions raised "
            "in the try clause will be silently swallowed instead of being "
            "re-raised.",
        ),
        "W0199": (
            "Assert called on a 2-uple. Did you mean 'assert x,y'?",
            "assert-on-tuple",
            "A call of assert on a tuple will always evaluate to true if "
            "the tuple is not empty, and will always evaluate to false if "
            "it is.",
        ),
        "W0124": (
            'Following "as" with another context manager looks like a tuple.',
            "confusing-with-statement",
            "Emitted when a `with` statement component returns multiple values "
            "and uses name binding with `as` only for a part of those values, "
            "as in with ctx() as a, b. This can be misleading, since it's not "
            "clear if the context manager returns a tuple or if the node without "
            "a name binding is another context manager.",
        ),
        "W0125": (
            "Using a conditional statement with a constant value",
            "using-constant-test",
            "Emitted when a conditional statement (If or ternary if) "
            "uses a constant value for its test. This might not be what "
            "the user intended to do.",
        ),
        "E0111": (
            "The first reversed() argument is not a sequence",
            "bad-reversed-sequence",
            "Used when the first argument to reversed() builtin "
            "isn't a sequence (does not implement __reversed__, "
            "nor __getitem__ and __len__",
        ),
        "E0119": (
            "format function is not called on str",
            "misplaced-format-function",
            "Emitted when format function is not called on str object. "
            'e.g doing print("value: {}").format(123) instead of '
            'print("value: {}".format(123)). This might not be what the user '
            "intended to do.",
        ),
    }
    # Aggregated statistics report shown at the end of a run.
    reports = (("RP0101", "Statistics by type", report_by_type_stats),)
    def __init__(self, linter):
        # stats and _tryfinallys are populated in open(), once a run starts.
        _BasicChecker.__init__(self, linter)
        self.stats = None
        self._tryfinallys = None
    def open(self):
        """initialize visit variables and statistics
        """
        # Stack of the try...finally nodes currently being visited.
        self._tryfinallys = []
        self.stats = self.linter.add_stats(module=0, function=0, method=0, class_=0)
    @utils.check_messages("using-constant-test")
    def visit_if(self, node):
        """Check an ``if`` statement for a constant test."""
        self._check_using_constant_test(node, node.test)
    @utils.check_messages("using-constant-test")
    def visit_ifexp(self, node):
        """Check a ternary expression for a constant test."""
        self._check_using_constant_test(node, node.test)
    @utils.check_messages("using-constant-test")
    def visit_comprehension(self, node):
        """Check each ``if`` filter of a comprehension for a constant test."""
        if node.ifs:
            for if_test in node.ifs:
                self._check_using_constant_test(node, if_test)
    def _check_using_constant_test(self, node, test):
        """Emit using-constant-test when *test* is (or infers to) a constant.

        *node* is the conditional construct carrying the test expression.
        """
        const_nodes = (
            astroid.Module,
            astroid.scoped_nodes.GeneratorExp,
            astroid.Lambda,
            astroid.FunctionDef,
            astroid.ClassDef,
            astroid.bases.Generator,
            astroid.UnboundMethod,
            astroid.BoundMethod,
            astroid.Module,
        )
        structs = (astroid.Dict, astroid.Tuple, astroid.Set)
        # These nodes are excepted, since they are not constant
        # values, requiring a computation to happen. The only type
        # of node in this list which doesn't have this property is
        # Attribute, which is excepted because the conditional statement
        # can be used to verify that the attribute was set inside a class,
        # which is definitely a valid use case.
        except_nodes = (
            astroid.Attribute,
            astroid.Call,
            astroid.BinOp,
            astroid.BoolOp,
            astroid.UnaryOp,
            astroid.Subscript,
        )
        inferred = None
        emit = isinstance(test, (astroid.Const,) + structs + const_nodes)
        if not isinstance(test, except_nodes):
            inferred = utils.safe_infer(test)
        if emit or isinstance(inferred, const_nodes):
            self.add_message("using-constant-test", node=node)
    def visit_module(self, _):
        """update the module statistics counter
        """
        self.stats["module"] += 1
    def visit_classdef(self, node): # pylint: disable=unused-argument
        """update the class statistics counter
        """
        self.stats["class"] += 1
    @utils.check_messages(
        "pointless-statement", "pointless-string-statement", "expression-not-assigned"
    )
    def visit_expr(self, node):
        """check for various kind of statements without effect"""
        expr = node.value
        if isinstance(expr, astroid.Const) and isinstance(expr.value, str):
            # treat string statement in a separated message
            # Handle PEP-257 attribute docstrings.
            # An attribute docstring is defined as being a string right after
            # an assignment at the module level, class level or __init__ level.
            scope = expr.scope()
            if isinstance(
                scope, (astroid.ClassDef, astroid.Module, astroid.FunctionDef)
            ):
                if isinstance(scope, astroid.FunctionDef) and scope.name != "__init__":
                    pass
                else:
                    sibling = expr.previous_sibling()
                    if (
                        sibling is not None
                        and sibling.scope() is scope
                        and isinstance(sibling, (astroid.Assign, astroid.AnnAssign))
                    ):
                        # Attribute docstring: string right after an assignment.
                        return
            self.add_message("pointless-string-statement", node=node)
            return
        # ignore if this is :
        # * a direct function call
        # * the unique child of a try/except body
        # * a yield (which are wrapped by a discard node in _ast XXX)
        # warn W0106 if we have any underlying function call (we can't predict
        # side effects), else pointless-statement
        if isinstance(expr, (astroid.Yield, astroid.Await, astroid.Call)) or (
            isinstance(node.parent, astroid.TryExcept) and node.parent.body == [node]
        ):
            return
        if any(expr.nodes_of_class(astroid.Call)):
            self.add_message(
                "expression-not-assigned", node=node, args=expr.as_string()
            )
        else:
            self.add_message("pointless-statement", node=node)
@staticmethod
def _filter_vararg(node, call_args):
# Return the arguments for the given call which are
# not passed as vararg.
for arg in call_args:
if isinstance(arg, astroid.Starred):
if (
isinstance(arg.value, astroid.Name)
and arg.value.name != node.args.vararg
):
yield arg
else:
yield arg
@staticmethod
def _has_variadic_argument(args, variadic_name):
if not args:
return True
for arg in args:
if isinstance(arg.value, astroid.Name):
if arg.value.name != variadic_name:
return True
else:
return True
return False
    @utils.check_messages("unnecessary-lambda")
    def visit_lambda(self, node):
        """check whether or not the lambda is suspicious
        """
        # if the body of the lambda is a call expression with the same
        # argument list as the lambda itself, then the lambda is
        # possibly unnecessary and at least suspicious.
        if node.args.defaults:
            # If the arguments of the lambda include defaults, then a
            # judgment cannot be made because there is no way to check
            # that the defaults defined by the lambda are the same as
            # the defaults defined by the function called in the body
            # of the lambda.
            return
        call = node.body
        if not isinstance(call, astroid.Call):
            # The body of the lambda must be a function call expression
            # for the lambda to be unnecessary.
            return
        if isinstance(node.body.func, astroid.Attribute) and isinstance(
            node.body.func.expr, astroid.Call
        ):
            # Chained call, the intermediate call might
            # return something else (but we don't check that, yet).
            return
        ordinary_args = list(node.args.args)
        new_call_args = list(self._filter_vararg(node, call.args))
        # The lambda's *args/**kwargs must be forwarded unchanged, if present.
        if node.args.kwarg:
            if self._has_variadic_argument(call.kwargs, node.args.kwarg):
                return
        elif call.kwargs or call.keywords:
            return
        if node.args.vararg:
            if self._has_variadic_argument(call.starargs, node.args.vararg):
                return
        elif call.starargs:
            return
        # The "ordinary" arguments must be in a correspondence such that:
        # ordinary_args[i].name == call.args[i].name.
        if len(ordinary_args) != len(new_call_args):
            return
        for arg, passed_arg in zip(ordinary_args, new_call_args):
            if not isinstance(passed_arg, astroid.Name):
                return
            if arg.name != passed_arg.name:
                return
        self.add_message("unnecessary-lambda", line=node.fromlineno, node=node)
    @utils.check_messages("dangerous-default-value")
    def visit_functiondef(self, node):
        """check function name, docstring, arguments, redefinition,
        variable names, max locals
        """
        # Count the definition in the per-type statistics.
        self.stats[node.is_method() and "method" or "function"] += 1
        self._check_dangerous_default(node)
    # Async functions get the same checks as regular functions.
    visit_asyncfunctiondef = visit_functiondef
def _check_dangerous_default(self, node):
# check for dangerous default values as arguments
is_iterable = lambda n: isinstance(n, (astroid.List, astroid.Set, astroid.Dict))
for default in node.args.defaults:
try:
value = next(default.infer())
except astroid.InferenceError:
continue
if (
isinstance(value, astroid.Instance)
and value.qname() in DEFAULT_ARGUMENT_SYMBOLS
):
if value is default:
msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
elif isinstance(value, astroid.Instance) or is_iterable(value):
# We are here in the following situation(s):
# * a dict/set/list/tuple call which wasn't inferred
# to a syntax node ({}, () etc.). This can happen
# when the arguments are invalid or unknown to
# the inference.
# * a variable from somewhere else, which turns out to be a list
# or a dict.
if is_iterable(default):
msg = value.pytype()
elif isinstance(default, astroid.Call):
msg = "%s() (%s)" % (value.name, value.qname())
else:
msg = "%s (%s)" % (default.as_string(), value.qname())
else:
# this argument is a name
msg = "%s (%s)" % (
default.as_string(),
DEFAULT_ARGUMENT_SYMBOLS[value.qname()],
)
self.add_message("dangerous-default-value", node=node, args=(msg,))
    @utils.check_messages("unreachable", "lost-exception")
    def visit_return(self, node):
        """1 - check is the node has a right sibling (if so, that's some
        unreachable code)
        2 - check is the node is inside the finally clause of a try...finally
        block
        """
        self._check_unreachable(node)
        # Is it inside final body of a try...finally bloc ?
        self._check_not_in_finally(node, "return", (astroid.FunctionDef,))
    @utils.check_messages("unreachable")
    def visit_continue(self, node):
        """check is the node has a right sibling (if so, that's some unreachable
        code)
        """
        self._check_unreachable(node)
    @utils.check_messages("unreachable", "lost-exception")
    def visit_break(self, node):
        """1 - check is the node has a right sibling (if so, that's some
        unreachable code)
        2 - check is the node is inside the finally clause of a try...finally
        block
        """
        # 1 - Is it right sibling ?
        self._check_unreachable(node)
        # 2 - Is it inside final body of a try...finally bloc ?
        self._check_not_in_finally(node, "break", (astroid.For, astroid.While))
    @utils.check_messages("unreachable")
    def visit_raise(self, node):
        """check if the node has a right sibling (if so, that's some unreachable
        code)
        """
        self._check_unreachable(node)
    @utils.check_messages("exec-used")
    def visit_exec(self, node):
        """just print a warning on exec statements"""
        self.add_message("exec-used", node=node)
    def _check_misplaced_format_function(self, call_node):
        """Warn when ``.format(...)`` is called on a ``print(...)`` call
        instead of on the string passed to it."""
        if not isinstance(call_node.func, astroid.Attribute):
            return
        if call_node.func.attrname != "format":
            return
        expr = utils.safe_infer(call_node.func.expr)
        if expr is astroid.Uninferable:
            return
        if not expr:
            # we are doubtful on inferred type of node, so here just check if format
            # was called on print()
            call_expr = call_node.func.expr
            if not isinstance(call_expr, astroid.Call):
                return
            if (
                isinstance(call_expr.func, astroid.Name)
                and call_expr.func.name == "print"
            ):
                self.add_message("misplaced-format-function", node=call_node)
    @utils.check_messages(
        "eval-used", "exec-used", "bad-reversed-sequence", "misplaced-format-function"
    )
    def visit_call(self, node):
        """visit a Call node -> check if this is not a blacklisted builtin
        call and check for * or ** use
        """
        self._check_misplaced_format_function(node)
        if isinstance(node.func, astroid.Name):
            name = node.func.name
            # ignore the name if it's not a builtin (i.e. not defined in the
            # locals nor globals scope)
            if not (name in node.frame() or name in node.root()):
                if name == "exec":
                    self.add_message("exec-used", node=node)
                elif name == "reversed":
                    self._check_reversed(node)
                elif name == "eval":
                    self.add_message("eval-used", node=node)
    @utils.check_messages("assert-on-tuple")
    def visit_assert(self, node):
        """check the use of an assert statement on a tuple."""
        # ``assert (x, y)`` with no message and a 2-tuple test is almost
        # certainly a mistyped ``assert x, y``.
        if (
            node.fail is None
            and isinstance(node.test, astroid.Tuple)
            and len(node.test.elts) == 2
        ):
            self.add_message("assert-on-tuple", node=node)
@utils.check_messages("duplicate-key")
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, _ in node.items:
if isinstance(k, astroid.Const):
key = k.value
if key in keys:
self.add_message("duplicate-key", node=node, args=key)
keys.add(key)
    def visit_tryfinally(self, node):
        """update try...finally flag"""
        self._tryfinallys.append(node)
    def leave_tryfinally(self, node): # pylint: disable=unused-argument
        """update try...finally flag"""
        self._tryfinallys.pop()
    def _check_unreachable(self, node):
        """check unreachable code"""
        # Any statement following a return/raise/break/continue can never run.
        unreach_stmt = node.next_sibling()
        if unreach_stmt is not None:
            self.add_message("unreachable", node=unreach_stmt)
    def _check_not_in_finally(self, node, node_name, breaker_classes=()):
        """check that a node is not inside a finally clause of a
        try...finally statement.
        If we found before a try...finally bloc a parent which its type is
        in breaker_classes, we skip the whole check.

        *node_name* ("break"/"return") is interpolated into the message.
        """
        # if self._tryfinallys is empty, we're not an in try...finally block
        if not self._tryfinallys:
            return
        # the node could be a grand-grand...-children of the try...finally
        _parent = node.parent
        _node = node
        while _parent and not isinstance(_parent, breaker_classes):
            if hasattr(_parent, "finalbody") and _node in _parent.finalbody:
                self.add_message("lost-exception", node=node, args=node_name)
                return
            _node = _parent
            _parent = _node.parent
    def _check_reversed(self, node):
        """ check that the argument to `reversed` is a sequence """
        try:
            argument = utils.safe_infer(utils.get_argument_from_call(node, position=0))
        except utils.NoSuchArgumentError:
            pass
        else:
            if argument is astroid.Uninferable:
                return
            if argument is None:
                # Nothing was infered.
                # Try to see if we have iter().
                if isinstance(node.args[0], astroid.Call):
                    try:
                        func = next(node.args[0].func.infer())
                    except astroid.InferenceError:
                        return
                    if getattr(
                        func, "name", None
                    ) == "iter" and utils.is_builtin_object(func):
                        # reversed(iter(x)) can't work: iterators have no length.
                        self.add_message("bad-reversed-sequence", node=node)
                return
            if isinstance(argument, astroid.Instance):
                if argument._proxied.name == "dict" and utils.is_builtin_object(
                    argument._proxied
                ):
                    self.add_message("bad-reversed-sequence", node=node)
                    return
                if any(
                    ancestor.name == "dict" and utils.is_builtin_object(ancestor)
                    for ancestor in argument._proxied.ancestors()
                ):
                    # Mappings aren't accepted by reversed(), unless
                    # they provide explicitly a __reversed__ method.
                    try:
                        argument.locals[REVERSED_PROTOCOL_METHOD]
                    except KeyError:
                        self.add_message("bad-reversed-sequence", node=node)
                    return
                # Look for one complete cluster of sequence-protocol methods.
                for methods in REVERSED_METHODS:
                    for meth in methods:
                        try:
                            argument.getattr(meth)
                        except astroid.NotFoundError:
                            # Missing method: this cluster fails, try the next.
                            break
                    else:
                        # Every method of the cluster is present: sequence OK.
                        break
                else:
                    # No cluster was fully satisfied.
                    self.add_message("bad-reversed-sequence", node=node)
            elif not isinstance(argument, (astroid.List, astroid.Tuple)):
                # everything else is not a proper sequence for reversed()
                self.add_message("bad-reversed-sequence", node=node)
    @utils.check_messages("confusing-with-statement")
    def visit_with(self, node):
        """Warn when a ``with a() as x, b:`` looks like a mistyped tuple."""
        if not PY3K:
            # in Python 2 a "with" statement with multiple managers coresponds
            # to multiple nested AST "With" nodes
            pairs = []
            parent_node = node.parent
            if isinstance(parent_node, astroid.With):
                # we only care about the direct parent, since this method
                # gets called for each with node anyway
                pairs.extend(parent_node.items)
            pairs.extend(node.items)
        else:
            # in PY3K a "with" statement with multiple managers coresponds
            # to one AST "With" node with multiple items
            pairs = node.items
        if pairs:
            for prev_pair, pair in zip(pairs, pairs[1:]):
                if isinstance(prev_pair[1], astroid.AssignName) and (
                    pair[1] is None and not isinstance(pair[0], astroid.Call)
                ):
                    # don't emit a message if the second is a function call
                    # there's no way that can be mistaken for a name assignment
                    if PY3K or node.lineno == node.parent.lineno:
                        # if the line number doesn't match
                        # we assume it's a nested "with"
                        self.add_message("confusing-with-statement", node=node)
# The name categories recognised by the naming-style options below.
KNOWN_NAME_TYPES = {
    "module",
    "const",
    "class",
    "function",
    "method",
    "attr",
    "argument",
    "variable",
    "class_attribute",
    "inlinevar",
}
# Mapping from internal name type to the label used in user-facing messages.
HUMAN_READABLE_TYPES = {
    "module": "module",
    "const": "constant",
    "class": "class",
    "function": "function",
    "method": "method",
    "attr": "attribute",
    "argument": "argument",
    "variable": "variable",
    "class_attribute": "class attribute",
    "inlinevar": "inline iteration",
}
# Naming style applied to each name type when no option overrides it.
DEFAULT_NAMING_STYLES = {
    "module": "snake_case",
    "const": "UPPER_CASE",
    "class": "PascalCase",
    "function": "snake_case",
    "method": "snake_case",
    "attr": "snake_case",
    "argument": "snake_case",
    "variable": "snake_case",
    "class_attribute": "any",
    "inlinevar": "any",
}
def _create_naming_options():
    """Build the per-name-type ``*-naming-style`` and ``*-rgx`` options."""
    options = []
    for name_type in sorted(KNOWN_NAME_TYPES):
        readable = HUMAN_READABLE_TYPES[name_type]
        style_default = DEFAULT_NAMING_STYLES[name_type]
        option_prefix = name_type.replace("_", "-")
        style_option = (
            "%s-naming-style" % (option_prefix,),
            {
                "default": style_default,
                "type": "choice",
                "choices": list(NAMING_STYLES.keys()),
                "metavar": "<style>",
                "help": "Naming style matching correct %s names." % (readable,),
            },
        )
        regex_option = (
            "%s-rgx" % (option_prefix,),
            {
                "default": None,
                "type": "regexp",
                "metavar": "<regexp>",
                "help": "Regular expression matching correct %s names. Overrides %s-naming-style."
                % (readable, option_prefix),
            },
        )
        options.append(style_option)
        options.append(regex_option)
    return tuple(options)
class NameChecker(_BasicChecker):
    """Checker for blacklisted, style-violating and soon-to-be-keyword names."""
    msgs = {
        "C0102": (
            'Black listed name "%s"',
            "blacklisted-name",
            "Used when the name is listed in the black list (unauthorized " "names).",
        ),
        "C0103": (
            '%s name "%s" doesn\'t conform to %s',
            "invalid-name",
            "Used when the name doesn't conform to naming rules "
            "associated to its type (constant, variable, class...).",
        ),
        "W0111": (
            "Name %s will become a keyword in Python %s",
            "assign-to-new-keyword",
            "Used when assignment will become invalid in future "
            "Python release due to introducing new keyword.",
        ),
    }
    options = (
        (
            "good-names",
            {
                "default": ("i", "j", "k", "ex", "Run", "_"),
                "type": "csv",
                "metavar": "<names>",
                "help": "Good variable names which should always be accepted,"
                " separated by a comma.",
            },
        ),
        (
            "bad-names",
            {
                "default": ("foo", "bar", "baz", "toto", "tutu", "tata"),
                "type": "csv",
                "metavar": "<names>",
                "help": "Bad variable names which should always be refused, "
                "separated by a comma.",
            },
        ),
        (
            "name-group",
            {
                "default": (),
                "type": "csv",
                "metavar": "<name1:name2>",
                "help": (
                    "Colon-delimited sets of names that determine each"
                    " other's naming style when the name regexes"
                    " allow several styles."
                ),
            },
        ),
        (
            "include-naming-hint",
            {
                "default": False,
                "type": "yn",
                "metavar": "<y_or_n>",
                "help": "Include a hint for the correct naming format with invalid-name.",
            },
        ),
        (
            "property-classes",
            {
                "default": ("abc.abstractproperty",),
                "type": "csv",
                "metavar": "<decorator names>",
                "help": "List of decorators that produce properties, such as "
                "abc.abstractproperty. Add to this list to register "
                "other decorators that produce valid properties. "
                "These decorators are taken in consideration only for invalid-name.",
            },
        ),
    ) + _create_naming_options()
    # Names that become keywords at the given interpreter version
    # (presumably consulted by the assign-to-new-keyword check — confirm).
    KEYWORD_ONSET = {(3, 7): {"async", "await"}}
    def __init__(self, linter):
        _BasicChecker.__init__(self, linter)
        # Per-run caches, (re)built in open() from the configuration.
        self._name_category = {}
        self._name_group = {}
        self._bad_names = {}
        self._name_regexps = {}
        self._name_hints = {}
    def open(self):
        """Reset statistics and compile the configured naming rules."""
        self.stats = self.linter.add_stats(
            badname_module=0,
            badname_class=0,
            badname_function=0,
            badname_method=0,
            badname_attr=0,
            badname_const=0,
            badname_variable=0,
            badname_inlinevar=0,
            badname_argument=0,
            badname_class_attribute=0,
        )
        # Map each name type to its configured name group.
        for group in self.config.name_group:
            for name_type in group.split(":"):
                self._name_group[name_type] = "group_%s" % (group,)
        regexps, hints = self._create_naming_rules()
        self._name_regexps = regexps
        self._name_hints = hints
def _create_naming_rules(self):
regexps = {}
hints = {}
for name_type in KNOWN_NAME_TYPES:
naming_style_option_name = "%s_naming_style" % (name_type,)
naming_style_name = getattr(self.config, naming_style_option_name)
regexps[name_type] = NAMING_STYLES[naming_style_name].get_regex(name_type)
custom_regex_setting_name = "%s_rgx" % (name_type,)
custom_regex = getattr(self.config, custom_regex_setting_name, None)
if custom_regex is not None:
regexps[name_type] = custom_regex
if custom_regex is not None:
hints[name_type] = "%r pattern" % custom_regex.pattern
else:
hints[name_type] = "%s naming style" % naming_style_name
return regexps, hints
    @utils.check_messages("blacklisted-name", "invalid-name")
    def visit_module(self, node):
        """Check the module's own name and reset the per-module name groups."""
        self._check_name("module", node.name.split(".")[-1], node)
        self._bad_names = {}
    def leave_module(self, node): # pylint: disable=unused-argument
        """Emit the grouped invalid-name warnings collected for this module.

        Within each name group, only the smallest naming cluster(s) are
        reported; when several clusters tie for smallest, the first one
        (by line) is treated as the prevailing convention and skipped.
        """
        for all_groups in self._bad_names.values():
            if len(all_groups) < 2:
                continue
            groups = collections.defaultdict(list)
            min_warnings = sys.maxsize
            for group in all_groups.values():
                groups[len(group)].append(group)
                min_warnings = min(len(group), min_warnings)
            if len(groups[min_warnings]) > 1:
                by_line = sorted(
                    groups[min_warnings],
                    key=lambda group: min(warning[0].lineno for warning in group),
                )
                warnings = itertools.chain(*by_line[1:])
            else:
                warnings = groups[min_warnings][0]
            for args in warnings:
                self._raise_name_warning(*args)
    @utils.check_messages("blacklisted-name", "invalid-name", "assign-to-new-keyword")
    def visit_classdef(self, node):
        """Check the class name and the names of its instance attributes."""
        self._check_assign_to_new_keyword_violation(node.name, node)
        self._check_name("class", node.name, node)
        for attr, anodes in node.instance_attrs.items():
            if not any(node.instance_attr_ancestors(attr)):
                # Only attributes first defined in this class, not inherited ones.
                self._check_name("attr", attr, anodes[0])
    @utils.check_messages("blacklisted-name", "invalid-name", "assign-to-new-keyword")
    def visit_functiondef(self, node):
        """Check the function/method name and its argument names."""
        # Do not emit any warnings if the method is just an implementation
        # of a base class method.
        self._check_assign_to_new_keyword_violation(node.name, node)
        confidence = interfaces.HIGH
        if node.is_method():
            if utils.overrides_a_method(node.parent.frame(), node.name):
                return
            confidence = (
                interfaces.INFERENCE
                if utils.has_known_bases(node.parent.frame())
                else interfaces.INFERENCE_FAILURE
            )
        self._check_name(
            _determine_function_name_type(node, config=self.config),
            node.name,
            node,
            confidence,
        )
        # Check argument names
        args = node.args.args
        if args is not None:
            self._recursive_check_names(args, node)
    # Async functions get the same name checks.
    visit_asyncfunctiondef = visit_functiondef
    @utils.check_messages("blacklisted-name", "invalid-name")
    def visit_global(self, node):
        """Names declared ``global`` are checked against the constant style."""
        for name in node.names:
            self._check_name("const", name, node)
    @utils.check_messages("blacklisted-name", "invalid-name", "assign-to-new-keyword")
    def visit_assignname(self, node):
        """Check an assigned name against the style matching its context.

        The applicable naming style depends on where the assignment occurs:
        comprehension (inlinevar), module level (const or class for class
        aliases), function body (variable) or class body (class_attribute).
        """
        self._check_assign_to_new_keyword_violation(node.name, node)
        frame = node.frame()
        assign_type = node.assign_type()
        if isinstance(assign_type, astroid.Comprehension):
            self._check_name("inlinevar", node.name, node)
        elif isinstance(frame, astroid.Module):
            if isinstance(assign_type, astroid.Assign) and not in_loop(assign_type):
                # Module-level aliases of classes follow the class naming
                # style; every other module-level assignment is a constant.
                if isinstance(utils.safe_infer(assign_type.value), astroid.ClassDef):
                    self._check_name("class", node.name, node)
                else:
                    if not _redefines_import(node):
                        # Don't emit if the name redefines an import
                        # in an ImportError except handler.
                        self._check_name("const", node.name, node)
            elif isinstance(assign_type, astroid.ExceptHandler):
                self._check_name("variable", node.name, node)
        elif isinstance(frame, astroid.FunctionDef):
            # global introduced variable aren't in the function locals
            if node.name in frame and node.name not in frame.argnames():
                if not _redefines_import(node):
                    self._check_name("variable", node.name, node)
        elif isinstance(frame, astroid.ClassDef):
            # Only check attributes first defined in this class, not those
            # already declared by an ancestor.
            if not list(frame.local_attr_ancestors(node.name)):
                self._check_name("class_attribute", node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astroid.AssignName):
self._check_name("argument", arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
    def _find_name_group(self, node_type):
        """Return the name group configured for *node_type* (default: itself)."""
        return self._name_group.get(node_type, node_type)
def _raise_name_warning(self, node, node_type, name, confidence):
type_label = HUMAN_READABLE_TYPES[node_type]
hint = self._name_hints[node_type]
if self.config.include_naming_hint:
hint += " (%r pattern)" % self._name_regexps[node_type].pattern
args = (type_label.capitalize(), name, hint)
self.add_message("invalid-name", node=node, args=args, confidence=confidence)
self.stats["badname_" + node_type] += 1
    def _check_name(self, node_type, name, node, confidence=interfaces.HIGH):
        """check for a name using the type's regexp"""

        def _should_exempt_from_invalid_name(node):
            # Variables that are actually aliases of classes are checked
            # with the class naming style elsewhere, so skip them here.
            if node_type == "variable":
                inferred = utils.safe_infer(node)
                if isinstance(inferred, astroid.ClassDef):
                    return True
            return False

        if utils.is_inside_except(node):
            clobbering, _ = utils.clobber_in_except(node)
            if clobbering:
                return
        if name in self.config.good_names:
            return
        if name in self.config.bad_names:
            self.stats["badname_" + node_type] += 1
            self.add_message("blacklisted-name", node=node, args=name)
            return
        regexp = self._name_regexps[node_type]
        match = regexp.match(name)

        # For multi-style regexps the winning style is only decided at the
        # end of the module (leave_module): record the match for later.
        if _is_multi_naming_match(match, node_type, confidence):
            name_group = self._find_name_group(node_type)
            bad_name_group = self._bad_names.setdefault(name_group, {})
            warnings = bad_name_group.setdefault(match.lastgroup, [])
            warnings.append((node, node_type, name, confidence))

        if match is None and not _should_exempt_from_invalid_name(node):
            self._raise_name_warning(node, node_type, name, confidence)
def _check_assign_to_new_keyword_violation(self, name, node):
keyword_first_version = self._name_became_keyword_in_version(
name, self.KEYWORD_ONSET
)
if keyword_first_version is not None:
self.add_message(
"assign-to-new-keyword",
node=node,
args=(name, keyword_first_version),
confidence=interfaces.HIGH,
)
@staticmethod
def _name_became_keyword_in_version(name, rules):
for version, keywords in rules.items():
if name in keywords and sys.version_info < version:
return ".".join(map(str, version))
return None
class DocStringChecker(_BasicChecker):
    """Checker for missing (C0111) and empty (C0112) docstrings."""

    msgs = {
        "C0111": (
            "Missing %s docstring",  # W0131
            "missing-docstring",
            "Used when a module, function, class or method has no docstring."
            "Some special methods like __init__ doesn't necessary require a "
            "docstring.",
        ),
        "C0112": (
            "Empty %s docstring",  # W0132
            "empty-docstring",
            "Used when a module, function, class or method has an empty "
            "docstring (it would be too easy ;).",
        ),
    }
    options = (
        (
            "no-docstring-rgx",
            {
                "default": NO_REQUIRED_DOC_RGX,
                "type": "regexp",
                "metavar": "<regexp>",
                "help": "Regular expression which should only match "
                "function or class names that do not require a "
                "docstring.",
            },
        ),
        (
            "docstring-min-length",
            {
                "default": -1,
                "type": "int",
                "metavar": "<int>",
                "help": (
                    "Minimum line length for functions/classes that"
                    " require docstrings, shorter ones are exempt."
                ),
            },
        ),
    )

    def open(self):
        """Initialize the per-run counters of undocumented objects."""
        self.stats = self.linter.add_stats(
            undocumented_module=0,
            undocumented_function=0,
            undocumented_method=0,
            undocumented_class=0,
        )

    @utils.check_messages("missing-docstring", "empty-docstring")
    def visit_module(self, node):
        """Check the module-level docstring."""
        self._check_docstring("module", node)

    @utils.check_messages("missing-docstring", "empty-docstring")
    def visit_classdef(self, node):
        """Check the class docstring, unless the name is exempted by config."""
        if self.config.no_docstring_rgx.match(node.name) is None:
            self._check_docstring("class", node)

    @staticmethod
    def _is_setter_or_deleter(node):
        """Return True if *node* is decorated as a property setter or deleter."""
        names = {"setter", "deleter"}
        for decorator in node.decorators.nodes:
            if isinstance(decorator, astroid.Attribute) and decorator.attrname in names:
                return True
        return False

    @utils.check_messages("missing-docstring", "empty-docstring")
    def visit_functiondef(self, node):
        """Check function/method docstrings, skipping overrides and setters."""
        if self.config.no_docstring_rgx.match(node.name) is None:
            ftype = "method" if node.is_method() else "function"
            # Property setters/deleters are documented through the getter.
            if node.decorators and self._is_setter_or_deleter(node):
                return

            if isinstance(node.parent.frame(), astroid.ClassDef):
                overridden = False
                confidence = (
                    interfaces.INFERENCE
                    if utils.has_known_bases(node.parent.frame())
                    else interfaces.INFERENCE_FAILURE
                )
                # check if node is from a method overridden by its ancestor
                for ancestor in node.parent.frame().ancestors():
                    if node.name in ancestor and isinstance(
                        ancestor[node.name], astroid.FunctionDef
                    ):
                        overridden = True
                        break
                self._check_docstring(
                    ftype, node, report_missing=not overridden, confidence=confidence
                )
            elif isinstance(node.parent.frame(), astroid.Module):
                self._check_docstring(ftype, node)
            else:
                return

    visit_asyncfunctiondef = visit_functiondef

    def _check_docstring(
        self, node_type, node, report_missing=True, confidence=interfaces.HIGH
    ):
        """check the node has a non empty docstring"""
        docstring = node.doc
        if docstring is None:
            if not report_missing:
                return
            lines = get_node_last_lineno(node) - node.lineno

            if node_type == "module" and not lines:
                # If the module has no body, there's no reason
                # to require a docstring.
                return
            max_lines = self.config.docstring_min_length

            if node_type != "module" and max_lines > -1 and lines < max_lines:
                return
            self.stats["undocumented_" + node_type] += 1
            if (
                node.body
                and isinstance(node.body[0], astroid.Expr)
                and isinstance(node.body[0].value, astroid.Call)
            ):
                # Most likely a string with a format call. Let's see.
                func = utils.safe_infer(node.body[0].value.func)
                if isinstance(func, astroid.BoundMethod) and isinstance(
                    func.bound, astroid.Instance
                ):
                    # Strings in Python 3, others in Python 2.
                    if PY3K and func.bound.name == "str":
                        return
                    if func.bound.name in ("str", "unicode", "bytes"):
                        return
            self.add_message(
                "missing-docstring", node=node, args=(node_type,), confidence=confidence
            )
        elif not docstring.strip():
            self.stats["undocumented_" + node_type] += 1
            self.add_message(
                "empty-docstring", node=node, args=(node_type,), confidence=confidence
            )
class PassChecker(_BasicChecker):
    """check if the pass statement is really necessary"""

    msgs = {
        "W0107": (
            "Unnecessary pass statement",
            "unnecessary-pass",
            'Used when a "pass" statement that can be avoided is ' "encountered.",
        )
    }

    @utils.check_messages("unnecessary-pass")
    def visit_pass(self, node):
        """Flag a `pass` that shares its suite with other statements or follows
        a docstring in a class/function body."""
        siblings = node.parent.child_sequence(node)
        if len(siblings) > 1:
            # The pass is not the only statement in its suite.
            self.add_message("unnecessary-pass", node=node)
            return
        parent = node.parent
        if (
            isinstance(parent, (astroid.ClassDef, astroid.FunctionDef))
            and parent.doc is not None
        ):
            # A docstring already makes the body non-empty.
            self.add_message("unnecessary-pass", node=node)
def _is_one_arg_pos_call(call):
    """Is this a call with exactly 1 argument,
    where that argument is positional?
    """
    if not isinstance(call, astroid.Call):
        return False
    return len(call.args) == 1 and not call.keywords
class ComparisonChecker(_BasicChecker):
    """Checks for comparisons
    - singleton comparison: 'expr == True', 'expr == False' and 'expr == None'
    - yoda condition: 'const "comp" right' where comp can be '==', '!=', '<',
    '<=', '>' or '>=', and right can be a variable, an attribute, a method or
    a function
    """

    msgs = {
        "C0121": (
            "Comparison to %s should be %s",
            "singleton-comparison",
            "Used when an expression is compared to singleton "
            "values like True, False or None.",
        ),
        "C0122": (
            "Comparison should be %s",
            "misplaced-comparison-constant",
            "Used when the constant is placed on the left side "
            "of a comparison. It is usually clearer in intent to "
            "place it in the right hand side of the comparison.",
        ),
        "C0123": (
            "Using type() instead of isinstance() for a typecheck.",
            "unidiomatic-typecheck",
            "The idiomatic way to perform an explicit typecheck in "
            "Python is to use isinstance(x, Y) rather than "
            "type(x) == Y, type(x) is Y. Though there are unusual "
            "situations where these give different results.",
            {"old_names": [("W0154", "unidiomatic-typecheck")]},
        ),
        "R0123": (
            "Comparison to literal",
            "literal-comparison",
            "Used when comparing an object to a literal, which is usually "
            "what you do not want to do, since you can compare to a different "
            "literal than what was expected altogether.",
        ),
        "R0124": (
            "Redundant comparison - %s",
            "comparison-with-itself",
            "Used when something is compared against itself.",
        ),
        "W0143": (
            "Comparing against a callable, did you omit the parenthesis?",
            "comparison-with-callable",
            "This message is emitted when pylint detects that a comparison with a "
            "callable was made, which might suggest that some parenthesis were omitted, "
            "resulting in potential unwanted behaviour.",
        ),
    }

    def _check_singleton_comparison(self, singleton, root_node, negative_check=False):
        """Suggest the idiomatic form for comparisons against True/False/None."""
        if singleton.value is True:
            if not negative_check:
                suggestion = "just 'expr' or 'expr is True'"
            else:
                suggestion = "just 'not expr' or 'expr is False'"
            self.add_message(
                "singleton-comparison", node=root_node, args=(True, suggestion)
            )
        elif singleton.value is False:
            if not negative_check:
                suggestion = "'not expr' or 'expr is False'"
            else:
                suggestion = "'expr' or 'expr is not False'"
            self.add_message(
                "singleton-comparison", node=root_node, args=(False, suggestion)
            )
        elif singleton.value is None:
            if not negative_check:
                suggestion = "'expr is None'"
            else:
                suggestion = "'expr is not None'"
            self.add_message(
                "singleton-comparison", node=root_node, args=(None, suggestion)
            )

    def _check_literal_comparison(self, literal, node):
        """Check if we compare to a literal, which is usually what we do not want to do."""
        nodes = (astroid.List, astroid.Tuple, astroid.Dict, astroid.Set)
        is_other_literal = isinstance(literal, nodes)
        is_const = False
        if isinstance(literal, astroid.Const):
            if literal.value in (True, False, None):
                # Singletons are handled by _check_singleton_comparison instead.
                return
            is_const = isinstance(literal.value, (bytes, str, int, float))

        if is_const or is_other_literal:
            self.add_message("literal-comparison", node=node)

    def _check_misplaced_constant(self, node, left, right, operator):
        """Emit misplaced-comparison-constant when only the left side is a constant."""
        if isinstance(right, astroid.Const):
            return
        operator = REVERSED_COMPS.get(operator, operator)
        suggestion = "%s %s %r" % (right.as_string(), operator, left.value)
        self.add_message("misplaced-comparison-constant", node=node, args=(suggestion,))

    def _check_logical_tautology(self, node):
        """Check if identifier is compared against itself.
        :param node: Compare node
        :type node: astroid.node_classes.Compare
        :Example:
        val = 786
        if val == val:  # [comparison-with-itself]
            pass
        """
        left_operand = node.left
        right_operand = node.ops[0][1]
        operator = node.ops[0][0]
        if isinstance(left_operand, astroid.Const) and isinstance(
            right_operand, astroid.Const
        ):
            left_operand = left_operand.value
            right_operand = right_operand.value
        elif isinstance(left_operand, astroid.Name) and isinstance(
            right_operand, astroid.Name
        ):
            left_operand = left_operand.name
            right_operand = right_operand.name

        if left_operand == right_operand:
            suggestion = "%s %s %s" % (left_operand, operator, right_operand)
            self.add_message("comparison-with-itself", node=node, args=(suggestion,))

    def _check_callable_comparison(self, node):
        """Emit comparison-with-callable when exactly one operand is a bare callable."""
        operator = node.ops[0][0]
        if operator not in COMPARISON_OPERATORS:
            return

        bare_callables = (astroid.FunctionDef, astroid.BoundMethod)
        left_operand, right_operand = node.left, node.ops[0][1]
        # this message should be emitted only when there is comparison of bare callable
        # with non bare callable.
        if (
            sum(
                1
                for operand in (left_operand, right_operand)
                if isinstance(utils.safe_infer(operand), bare_callables)
            )
            == 1
        ):
            self.add_message("comparison-with-callable", node=node)

    @utils.check_messages(
        "singleton-comparison",
        "misplaced-comparison-constant",
        "unidiomatic-typecheck",
        "literal-comparison",
        "comparison-with-itself",
        "comparison-with-callable",
    )
    def visit_compare(self, node):
        """Dispatch all comparison checks on a Compare node."""
        self._check_callable_comparison(node)
        self._check_logical_tautology(node)
        self._check_unidiomatic_typecheck(node)
        # NOTE: this checker only works with binary comparisons like 'x == 42'
        # but not 'x == y == 42'
        if len(node.ops) != 1:
            return

        left = node.left
        operator, right = node.ops[0]
        if operator in COMPARISON_OPERATORS and isinstance(left, astroid.Const):
            self._check_misplaced_constant(node, left, right, operator)

        if operator == "==":
            if isinstance(left, astroid.Const):
                self._check_singleton_comparison(left, node)
            elif isinstance(right, astroid.Const):
                self._check_singleton_comparison(right, node)
        if operator == "!=":
            if isinstance(right, astroid.Const):
                self._check_singleton_comparison(right, node, negative_check=True)
        if operator in ("is", "is not"):
            self._check_literal_comparison(right, node)

    def _check_unidiomatic_typecheck(self, node):
        """Detect 'type(x) <op> Y' comparisons on the first comparison pair."""
        operator, right = node.ops[0]
        if operator in TYPECHECK_COMPARISON_OPERATORS:
            left = node.left
            if _is_one_arg_pos_call(left):
                self._check_type_x_is_y(node, left, operator, right)

    def _check_type_x_is_y(self, node, left, operator, right):
        """Check for expressions like type(x) == Y."""
        left_func = utils.safe_infer(left.func)
        if not (
            isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME
        ):
            return

        if operator in ("is", "is not") and _is_one_arg_pos_call(right):
            right_func = utils.safe_infer(right.func)
            if (
                isinstance(right_func, astroid.ClassDef)
                and right_func.qname() == TYPE_QNAME
            ):
                # type(x) == type(a)
                right_arg = utils.safe_infer(right.args[0])
                if not isinstance(right_arg, LITERAL_NODE_TYPES):
                    # not e.g. type(x) == type([])
                    return
        self.add_message("unidiomatic-typecheck", node=node)
def register(linter):
    """required method to auto register this checker"""
    # Registration order mirrors the declaration order of the checkers.
    for checker_class in (
        BasicErrorChecker,
        BasicChecker,
        NameChecker,
        DocStringChecker,
        PassChecker,
        ComparisonChecker,
    ):
        linter.register_checker(checker_class(linter))
|
kczapla/pylint
|
pylint/checkers/base.py
|
Python
|
gpl-2.0
| 84,804
|
[
"VisIt"
] |
63085a7f78dd0502674c676427acb6d881fa3a9fc4cf0383e4eae36a14fe61a7
|
#!/usr/bin/env python
########################################################################
# File : dirac-admin-get-CAs
# Author : Ricardo Graciani
########################################################################
"""Refresh the local copy of the CA certificates and revocation lists.
Connects to the BundleDelivery service to obtain the tar balls. Needed when proxies appear to be
invalid.
Usage:
  dirac-admin-get-CAs (<options>|<cfgFile>)*
"""
import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.FrameworkSystem.Client.BundleDeliveryClient import BundleDeliveryClient

__RCSID__ = "$Id$"

# Certificate checks would fail with the very CAs we are about to refresh.
Script.addDefaultOptionValue('/DIRAC/Security/SkipCAChecks', 'yes')
Script.setUsageMessage(__doc__)
Script.parseCommandLine(ignoreErrors=True)

bdc = BundleDeliveryClient()

# Synchronize CAs first, then CRLs; abort with exit code 1 on the first failure.
for syncCall, errorMsg, updatedMsg, syncedMsg in (
    (bdc.syncCAs,
     "Error while updating CAs", "CAs got updated", "CAs are already synchronized"),
    (bdc.syncCRLs,
     "Error while updating CRLs", "CRLs got updated", "CRLs are already synchronized"),
):
  result = syncCall()
  if not result['OK']:
    DIRAC.gLogger.error(errorMsg, result['Message'])
    DIRAC.exit(1)
  elif result['Value']:
    DIRAC.gLogger.notice(updatedMsg)
  else:
    DIRAC.gLogger.notice(syncedMsg)
DIRAC.exit(0)
|
fstagni/DIRAC
|
FrameworkSystem/scripts/dirac-admin-get-CAs.py
|
Python
|
gpl-3.0
| 1,320
|
[
"DIRAC"
] |
1a2e0b6ec4a9e314d464b951401736d8ca9d0b17a6c4f20cb39c3b9c853a1c60
|
########################################################################
# $HeadURL$
########################################################################
""" DIRAC FileCatalog mix-in class to manage directory metadata
"""
__RCSID__ = "$Id$"
import os, types
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.Time import queryTime
class DirectoryMetadata:
  def __init__( self, database = None ):
    # Underlying FileCatalog database object; may also be injected later
    # through setDatabase().
    self.db = database
  def setDatabase( self, database ):
    # Inject/replace the FileCatalog database backend used by this mix-in.
    self.db = database
##############################################################################
#
# Manage Metadata fields
#
##############################################################################
def addMetadataField( self, pname, ptype, credDict ):
""" Add a new metadata parameter to the Metadata Database.
pname - parameter name, ptype - parameter type in the MySQL notation
"""
result = self.db.fmeta.getFileMetadataFields( credDict )
if not result['OK']:
return result
if pname in result['Value'].keys():
return S_ERROR( 'The metadata %s is already defined for Files' % pname )
result = self.getMetadataFields( credDict )
if not result['OK']:
return result
if pname in result['Value'].keys():
if ptype.lower() == result['Value'][pname].lower():
return S_OK( 'Already exists' )
else:
return S_ERROR( 'Attempt to add an existing metadata with different type: %s/%s' %
( ptype, result['Value'][pname] ) )
valueType = ptype
if ptype.lower()[:3] == 'int':
valueType = 'INT'
elif ptype.lower() == 'string':
valueType = 'VARCHAR(128)'
elif ptype.lower() == 'float':
valueType = 'FLOAT'
elif ptype.lower() == 'date':
valueType = 'DATETIME'
elif ptype == "MetaSet":
valueType = "VARCHAR(64)"
req = "CREATE TABLE FC_Meta_%s ( DirID INTEGER NOT NULL, Value %s, PRIMARY KEY (DirID), INDEX (Value) )" \
% ( pname, valueType )
result = self.db._query( req )
if not result['OK']:
return result
result = self.db._insert( 'FC_MetaFields', ['MetaName', 'MetaType'], [pname, ptype] )
if not result['OK']:
return result
metadataID = result['lastRowId']
result = self.__transformMetaParameterToData( pname )
if not result['OK']:
return result
return S_OK( "Added new metadata: %d" % metadataID )
def deleteMetadataField( self, pname, credDict ):
""" Remove metadata field
"""
req = "DROP TABLE FC_Meta_%s" % pname
result = self.db._update( req )
error = ''
if not result['OK']:
error = result["Message"]
req = "DELETE FROM FC_MetaFields WHERE MetaName='%s'" % pname
result = self.db._update( req )
if not result['OK']:
if error:
result["Message"] = error + "; " + result["Message"]
return result
def getMetadataFields( self, credDict ):
""" Get all the defined metadata fields
"""
req = "SELECT MetaName,MetaType FROM FC_MetaFields"
result = self.db._query( req )
if not result['OK']:
return result
metaDict = {}
for row in result['Value']:
metaDict[row[0]] = row[1]
return S_OK( metaDict )
def addMetadataSet( self, metaSetName, metaSetDict, credDict ):
""" Add a new metadata set with the contents from metaSetDict
"""
result = self.getMetadataFields( credDict )
if not result['OK']:
return result
metaTypeDict = result['Value']
# Check the sanity of the metadata set contents
for key in metaSetDict:
if not key in metaTypeDict:
return S_ERROR( 'Unknown key %s' % key )
result = self.db._insert( 'FC_MetaSetNames', ['MetaSetName'], [metaSetName] )
if not result['OK']:
return result
metaSetID = result['lastRowId']
req = "INSERT INTO FC_MetaSets (MetaSetID,MetaKey,MetaValue) VALUES %s"
vList = []
for key, value in metaSetDict.items():
vList.append( "(%d,'%s','%s')" % ( metaSetID, key, str( value ) ) )
vString = ','.join( vList )
result = self.db._update( req % vString )
return result
def getMetadataSet( self, metaSetName, expandFlag, credDict ):
""" Get fully expanded contents of the metadata set
"""
result = self.getMetadataFields( credDict )
if not result['OK']:
return result
metaTypeDict = result['Value']
req = "SELECT S.MetaKey,S.MetaValue FROM FC_MetaSets as S, FC_MetaSetNames as N "
req += "WHERE N.MetaSetName='%s' AND N.MetaSetID=S.MetaSetID" % metaSetName
result = self.db._query( req )
if not result['OK']:
return result
if not result['Value']:
return S_OK( {} )
resultDict = {}
for key, value in result['Value']:
if not key in metaTypeDict:
return S_ERROR( 'Unknown key %s' % key )
if expandFlag:
if metaTypeDict[key] == "MetaSet":
result = self.getMetadataSet( value, expandFlag, credDict )
if not result['OK']:
return result
resultDict.update( result['Value'] )
else:
resultDict[key] = value
else:
resultDict[key] = value
return S_OK( resultDict )
#############################################################################################
#
# Set and get directory metadata
#
#############################################################################################
  def setMetadata( self, dpath, metadict, credDict ):
    """ Set the value of given metadata fields for the given directory path

        Declared fields go into the indexed FC_Meta_* tables; undeclared
        names are stored as plain meta parameters.
    """
    result = self.getMetadataFields( credDict )
    if not result['OK']:
      return result
    metaFields = result['Value']
    result = self.db.dtree.findDir( dpath )
    if not result['OK']:
      return result
    if not result['Value']:
      return S_ERROR( 'Path not found: %s' % dpath )
    dirID = result['Value']
    # Metadata of the parent directories only (owndata=False), used below
    # for conflict detection with inherited values
    dirmeta = self.getDirectoryMetadata( dpath, credDict, owndata = False )
    if not dirmeta['OK']:
      return dirmeta
    for metaName, metaValue in metadict.items():
      if not metaName in metaFields:
        # Undeclared field: store as a non-indexed meta parameter
        result = self.setMetaParameter( dpath, metaName, metaValue, credDict )
        if not result['OK']:
          return result
        continue
      # Check that the metadata is not defined for the parent directories
      if metaName in dirmeta['Value']:
        return S_ERROR( 'Metadata conflict detected for %s for directory %s' % ( metaName, dpath ) )
      result = self.db._insert( 'FC_Meta_%s' % metaName, ['DirID', 'Value'], [dirID, metaValue] )
      if not result['OK']:
        if result['Message'].find( 'Duplicate' ) != -1:
          # The directory already has a value for this field: overwrite it
          req = "UPDATE FC_Meta_%s SET Value='%s' WHERE DirID=%d" % ( metaName, metaValue, dirID )
          result = self.db._update( req )
          if not result['OK']:
            return result
        else:
          return result
    return S_OK()
def removeMetadata( self, dpath, metadata, credDict ):
""" Remove the specified metadata for the given directory
"""
result = self.getMetadataFields( credDict )
if not result['OK']:
return result
metaFields = result['Value']
result = self.db.dtree.findDir( dpath )
if not result['OK']:
return result
if not result['Value']:
return S_ERROR( 'Path not found: %s' % dpath )
dirID = result['Value']
failedMeta = {}
for meta in metadata:
if meta in metaFields:
# Indexed meta case
req = "DELETE FROM FC_Meta_%s WHERE DirID=%d" % ( meta, dirID )
result = self.db._update( req )
if not result['OK']:
failedMeta[meta] = result['Value']
else:
# Meta parameter case
req = "DELETE FROM FC_DirMeta WHERE MetaKey='%s' AND DirID=%d" % ( meta, dirID )
result = self.db._update( req )
if not result['OK']:
failedMeta[meta] = result['Value']
if failedMeta:
metaExample = failedMeta.keys()[0]
result = S_ERROR( 'Failed to remove %d metadata, e.g. %s' % ( len( failedMeta ), failedMeta[metaExample] ) )
result['FailedMetadata'] = failedMeta
else:
return S_OK()
def setMetaParameter( self, dpath, metaName, metaValue, credDict ):
""" Set an meta parameter - metadata which is not used in the the data
search operations
"""
result = self.db.dtree.findDir( dpath )
if not result['OK']:
return result
if not result['Value']:
return S_ERROR( 'Path not found: %s' % dpath )
dirID = result['Value']
result = self.db._insert( 'FC_DirMeta',
['DirID', 'MetaKey', 'MetaValue'],
[dirID, metaName, str( metaValue )] )
return result
def getDirectoryMetaParameters( self, dpath, credDict, inherited = True, owndata = True ):
""" Get meta parameters for the given directory
"""
if inherited:
result = self.db.dtree.getPathIDs( dpath )
if not result['OK']:
return result
pathIDs = result['Value']
dirID = pathIDs[-1]
else:
result = self.db.dtree.findDir( dpath )
if not result['OK']:
return result
if not result['Value']:
return S_ERROR( 'Path not found: %s' % dpath )
dirID = result['Value']
pathIDs = [dirID]
if len( pathIDs ) > 1:
pathString = ','.join( [ str( x ) for x in pathIDs ] )
req = "SELECT DirID,MetaKey,MetaValue from FC_DirMeta where DirID in (%s)" % pathString
else:
req = "SELECT DirID,MetaKey,MetaValue from FC_DirMeta where DirID=%d " % dirID
result = self.db._query( req )
if not result['OK']:
return result
if not result['Value']:
return S_OK( {} )
metaDict = {}
for _dID, key, value in result['Value']:
if metaDict.has_key( key ):
if type( metaDict[key] ) == types.ListType:
metaDict[key].append( value )
else:
metaDict[key] = [metaDict[key]].append( value )
else:
metaDict[key] = value
return S_OK( metaDict )
  def getDirectoryMetadata( self, path, credDict, inherited = True, owndata = True ):
    """ Get metadata for the given directory aggregating metadata for the directory itself
        and for all the parent directories if inherited flag is True. Get also the non-indexed
        metadata parameters.

        :param inherited: include metadata inherited from parent directories
        :param owndata: include metadata defined on the directory itself
        :return: S_OK( metaDict ) with extra keys 'MetadataOwner' and 'MetadataType'
    """
    result = self.db.dtree.getPathIDs( path )
    if not result['OK']:
      return result
    pathIDs = result['Value']
    result = self.getMetadataFields( credDict )
    if not result['OK']:
      return result
    metaFields = result['Value']
    metaDict = {}
    metaOwnerDict = {}
    metaTypeDict = {}
    dirID = pathIDs[-1]
    if not inherited:
      # Restrict to the directory itself
      pathIDs = pathIDs[-1:]
    if not owndata:
      # Restrict to the parent directories
      pathIDs = pathIDs[:-1]
    pathString = ','.join( [ str( x ) for x in pathIDs ] )
    for meta in metaFields:
      req = "SELECT Value,DirID FROM FC_Meta_%s WHERE DirID in (%s)" % ( meta, pathString )
      result = self.db._query( req )
      if not result['OK']:
        return result
      # A given metadata may be defined at most once along the path
      if len( result['Value'] ) > 1:
        return S_ERROR( 'Metadata conflict for %s for directory %s' % (meta, path) )
      if result['Value']:
        metaDict[meta] = result['Value'][0][0]
        if int( result['Value'][0][1] ) == dirID:
          metaOwnerDict[meta] = 'OwnMetadata'
        else:
          metaOwnerDict[meta] = 'ParentMetadata'
        metaTypeDict[meta] = metaFields[meta]
    # Get also non-searchable data
    result = self.getDirectoryMetaParameters( path, credDict, inherited, owndata )
    if result['OK']:
      metaDict.update( result['Value'] )
      for meta in result['Value']:
        metaOwnerDict[meta] = 'OwnParameter'
    result = S_OK( metaDict )
    result['MetadataOwner'] = metaOwnerDict
    result['MetadataType'] = metaTypeDict
    return result
def __transformMetaParameterToData( self, metaname ):
""" Relocate the meta parameters of all the directories to the corresponding
indexed metadata table
"""
req = "SELECT DirID,MetaValue from FC_DirMeta WHERE MetaKey='%s'" % metaname
result = self.db._query( req )
if not result['OK']:
return result
if not result['Value']:
return S_OK()
dirDict = {}
for dirID, meta in result['Value']:
dirDict[dirID] = meta
dirList = dirDict.keys()
# Exclude child directories from the list
for dirID in dirList:
result = self.db.dtree.getSubdirectoriesByID( dirID )
if not result['OK']:
return result
if not result['Value']:
continue
childIDs = result['Value'].keys()
for childID in childIDs:
if childID in dirList:
del dirList[dirList.index( childID )]
insertValueList = []
for dirID in dirList:
insertValueList.append( "( %d,'%s' )" % ( dirID, dirDict[dirID] ) )
req = "INSERT INTO FC_Meta_%s (DirID,Value) VALUES %s" % ( metaname, ', '.join( insertValueList ) )
result = self.db._update( req )
if not result['OK']:
return result
req = "DELETE FROM FC_DirMeta WHERE MetaKey='%s'" % metaname
result = self.db._update( req )
return result
############################################################################################
#
# Find directories corresponding to the metadata
#
############################################################################################
  def __createMetaSelection( self, meta, value, table = '' ):
    """ Build an SQL selection string for one metadata value specification.

        :param meta: metadata name ( unused here, kept for caller symmetry )
        :param value: scalar value, list of values, or dict of { operator : operand }
        :param table: optional table alias prefix, e.g. "M."
        :return: S_OK( selection string ); empty string means no restriction ( 'Any' )
    """
    # NOTE(review): operands are interpolated directly into SQL; assumes they
    # were validated upstream - parameterized queries would be safer.
    if type( value ) == types.DictType:
      selectList = []
      for operation, operand in value.items():
        if operation in ['>', '<', '>=', '<=']:
          # Comparison operators take a single scalar operand
          if type( operand ) == types.ListType:
            return S_ERROR( 'Illegal query: list of values for comparison operation' )
          if type( operand ) in [types.IntType, types.LongType]:
            selectList.append( "%sValue%s%d" % ( table, operation, operand ) )
          elif type( operand ) == types.FloatType:
            selectList.append( "%sValue%s%f" % ( table, operation, operand ) )
          else:
            selectList.append( "%sValue%s'%s'" % ( table, operation, operand ) )
        elif operation == 'in' or operation == "=":
          if type( operand ) == types.ListType:
            vString = ','.join( [ "'" + str( x ) + "'" for x in operand] )
            selectList.append( "%sValue IN (%s)" % ( table, vString ) )
          else:
            selectList.append( "%sValue='%s'" % ( table, operand ) )
        elif operation == 'nin' or operation == "!=":
          if type( operand ) == types.ListType:
            vString = ','.join( [ "'" + str( x ) + "'" for x in operand] )
            selectList.append( "%sValue NOT IN (%s)" % ( table, vString ) )
          else:
            selectList.append( "%sValue!='%s'" % ( table, operand ) )
      selectString = ' AND '.join( selectList )
    elif type( value ) == types.ListType:
      # A plain list means any of the given values
      vString = ','.join( [ "'" + str( x ) + "'" for x in value] )
      selectString = "%sValue in (%s)" % ( table, vString )
    else:
      if value == "Any":
        selectString = ''
      else:
        selectString = "%sValue='%s' " % ( table, value )
    return S_OK( selectString )
  def __findSubdirByMeta( self, meta, value, pathSelection = '', subdirFlag = True ):
    """ Find directories for the given meta datum. If the meta datum type is a list,
        combine values in OR. In case the meta datum is 'Any', finds all the subdirectories
        for which the meta datum is defined at all.

        :param pathSelection: optional SQL subquery restricting the candidate DirIDs
        :param subdirFlag: if True, also return all subdirectories of the matches
        :return: S_OK( list of DirIDs )
    """
    result = self.__createMetaSelection( meta, value, "M." )
    if not result['OK']:
      return result
    selectString = result['Value']
    req = " SELECT M.DirID FROM FC_Meta_%s AS M" % meta
    if pathSelection:
      # Restrict the search to the directories of the given path selection
      req += " JOIN ( %s ) AS P WHERE M.DirID=P.DirID" % pathSelection
    if selectString:
      if pathSelection:
        req += " AND %s" % selectString
      else:
        req += " WHERE %s" % selectString
    result = self.db._query( req )
    if not result['OK']:
      return result
    if not result['Value']:
      return S_OK( [] )
    dirList = []
    for row in result['Value']:
      dirID = row[0]
      dirList.append( dirID )
    if subdirFlag:
      # Metadata is inherited: subdirectories of a matching directory match too
      result = self.db.dtree.getAllSubdirectoriesByID( dirList )
      if not result['OK']:
        return result
      dirList += result['Value']
    return S_OK( dirList )
def __findSubdirMissingMeta( self, meta, pathSelection ):
""" Find directories not having the given meta datum defined
"""
result = self.__findSubdirByMeta( meta, 'Any', pathSelection )
if not result['OK']:
return result
dirList = result['Value']
table = self.db.dtree.getTreeTable()
dirString = ','.join( [ str( x ) for x in dirList ] )
if dirList:
req = 'SELECT DirID FROM %s WHERE DirID NOT IN ( %s )' % ( table, dirString )
else:
req = 'SELECT DirID FROM %s' % table
result = self.db._query( req )
if not result['OK']:
return result
if not result['Value']:
return S_OK( [] )
dirList = [ x[0] for x in result['Value'] ]
return S_OK( dirList )
def __expandMetaDictionary( self, metaDict, credDict ):
""" Expand the dictionary with metadata query
"""
result = self.getMetadataFields( credDict )
if not result['OK']:
return result
metaTypeDict = result['Value']
resultDict = {}
extraDict = {}
for key, value in metaDict.items():
if not key in metaTypeDict:
#return S_ERROR( 'Unknown metadata field %s' % key )
extraDict[key] = value
continue
keyType = metaTypeDict[key]
if keyType != "MetaSet":
resultDict[key] = value
else:
result = self.getMetadataSet( value, True, credDict )
if not result['OK']:
return result
mDict = result['Value']
for mk, mv in mDict.items():
if mk in resultDict:
return S_ERROR( 'Contradictory query for key %s' % mk )
else:
resultDict[mk] = mv
result = S_OK( resultDict )
result['ExtraMetadata'] = extraDict
return result
def __checkDirsForMetadata( self, meta, value, pathString ):
""" Check if any of the given directories conform to the given metadata
"""
result = self.__createMetaSelection( meta, value, "M." )
if not result['OK']:
return result
selectString = result['Value']
if selectString:
req = "SELECT M.DirID FROM FC_Meta_%s AS M WHERE %s AND M.DirID IN (%s)" % ( meta, selectString, pathString )
else:
req = "SELECT M.DirID FROM FC_Meta_%s AS M WHERE M.DirID IN (%s)" % ( meta, pathString )
result = self.db._query( req )
if not result['OK']:
return result
elif not result['Value']:
return S_OK( None )
elif len( result['Value'] ) > 1:
return S_ERROR( 'Conflict in the directory metadata hierarchy' )
else:
return S_OK( result['Value'][0][0] )
  @queryTime
  def findDirIDsByMetadata( self, queryDict, path, credDict ):
    """ Find Directories satisfying the given metadata and being subdirectories of
        the given path

        :param dict queryDict: metadata query (MetaSet names are expanded)
        :param str path: root path of the search, '/' meaning the whole tree
        :param dict credDict: credentials of the requester
        :return: S_OK( list of directory IDs ) carrying an extra 'Selection' key:
                 'Done' - an explicit list was selected, 'None' - a selection was
                 attempted but matched nothing, 'All' - no constraint was applied
    """
    pathDirList = []
    pathDirID = 0
    pathString = '0'
    if path != '/':
      result = self.db.dtree.getPathIDs( path )
      if not result['OK']:
        #as result[Value] is already checked in getPathIDs
        return result
      pathIDs = result['Value']
      # Last element is the requested directory itself, the rest its parents
      pathDirID = pathIDs[-1]
      pathString = ','.join( [ str( x ) for x in pathIDs ] )
    # Resolve MetaSet names into concrete metadata conditions
    result = self.__expandMetaDictionary( queryDict, credDict )
    if not result['OK']:
      return result
    metaDict = result['Value']
    # Now check the meta data for the requested directory and its parents
    finalMetaDict = dict( metaDict )
    for meta in metaDict.keys():
      result = self.__checkDirsForMetadata( meta, metaDict[meta], pathString )
      if not result['OK']:
        return result
      elif result['Value'] is not None:
        # Some directory in the parent hierarchy is already conforming with the
        # given metadata, no need to check it further
        del finalMetaDict[meta]
    if finalMetaDict:
      # Conditions remain: evaluate each one and intersect the resulting lists
      pathSelection = ''
      if pathDirID:
        result = self.db.dtree.getSubdirectoriesByID( pathDirID, includeParent = True, requestString = True )
        if not result['OK']:
          return result
        pathSelection = result['Value']
      dirList = []
      first = True
      for meta, value in finalMetaDict.items():
        # The special value "Missing" selects directories NOT defining the field
        if value == "Missing":
          result = self.__findSubdirMissingMeta( meta, pathSelection )
        else:
          result = self.__findSubdirByMeta( meta, value, pathSelection )
        if not result['OK']:
          return result
        mList = result['Value']
        if first:
          dirList = mList
          first = False
        else:
          # Keep only directories matching all conditions seen so far
          newList = []
          for d in dirList:
            if d in mList:
              newList.append( d )
          dirList = newList
    else:
      # Every condition is already satisfied by the parent hierarchy:
      # the selection is simply the subtree of the requested path
      if pathDirID:
        result = self.db.dtree.getSubdirectoriesByID( pathDirID, includeParent = True )
        if not result['OK']:
          return result
        pathDirList = result['Value'].keys()
    finalList = []
    dirSelect = False
    if finalMetaDict:
      dirSelect = True
      finalList = dirList
      if pathDirList:
        # Restrict the metadata selection to the requested subtree
        finalList = list( set( dirList ) & set( pathDirList ) )
    else:
      if pathDirList:
        dirSelect = True
        finalList = pathDirList
    result = S_OK( finalList )
    if finalList:
      result['Selection'] = 'Done'
    elif dirSelect:
      result['Selection'] = 'None'
    else:
      result['Selection'] = 'All'
    return result
@queryTime
def findDirectoriesByMetadata( self, queryDict, path, credDict ):
""" Find Directory names satisfying the given metadata and being subdirectories of
the given path
"""
result = self.findDirIDsByMetadata( queryDict, path, credDict )
if not result['OK']:
return result
dirIDList = result['Value']
dirNameDict = {}
if dirIDList:
result = self.db.dtree.getDirectoryPaths( dirIDList )
if not result['OK']:
return result
dirNameDict = result['Value']
elif result['Selection'] == 'None':
dirNameDict = { 0:"None" }
elif result['Selection'] == 'All':
dirNameDict = { 0:"All" }
return S_OK( dirNameDict )
def findFilesByMetadata( self, metaDict, path, credDict ):
""" Find Files satisfying the given metadata
"""
result = self.findDirectoriesByMetadata( metaDict, path, credDict )
if not result['OK']:
return result
dirDict = result['Value']
dirList = dirDict.keys()
fileList = []
result = self.db.dtree.getFilesInDirectory( dirList, credDict )
if not result['OK']:
return result
for _fileID, dirID, fname in result['Value']:
fileList.append( dirDict[dirID] + '/' + os.path.basename( fname ) )
return S_OK( fileList )
def findFileIDsByMetadata( self, metaDict, path, credDict, startItem = 0, maxItems = 25 ):
""" Find Files satisfying the given metadata
"""
result = self.findDirIDsByMetadata( metaDict, path, credDict )
if not result['OK']:
return result
dirList = result['Value']
return self.db.dtree.getFileIDsInDirectoryWithLimits( dirList, credDict, startItem, maxItems )
################################################################################################
#
# Find metadata compatible with other metadata in order to organize dynamically updated
# metadata selectors
#
################################################################################################
def __findCompatibleDirectories( self, meta, value, fromDirs ):
""" Find directories compatible with the given meta datum.
Optionally limit the list of compatible directories to only those in the
fromDirs list
"""
# The directories compatible with the given meta datum are:
# - directory for which the datum is defined
# - all the subdirectories of the above directory
# - all the directories in the parent hierarchy of the above directory
# Find directories defining the meta datum and their subdirectories
result = self.__findSubdirByMeta( meta, value, subdirFlag = False )
if not result['OK']:
return result
selectedDirs = result['Value']
if not selectedDirs:
return S_OK( [] )
result = self.db.dtree.getAllSubdirectoriesByID( selectedDirs )
if not result['OK']:
return result
subDirs = result['Value']
# Find parent directories of the directories defining the meta datum
parentDirs = []
for psub in selectedDirs:
result = self.db.dtree.getPathIDsByID( psub )
if not result['OK']:
return result
parentDirs += result['Value']
# Constrain the output to only those that are present in the input list
resDirs = parentDirs + subDirs + selectedDirs
if fromDirs:
resDirs = list( set( resDirs ) & set( fromDirs ) )
return S_OK( resDirs )
def __findDistinctMetadata( self, metaList, dList ):
""" Find distinct metadata values defined for the list of the input directories.
Limit the search for only metadata in the input list
"""
if dList:
dString = ','.join( [ str( x ) for x in dList ] )
else:
dString = None
metaDict = {}
for meta in metaList:
req = "SELECT DISTINCT(Value) FROM FC_Meta_%s" % meta
if dString:
req += " WHERE DirID in (%s)" % dString
result = self.db._query( req )
if not result['OK']:
return result
if result['Value']:
metaDict[meta] = []
for row in result['Value']:
metaDict[meta].append( row[0] )
return S_OK( metaDict )
  def getCompatibleMetadata( self, queryDict, path, credDict ):
    """ Get distinct metadata values compatible with the given already defined metadata

        :param dict queryDict: currently selected metadata (MetaSets are expanded)
        :param str path: path limiting the search, '/' meaning no limit
        :param dict credDict: credentials of the requester
        :return: S_OK( dict ) mapping each metadata field to the list of values
                 still compatible with the current selection
    """
    # Resolve the path into a directory ID, if a path restriction was given
    pathDirID = 0
    if path != '/':
      result = self.db.dtree.findDir( path )
      if not result['OK']:
        return result
      if not result['Value']:
        return S_ERROR( 'Path not found: %s' % path )
      pathDirID = int( result['Value'] )
    # Candidate directories: the path subtree plus its parent chain
    pathDirs = []
    if pathDirID:
      result = self.db.dtree.getSubdirectoriesByID( pathDirID, includeParent = True )
      if not result['OK']:
        return result
      if result['Value']:
        pathDirs = result['Value'].keys()
      result = self.db.dtree.getPathIDsByID( pathDirID )
      if not result['OK']:
        return result
      if result['Value']:
        pathDirs += result['Value']
    # Get the list of metadata fields to inspect
    result = self.getMetadataFields( credDict )
    if not result['OK']:
      return result
    metaFields = result['Value']
    comFields = metaFields.keys()
    # Commented out to return compatible data also for selection metadata
    #for m in metaDict:
    #  if m in comFields:
    #    del comFields[comFields.index( m )]
    result = self.__expandMetaDictionary( queryDict, credDict )
    if not result['OK']:
      return result
    metaDict = result['Value']
    # Narrow the candidate directory list with each metadata condition in turn
    fromList = pathDirs
    anyMeta = True
    if metaDict:
      anyMeta = False
      for meta, value in metaDict.items():
        result = self.__findCompatibleDirectories( meta, value, fromList )
        if not result['OK']:
          return result
        cdirList = result['Value']
        if cdirList:
          fromList = cdirList
        else:
          # Nothing is compatible with this condition: stop early
          fromList = []
          break
    # With no conditions at all (anyMeta), search the whole candidate set
    if anyMeta or fromList:
      result = self.__findDistinctMetadata( comFields, fromList )
    else:
      result = S_OK( {} )
    return result
def removeMetadataForDirectory( self, dirList, credDict ):
""" Remove all the metadata for the given directory list
"""
failed = {}
successful = {}
dirs = dirList
if type( dirList ) != types.ListType:
dirs = [dirList]
dirListString = ','.join( [ str( d ) for d in dirs ] )
# Get the list of metadata fields to inspect
result = self.getMetadataFields( credDict )
if not result['OK']:
return result
metaFields = result['Value']
for meta in metaFields:
req = "DELETE FROM FC_Meta_%s WHERE DirID in ( %s )" % ( meta, dirListString )
result = self.db._query( req )
if not result['OK']:
failed[meta] = result['Message']
else:
successful[meta] = 'OK'
return S_OK( {'Successful':successful, 'Failed':failed} )
|
Andrew-McNab-UK/DIRAC
|
DataManagementSystem/DB/FileCatalogComponents/DirectoryMetadata.py
|
Python
|
gpl-3.0
| 28,658
|
[
"DIRAC"
] |
d994edf9d56442b6d530ebd1ed94346755e25b842c2a59eda44c0d2aeb44bac1
|
"""
Extract a 20 by 20 km grid based on x-y coordinates from a netCDF file containing data on the xgeo-grid.
"""
# Input
INFILE = r"/mnt/grid/metdata/config/xgeo_dem.nc" # "/mnt/grid/metdata/prognosis/meps/det/archive/2022/meps_det_1km_20220207T00Z.nc"
VAR = "xgeo_dem_2" # "air_temperature_2m"
IX = 9 # number between 0 and 59 ------IX=10 and IY=15 is Hemsedal
IY = 15 # number between 0 and 77
TIME_START = 24 # usually the 24h minus model run time e.g., 24-6=18 for the 06-run.
TIME_STOP = TIME_START+23
# Config
X_LEFT = -75000.0
X_RIGHT = 1119000.0
Y_BOTTOM = 6450000.0
Y_TOP = 7999000.0
CELL_SIZE = 1000.0
NX = 1195
NY = 1550
X_START, Y_START = 15, 5
XY_STEP = 20
XY_OFFSET = 19
#TODO: Currently both commands create 21x21 pixel grids, should be 20x20
# ncks using coordinates
print("\nncks using coordinates:")
xl = X_LEFT+X_START*CELL_SIZE+CELL_SIZE*XY_STEP*IX
xr = xl+CELL_SIZE*XY_OFFSET
yb = Y_BOTTOM+Y_START*CELL_SIZE+CELL_SIZE*XY_STEP*IY
yt = yb+CELL_SIZE*XY_OFFSET
ncks_cmd_coord = "ncks -v {var} -d time,{t0},{t1} -d x,{xl:0.1f},{xr:0.1f} -d y,{yb:0.1f},{yt:0.1f} {infile} {var}_extr{ix:02}{iy:02}.nc".format(
var=VAR, xl=xl, xr=xr, yb=yb, yt=yt, ix=IX, iy=IY, t0=TIME_START, t1=TIME_STOP,
infile=INFILE
)
print(ncks_cmd_coord)
# ncks using indicies
print("\nncks using indicies:")
xl = X_START+XY_STEP*IX
xr = xl+XY_OFFSET
yb = Y_START+XY_STEP*IY
yt = yb+XY_OFFSET
ncks_cmd_coord = "ncks -v {var} -d time,{t0},{t1} -d x,{xl},{xr} -d y,{yb},{yt} {infile} {var}_extr{ix:02}{iy:02}i.nc".format(
var=VAR, xl=xl, xr=xr, yb=yb, yt=yt, ix=IX, iy=IY, t0=TIME_START, t1=TIME_STOP,
infile=INFILE
)
print(ncks_cmd_coord)
"""
ncks can also be run with a selection of cells/points by repeating the "-d" option e.g.:
ncks -v air_temperature_2m -d time,24,48 -d x,245 -d y,300 -d x,260 -d y,320 -d x,300 -d y,400 /mnt/grid/metdata/prognosis/meps/det/archive/2022/meps_det_1km_20220207T00Z.nc air_temperature_2m_cells.nc
Note: if x, or y are given without decimal they indicate an index, using decimals will indicate coordinates.
"""
|
kmunve/APS
|
aps/data/met_obs_grid/ncks_command_miniregion.py
|
Python
|
mit
| 2,062
|
[
"NetCDF"
] |
d5daeba0f660dfc93dbcf62db08a61e11b7983e2e0912feda06bbf6c1620ba08
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Jitter Explorer
===============
Application to explore orientation angle and angular dispersity.
From the command line::
# Show docs
python -m sasmodels.jitter --help
# Guyou projection jitter, uniform over 20 degree theta and 10 in phi
python -m sasmodels.jitter --projection=guyou --dist=uniform --jitter=20,10,0
From a jupyter cell::
import ipyvolume as ipv
from sasmodels import jitter
import importlib; importlib.reload(jitter)
jitter.set_plotter("ipv")
size = (10, 40, 100)
view = (20, 0, 0)
#size = (15, 15, 100)
#view = (60, 60, 0)
dview = (0, 0, 0)
#dview = (5, 5, 0)
#dview = (15, 180, 0)
#dview = (180, 15, 0)
projection = 'equirectangular'
#projection = 'azimuthal_equidistance'
#projection = 'guyou'
#projection = 'sinusoidal'
#projection = 'azimuthal_equal_area'
dist = 'uniform'
#dist = 'gaussian'
jitter.run(size=size, view=view, jitter=dview, dist=dist, projection=projection)
#filename = projection+('_theta' if dview[0] == 180 else '_phi' if dview[1] == 180 else '')
#ipv.savefig(filename+'.png')
"""
from __future__ import division, print_function
import argparse
import numpy as np
from numpy import pi, cos, sin, sqrt, exp, log, degrees, radians, arccos, arctan2
# Too many complaints about variable names from pylint:
# a, b, c, u, v, x, y, z, dx, dy, dz, px, py, pz, R, Rx, Ry, Rz, ...
# pylint: disable=invalid-name
def draw_beam(axes, view=(0, 0), alpha=0.5, steps=25):
    """
    Draw the beam going from source at (0, 0, 1) to detector at (0, 0, -1)
    as a thin cylinder rotated into the viewing frame.
    """
    theta, phi = view
    u = np.linspace(0, 2 * pi, steps)
    v = np.linspace(-1, 1, 2)
    # Thin cylinder of radius 0.02 spanning z in [-1.3, 1.3]
    r = 0.02
    x = r*np.outer(cos(u), np.ones_like(v))
    y = r*np.outer(sin(u), np.ones_like(v))
    z = 1.3*np.outer(np.ones_like(u), v)
    shape = x.shape
    points = np.matrix([x.flatten(), y.flatten(), z.flatten()])
    points = Rz(phi)*Ry(theta)*points
    x, y, z = [p.reshape(shape) for p in points]
    axes.plot_surface(x, y, z, color='yellow', alpha=alpha)
    # TODO: draw endcaps on beam
def draw_ellipsoid(axes, size, view, jitter, steps=25, alpha=1):
    """Draw an ellipsoid."""
    a, b, c = size
    u = np.linspace(0, 2 * pi, steps)
    v = np.linspace(0, pi, steps)
    # Parametric surface of the unit sphere scaled by the semi-axes
    x, y, z = (
        a*np.outer(cos(u), sin(v)),
        b*np.outer(sin(u), sin(v)),
        c*np.outer(np.ones_like(u), cos(v)),
    )
    x, y, z = transform_xyz(view, jitter, x, y, z)
    axes.plot_surface(x, y, z, color='w', alpha=alpha)
    # Label the ends of the three semi-axes
    axis_labels = [
        ('c+', [+0, +0, +c], [+1, +0, +0]),
        ('c-', [+0, +0, -c], [+0, +0, -1]),
        ('a+', [+a, +0, +0], [+0, +0, +1]),
        ('a-', [-a, +0, +0], [+0, +0, -1]),
        ('b+', [+0, +b, +0], [-1, +0, +0]),
        ('b-', [+0, -b, +0], [-1, +0, +0]),
    ]
    draw_labels(axes, view, jitter, axis_labels)
def draw_sc(axes, size, view, jitter, steps=None, alpha=1):
    """Draw points for simple cubic paracrystal"""
    # pylint: disable=unused-argument
    _draw_crystal(axes, size, view, jitter, atoms=_build_sc())
def draw_fcc(axes, size, view, jitter, steps=None, alpha=1):
    """Draw points for face-centered cubic paracrystal"""
    # pylint: disable=unused-argument
    # Build the simple cubic crystal
    atoms = _build_sc()
    # Define the centers for each face
    # x planes at -1, 0, 1 have four centers per plane, at +/- 0.5 in y and z
    # NOTE(fix): all three lists must be 12 long so the cyclic shifts below
    # line up.  The previous z list was [-0.5, 0.5]*12 (24 entries), which
    # misaligned the zip over the concatenations: it produced 48 atoms,
    # including spurious positions such as (+/-0.5, +/-0.5, +/-0.5), and
    # never generated the z-plane face centers at all.
    x, y, z = (
        [-1]*4 + [0]*4 + [1]*4,
        ([-0.5]*2 + [0.5]*2)*3,
        [-0.5, 0.5]*6,
    )
    # y and z planes can be generated by substituting x for y and z respectively
    atoms.extend(zip(x+y+z, y+z+x, z+x+y))
    _draw_crystal(axes, size, view, jitter, atoms=atoms)
def draw_bcc(axes, size, view, jitter, steps=None, alpha=1):
    """Draw points for body-centered cubic paracrystal"""
    # pylint: disable=unused-argument
    # Build the simple cubic crystal
    atoms = _build_sc()
    # Add the eight octant centers at (+/-0.5, +/-0.5, +/-0.5)
    atoms.extend((sx, sy, sz)
                 for sx in (-0.5, 0.5)
                 for sy in (-0.5, 0.5)
                 for sz in (-0.5, 0.5))
    _draw_crystal(axes, size, view, jitter, atoms=atoms)
def _draw_crystal(axes, size, view, jitter, atoms=None):
    """Scatter-plot the crystal atom positions, highlighting the first atom."""
    positions = np.asarray(atoms, 'd').T
    scale = np.asarray(size, 'd')
    x, y, z = positions * scale[:, None]
    x, y, z = transform_xyz(view, jitter, x, y, z)
    # First atom in yellow so it can be tracked through rotations
    axes.scatter([x[0]], [y[0]], [z[0]], c='yellow', marker='o')
    axes.scatter(x[1:], y[1:], z[1:], c='r', marker='o')
def _build_sc():
# three planes of 9 dots for x at -1, 0 and 1
x, y, z = (
[-1]*9 + [0]*9 + [1]*9,
([-1]*3 + [0]*3 + [1]*3)*3,
[-1, 0, 1]*9,
)
atoms = list(zip(x, y, z))
#print(list(enumerate(atoms)))
# Pull the dot at (0, 0, 1) to the front of the list
# It will be highlighted in the view
index = 14
highlight = atoms[index]
del atoms[index]
atoms.insert(0, highlight)
return atoms
def draw_box(axes, size, view):
    """Draw a wireframe box at a particular view."""
    a, b, c = size
    # Eight corners as sign patterns scaled by the half-lengths
    x = a*np.array([+1, -1, +1, -1, +1, -1, +1, -1])
    y = b*np.array([+1, +1, -1, -1, +1, +1, -1, -1])
    z = c*np.array([+1, +1, +1, +1, -1, -1, -1, -1])
    x, y, z = transform_xyz(view, None, x, y, z)
    # Only the edges radiating from corners 0 and 7 are drawn
    for i, j in ((0, 1), (0, 2), (0, 3), (7, 4), (7, 5), (7, 6)):
        axes.plot([x[i], x[j]], [y[i], y[j]], [z[i], z[j]], color='black')
def draw_parallelepiped(axes, size, view, jitter, steps=None,
                        color=(0.6, 1.0, 0.6), alpha=1):
    """Draw a parallelepiped surface, with view and jitter.

    *size* gives the (a, b, c) half-lengths used to scale the corner
    signs below; *view* and *jitter* are passed to :func:`transform_xyz`.
    """
    # pylint: disable=unused-argument
    a, b, c = size
    # Eight corners as sign patterns scaled by the half-lengths
    x = a*np.array([+1, -1, +1, -1, +1, -1, +1, -1])
    y = b*np.array([+1, +1, -1, -1, +1, +1, -1, -1])
    z = c*np.array([+1, +1, +1, +1, -1, -1, -1, -1])
    tri = np.array([
        # counter clockwise triangles
        # z: up/down, x: right/left, y: front/back
        [0, 1, 2], [3, 2, 1], # top face
        [6, 5, 4], [5, 6, 7], # bottom face
        [0, 2, 6], [6, 4, 0], # right face
        [1, 5, 7], [7, 3, 1], # left face
        [2, 3, 6], [7, 6, 3], # front face
        [4, 1, 0], [5, 1, 4], # back face
    ])
    x, y, z = transform_xyz(view, jitter, x, y, z)
    axes.plot_trisurf(x, y, triangles=tri, Z=z, color=color, alpha=alpha,
                      linewidth=0)
    # Colour the c+ face of the box.
    # Since I can't control face color, instead draw a thin box situated just
    # in front of the "c+" face. Use the c face so that rotations about psi
    # rotate that face.
    if 0: # pylint: disable=using-constant-test
        color = (1, 0.6, 0.6) # pink
        x = a*np.array([+1, -1, +1, -1, +1, -1, +1, -1])
        y = b*np.array([+1, +1, -1, -1, +1, +1, -1, -1])
        z = c*np.array([+1, +1, +1, +1, -1, -1, -1, -1])
        x, y, z = transform_xyz(view, jitter, x, y, abs(z)+0.001)
        axes.plot_trisurf(x, y, triangles=tri, Z=z, color=color, alpha=alpha)
    # Label the ends of the three half-axes
    draw_labels(axes, view, jitter, [
        ('c+', [+0, +0, +c], [+1, +0, +0]),
        ('c-', [+0, +0, -c], [+0, +0, -1]),
        ('a+', [+a, +0, +0], [+0, +0, +1]),
        ('a-', [-a, +0, +0], [+0, +0, -1]),
        ('b+', [+0, +b, +0], [-1, +0, +0]),
        ('b-', [+0, -b, +0], [-1, +0, +0]),
    ])
def draw_sphere(axes, radius=1.0, steps=25,
                center=(0, 0, 0), color='w', alpha=1.):
    """Draw a sphere"""
    u = np.linspace(0, 2 * pi, steps)
    v = np.linspace(0, pi, steps)
    cx, cy, cz = center
    # Standard latitude-longitude parameterization, shifted to *center*
    x = radius * np.outer(cos(u), sin(v)) + cx
    y = radius * np.outer(sin(u), sin(v)) + cy
    z = radius * np.outer(np.ones(np.size(u)), cos(v)) + cz
    axes.plot_surface(x, y, z, color=color, alpha=alpha)
def draw_axes(axes, origin=(-1, -1, -1), length=(2, 2, 2)):
    """Draw wireframe axes lines, with given origin and length"""
    x0, y0, z0 = origin
    dx, dy, dz = length
    # One line per axis, all starting at the common origin
    axes.plot([x0, x0+dx], [y0, y0], [z0, z0], color='black')
    axes.plot([x0, x0], [y0, y0+dy], [z0, z0], color='black')
    axes.plot([x0, x0], [y0, y0], [z0, z0+dz], color='black')
def draw_person_on_sphere(axes, view, height=0.5, radius=1.0):
    """
    Draw a person on the surface of a sphere.

    *view* indicates (latitude, longitude, orientation)
    *height* is the figure height; *radius* the sphere radius.
    """
    # Stick-figure proportions, all relative to *height*
    limb_offset = height * 0.05
    head_radius = height * 0.10
    head_height = height - head_radius
    neck_length = head_radius * 0.50
    shoulder_height = height - 2*head_radius - neck_length
    torso_length = shoulder_height * 0.55
    torso_radius = torso_length * 0.30
    leg_length = shoulder_height - torso_length
    arm_length = torso_length * 0.90
    def _draw_part(y, z):
        # Draw a y-z polyline lifted to the sphere surface and rotated to *view*
        x = np.zeros_like(y)
        xp, yp, zp = transform_xyz(view, None, x, y, z + radius)
        axes.plot(xp, yp, zp, color='k')
    # circle for head
    u = np.linspace(0, 2 * pi, 40)
    y = head_radius * cos(u)
    z = head_radius * sin(u) + head_height
    _draw_part(y, z)
    # rectangle for body
    y = np.array([-torso_radius, torso_radius, torso_radius, -torso_radius, -torso_radius])
    z = np.array([0., 0, torso_length, torso_length, 0]) + leg_length
    _draw_part(y, z)
    # arms (drawn once, then mirrored in y)
    y = np.array([-torso_radius - limb_offset, -torso_radius - limb_offset, -torso_radius])
    z = np.array([shoulder_height - arm_length, shoulder_height, shoulder_height])
    _draw_part(y, z)
    _draw_part(-y, z) # pylint: disable=invalid-unary-operand-type
    # legs (drawn once, then mirrored in y)
    y = np.array([-torso_radius + limb_offset, -torso_radius + limb_offset])
    z = np.array([0, leg_length])
    _draw_part(y, z)
    _draw_part(-y, z) # pylint: disable=invalid-unary-operand-type
    # Frame the axes so the whole sphere plus figure is visible
    limits = [-radius-height, radius+height]
    axes.set_xlim(limits)
    axes.set_ylim(limits)
    axes.set_zlim(limits)
    axes.set_axis_off()
def draw_jitter(axes, view, jitter, dist='gaussian',
                size=(0.1, 0.4, 1.0),
                draw_shape=draw_parallelepiped,
                projection='equirectangular',
                alpha=0.8,
                views=None):
    """
    Represent jitter as a set of shapes at different orientations.

    *jitter* is the (dtheta, dphi, dpsi) angular dispersion; *dist* is one
    of 'gaussian', 'rectangle' or 'uniform'; *views*, when given, fixes the
    number of steps drawn per jitter axis instead of the adaptive count.
    """
    project, project_weight = get_projection(projection)
    # set max diagonal to 0.95
    scale = 0.95/sqrt(sum(v**2 for v in size))
    size = tuple(scale*v for v in size)
    dtheta, dphi, dpsi = jitter
    # Half-width of the sampled interval in units of the dispersion:
    # 3 sigma for gaussian, sqrt(3) for the sasmodels rectangle width, 1 for uniform
    base = {'gaussian':3, 'rectangle':sqrt(3), 'uniform':1}[dist]
    def _steps(delta):
        # Adaptive step count in [3, 25] unless *views* pins it explicitly
        if views is None:
            n = max(3, min(25, 2*int(base*delta/5)))
        else:
            n = views
        return base*delta*np.linspace(-1, 1, n) if delta > 0 else [0.]
    for theta in _steps(dtheta):
        for phi in _steps(dphi):
            for psi in _steps(dpsi):
                w = project_weight(theta, phi, 1.0, 1.0)
                # Skip points with zero weight (outside the projection's range)
                if w > 0:
                    dview = project(theta, phi, psi)
                    draw_shape(axes, size, view, dview, alpha=alpha)
    for v in 'xyz':
        a, b, c = size
        lim = sqrt(a**2 + b**2 + c**2)
        getattr(axes, 'set_'+v+'lim')([-lim, lim])
        #getattr(axes, v+'axis').label.set_text(v)
# Projection names accepted by get_projection() below.
PROJECTIONS = [
    # in order of PROJECTION number; do not change without updating the
    # constants in kernel_iq.c
    'equirectangular', 'sinusoidal', 'guyou', 'azimuthal_equidistance',
    'azimuthal_equal_area',
]
def get_projection(projection):
    """
    jitter projections
    <https://en.wikipedia.org/wiki/List_of_map_projections>

    equirectangular (standard latitude-longitude mesh)
        <https://en.wikipedia.org/wiki/Equirectangular_projection>
        Allows free movement in phi (around the equator), but theta is
        limited to +/- 90, and points are cos-weighted. Jitter in phi is
        uniform in weight along a line of latitude. With small theta and
        phi ranging over +/- 180 this forms a wobbling disk. With small
        phi and theta ranging over +/- 90 this forms a wedge like a slice
        of an orange.
    azimuthal_equidistance (Postel)
        <https://en.wikipedia.org/wiki/Azimuthal_equidistant_projection>
        Preserves distance from center, and so is an excellent map for
        representing a bivariate gaussian on the surface. Theta and phi
        operate identically, cutting wegdes from the antipode of the viewing
        angle. This unfortunately does not allow free movement in either
        theta or phi since the orthogonal wobble decreases to 0 as the body
        rotates through 180 degrees.
    sinusoidal (Sanson-Flamsteed, Mercator equal-area)
        <https://en.wikipedia.org/wiki/Sinusoidal_projection>
        Preserves arc length with latitude, giving bad behaviour at
        theta near +/- 90. Theta and phi operate somewhat differently,
        so a system with a-b-c dtheta-dphi-dpsi will not give the same
        value as one with b-a-c dphi-dtheta-dpsi, as would be the case
        for azimuthal equidistance. Free movement using theta or phi
        uniform over +/- 180 will work, but not as well as equirectangular
        phi, with theta being slightly worse. Computationally it is much
        cheaper for wide theta-phi meshes since it excludes points which
        lie outside the sinusoid near theta +/- 90 rather than packing
        them close together as in equirectangle. Note that the poles
        will be slightly overweighted for theta > 90 with the circle
        from theta at 90+dt winding backwards around the pole, overlapping
        the circle from theta at 90-dt.
    Guyou (hemisphere-in-a-square) **not weighted**
        <https://en.wikipedia.org/wiki/Guyou_hemisphere-in-a-square_projection>
        With tiling, allows rotation in phi or theta through +/- 180, with
        uniform spacing. Both theta and phi allow free rotation, with wobble
        in the orthogonal direction reasonably well behaved (though not as
        good as equirectangular phi). The forward/reverse transformations
        relies on elliptic integrals that are somewhat expensive, so the
        behaviour has to be very good to justify the cost and complexity.
        The weighting function for each point has not yet been computed.
        Note: run the module *guyou.py* directly and it will show the forward
        and reverse mappings.
    azimuthal_equal_area **incomplete**
        <https://en.wikipedia.org/wiki/Lambert_azimuthal_equal-area_projection>
        Preserves the relative density of the surface patches. Not that
        useful and not completely implemented
    Gauss-Kreuger **not implemented**
        <https://en.wikipedia.org/wiki/Transverse_Mercator_projection#Ellipsoidal_transverse_Mercator>
        Should allow free movement in theta, but phi is distorted.

    Returns a pair of functions *(project, weight)*: *project(theta_i,
    phi_j, psi)* maps jitter angles to *(latitude, longitude, psi, axes)*
    and *weight(theta_i, phi_j, w_i, w_j)* gives the quadrature weight for
    that point.  Raises ValueError for an unknown projection name.
    """
    # pylint: disable=unused-argument
    # TODO: try Kent distribution instead of a gaussian warped by projection
    if projection == 'equirectangular':  #define PROJECTION 1
        def _project(theta_i, phi_j, psi):
            latitude, longitude = theta_i, phi_j
            return latitude, longitude, psi, 'xyz'
            #return Rx(phi_j)*Ry(theta_i)
        def _weight(theta_i, phi_j, w_i, w_j):
            return w_i*w_j*abs(cos(radians(theta_i)))
    elif projection == 'sinusoidal':  #define PROJECTION 2
        def _project(theta_i, phi_j, psi):
            latitude = theta_i
            scale = cos(radians(latitude))
            longitude = phi_j/scale if abs(phi_j) < abs(scale)*180 else 0
            #print("(%+7.2f, %+7.2f) => (%+7.2f, %+7.2f)"%(theta_i, phi_j, latitude, longitude))
            return latitude, longitude, psi, 'xyz'
            #return Rx(longitude)*Ry(latitude)
        # BUG FIX: this function was previously also named _project, which
        # shadowed the projection above and left _weight unbound for the
        # 'sinusoidal' branch, so the return below raised UnboundLocalError.
        def _weight(theta_i, phi_j, w_i, w_j):
            latitude = theta_i
            scale = cos(radians(latitude))
            # Points outside the sinusoid near theta +/- 90 get zero weight
            active = 1 if abs(phi_j) < abs(scale)*180 else 0
            return active*w_i*w_j
    elif projection == 'guyou':  #define PROJECTION 3 (eventually?)
        def _project(theta_i, phi_j, psi):
            from .guyou import guyou_invert
            #latitude, longitude = guyou_invert([theta_i], [phi_j])
            longitude, latitude = guyou_invert([phi_j], [theta_i])
            return latitude, longitude, psi, 'xyz'
            #return Rx(longitude[0])*Ry(latitude[0])
        def _weight(theta_i, phi_j, w_i, w_j):
            # Weighting function not yet computed; treat all points equally
            return w_i*w_j
    elif projection == 'azimuthal_equidistance':
        # Note that calculates angles for Rz Ry rather than Rx Ry
        def _project(theta_i, phi_j, psi):
            latitude = sqrt(theta_i**2 + phi_j**2)
            longitude = degrees(arctan2(phi_j, theta_i))
            #print("(%+7.2f, %+7.2f) => (%+7.2f, %+7.2f)"%(theta_i, phi_j, latitude, longitude))
            return latitude, longitude, psi-longitude, 'zyz'
            #R = Rz(longitude)*Ry(latitude)*Rz(psi)
            #return R_to_xyz(R)
            #return Rz(longitude)*Ry(latitude)
        def _weight(theta_i, phi_j, w_i, w_j):
            # Weighting for each point comes from the integral:
            #     \int\int I(q, lat, log) sin(lat) dlat dlog
            # We are doing a conformal mapping from disk to sphere, so we need
            # a change of variables g(theta, phi) -> (lat, long):
            #     lat, long = sqrt(theta^2 + phi^2), arctan(phi/theta)
            # giving:
            #     dtheta dphi = det(J) dlat dlong
            # where J is the jacobian from the partials of g. Using
            #     R = sqrt(theta^2 + phi^2),
            # then
            #     J = [[x/R, Y/R], -y/R^2, x/R^2]]
            # and
            #     det(J) = 1/R
            # with the final integral being:
            #     \int\int I(q, theta, phi) sin(R)/R dtheta dphi
            #
            # This does approximately the right thing, decreasing the weight
            # of each point as you go farther out on the disk, but it hasn't
            # yet been checked against the 1D integral results. Prior
            # to declaring this "good enough" and checking that integrals
            # work in practice, we will examine alternative mappings.
            #
            # The issue is that the mapping does not support the case of free
            # rotation about a single axis correctly, with a small deviation
            # in the orthogonal axis independent of the first axis. Like the
            # usual polar coordiates integration, the integrated sections
            # form wedges, though at least in this case the wedge cuts through
            # the entire sphere, and treats theta and phi identically.
            latitude = sqrt(theta_i**2 + phi_j**2)
            weight = sin(radians(latitude))/latitude if latitude != 0 else 1
            return weight*w_i*w_j if latitude < 180 else 0
    elif projection == 'azimuthal_equal_area':
        # Note that calculates angles for Rz Ry rather than Rx Ry
        def _project(theta_i, phi_j, psi):
            radius = min(1, sqrt(theta_i**2 + phi_j**2)/180)
            latitude = 180-degrees(2*arccos(radius))
            longitude = degrees(arctan2(phi_j, theta_i))
            #print("(%+7.2f, %+7.2f) => (%+7.2f, %+7.2f)"%(theta_i, phi_j, latitude, longitude))
            return latitude, longitude, psi, 'zyz'
            #R = Rz(longitude)*Ry(latitude)*Rz(psi)
            #return R_to_xyz(R)
            #return Rz(longitude)*Ry(latitude)
        def _weight(theta_i, phi_j, w_i, w_j):
            latitude = sqrt(theta_i**2 + phi_j**2)
            weight = sin(radians(latitude))/latitude if latitude != 0 else 1
            return weight*w_i*w_j if latitude < 180 else 0
    else:
        raise ValueError("unknown projection %r"%projection)

    return _project, _weight
def R_to_xyz(R):
    """
    Return phi, theta, psi Tait-Bryan angles corresponding to the given
    rotation matrix.

    Extracting Euler Angles from a Rotation Matrix, Mike Day, Insomniac Games
    https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2012/07/euler-angles1.pdf
    Based on: Shoemake's "Euler Angle Conversion", Graphics Gems IV, pp. 222-229
    """
    phi, theta, psi = (
        arctan2(R[1, 2], R[2, 2]),
        arctan2(-R[0, 2], sqrt(R[0, 0]**2 + R[0, 1]**2)),
        arctan2(R[0, 1], R[0, 0]),
    )
    return degrees(phi), degrees(theta), degrees(psi)
def draw_mesh(axes, view, jitter, radius=1.2, n=11, dist='gaussian',
              projection='equirectangular'):
    """
    Draw the dispersion mesh showing the theta-phi orientations at which
    the model will be evaluated.

    *n* is the number of sample points per jitter axis; *radius* is the
    distance at which the mesh points are drawn.
    """
    _project, _weight = get_projection(projection)
    def _rotate(theta, phi, z):
        # Rotate the reference vector *z* to the projected (theta, phi);
        # the projection tells us which rotation-axis convention to use
        dview = _project(theta, phi, 0.)
        if dview[3] == 'zyz':
            return Rz(dview[1])*Ry(dview[0])*z
        else: # dview[3] == 'xyz':
            return Rx(dview[1])*Ry(dview[0])*z
    # Sample positions and weights on [-1, 1], scaled per distribution
    dist_x = np.linspace(-1, 1, n)
    weights = np.ones_like(dist_x)
    if dist == 'gaussian':
        dist_x *= 3
        weights = exp(-0.5*dist_x**2)
    elif dist == 'rectangle':
        # Note: uses sasmodels ridiculous definition of rectangle width
        dist_x *= sqrt(3)
    elif dist == 'uniform':
        pass
    else:
        raise ValueError("expected dist to be gaussian, rectangle or uniform")
    # mesh in theta, phi formed by rotating z
    dtheta, dphi, dpsi = jitter # pylint: disable=unused-variable
    z = np.matrix([[0], [0], [radius]])
    points = np.hstack([_rotate(theta_i, phi_j, z)
                        for theta_i in dtheta*dist_x
                        for phi_j in dphi*dist_x])
    dist_w = np.array([_weight(theta_i, phi_j, w_i, w_j)
                       for w_i, theta_i in zip(weights, dtheta*dist_x)
                       for w_j, phi_j in zip(weights, dphi*dist_x)])
    #print(max(dist_w), min(dist_w), min(dist_w[dist_w > 0]))
    # Drop zero-weight points (outside the projection's valid region)
    points = points[:, dist_w > 0]
    dist_w = dist_w[dist_w > 0]
    dist_w /= max(dist_w)
    # rotate relative to beam
    points = orient_relative_to_beam(view, points)
    x, y, z = [np.array(v).flatten() for v in points]
    #plt.figure(2); plt.clf(); plt.hist(z, bins=np.linspace(-1, 1, 51))
    axes.scatter(x, y, z, c=dist_w, marker='o', vmin=0., vmax=1.)
def draw_labels(axes, view, jitter, text):
    """
    Draw text at a particular location.

    *text* is a sequence of (label, location, orientation) triples.
    """
    labels, locations, orientations = zip(*text)
    # Transform label anchor points and orientation vectors together
    px, py, pz = transform_xyz(view, jitter, *zip(*locations))
    dx, dy, dz = transform_xyz(view, jitter, *zip(*orientations))
    # TODO: zdir for labels is broken, and labels aren't appearing.
    for label, p, zdir in zip(labels, zip(px, py, pz), zip(dx, dy, dz)):
        axes.text(p[0], p[1], p[2], label, zdir=np.asarray(zdir).flatten())
# Definition of rotation matrices comes from wikipedia:
#    https://en.wikipedia.org/wiki/Rotation_matrix#Basic_rotations
def Rx(angle):
    """Construct a matrix to rotate points about *x* by *angle* degrees."""
    c, s = cos(radians(angle)), sin(radians(angle))
    return np.matrix([[1, 0, 0],
                      [0, c, -s],
                      [0, s, c]])
def Ry(angle):
    """Construct a matrix to rotate points about *y* by *angle* degrees."""
    c, s = cos(radians(angle)), sin(radians(angle))
    return np.matrix([[c, 0, s],
                      [0, 1, 0],
                      [-s, 0, c]])
def Rz(angle):
    """Construct a matrix to rotate points about *z* by *angle* degrees."""
    c, s = cos(radians(angle)), sin(radians(angle))
    return np.matrix([[c, -s, 0],
                      [s, c, 0],
                      [0, 0, 1]])
def transform_xyz(view, jitter, x, y, z):
    """
    Send a set of (x, y, z) points through the jitter and view transforms.

    The inputs may be any array shape; the outputs have the same shape.
    """
    coords = [np.asarray(v) for v in (x, y, z)]
    shape = coords[0].shape
    # pack the coordinates into a 3 x n matrix for the rotation helpers
    points = np.matrix([c.flatten() for c in coords])
    points = orient_relative_to_beam(view, apply_jitter(jitter, points))
    return tuple(np.array(row).reshape(shape) for row in points)
def apply_jitter(jitter, points):
    """
    Apply the jitter transform to a set of points.

    Points are stored in a 3 x n numpy matrix, not a numpy array or tuple.
    *jitter* is None (no-op), a (dtheta, dphi, dpsi) triple, or a 4-tuple
    whose extra element is ignored.
    """
    if jitter is None:
        return points
    # Hack to deal with the fact that azimuthal_equidistance uses euler angles
    if len(jitter) == 4:
        dtheta, dphi, dpsi = jitter[:3]
        return Rz(dphi)*Ry(dtheta)*Rz(dpsi)*points
    dtheta, dphi, dpsi = jitter
    return Rx(dphi)*Ry(dtheta)*Rz(dpsi)*points
def orient_relative_to_beam(view, points):
    """
    Apply the view transform to a set of points.

    Points are stored in a 3 x n numpy matrix, not a numpy array or tuple.
    *view* is the (theta, phi, psi) orientation of the shape.
    """
    theta, phi, psi = view
    # Rotate about z by psi, then y by theta, then z by phi (z-y-z Euler).
    points = Rz(phi)*Ry(theta)*Rz(psi)*points # viewing angle
    #points = Rz(psi)*Ry(pi/2-theta)*Rz(phi)*points # 1-D integration angles
    #points = Rx(phi)*Ry(theta)*Rz(psi)*points # angular dispersion angle
    return points
def orient_relative_to_beam_quaternion(view, points):
    """
    Apply the view transform to a set of points.

    Points are stored in a 3 x n numpy matrix, not a numpy array or tuple.

    This variant uses quaternions rather than rotation matrices for the
    computation.  It works but it is not used because it doesn't solve
    any problems.  The challenge of mapping theta/phi/psi to SO(3) does
    not disappear by calculating the transform differently.
    """
    theta, phi, psi = view
    x, y, z = [1, 0, 0], [0, 1, 0], [0, 0, 1]
    # Start from the identity quaternion and compose the three rotations.
    q = Quaternion(1, [0, 0, 0])
    ## Compose a rotation about the three axes by rotating
    ## the unit vectors before applying the rotation.
    #q = Quaternion.from_angle_axis(theta, q.rot(x)) * q
    #q = Quaternion.from_angle_axis(phi, q.rot(y)) * q
    #q = Quaternion.from_angle_axis(psi, q.rot(z)) * q
    ## The above turns out to be equivalent to reversing
    ## the order of application, so ignore it and use below.
    q = q * Quaternion.from_angle_axis(theta, x)
    q = q * Quaternion.from_angle_axis(phi, y)
    q = q * Quaternion.from_angle_axis(psi, z)
    ## Reverse the order by post-multiply rather than pre-multiply
    #q = Quaternion.from_angle_axis(theta, x) * q
    #q = Quaternion.from_angle_axis(phi, y) * q
    #q = Quaternion.from_angle_axis(psi, z) * q
    #print("axes psi", q.rot(np.matrix([x, y, z]).T))
    return q.rot(points)
#orient_relative_to_beam = orient_relative_to_beam_quaternion
# === Quaterion class definition === BEGIN
# Simple stand-alone quaternion class
# Note: this code works but isn't unused since quaternions didn't solve the
# representation problem. Leave it here in case we want to revisit this later.
#import numpy as np
class Quaternion(object):
    r"""
    Quaternion(w, r) = w + ir[0] + jr[1] + kr[2]

    Quaternion.from_angle_axis(theta, r) for a rotation of angle theta about
    an axis oriented toward the direction r.  This defines a unit quaternion,
    normalizing $r$ to the unit vector $\hat r$, and setting quaternion
    $Q = \cos \frac\theta2 + \sin \frac\theta2 \hat r$

    Quaternion objects can be multiplied, which applies a rotation about the
    given axis, allowing composition of rotations without risk of gimbal lock.
    The resulting quaternion is applied to a set of points using *Q.rot(v)*.
    """
    def __init__(self, w, r):
        # w is the scalar part; r is the 3-vector part, stored as doubles.
        self.w = w
        self.r = np.asarray(r, dtype='d')

    @staticmethod
    def from_angle_axis(theta, r):
        """Build quaternion as rotation of *theta* degrees about axis *r*."""
        theta = np.radians(theta)/2
        r = np.asarray(r)
        w = np.cos(theta)
        # Bug fix: np.dot(r, r) is the *squared* norm |r|^2, so the old code
        # produced a non-unit quaternion (and hence a wrong rotation) for any
        # non-unit axis.  Normalize by |r| = sqrt(r.r) instead.
        r = np.sin(theta)*r/np.sqrt(np.dot(r, r))
        return Quaternion(w, r)

    def __mul__(self, other):
        """Multiply quaternions (Hamilton product)."""
        if isinstance(other, Quaternion):
            w = self.w*other.w - np.dot(self.r, other.r)
            r = self.w*other.r + other.w*self.r + np.cross(self.r, other.r)
            return Quaternion(w, r)
        raise NotImplementedError("Quaternion * non-quaternion not implemented")

    def rot(self, v):
        """Transform point *v* by quaternion (assumes a unit quaternion)."""
        v = np.asarray(v).T
        use_transpose = (v.shape[-1] != 3)
        if use_transpose:
            v = v.T
        v = v + np.cross(2*self.r, np.cross(self.r, v) + self.w*v)
        #v = v + 2*self.w*np.cross(self.r, v) + np.cross(2*self.r, np.cross(self.r, v))
        if use_transpose:
            v = v.T
        return v.T

    def conj(self):
        """Conjugate quaternion"""
        return Quaternion(self.w, -self.r)

    def inv(self):
        """Inverse quaternion"""
        # Bug fix: Quaternion defines no scalar division, so the previous
        # implementation (conj()/norm()**2) raised a TypeError when called.
        # Scale the conjugate's components directly instead.
        scale = self.norm()**2
        return Quaternion(self.w/scale, -self.r/scale)

    def norm(self):
        """Quaternion length"""
        return np.sqrt(self.w**2 + np.sum(self.r**2))

    def __str__(self):
        return "%g%+gi%+gj%+gk"%(self.w, self.r[0], self.r[1], self.r[2])
def test_qrot():
    """Quaternion checks: rotation about an arbitrary axis matches the
    hand-computed result."""
    # Define rotation of 60 degrees around an axis in y-z that is 60 degrees
    # from y.  The rotation axis is determined by rotating the point [0, 1, 0]
    # about x.
    ax = Quaternion.from_angle_axis(60, [1, 0, 0]).rot([0, 1, 0])
    q = Quaternion.from_angle_axis(60, ax)
    # Set the point to be rotated, and its expected rotated position
    # (worked out analytically for this axis/angle pair).
    p = [1, -1, 2]
    target = [(10+4*np.sqrt(3))/8, (1+2*np.sqrt(3))/8, (14-3*np.sqrt(3))/8]
    #print(q, q.rot(p) - target)
    assert max(abs(q.rot(p) - target)) < 1e-14
#test_qrot()
#import sys; sys.exit()
# === Quaterion class definition === END
# translate between number of dimension of dispersity and the number of
# points along each dimension.
# Key is a boolean triple (theta dispersed?, phi dispersed?, psi dispersed?);
# value is the number of integration points for each angle.  The trailing
# comments give the total number of points in the resulting mesh.
PD_N_TABLE = {
    (0, 0, 0): (0, 0, 0),     # 0
    (1, 0, 0): (100, 0, 0),   # 100
    (0, 1, 0): (0, 100, 0),
    (0, 0, 1): (0, 0, 100),
    (1, 1, 0): (30, 30, 0),   # 900
    (1, 0, 1): (30, 0, 30),
    (0, 1, 1): (0, 30, 30),
    (1, 1, 1): (15, 15, 15),  # 3375
}
def clipped_range(data, portion=1.0, mode='central'):
    """
    Determine range from data.

    If *portion* is 1, use full range, otherwise use the center of the range
    or the top of the range, depending on whether *mode* is 'central' or 'top'.
    """
    if portion < 1.0:
        sorted_values = np.sort(data.flatten())
        count = len(sorted_values)
        if mode == 'central':
            cut = int(portion*count/2 + 0.5)
            return sorted_values[cut], sorted_values[-cut]
        if mode == 'top':
            cut = int(portion*count + 0.5)
            return sorted_values[cut], sorted_values[-1]
    # Default: full range
    return data.min(), data.max()
def draw_scattering(calculator, axes, view, jitter, dist='gaussian'):
    """
    Plot the scattering for the particular view.

    *calculator* is returned from :func:`build_model`. *axes* are the 3D axes
    on which the data will be plotted. *view* and *jitter* are the current
    orientation and orientation dispersity. *dist* is one of the sasmodels
    weight distributions.
    """
    if dist == 'uniform':  # uniform is not yet in this branch
        # rectangle width is sqrt(3) times the given width, so rescale
        dist, scale = 'rectangle', 1/sqrt(3)
    else:
        scale = 1

    # add the orientation parameters to the model parameters
    theta, phi, psi = view
    theta_pd, phi_pd, psi_pd = [scale*v for v in jitter]
    # number of integration points depends on which angles are dispersed
    theta_pd_n, phi_pd_n, psi_pd_n = PD_N_TABLE[(theta_pd > 0, phi_pd > 0, psi_pd > 0)]
    ## increase pd_n for testing jitter integration rather than simple viz
    #theta_pd_n, phi_pd_n, psi_pd_n = [5*v for v in (theta_pd_n, phi_pd_n, psi_pd_n)]
    pars = dict(
        theta=theta, theta_pd=theta_pd, theta_pd_type=dist, theta_pd_n=theta_pd_n,
        phi=phi, phi_pd=phi_pd, phi_pd_type=dist, phi_pd_n=phi_pd_n,
        psi=psi, psi_pd=psi_pd, psi_pd_type=dist, psi_pd_n=psi_pd_n,
    )
    pars.update(calculator.pars)

    # compute the pattern
    qx, qy = calculator.qxqy
    Iqxy = calculator(**pars).reshape(len(qx), len(qy))

    # scale it and draw it
    Iqxy = log(Iqxy)
    if calculator.limits:
        # use limits from orientation (0,0,0)
        vmin, vmax = calculator.limits
    else:
        vmax = Iqxy.max()
        vmin = vmax*10**-7
        #vmin, vmax = clipped_range(Iqxy, portion=portion, mode='top')
    #vmin, vmax = Iqxy.min(), Iqxy.max()
    #print("range",(vmin,vmax))
    #qx, qy = np.meshgrid(qx, qy)
    # The branches below are alternative renderers; only the contourf branch
    # is active (if 0 / elif 1 are deliberate pylint-suppressed switches).
    if 0:  # pylint: disable=using-constant-test
        level = np.asarray(255*(Iqxy - vmin)/(vmax - vmin), 'i')
        level[level < 0] = 0
        from matplotlib import pylab as plt
        colors = plt.get_cmap()(level)
        #from matplotlib import cm
        #colors = cm.coolwarm(level)
        #colors = cm.gist_yarg(level)
        #colors = cm.Wistia(level)
        colors[level <= 0, 3] = 0.  # set floor to transparent
        x, y = np.meshgrid(qx/qx.max(), qy/qy.max())
        axes.plot_surface(x, y, -1.1*np.ones_like(x), facecolors=colors)
    elif 1:  # pylint: disable=using-constant-test
        axes.contourf(qx/qx.max(), qy/qy.max(), Iqxy, zdir='z', offset=-1.1,
                      levels=np.linspace(vmin, vmax, 24))
    else:
        axes.pcolormesh(qx, qy, Iqxy)
def build_model(model_name, n=150, qmax=0.5, **pars):
    """
    Build a calculator for the given shape.

    *model_name* is any sasmodels model.  *n* and *qmax* define an n x n mesh
    on which to evaluate the model.  The remaining parameters are stored in
    the returned calculator as *calculator.pars*.  They are used by
    :func:`draw_scattering` to set the non-orientation parameters in the
    calculation.

    Returns a *calculator* function which takes a dictionary of parameters and
    produces Iqxy.  The Iqxy value needs to be reshaped to an n x n matrix
    for plotting.  See the :class:`.direct_model.DirectModel` class
    for details.
    """
    from sasmodels.core import load_model_info, build_model as build_sasmodel
    from sasmodels.data import empty_data2D
    from sasmodels.direct_model import DirectModel

    model_info = load_model_info(model_name)
    model = build_sasmodel(model_info) #, dtype='double!')
    q = np.linspace(-qmax, qmax, n)
    data = empty_data2D(q, q)
    calculator = DirectModel(data, model)

    # Remember the data axes so we can plot the results
    calculator.qxqy = (q, q)

    # stuff the values for non-orientation parameters into the calculator
    calculator.pars = pars.copy()
    # Bug fix: the key was misspelled as 'backgound', so the intended default
    # background of 1e-3 was silently never applied to the model.
    calculator.pars.setdefault('background', 1e-3)

    # fix the data limits so that we can see if the pattern fades
    # under rotation or angular dispersion
    Iqxy = calculator(theta=0, phi=0, psi=0, **calculator.pars)
    Iqxy = log(Iqxy)
    vmin, vmax = clipped_range(Iqxy, 0.95, mode='top')
    calculator.limits = vmin, vmax+1
    return calculator
def select_calculator(model_name, n=150, size=(10, 40, 100)):
    """
    Create a model calculator for the given shape.

    *model_name* is one of sphere, cylinder, ellipsoid, triaxial_ellipsoid,
    parallelepiped or bcc_paracrystal. *n* is the number of points to use
    in the q range.  *qmax* is chosen based on model parameters for the
    given model to show something interesting.

    Returns *calculator* and tuple *size* (a,b,c) giving minor and major
    equatorial axes and polar axis respectively.  See :func:`build_model`
    for details on the returned calculator.
    """
    a, b, c = size
    d_factor = 0.06  # for paracrystal models
    if model_name == 'sphere':
        calculator = build_model('sphere', n=n, radius=c)
        a = b = c
    elif model_name == 'sc_paracrystal':
        a = b = c
        dnn = c
        radius = 0.5*c
        calculator = build_model('sc_paracrystal', n=n, dnn=dnn,
                                 d_factor=d_factor, radius=(1-d_factor)*radius,
                                 background=0)
    elif model_name == 'fcc_paracrystal':
        a = b = c
        # nearest neighbour distance dnn should be 2 radius, but I think the
        # model uses lattice spacing rather than dnn in its calculations
        dnn = 0.5*c
        radius = sqrt(2)/4 * c
        calculator = build_model('fcc_paracrystal', n=n, dnn=dnn,
                                 d_factor=d_factor, radius=(1-d_factor)*radius,
                                 background=0)
    elif model_name == 'bcc_paracrystal':
        a = b = c
        # nearest neighbour distance dnn should be 2 radius, but I think the
        # model uses lattice spacing rather than dnn in its calculations
        dnn = 0.5*c
        radius = sqrt(3)/2 * c
        calculator = build_model('bcc_paracrystal', n=n, dnn=dnn,
                                 d_factor=d_factor, radius=(1-d_factor)*radius,
                                 background=0)
    elif model_name == 'cylinder':
        calculator = build_model('cylinder', n=n, qmax=0.3, radius=b, length=c)
        a = b
    elif model_name == 'ellipsoid':
        calculator = build_model('ellipsoid', n=n, qmax=1.0,
                                 radius_polar=c, radius_equatorial=b)
        a = b
    elif model_name == 'triaxial_ellipsoid':
        calculator = build_model('triaxial_ellipsoid', n=n, qmax=0.5,
                                 radius_equat_minor=a,
                                 radius_equat_major=b,
                                 radius_polar=c)
    elif model_name == 'parallelepiped':
        calculator = build_model('parallelepiped', n=n, a=a, b=b, c=c)
    else:
        raise ValueError("unknown model %s"%model_name)

    return calculator, (a, b, c)
# Shapes accepted by the command line interface; the first entry is the
# default.
SHAPES = [
    'parallelepiped',
    'sphere', 'ellipsoid', 'triaxial_ellipsoid',
    'cylinder',
    'fcc_paracrystal', 'bcc_paracrystal', 'sc_paracrystal',
]

# Map from shape name to its drawing function; shapes not listed here fall
# back to draw_parallelepiped (see run()).
DRAW_SHAPES = {
    'fcc_paracrystal': draw_fcc,
    'bcc_paracrystal': draw_bcc,
    'sc_paracrystal': draw_sc,
    'parallelepiped': draw_parallelepiped,
}

# Jitter distributions accepted by the command line interface.
DISTRIBUTIONS = [
    'gaussian', 'rectangle', 'uniform',
]
# Maximum dispersion width for each distribution; rectangle is divided by
# sqrt(3) because sasmodels defines its width as sqrt(3) times the given
# width, keeping the effective maximum at 90.
DIST_LIMITS = {
    'gaussian': 30,
    'rectangle': 90/sqrt(3),
    'uniform': 90,
}
def run(model_name='parallelepiped', size=(10, 40, 100),
        view=(0, 0, 0), jitter=(0, 0, 0),
        dist='gaussian', mesh=30,
        projection='equirectangular'):
    """
    Show an interactive orientation and jitter demo.

    *model_name* is one of: sphere, ellipsoid, triaxial_ellipsoid,
    parallelepiped, cylinder, or sc/fcc/bcc_paracrystal

    *size* gives the dimensions (a, b, c) of the shape.

    *view* gives the initial view (theta, phi, psi) of the shape.

    *jitter* gives the initial jitter (dtheta, dphi, dpsi) of the shape.

    *dist* is the type of dispersity: gaussian, rectangle, or uniform.

    *mesh* is the number of points in the dispersion mesh.

    *projection* is the map projection to use for the mesh: equirectangular,
    sinusoidal, guyou, azimuthal_equidistance, or azimuthal_equal_area.
    """
    # projection number according to 1-order position in list, but
    # only 1 and 2 are implemented so far.
    from sasmodels import generate
    generate.PROJECTION = PROJECTIONS.index(projection) + 1
    if generate.PROJECTION > 2:
        print("*** PROJECTION %s not implemented in scattering function ***"%projection)
        generate.PROJECTION = 2

    # set up calculator
    calculator, size = select_calculator(model_name, n=150, size=size)
    draw_shape = DRAW_SHAPES.get(model_name, draw_parallelepiped)
    #draw_shape = draw_fcc

    ## Setting limits to None gives an independent colour range for every
    ## view; comment the line out to keep the colour range fixed across views.
    calculator.limits = None

    PLOT_ENGINE(calculator, draw_shape, size, view, jitter, dist, mesh, projection)
def _mpl_plot(calculator, draw_shape, size, view, jitter, dist, mesh, projection):
    """Interactive matplotlib viewer with sliders for view and jitter angles."""
    # Note: travis-ci does not support mpl_toolkits.mplot3d, but this shouldn't be
    # an issue since we are lazy-loading the package on a path that isn't tested.
    # Importing mplot3d adds projection='3d' option to subplot
    import mpl_toolkits.mplot3d # pylint: disable=unused-import
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    from matplotlib.widgets import Slider

    ## create the plot window
    #plt.hold(True)
    plt.subplots(num=None, figsize=(5.5, 5.5))
    plt.set_cmap('gist_earth')
    plt.clf()
    plt.gcf().canvas.set_window_title(projection)
    #gs = gridspec.GridSpec(2,1,height_ratios=[4,1])
    #axes = plt.subplot(gs[0], projection='3d')
    axes = plt.axes([0.0, 0.2, 1.0, 0.8], projection='3d')
    try: # CRUFT: not all versions of matplotlib accept 'square' 3d projection
        axes.axis('square')
    except Exception:
        pass

    # CRUFT: use axisbg instead of facecolor for matplotlib<2
    facecolor_prop = 'facecolor' if mpl.__version__ > '2' else 'axisbg'
    props = {facecolor_prop: 'lightgoldenrodyellow'}

    ## add control widgets to plot: view angles on the left, dispersion
    ## widths on the right
    axes_theta = plt.axes([0.05, 0.15, 0.50, 0.04], **props)
    axes_phi = plt.axes([0.05, 0.10, 0.50, 0.04], **props)
    axes_psi = plt.axes([0.05, 0.05, 0.50, 0.04], **props)
    stheta = Slider(axes_theta, u'θ', -90, 90, valinit=0)
    sphi = Slider(axes_phi, u'φ', -180, 180, valinit=0)
    spsi = Slider(axes_psi, u'ψ', -180, 180, valinit=0)
    axes_dtheta = plt.axes([0.70, 0.15, 0.20, 0.04], **props)
    axes_dphi = plt.axes([0.70, 0.1, 0.20, 0.04], **props)
    axes_dpsi = plt.axes([0.70, 0.05, 0.20, 0.04], **props)
    # Note: using ridiculous definition of rectangle distribution, whose width
    # in sasmodels is sqrt(3) times the given width.  Divide by sqrt(3) to keep
    # the maximum width to 90.
    dlimit = DIST_LIMITS[dist]
    sdtheta = Slider(axes_dtheta, u'Δθ', 0, 2*dlimit, valinit=0)
    sdphi = Slider(axes_dphi, u'Δφ', 0, 2*dlimit, valinit=0)
    sdpsi = Slider(axes_dpsi, u'Δψ', 0, 2*dlimit, valinit=0)

    ## initial view and jitter
    theta, phi, psi = view
    stheta.set_val(theta)
    sphi.set_val(phi)
    spsi.set_val(psi)
    dtheta, dphi, dpsi = jitter
    sdtheta.set_val(dtheta)
    sdphi.set_val(dphi)
    sdpsi.set_val(dpsi)

    ## callback to draw the new view
    def _update(val, axis=None):
        # pylint: disable=unused-argument
        view = stheta.val, sphi.val, spsi.val
        jitter = sdtheta.val, sdphi.val, sdpsi.val
        # set small jitter as 0 if multiple pd dims
        dims = sum(v > 0 for v in jitter)
        limit = [0, 0.5, 5, 5][dims]
        jitter = [0 if v < limit else v for v in jitter]
        axes.cla()

        ## Visualize as person on globe
        #draw_sphere(axes, radius=0.5)
        #draw_person_on_sphere(axes, view, radius=0.5)

        ## Move beam instead of shape
        #draw_beam(axes, -view[:2])
        #draw_jitter(axes, (0,0,0), (0,0,0), views=3)

        ## Move shape and draw scattering
        draw_beam(axes, (0, 0), alpha=1.)
        #draw_person_on_sphere(axes, view, radius=1.2, height=0.5)
        draw_jitter(axes, view, jitter, dist=dist, size=size, alpha=1.,
                    draw_shape=draw_shape, projection=projection, views=3)
        draw_mesh(axes, view, jitter, dist=dist, n=mesh, projection=projection)
        draw_scattering(calculator, axes, view, jitter, dist=dist)

        plt.gcf().canvas.draw()

    ## bind control widgets to view updater
    stheta.on_changed(lambda v: _update(v, 'theta'))
    sphi.on_changed(lambda v: _update(v, 'phi'))
    spsi.on_changed(lambda v: _update(v, 'psi'))
    sdtheta.on_changed(lambda v: _update(v, 'dtheta'))
    sdphi.on_changed(lambda v: _update(v, 'dphi'))
    sdpsi.on_changed(lambda v: _update(v, 'dpsi'))

    ## initialize view
    _update(None, 'phi')

    ## go interactive
    plt.show()
def map_colors(z, kw):
    """
    Process matplotlib-style colour arguments.

    Pulls 'cmap', 'alpha', 'vmin', and 'vmax' from the *kw* dictionary,
    setting *kw['color']* to an RGB array.  These are ignored if 'c' or
    'color' are set inside *kw*.
    """
    from matplotlib import cm

    cmap = kw.pop('cmap', cm.coolwarm)
    alpha = kw.pop('alpha', None)
    vmin, vmax = kw.pop('vmin', z.min()), kw.pop('vmax', z.max())
    color = kw.pop('color', kw.pop('c', None))
    if color is None:
        # map z through the colour map, clipped to [vmin, vmax]
        color = cmap(((z - vmin)/(vmax - vmin)).clip(0, 1))
    elif isinstance(color, np.ndarray) and color.shape == z.shape:
        color = cmap(color)
    if alpha is None:
        if isinstance(color, np.ndarray):
            color = color[..., :3]   # drop the alpha channel
    else:
        color[..., 3] = alpha
    kw['color'] = color
def make_vec(*args):
    """Convert each argument to a double-precision numpy array."""
    return [np.asarray(arg, 'd') for arg in args]
def make_image(z, kw):
    """Convert numpy array *z* into a *PIL* RGB image."""
    import PIL.Image
    from matplotlib import cm

    # normalize z to [0, 1], map through the colour map and drop alpha
    cmap = kw.pop('cmap', cm.coolwarm)
    znorm = (z - z.min())/z.ptp()
    rgb = np.asarray(cmap(znorm)[..., :3]*255, 'u1')
    return PIL.Image.fromarray(rgb, mode='RGB')
# Map from matplotlib marker codes to ipyvolume marker names.
_IPV_MARKERS = {
    'o': 'sphere',
}
# Map from matplotlib single-letter colour codes to ipyvolume colour names.
_IPV_COLORS = {
    'w': 'white',
    'k': 'black',
    'c': 'cyan',
    'm': 'magenta',
    'y': 'yellow',
    'r': 'red',
    'g': 'green',
    'b': 'blue',
}
def _ipv_fix_color(kw):
alpha = kw.pop('alpha', None)
color = kw.get('color', None)
if isinstance(color, str):
color = _IPV_COLORS.get(color, color)
kw['color'] = color
if alpha is not None:
color = kw['color']
#TODO: convert color to [r, g, b, a] if not already
if isinstance(color, (tuple, list)):
if len(color) == 3:
color = (color[0], color[1], color[2], alpha)
else:
color = (color[0], color[1], color[2], alpha*color[3])
color = np.array(color)
if isinstance(color, np.ndarray) and color.shape[-1] == 4:
color[..., 3] = alpha
kw['color'] = color
def _ipv_set_transparency(kw, obj):
color = kw.get('color', None)
if (isinstance(color, np.ndarray)
and color.shape[-1] == 4
and (color[..., 3] != 1.0).any()):
obj.material.transparent = True
obj.material.side = "FrontSide"
def ipv_axes():
    """
    Build a matplotlib style Axes interface for ipyvolume
    """
    import ipyvolume as ipv

    class Axes(object):
        """
        Matplotlib Axes3D style interface to ipyvolume renderer.
        """
        # pylint: disable=no-self-use,no-init
        # transparency can be achieved by setting the following:
        #    mesh.color = [r, g, b, alpha]
        #    mesh.material.transparent = True
        #    mesh.material.side = "FrontSide"
        # smooth(ish) rotation can be achieved by setting:
        #    slide.continuous_update = True
        #    figure.animation = 0.
        #    mesh.material.x = x
        #    mesh.material.y = y
        #    mesh.material.z = z
        # maybe need to synchronize update of x/y/z to avoid shimmy when moving
        def plot(self, x, y, z, **kw):
            """mpl style plot interface for ipyvolume"""
            _ipv_fix_color(kw)
            x, y, z = make_vec(x, y, z)
            ipv.plot(x, y, z, **kw)
        def plot_surface(self, x, y, z, **kw):
            """mpl style plot_surface interface for ipyvolume"""
            # ipyvolume uses 'color' where matplotlib uses 'facecolors'
            facecolors = kw.pop('facecolors', None)
            if facecolors is not None:
                kw['color'] = facecolors
            _ipv_fix_color(kw)
            x, y, z = make_vec(x, y, z)
            h = ipv.plot_surface(x, y, z, **kw)
            _ipv_set_transparency(kw, h)
            #h.material.side = "DoubleSide"
            return h
        def plot_trisurf(self, x, y, triangles=None, Z=None, **kw):
            """mpl style plot_trisurf interface for ipyvolume"""
            # linewidth is a matplotlib-only option; ignore it
            kw.pop('linewidth', None)
            _ipv_fix_color(kw)
            x, y, z = make_vec(x, y, Z)
            if triangles is not None:
                triangles = np.asarray(triangles)
            h = ipv.plot_trisurf(x, y, z, triangles=triangles, **kw)
            _ipv_set_transparency(kw, h)
            return h
        def scatter(self, x, y, z, **kw):
            """mpl style scatter interface for ipyvolume"""
            x, y, z = make_vec(x, y, z)
            map_colors(z, kw)
            marker = kw.get('marker', None)
            kw['marker'] = _IPV_MARKERS.get(marker, marker)
            h = ipv.scatter(x, y, z, **kw)
            _ipv_set_transparency(kw, h)
            return h
        def contourf(self, x, y, v, zdir='z', offset=0, levels=None, **kw):
            """mpl style contourf interface for ipyvolume"""
            # pylint: disable=unused-argument
            # Don't use contour for now (although we might want to later)
            self.pcolor(x, y, v, zdir='z', offset=offset, **kw)
        def pcolor(self, x, y, v, zdir='z', offset=0, **kw):
            """mpl style pcolor interface for ipyvolume"""
            # pylint: disable=unused-argument
            # Render the field as a texture on a flat two-triangle mesh at
            # height *offset*.
            x, y, v = make_vec(x, y, v)
            image = make_image(v, kw)
            xmin, xmax = x.min(), x.max()
            ymin, ymax = y.min(), y.max()
            x = np.array([[xmin, xmax], [xmin, xmax]])
            y = np.array([[ymin, ymin], [ymax, ymax]])
            z = x*0 + offset
            u = np.array([[0., 1], [0, 1]])
            v = np.array([[0., 0], [1, 1]])
            h = ipv.plot_mesh(x, y, z, u=u, v=v, texture=image, wireframe=False)
            _ipv_set_transparency(kw, h)
            h.material.side = "DoubleSide"
            return h
        def text(self, *args, **kw):
            """mpl style text interface for ipyvolume"""
            # text is not supported in the ipyvolume backend
            pass
        def set_xlim(self, limits):
            """mpl style set_xlim interface for ipyvolume"""
            ipv.xlim(*limits)
        def set_ylim(self, limits):
            """mpl style set_ylim interface for ipyvolume"""
            ipv.ylim(*limits)
        def set_zlim(self, limits):
            """mpl style set_zlim interface for ipyvolume"""
            ipv.zlim(*limits)
        def set_axes_on(self):
            """mpl style set_axes_on interface for ipyvolume"""
            ipv.style.axes_on()
        def set_axis_off(self):
            """mpl style set_axes_off interface for ipyvolume"""
            ipv.style.axes_off()
    return Axes()
def _ipv_plot(calculator, draw_shape, size, view, jitter, dist, mesh, projection):
    """Interactive ipyvolume viewer with ipywidgets sliders for the angles."""
    from IPython.display import display
    import ipywidgets as widgets
    import ipyvolume as ipv

    axes = ipv_axes()

    def _draw(view, jitter):
        # preserve the camera across redraws so the scene doesn't jump
        camera = ipv.gcf().camera
        #print(ipv.gcf().__dict__.keys())
        #print(dir(ipv.gcf()))
        ipv.figure(animation=0.)  # no animation when updating object mesh

        # set small jitter as 0 if multiple pd dims
        dims = sum(v > 0 for v in jitter)
        limit = [0, 0.5, 5, 5][dims]
        jitter = [0 if v < limit else v for v in jitter]

        ## Visualize as person on globe
        #draw_beam(axes, (0, 0))
        #draw_sphere(axes, radius=0.5)
        #draw_person_on_sphere(axes, view, radius=0.5)

        ## Move beam instead of shape
        #draw_beam(axes, view=(-view[0], -view[1]))
        #draw_jitter(axes, view=(0,0,0), jitter=(0,0,0))

        ## Move shape and draw scattering
        draw_beam(axes, (0, 0), steps=25)
        draw_jitter(axes, view, jitter, dist=dist, size=size, alpha=1.0,
                    draw_shape=draw_shape, projection=projection)
        draw_mesh(axes, view, jitter, dist=dist, n=mesh, radius=0.95,
                  projection=projection)
        draw_scattering(calculator, axes, view, jitter, dist=dist)
        draw_axes(axes, origin=(-1, -1, -1.1))
        ipv.style.box_off()
        ipv.style.axes_off()
        ipv.xyzlabel(" ", " ", " ")

        ipv.gcf().camera = camera
        ipv.show()

    # slider ranges as (min, max, step) for view and dispersion widths
    trange, prange = (-180., 180., 1.), (-180., 180., 1.)
    dtrange, dprange = (0., 180., 1.), (0., 180., 1.)

    ## Super simple interface, but uses non-ascii variable names
    # θ φ ψ Δθ Δφ Δψ
    #def update(**kw):
    #    view = kw['θ'], kw['φ'], kw['ψ']
    #    jitter = kw['Δθ'], kw['Δφ'], kw['Δψ']
    #    draw(view, jitter)
    #widgets.interact(update, θ=trange, φ=prange, ψ=prange, Δθ=dtrange, Δφ=dprange, Δψ=dprange)

    def _update(theta, phi, psi, dtheta, dphi, dpsi):
        _draw(view=(theta, phi, psi), jitter=(dtheta, dphi, dpsi))

    def _slider(name, steps, init=0.):
        return widgets.FloatSlider(
            value=init,
            min=steps[0],
            max=steps[1],
            step=steps[2],
            description=name,
            disabled=False,
            #continuous_update=True,
            continuous_update=False,
            orientation='horizontal',
            readout=True,
            readout_format='.1f',
            )
    theta = _slider(u'θ', trange, view[0])
    phi = _slider(u'φ', prange, view[1])
    psi = _slider(u'ψ', prange, view[2])
    dtheta = _slider(u'Δθ', dtrange, jitter[0])
    dphi = _slider(u'Δφ', dprange, jitter[1])
    dpsi = _slider(u'Δψ', dprange, jitter[2])
    fields = {
        'theta': theta, 'phi': phi, 'psi': psi,
        'dtheta': dtheta, 'dphi': dphi, 'dpsi': dpsi,
    }
    ui = widgets.HBox([
        widgets.VBox([theta, phi, psi]),
        widgets.VBox([dtheta, dphi, dpsi])
    ])

    out = widgets.interactive_output(_update, fields)
    display(ui, out)
# Available plot backends, keyed by full and abbreviated name.
_ENGINES = {
    "matplotlib": _mpl_plot,
    "mpl": _mpl_plot,
    #"plotly": _plotly_plot,
    "ipvolume": _ipv_plot,
    "ipv": _ipv_plot,
}
# Active plot backend; changed via set_plotter().
PLOT_ENGINE = _ENGINES["matplotlib"]
def set_plotter(name):
    """
    Select the plotting engine by name: matplotlib/ipyvolume, or the
    equivalent abbreviations mpl/ipv.
    """
    engine = _ENGINES[name]  # KeyError for unknown engine names
    global PLOT_ENGINE
    PLOT_ENGINE = engine
def main():
    """
    Command line interface to the jitter viewer.
    """
    parser = argparse.ArgumentParser(
        description="Display jitter",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        )
    parser.add_argument('-p', '--projection', choices=PROJECTIONS,
                        default=PROJECTIONS[0],
                        help='coordinate projection')
    parser.add_argument('-s', '--size', type=str, default='10,40,100',
                        help='a,b,c lengths')
    parser.add_argument('-v', '--view', type=str, default='0,0,0',
                        help='initial view angles')
    parser.add_argument('-j', '--jitter', type=str, default='0,0,0',
                        help='initial angular dispersion')
    parser.add_argument('-d', '--distribution', choices=DISTRIBUTIONS,
                        default=DISTRIBUTIONS[0],
                        help='jitter distribution')
    parser.add_argument('-m', '--mesh', type=int, default=30,
                        help='#points in theta-phi mesh')
    parser.add_argument('shape', choices=SHAPES, nargs='?', default=SHAPES[0],
                        help='oriented shape')
    opts = parser.parse_args()
    # parse the comma-separated triples into float tuples
    size = tuple(float(v) for v in opts.size.split(','))
    view = tuple(float(v) for v in opts.view.split(','))
    jitter = tuple(float(v) for v in opts.jitter.split(','))
    run(opts.shape, size=size, view=view, jitter=jitter,
        mesh=opts.mesh, dist=opts.distribution,
        projection=opts.projection)

if __name__ == "__main__":
    main()
|
SasView/sasmodels
|
sasmodels/jitter.py
|
Python
|
bsd-3-clause
| 55,967
|
[
"CRYSTAL",
"Gaussian"
] |
dd70e3e03f7a64b9006123fe0abfebe47961dfd5ff9f03ef8a93f1c49edcb35b
|
import subprocess
import sys
import os
import json
def get_jobs():
    """Return a dict mapping job id to job name parsed from `./galaxy jobs`.

    Exits the process when the command reports an error.
    """
    p = subprocess.Popen(["./galaxy", "jobs"], stdout=subprocess.PIPE)
    out, err = p.communicate()
    # NOTE(review): stderr is not piped, so communicate() always returns
    # None for err and this branch can never fire -- confirm intent.
    if err:
        print("fail to list jobs")
        sys.exit(-1)
    lines = out.splitlines()
    # job id and job name pair; the first two lines are the table header
    jobs = {}
    for line in lines[2:]:
        parts = line.split(" ")
        # Bug fix: parts[3] is read below, so a row needs at least four
        # fields; the old `len(parts) < 3` guard allowed an IndexError.
        if len(parts) < 4:
            continue
        jobs[parts[2].strip()] = parts[3].strip()
    return jobs
def get_all_pods(jobs):
    """Return {podid: {"jobid", "name", "state"}} for every job in *jobs*,
    parsed from `./galaxy pods --j=<jobid>` output."""
    pods = {}
    for jobid in jobs:
        print("get pods from job %s"%jobid)
        p = subprocess.Popen(["./galaxy", "pods", "--j=%s"%jobid], stdout=subprocess.PIPE)
        out, err = p.communicate()
        lines = out.splitlines()
        # the first two lines are the table header
        for line in lines[2:]:
            parts = line.split(" ")
            # Bug fix: parts[3] is read below, so a row needs at least four
            # fields; the old `len(parts) < 3` guard allowed an IndexError.
            if len(parts) < 4:
                continue
            pods[parts[2].strip()] = {"jobid": jobid, "name":jobs[jobid], "state":parts[3].strip()}
    return pods
def preempt(pods, jobid, podid, endpoint):
    """Preempt the non-nfs pods on *endpoint* so that the pending pod
    (*jobid*, *podid*) can be scheduled there.

    Asks the user for confirmation, writes the preemption request to a json
    file named after the endpoint, and runs `./galaxy preempt` with it.
    Returns True when the preempt command succeeds.
    """
    pods_on_agent = []
    p = subprocess.Popen(["./galaxy", "pods", "--e=%s"%endpoint], stdout=subprocess.PIPE)
    out, err = p.communicate()
    if err:
        print("fail to ./galaxy pods -e %s for err %s"%(endpoint, err))
        sys.exit(-1)
    lines = out.splitlines()
    print(" %s will preempt the following pods on %s:"%(pods[podid]["name"], endpoint))
    for line in lines[2:]:
        parts = line.split(" ")
        # Bug fix: parts[2] is read below, so a row needs at least three
        # fields; the old `len(parts) < 2` guard allowed an IndexError.
        if len(parts) < 3:
            continue
        ipodid = parts[2].strip()
        if ipodid not in pods:
            continue
        name = pods[ipodid]["name"]
        # never preempt nfs pods
        if name.find("nfs") != -1:
            continue
        pods_on_agent.append(ipodid)
        print(name)
    # raw_input is the Python 2 builtin (this script is Python 2 throughout)
    yes = raw_input('sure to preempt (y/n):')
    if yes != "y":
        return False
    preempt_json = {
        "addr":endpoint,
        "pending_pod":{
            "jobid":jobid,
            "podid":podid
        },
        "preempted_pods":[]
    }
    for pod in pods_on_agent:
        preempt_json["preempted_pods"].append({"jobid": pods[pod]["jobid"], "podid":pod})
    filename = endpoint.replace(".", "_").replace(":", "_")+ ".json"
    with open(filename, "wb+") as fd:
        fd.write(json.dumps(preempt_json))
    p = subprocess.Popen(["./galaxy", "preempt", "--f=%s"%filename], stdout=subprocess.PIPE)
    out, err = p.communicate()
    return p.returncode == 0
def real_main(jobid, podid, endpoint):
    """Load the pod table (from the pods_cache file when present, otherwise
    from galaxy, caching the result) and run the preemption."""
    if os.path.isfile("pods_cache"):
        print("use pods_cache to load pods")
        with open("pods_cache", "rb") as fd:
            pods = json.load(fd)
    else:
        print("get pods from galaxy")
        pods = get_all_pods(get_jobs())
        with open("pods_cache", "wb") as fd:
            fd.write(json.dumps(pods))
    if preempt(pods, jobid, podid, endpoint):
        print("preempt for pod %s successfully"%podid)
    else:
        print("fail to preempt for pod %s "%podid)
if __name__ == "__main__":
    # usage: python preempt.py jobid podid endpoint
    if len(sys.argv) < 4:
        print "python preempt.py jobid podid endpoint"
        sys.exit(-1)
    real_main(sys.argv[1], sys.argv[2], sys.argv[3])
|
imotai/galaxy
|
optools/preempt.py
|
Python
|
bsd-3-clause
| 3,345
|
[
"Galaxy"
] |
65462e2a7e5d0835cfda04775ef7745252b10125016fadddf7a0b5976e0bd4a3
|
# Base node
class SourceElement(object):
    '''
    A SourceElement is the base class for all elements that occur in a Java
    file parsed by plyj.
    '''

    def __init__(self):
        super(SourceElement, self).__init__()
        # names of the attributes that repr() and accept() walk, in order
        self._fields = []

    def __repr__(self):
        parts = ["{0}={1!r}".format(name, getattr(self, name))
                 for name in self._fields]
        return "{0}({1})".format(self.__class__.__name__, ", ".join(parts))

    def __eq__(self, other):
        # Elements are equal when all of their attributes match; anything
        # without a __dict__ compares unequal.
        try:
            return self.__dict__ == other.__dict__
        except AttributeError:
            return False

    def __ne__(self, other):
        return not self == other

    def accept(self, visitor):
        """
        Default visitor dispatch: call visitor.visit_<Class>(self) and, when
        it returns a true value, recurse into every field (including list
        elements) that is itself a SourceElement, then call
        visitor.leave_<Class>(self).
        """
        class_name = self.__class__.__name__
        if getattr(visitor, 'visit_' + class_name)(self):
            for name in self._fields:
                value = getattr(self, name)
                if not value:
                    continue
                if isinstance(value, list):
                    for item in value:
                        if isinstance(item, SourceElement):
                            item.accept(visitor)
                elif isinstance(value, SourceElement):
                    value.accept(visitor)
        getattr(visitor, 'leave_' + class_name)(self)
class CompilationUnit(SourceElement):
    """Root node for a parsed Java source file: package declaration,
    imports and type declarations."""

    def __init__(self, package_declaration=None, import_declarations=None,
                 type_declarations=None):
        super(CompilationUnit, self).__init__()
        self._fields = [
            'package_declaration', 'import_declarations', 'type_declarations']
        self.package_declaration = package_declaration
        self.import_declarations = \
            [] if import_declarations is None else import_declarations
        self.type_declarations = \
            [] if type_declarations is None else type_declarations
class PackageDeclaration(SourceElement):
    """A package declaration with optional modifiers (e.g. annotations)."""

    def __init__(self, name, modifiers=None):
        super(PackageDeclaration, self).__init__()
        self._fields = ['name', 'modifiers']
        self.name = name
        self.modifiers = [] if modifiers is None else modifiers
class ImportDeclaration(SourceElement):
    """An import declaration with *static* and *on_demand* flags."""

    def __init__(self, name, static=False, on_demand=False):
        super(ImportDeclaration, self).__init__()
        self._fields = ['name', 'static', 'on_demand']
        self.name = name
        self.static = static
        self.on_demand = on_demand
class ClassDeclaration(SourceElement):
    """A class declaration: name, body, and the optional modifier,
    type-parameter, extends and implements clauses."""

    def __init__(self, name, body, modifiers=None, type_parameters=None,
                 extends=None, implements=None):
        super(ClassDeclaration, self).__init__()
        self._fields = ['name', 'body', 'modifiers',
                        'type_parameters', 'extends', 'implements']
        self.name = name
        self.body = body
        self.modifiers = [] if modifiers is None else modifiers
        self.type_parameters = [] if type_parameters is None else type_parameters
        self.extends = extends
        self.implements = [] if implements is None else implements
class ClassInitializer(SourceElement):
def __init__(self, block, static=False):
super(ClassInitializer, self).__init__()
self._fields = ['block', 'static']
self.block = block
self.static = static
class ConstructorDeclaration(SourceElement):
    """A constructor declaration; *block* is the constructor body."""
    def __init__(self, name, block, modifiers=None, type_parameters=None,
                 parameters=None, throws=None):
        super(ConstructorDeclaration, self).__init__()
        self._fields = ['name', 'block', 'modifiers',
                        'type_parameters', 'parameters', 'throws']
        if modifiers is None:
            modifiers = []
        if type_parameters is None:
            type_parameters = []
        if parameters is None:
            parameters = []
        self.name = name
        self.block = block
        self.modifiers = modifiers
        self.type_parameters = type_parameters
        self.parameters = parameters
        # throws stays None when absent (unlike the list-valued fields above).
        self.throws = throws

class EmptyDeclaration(SourceElement):
    """A stray ';' at class-body level."""
    pass
class FieldDeclaration(SourceElement):
    """A field declaration: one type shared by one or more declarators
    (e.g. ``int i, j;`` has a single type and two variable declarators)."""
    def __init__(self, type, variable_declarators, modifiers=None):
        super(FieldDeclaration, self).__init__()
        self._fields = ['type', 'variable_declarators', 'modifiers']
        if modifiers is None:
            modifiers = []
        self.type = type
        self.variable_declarators = variable_declarators
        self.modifiers = modifiers
class MethodDeclaration(SourceElement):
    """A method declaration.

    body: list of statements, or None for abstract/interface methods.
    return_type: defaults to 'void'.
    """
    def __init__(self, name, modifiers=None, type_parameters=None,
                 parameters=None, return_type='void', body=None, abstract=False,
                 extended_dims=0, throws=None):
        super(MethodDeclaration, self).__init__()
        self._fields = ['name', 'modifiers', 'type_parameters', 'parameters',
                        'return_type', 'body', 'abstract', 'extended_dims',
                        'throws']
        if modifiers is None:
            modifiers = []
        if type_parameters is None:
            type_parameters = []
        if parameters is None:
            parameters = []
        self.name = name
        self.modifiers = modifiers
        self.type_parameters = type_parameters
        self.parameters = parameters
        self.return_type = return_type
        self.body = body
        self.abstract = abstract
        # extended_dims: count of '[]' pairs written after the parameter
        # list (old-style array declarations) -- presumably; confirm.
        self.extended_dims = extended_dims
        self.throws = throws
class FormalParameter(SourceElement):
def __init__(self, variable, type, modifiers=None, vararg=False):
super(FormalParameter, self).__init__()
self._fields = ['variable', 'type', 'modifiers', 'vararg']
if modifiers is None:
modifiers = []
self.variable = variable
self.type = type
self.modifiers = modifiers
self.vararg = vararg
class Variable(SourceElement):
    """A declared variable name together with its own '[]' dimension count."""
    # I would like to remove this class. In theory, the dimension could be added
    # to the type but this means variable declarations have to be changed
    # somehow. Consider 'int i, j[];'. In this case there currently is only one
    # type with two variable declarators;This closely resembles the source code.
    # If the variable is to go away, the type has to be duplicated for every
    # variable...
    def __init__(self, name, dimensions=0):
        super(Variable, self).__init__()
        self._fields = ['name', 'dimensions']
        self.name = name
        # dimensions: number of '[]' pairs attached to this name.
        self.dimensions = dimensions
class VariableDeclarator(SourceElement):
def __init__(self, variable, initializer=None):
super(VariableDeclarator, self).__init__()
self._fields = ['variable', 'initializer']
self.variable = variable
self.initializer = initializer
class Throws(SourceElement):
def __init__(self, types):
super(Throws, self).__init__()
self._fields = ['types']
self.types = types
class InterfaceDeclaration(SourceElement):
def __init__(self, name, modifiers=None, extends=None, type_parameters=None,
body=None):
super(InterfaceDeclaration, self).__init__()
self._fields = [
'name', 'modifiers', 'extends', 'type_parameters', 'body']
if modifiers is None:
modifiers = []
if extends is None:
extends = []
if type_parameters is None:
type_parameters = []
if body is None:
body = []
self.name = name
self.modifiers = modifiers
self.extends = extends
self.type_parameters = type_parameters
self.body = body
class EnumDeclaration(SourceElement):
def __init__(self, name, implements=None, modifiers=None,
type_parameters=None, body=None):
super(EnumDeclaration, self).__init__()
self._fields = [
'name', 'implements', 'modifiers', 'type_parameters', 'body']
if implements is None:
implements = []
if modifiers is None:
modifiers = []
if type_parameters is None:
type_parameters = []
if body is None:
body = []
self.name = name
self.implements = implements
self.modifiers = modifiers
self.type_parameters = type_parameters
self.body = body
class EnumConstant(SourceElement):
def __init__(self, name, arguments=None, modifiers=None, body=None):
super(EnumConstant, self).__init__()
self._fields = ['name', 'arguments', 'modifiers', 'body']
if arguments is None:
arguments = []
if modifiers is None:
modifiers = []
if body is None:
body = []
self.name = name
self.arguments = arguments
self.modifiers = modifiers
self.body = body
class AnnotationDeclaration(SourceElement):
def __init__(self, name, modifiers=None, type_parameters=None, extends=None,
implements=None, body=None):
super(AnnotationDeclaration, self).__init__()
self._fields = [
'name', 'modifiers', 'type_parameters', 'extends', 'implements',
'body']
if modifiers is None:
modifiers = []
if type_parameters is None:
type_parameters = []
if implements is None:
implements = []
if body is None:
body = []
self.name = name
self.modifiers = modifiers
self.type_parameters = type_parameters
self.extends = extends
self.implements = implements
self.body = body
class AnnotationMethodDeclaration(SourceElement):
def __init__(self, name, type, parameters=None, default=None,
modifiers=None, type_parameters=None, extended_dims=0):
super(AnnotationMethodDeclaration, self).__init__()
self._fields = ['name', 'type', 'parameters', 'default',
'modifiers', 'type_parameters', 'extended_dims']
if parameters is None:
parameters = []
if modifiers is None:
modifiers = []
if type_parameters is None:
type_parameters = []
self.name = name
self.type = type
self.parameters = parameters
self.default = default
self.modifiers = modifiers
self.type_parameters = type_parameters
self.extended_dims = extended_dims
class Annotation(SourceElement):
def __init__(self, name, members=None, single_member=None):
super(Annotation, self).__init__()
self._fields = ['name', 'members', 'single_member']
if members is None:
members = []
self.name = name
self.members = members
self.single_member = single_member
class AnnotationMember(SourceElement):
    """A single ``name = value`` pair inside an annotation."""
    def __init__(self, name, value):
        # Bug fix: the original called super(SourceElement, self).__init__(),
        # which starts the MRO lookup *after* SourceElement and therefore
        # skips SourceElement.__init__ entirely. Name this class, as every
        # sibling node class in this file does.
        super(AnnotationMember, self).__init__()
        self._fields = ['name', 'value']
        self.name = name
        self.value = value
class Type(SourceElement):
def __init__(self, name, type_arguments=None, enclosed_in=None,
dimensions=0):
super(Type, self).__init__()
self._fields = ['name', 'type_arguments', 'enclosed_in', 'dimensions']
if type_arguments is None:
type_arguments = []
self.name = name
self.type_arguments = type_arguments
self.enclosed_in = enclosed_in
self.dimensions = dimensions
class Wildcard(SourceElement):
def __init__(self, bounds=None):
super(Wildcard, self).__init__()
self._fields = ['bounds']
if bounds is None:
bounds = []
self.bounds = bounds
class WildcardBound(SourceElement):
def __init__(self, type, extends=False, _super=False):
super(WildcardBound, self).__init__()
self._fields = ['type', 'extends', '_super']
self.type = type
self.extends = extends
self._super = _super
class TypeParameter(SourceElement):
def __init__(self, name, extends=None):
super(TypeParameter, self).__init__()
self._fields = ['name', 'extends']
if extends is None:
extends = []
self.name = name
self.extends = extends
class Expression(SourceElement):
def __init__(self):
super(Expression, self).__init__()
self._fields = []
class BinaryExpression(Expression):
def __init__(self, operator, lhs, rhs):
super(BinaryExpression, self).__init__()
self._fields = ['operator', 'lhs', 'rhs']
self.operator = operator
self.lhs = lhs
self.rhs = rhs
class Assignment(BinaryExpression):
pass
class Conditional(Expression):
    """The ternary conditional: ``predicate ? if_true : if_false``."""
    def __init__(self, predicate, if_true, if_false):
        # Bug fix: super(self.__class__, self) recurses infinitely as soon as
        # this class is subclassed (the subclass instance re-enters this same
        # __init__); always name the defining class explicitly.
        super(Conditional, self).__init__()
        self._fields = ['predicate', 'if_true', 'if_false']
        self.predicate = predicate
        self.if_true = if_true
        self.if_false = if_false
class ConditionalOr(BinaryExpression):
    """Java '||' expression."""
    pass

class ConditionalAnd(BinaryExpression):
    """Java '&&' expression."""
    pass

class Or(BinaryExpression):
    """Java '|' expression."""
    pass

class Xor(BinaryExpression):
    """Java '^' expression."""
    pass

class And(BinaryExpression):
    """Java '&' expression."""
    pass

class Equality(BinaryExpression):
    """Java '==' / '!=' comparison."""
    pass

class InstanceOf(BinaryExpression):
    """Java 'instanceof' test."""
    pass

class Relational(BinaryExpression):
    """Java '<', '>', '<=', '>=' comparison."""
    pass

class Shift(BinaryExpression):
    """Java '<<', '>>', '>>>' expression."""
    pass

class Additive(BinaryExpression):
    """Java '+' / '-' expression."""
    pass

class Multiplicative(BinaryExpression):
    """Java '*', '/', '%' expression."""
    pass
class Unary(Expression):
    """A unary expression: operator *sign* applied to *expression*."""
    def __init__(self, sign, expression):
        super(Unary, self).__init__()
        self._fields = ['sign', 'expression']
        self.sign = sign
        self.expression = expression

class Cast(Expression):
    """A cast of *expression* to the *target* type."""
    def __init__(self, target, expression):
        super(Cast, self).__init__()
        self._fields = ['target', 'expression']
        self.target = target
        self.expression = expression

class Statement(SourceElement):
    """Base class for all statement nodes."""
    pass

class Empty(Statement):
    """The empty statement (a lone ';')."""
    pass
class Block(Statement):
    """A braced sequence of statements; iterable over its statements."""
    def __init__(self, statements=None):
        # Bug fix: the original called super(Statement, self).__init__(),
        # which skips Statement in the MRO; name this class instead, matching
        # the convention used everywhere else in this module.
        super(Block, self).__init__()
        self._fields = ['statements']
        if statements is None:
            statements = []
        self.statements = statements

    def __iter__(self):
        # Yield the contained statements in source order.
        for s in self.statements:
            yield s
class VariableDeclaration(Statement, FieldDeclaration):
pass
class ArrayInitializer(SourceElement):
def __init__(self, elements=None):
super(ArrayInitializer, self).__init__()
self._fields = ['elements']
if elements is None:
elements = []
self.elements = elements
class MethodInvocation(Expression):
    """A method call; *target* is the receiver expression or None for an
    unqualified call."""
    def __init__(self, name, arguments=None, type_arguments=None, target=None):
        super(MethodInvocation, self).__init__()
        self._fields = ['name', 'arguments', 'type_arguments', 'target']
        if arguments is None:
            arguments = []
        if type_arguments is None:
            type_arguments = []
        self.name = name
        self.arguments = arguments
        self.type_arguments = type_arguments
        self.target = target
class IfThenElse(Statement):
def __init__(self, predicate, if_true=None, if_false=None):
super(IfThenElse, self).__init__()
self._fields = ['predicate', 'if_true', 'if_false']
self.predicate = predicate
self.if_true = if_true
self.if_false = if_false
class While(Statement):
def __init__(self, predicate, body=None):
super(While, self).__init__()
self._fields = ['predicate', 'body']
self.predicate = predicate
self.body = body
class For(Statement):
def __init__(self, init, predicate, update, body):
super(For, self).__init__()
self._fields = ['init', 'predicate', 'update', 'body']
self.init = init
self.predicate = predicate
self.update = update
self.body = body
class ForEach(Statement):
def __init__(self, type, variable, iterable, body, modifiers=None):
super(ForEach, self).__init__()
self._fields = ['type', 'variable', 'iterable', 'body', 'modifiers']
if modifiers is None:
modifiers = []
self.type = type
self.variable = variable
self.iterable = iterable
self.body = body
self.modifiers = modifiers
class Assert(Statement):
def __init__(self, predicate, message=None):
super(Assert, self).__init__()
self._fields = ['predicate', 'message']
self.predicate = predicate
self.message = message
class Switch(Statement):
def __init__(self, expression, switch_cases):
super(Switch, self).__init__()
self._fields = ['expression', 'switch_cases']
self.expression = expression
self.switch_cases = switch_cases
class SwitchCase(SourceElement):
def __init__(self, cases, body=None):
super(SwitchCase, self).__init__()
self._fields = ['cases', 'body']
if body is None:
body = []
self.cases = cases
self.body = body
class DoWhile(Statement):
def __init__(self, predicate, body=None):
super(DoWhile, self).__init__()
self._fields = ['predicate', 'body']
self.predicate = predicate
self.body = body
class Continue(Statement):
def __init__(self, label=None):
super(Continue, self).__init__()
self._fields = ['label']
self.label = label
class Break(Statement):
def __init__(self, label=None):
super(Break, self).__init__()
self._fields = ['label']
self.label = label
class Return(Statement):
def __init__(self, result=None):
super(Return, self).__init__()
self._fields = ['result']
self.result = result
class Synchronized(Statement):
def __init__(self, monitor, body):
super(Synchronized, self).__init__()
self._fields = ['monitor', 'body']
self.monitor = monitor
self.body = body
class Throw(Statement):
def __init__(self, exception):
super(Throw, self).__init__()
self._fields = ['exception']
self.exception = exception
class Try(Statement):
    """A try statement, including try-with-resources.

    block: the guarded statements.
    catches: list of Catch clauses.
    _finally: optional finally block (underscore avoids the keyword).
    resources: resource declarations for try-with-resources.
    """
    def __init__(self, block, catches=None, _finally=None, resources=None):
        super(Try, self).__init__()
        self._fields = ['block', 'catches', '_finally', 'resources']
        if catches is None:
            catches = []
        if resources is None:
            resources = []
        self.block = block
        self.catches = catches
        self._finally = _finally
        self.resources = resources

    def accept(self, visitor):
        # Custom traversal: descend into the guarded block only when
        # visit_Try returns truthy.
        if visitor.visit_Try(self):
            for s in self.block:
                s.accept(visitor)
        # NOTE(review): catches are handed to visit_Catch directly rather
        # than via c.accept(visitor), and resources are never visited at
        # all -- confirm whether that asymmetry is intentional.
        for c in self.catches:
            visitor.visit_Catch(c)
        if self._finally:
            self._finally.accept(visitor)
class Catch(SourceElement):
def __init__(self, variable, modifiers=None, types=None, block=None):
super(Catch, self).__init__()
self._fields = ['variable', 'modifiers', 'types', 'block']
if modifiers is None:
modifiers = []
if types is None:
types = []
self.variable = variable
self.modifiers = modifiers
self.types = types
self.block = block
class Resource(SourceElement):
def __init__(self, variable, type=None, modifiers=None, initializer=None):
super(Resource, self).__init__()
self._fields = ['variable', 'type', 'modifiers', 'initializer']
if modifiers is None:
modifiers = []
self.variable = variable
self.type = type
self.modifiers = modifiers
self.initializer = initializer
class ConstructorInvocation(Statement):
"""An explicit invocations of a class's constructor.
This is a variant of either this() or super(), NOT a "new" expression.
"""
def __init__(self, name, target=None, type_arguments=None, arguments=None):
super(ConstructorInvocation, self).__init__()
self._fields = ['name', 'target', 'type_arguments', 'arguments']
if type_arguments is None:
type_arguments = []
if arguments is None:
arguments = []
self.name = name
self.target = target
self.type_arguments = type_arguments
self.arguments = arguments
class InstanceCreation(Expression):
def __init__(self, type, type_arguments=None, arguments=None, body=None,
enclosed_in=None):
super(InstanceCreation, self).__init__()
self._fields = [
'type', 'type_arguments', 'arguments', 'body', 'enclosed_in']
if type_arguments is None:
type_arguments = []
if arguments is None:
arguments = []
if body is None:
body = []
self.type = type
self.type_arguments = type_arguments
self.arguments = arguments
self.body = body
self.enclosed_in = enclosed_in
class FieldAccess(Expression):
def __init__(self, name, target):
super(FieldAccess, self).__init__()
self._fields = ['name', 'target']
self.name = name
self.target = target
class ArrayAccess(Expression):
def __init__(self, index, target):
super(ArrayAccess, self).__init__()
self._fields = ['index', 'target']
self.index = index
self.target = target
class ArrayCreation(Expression):
def __init__(self, type, dimensions=None, initializer=None):
super(ArrayCreation, self).__init__()
self._fields = ['type', 'dimensions', 'initializer']
if dimensions is None:
dimensions = []
self.type = type
self.dimensions = dimensions
self.initializer = initializer
class Literal(SourceElement):
def __init__(self, value):
super(Literal, self).__init__()
self._fields = ['value']
self.value = value
class ClassLiteral(SourceElement):
def __init__(self, type):
super(ClassLiteral, self).__init__()
self._fields = ['type']
self.type = type
class Name(SourceElement):
    """A (possibly dotted) identifier."""
    def __init__(self, value):
        super(Name, self).__init__()
        self._fields = ['value']
        self.value = value

    def append_name(self, name):
        """Append a dotted component; *name* may be a Name node or a str."""
        # Bug fix: the original used try/bare-except to distinguish the two
        # cases; a bare except also swallows unrelated errors (including
        # KeyboardInterrupt). Test the type explicitly instead.
        if isinstance(name, Name):
            self.value = self.value + '.' + name.value
        else:
            self.value = self.value + '.' + name
class Visitor(object):
    """Catch-all AST visitor.

    Any attribute named ``visit_X`` or ``leave_X`` that a subclass has not
    defined resolves (via ``__getattr__``) to a stub which optionally logs
    the call and returns True, so traversal continues into child nodes.
    """

    def __init__(self, verbose=False):
        self.verbose = verbose

    def __getattr__(self, name):
        # Only visitor-protocol names get the default stub; anything else is
        # a genuine missing attribute.
        if not name.startswith(('visit_', 'leave_')):
            raise AttributeError('name must start with visit_ or leave_ but was {}'
                                 .format(name))

        def default_visit(element):
            if self.verbose:
                print('unimplemented call to {}; ignoring ({})'.format(
                    name, element))
            return True

        return default_visit
|
RealTimeWeb/program-analyzer
|
plyj/model.py
|
Python
|
apache-2.0
| 23,359
|
[
"VisIt"
] |
56408813c2eb58f9dc0ac6d22db10558b050b602d63a8dedaac3b291b815eb7f
|
# -*- coding: utf-8 -*-
'''
Created on 21 Oct 2015
@author: Kimon Tsitsikas
Copyright © 2014 Kimon Tsitsikas, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with Odemis. If not, see http://www.gnu.org/licenses/.
'''
from __future__ import division
import logging
import numpy
from odemis.dataio import hdf5
from odemis.util import peak
import os
import unittest
import matplotlib.pyplot as plt
logging.getLogger().setLevel(logging.DEBUG)
PATH = os.path.dirname(__file__)
class TestPeak(unittest.TestCase):
    """
    Test peak fitting
    """
    def setUp(self):
        # Fixture: a recorded spectrum cube stored next to this test file.
        data = hdf5.read_data(os.path.join(PATH, "spectrum_fitting.h5"))[1]
        data = numpy.squeeze(data)
        self.data = data
        # Wavelength axis (470..1030, 167 points) matching the spectral length.
        self.wl = numpy.linspace(470, 1030, 167)
        self._peak_fitter = peak.PeakFitter()

    def test_precomputed(self):
        data = self.data
        wl = self.wl
        # One spectrum at pixel (20, 20); axis 0 is indexed as the spectral
        # dimension here.
        spec = data[:, 20, 20]
        # Try gaussian
        f = self._peak_fitter.Fit(spec, wl)
        params, offset = f.result()
        # Sanity bounds on the number of fitted peaks.
        self.assertTrue(1 <= len(params) < 20)
        # Parameters should be positive
        for pos, width, amplitude in params:
            self.assertGreater(pos, 0)
            self.assertGreater(width, 0)
            self.assertGreater(amplitude, 0)
        # offset doesn't officially needs to be positive
        # self.assertTrue(offset >= 0)
        # Create curve
        curve = peak.Curve(wl, params, offset)
        self.assertEqual(len(curve), len(wl))
        # TODO: find peaks on curve, and see we about the same peaks
        # Re-sample the fitted curve on a finer wavelength axis.
        wlhr = numpy.linspace(470, 1030, 512)
        curve = peak.Curve(wlhr, params, offset)
        self.assertEqual(len(curve), len(wlhr))
        #plt.figure()
        #plt.plot(wl, spec, 'r', wl, curve, 'r', linewidth=2)
        # Try lorentzian
        f = self._peak_fitter.Fit(spec, wl, type='lorentzian')
        params, offset = f.result()
        self.assertTrue(1 <= len(params) < 20)
        # Parameters should be positive
        for pos, width, amplitude in params:
            self.assertGreater(pos, 0)
            self.assertGreater(width, 0)
            self.assertGreater(amplitude, 0)
        curve = peak.Curve(wl, params, offset, type='lorentzian')
        #plt.figure()
        #plt.plot(wl, spec, 'r', wl, curve, 'r', linewidth=2)
        #plt.show(block=False)
        # Assert wrong fitting type
        self.assertRaises(KeyError, peak.Curve, wl, params, offset, type='wrongType')
if __name__ == "__main__":
unittest.main()
|
gstiebler/odemis
|
src/odemis/util/test/peak_test.py
|
Python
|
gpl-2.0
| 2,985
|
[
"Gaussian"
] |
4b15bafaccacdff193bab18ee2381955f8f47263460d8bb99c96fc3d53430a87
|
import numpy as np
from types import FloatType
from ase.parallel import rank
import _gpaw
from gpaw.xc import XC
from gpaw.xc.kernel import XCKernel
from gpaw.xc.libxc import LibXC
from gpaw.xc.vdw import FFTVDWFunctional
from gpaw import debug
class BEE1(XCKernel):
    """GGA exchange expanded in a PBE-like basis."""
    def __init__(self, parameters=None):
        """BEE1.

        parameters : array
            [thetas,coefs] for the basis expansion.
        """
        if parameters is None:
            self.name = 'BEE1'
            # Default expansion: single term with unit coefficient.
            parameters = [0.0, 1.0]
        else:
            # '?' marks a non-default, user-supplied expansion.
            self.name = 'BEE1?'
            parameters = np.array(parameters, dtype=float).ravel()
        # Functional id 18 in the _gpaw C extension -- presumably the BEE1
        # exchange implementation; confirm against the extension source.
        self.xc = _gpaw.XCFunctional(18, parameters)
        self.type = 'GGA'
class BEE2(XCKernel):
    """GGA exchange expanded in Legendre polynomials."""
    def __init__(self, parameters=None):
        """BEE2.

        parameters: array
            [transformation,0.0,[orders],[coefs]].
        """
        if parameters is None:
            # LDA exchange
            t = [1.0, 0.0]
            coefs = [1.0]
            orders = [0.0]
            parameters = np.append(t, np.append(orders, coefs))
        else:
            # Layout check: 2 transformation values followed by equal-length
            # orders and coefficients arrays.
            assert len(parameters) > 2
            assert np.mod(len(parameters), 2) == 0
            assert parameters[1] == 0.0
            parameters = np.array(parameters, dtype=float).ravel()
        # Functional id 17 in the _gpaw C extension -- presumably the BEE2
        # exchange implementation; confirm against the extension source.
        self.xc = _gpaw.XCFunctional(17, parameters)
        self.type = 'GGA'
        self.name = 'BEE2'
class BEEVDWKernel(XCKernel):
    """Kernel for BEEVDW functionals."""
    def __init__(self, bee, xcoefs, ldac, pbec):
        """BEEVDW kernel.

        parameters:

        bee : str
            choose BEE1 or BEE2 exchange basis expansion.
        xcoefs : array
            coefficients for exchange.
        ldac : float
            coefficient for LDA correlation.
        pbec : float
            coefficient for PBE correlation.
        """
        # Bug fix: strings were compared with `is` (object identity), which
        # is not a reliable equality test in Python; use `==`.
        if bee == 'BEE1':
            self.BEE = BEE1(xcoefs)
        elif bee == 'BEE2':
            self.BEE = BEE2(xcoefs)
        else:
            # Bug fix: the message used a ('...%s', bee) argument tuple that
            # was never interpolated; format it properly.
            raise ValueError('Unknown BEE exchange: %s' % bee)
        self.LDAc = LibXC('LDA_C_PW')
        self.PBEc = LibXC('GGA_C_PBE')
        self.ldac = ldac
        self.pbec = pbec
        self.type = 'GGA'
        self.name = 'BEEVDW'

    def calculate(self, e_g, n_sg, dedn_sg,
                  sigma_xg=None, dedsigma_xg=None,
                  tau_sg=None, dedtau_sg=None):
        """Evaluate exchange plus weighted LDA/PBE correlation in place.

        The energy density e_g and potential arrays are written/accumulated
        in place, following the XCKernel calling convention.
        """
        if debug:
            self.check_arguments(e_g, n_sg, dedn_sg, sigma_xg, dedsigma_xg,
                                 tau_sg, dedtau_sg)
        # Exchange part writes directly into the output arrays.
        self.BEE.calculate(e_g, n_sg, dedn_sg, sigma_xg, dedsigma_xg)
        # Scratch arrays so each correlation kernel can be evaluated
        # separately and accumulated with its coefficient.
        e0_g = np.empty_like(e_g)
        dedn0_sg = np.empty_like(dedn_sg)
        dedsigma0_xg = np.empty_like(dedsigma_xg)
        # NOTE(review): PBE correlation enters with (pbec - 1.0) -- presumably
        # one unit of PBE correlation is already included elsewhere; confirm.
        for coef, kernel in [
            (self.ldac, self.LDAc),
            (self.pbec - 1.0, self.PBEc)]:
            dedn0_sg[:] = 0.0
            kernel.calculate(e0_g, n_sg, dedn0_sg, sigma_xg, dedsigma0_xg)
            e_g += coef * e0_g
            dedn_sg += coef * dedn0_sg
            if kernel.type == 'GGA':
                dedsigma_xg += coef * dedsigma0_xg
class BEEVDWFunctional(FFTVDWFunctional):
    """Base class for BEEVDW functionals."""
    def __init__(self, bee='BEE1', xcoefs=(0.0, 1.0),
                 ccoefs=(0.0, 1.0, 0.0), t=4.0, orders=None, Nr=2048,
                 **kwargs):
        """BEEVDW functionals.

        parameters:

        bee : str
            choose BEE1 or BEE2 exchange basis expansion.
        xcoefs : array-like
            coefficients for exchange.
        ccoefs : array-like
            LDA, PBE, nonlocal correlation coefficients
        t : float
            transformation for BEE2 exchange
        orders : array
            orders of Legendre polynomials for BEE2 exchange
        Nr : int
            Nr for FFT evaluation of vdW
        """
        # Bug fix: the first two branches compared strings with `is`
        # (identity); the 'BEEF-vdW' branch already used `==`, showing the
        # intended comparison. Use `==` consistently.
        if bee == 'BEE1':
            name = 'BEE1VDW'
            Zab = -0.8491
            soft_corr = False
        elif bee == 'BEE2':
            name = 'BEE2VDW'
            Zab = -1.887
            soft_corr = False
            if orders is None:
                orders = range(len(xcoefs))
            # Pack [t, 0.0, orders..., coefs...] as BEE2 expects.
            xcoefs = np.append([t, 0.0], np.append(orders, xcoefs))
        elif bee == 'BEEF-vdW':
            # Published BEEF-vdW parametrization; delegate to BEE2 with the
            # fitted coefficients.
            bee = 'BEE2'
            name = 'BEEF-vdW'
            Zab = -1.887
            soft_corr = True
            t, x, o, ccoefs = self.load_xc_pars('BEEF-vdW')
            xcoefs = np.append(t, np.append(o, x))
            self.t, self.x, self.o, self.c = t, x, o, ccoefs
            self.nl_type = 2
        else:
            # Bug fix: message used a ('%s', bee) tuple never interpolated.
            raise KeyError('Unknown BEEVDW functional: %s' % bee)
        assert isinstance(Nr, int)
        assert Nr % 512 == 0
        ldac, pbec, vdw = ccoefs
        kernel = BEEVDWKernel(bee, xcoefs, ldac, pbec)
        FFTVDWFunctional.__init__(self, name=name, soft_correction=soft_corr,
                                  kernel=kernel, Zab=Zab, vdwcoef=vdw, Nr=Nr,
                                  **kwargs)

    def get_setup_name(self):
        # PAW setups generated for PBE are used for all BEEVDW variants.
        return 'PBE'

    def load_xc_pars(self, name):
        """Get BEEF-vdW parameters"""
        assert name == 'BEEF-vdW'
        # t: Legendre transformation; c: [LDA, PBE, vdW] correlation
        # coefficients; x: fitted Legendre exchange coefficients.
        t = np.array([4.0, 0.0])
        c = np.array([0.600166476948828631066,
                      0.399833523051171368934,
                      1.0])
        x = np.array([1.516501714304992365356,
                      0.441353209874497942611,
                      -0.091821352411060291887,
                      -0.023527543314744041314,
                      0.034188284548603550816,
                      0.002411870075717384172,
                      -0.014163813515916020766,
                      0.000697589558149178113,
                      0.009859205136982565273,
                      -0.006737855050935187551,
                      -0.001573330824338589097,
                      0.005036146253345903309,
                      -0.002569472452841069059,
                      -0.000987495397608761146,
                      0.002033722894696920677,
                      -0.000801871884834044583,
                      -0.000668807872347525591,
                      0.001030936331268264214,
                      -0.000367383865990214423,
                      -0.000421363539352619543,
                      0.000576160799160517858,
                      -0.000083465037349510408,
                      -0.000445844758523195788,
                      0.000460129009232047457,
                      -0.000005231775398304339,
                      -0.000423957047149510404,
                      0.000375019067938866537,
                      0.000021149381251344578,
                      -0.000190491156503997170,
                      0.000073843624209823442])
        o = range(len(x))
        return t, x, o, c
class BEEF_Ensemble:
    """BEEF ensemble error estimation."""
    def __init__(self, calc=None, exch=None, corr=None):
        """BEEF ensemble

        parameters:

        calc : object
            Calculator holding a selfconsistent BEEF type electron density.
            May be BEEF-vdW or mBEEF.
        exch : array
            Exchange basis function contributions to the total energy.
            Defaults to None.
        corr : array
            Correlation basis function contributions to the total energy.
            Defaults to None.
        """
        self.calc = calc
        self.exch = exch
        self.corr = corr
        self.e_dft = None
        self.e0 = None
        if self.calc is None:
            raise KeyError('calculator not specified')
        # determine functional and read parameters
        self.xc = self.calc.get_xc_functional()
        if self.xc in ['BEEF-vdW', 'BEEF-1']:
            self.bee = BEEVDWFunctional('BEEF-vdW')
            self.bee_type = 1
            self.nl_type = self.bee.nl_type
            self.t = self.bee.t
            self.x = self.bee.x
            self.o = self.bee.o
            self.c = self.bee.c
        elif self.xc == 'mBEEF':
            self.bee = LibXC('mBEEF')
            self.bee_type = 2
            self.max_order = 8
            self.trans = [6.5124, -1.0]
            if self.exch is None and rank == 0:
                self.calc.converge_wave_functions()
                # Bug fix: `print 'x'` is Python 2-only syntax (a syntax
                # error on Python 3); the function-call form below behaves
                # identically on both.
                print('wave functions converged')
        else:
            raise NotImplementedError('xc = %s not implemented' % self.xc)

    def create_xc_contributions(self, type):
        """General function for creating exchange or correlation energies"""
        assert type in ['exch', 'corr']
        err = 'bee_type %i not implemented' % self.bee_type
        if type == 'exch':
            if self.bee_type == 1:
                out = self.beefvdw_energy_contribs_x()
            elif self.bee_type == 2:
                out = self.mbeef_exchange_energy_contribs()
            else:
                raise NotImplementedError(err)
        else:
            if self.bee_type == 1:
                out = self.beefvdw_energy_contribs_c()
            elif self.bee_type == 2:
                # mBEEF has no separate correlation expansion.
                out = np.array([])
            else:
                raise NotImplementedError(err)
        return out

    def get_non_xc_total_energies(self):
        """Compile non-XC total energy contributions"""
        if self.e_dft is None:
            self.e_dft = self.calc.get_potential_energy()
        if self.e0 is None:
            from gpaw.xc.kernel import XCNull
            xc_null = XC(XCNull())
            # e0: total energy with the XC contribution removed.
            self.e0 = self.e_dft + self.calc.get_xc_difference(xc_null)
        # NOTE(review): the original called isinstance(self.e_dft, FloatType)
        # and isinstance(self.e0, FloatType) here with the results discarded
        # -- pure no-ops, presumably intended as assertions. Removed; confirm
        # whether real assertions are wanted.

    def mbeef_exchange_energy_contribs(self):
        """Legendre polynomial exchange contributions to mBEEF Etot"""
        self.get_non_xc_total_energies()
        e_x = np.zeros((self.max_order, self.max_order))
        for p1 in range(self.max_order):  # alpha
            for p2 in range(self.max_order):  # s2
                pars_i = np.array([1, self.trans[0], p2, 1.0])
                pars_j = np.array([1, self.trans[1], p1, 1.0])
                pars = np.hstack((pars_i, pars_j))
                x = XC('2D-MGGA', pars)
                e_x[p1, p2] = self.e_dft + self.calc.get_xc_difference(x) - self.e0
                del x
        return e_x

    def beefvdw_energy_contribs_x(self):
        """Legendre polynomial exchange contributions to BEEF-vdW Etot"""
        self.get_non_xc_total_energies()
        # PBE correlation reference, subtracted from each exchange term.
        e_pbe = self.e_dft + self.calc.get_xc_difference('GGA_C_PBE') - self.e0
        exch = np.zeros(len(self.o))
        for p in self.o:
            pars = [self.t[0], self.t[1], p, 1.0]
            bee = XC('BEE2', pars)
            exch[p] = self.e_dft + self.calc.get_xc_difference(bee) - self.e0 - e_pbe
            del bee
        return exch

    def beefvdw_energy_contribs_c(self):
        """LDA and PBE correlation contributions to BEEF-vdW Etot"""
        self.get_non_xc_total_energies()
        e_lda = self.e_dft + self.calc.get_xc_difference('LDA_C_PW') - self.e0
        e_pbe = self.e_dft + self.calc.get_xc_difference('GGA_C_PBE') - self.e0
        corr = np.array([e_lda, e_pbe])
        return corr
|
robwarm/gpaw-symm
|
gpaw/xc/bee.py
|
Python
|
gpl-3.0
| 11,238
|
[
"ASE",
"GPAW"
] |
0034bc511c4c36ae231a1d06cd642279738dc75149d318cb19ded1a5bb79ab62
|
from django.conf import settings
# BigQuery access configuration for gene-expression (gexp) data.
BIGQUERY_CONFIG = {
    # Project/dataset holding shared platform reference tables.
    "reference_config": {
        "project_name": "isb-cgc",
        "dataset_name": "platform_reference"
    },
    "supported_genomic_builds": ['hg19', 'hg38'],
    # One entry per queryable expression table. `value_field` is the numeric
    # expression column, `gene_label_field` the gene-symbol column, and
    # `internal_table_id` the key used to select a table in code.
    "tables": [
        {
            "table_id": "{}:TCGA_hg19_data_v0.RNAseq_Gene_Expression_UNC_RSEM".format(settings.BIGQUERY_DATA_PROJECT_ID),
            "genomic_build": "hg19",
            "platform": "Illumina HiSeq",
            "gene_label_field": "HGNC_gene_symbol",
            "generating_center": "UNC",
            "internal_table_id": "rnaseq_unc_rsem",
            "value_label": "RSEM",
            "value_field": "normalized_count",
            "program": "tcga"
        },
        {
            "table_id": "{}:TCGA_hg38_data_v0.RNAseq_Gene_Expression".format(settings.BIGQUERY_DATA_PROJECT_ID),
            "genomic_build": "hg38",
            "platform": "Illumina HiSeq",
            "gene_label_field": "gene_name",
            "generating_center": "GDC",
            "internal_table_id": "rnaseq",
            "value_label": "HTSeq counts",
            "value_field": "HTSeq__Counts",
            "program": "tcga"
        },
        {
            "table_id": "{}:TARGET_hg38_data_v0.RNAseq_Gene_Expression".format(settings.BIGQUERY_DATA_PROJECT_ID),
            "genomic_build": "hg38",
            "platform": "Illumina HiSeq",
            "gene_label_field": "gene_name",
            "generating_center": "GDC",
            "internal_table_id": "rnaseq",
            "value_label": "HTSeq counts",
            "value_field": "HTSeq__Counts",
            "program": "target"
        }
    ]
}
|
isb-cgc/ISB-CGC-Webapp
|
bq_data_access/data_types/gexp.py
|
Python
|
apache-2.0
| 1,638
|
[
"HTSeq"
] |
b28ce1364a2674a9fcb33d547789c9f255516877b952d128b823a1186534717c
|
# -*- coding: utf-8 -*-
"""
pysteps.nowcasts.steps
======================
Implementation of the STEPS stochastic nowcasting method as described in
:cite:`Seed2003`, :cite:`BPS2006` and :cite:`SPN2013`.
.. autosummary::
:toctree: ../generated/
forecast
"""
import numpy as np
import scipy.ndimage
import time
from pysteps import cascade
from pysteps import extrapolation
from pysteps import noise
from pysteps import utils
from pysteps.nowcasts import utils as nowcast_utils
from pysteps.postprocessing import probmatching
from pysteps.timeseries import autoregression, correlation
try:
import dask
DASK_IMPORTED = True
except ImportError:
DASK_IMPORTED = False
def forecast(
    R,
    V,
    timesteps,
    n_ens_members=24,
    n_cascade_levels=6,
    R_thr=None,
    kmperpixel=None,
    timestep=None,
    extrap_method="semilagrangian",
    decomp_method="fft",
    bandpass_filter_method="gaussian",
    noise_method="nonparametric",
    noise_stddev_adj=None,
    ar_order=2,
    vel_pert_method="bps",
    conditional=False,
    probmatching_method="cdf",
    mask_method="incremental",
    callback=None,
    return_output=True,
    seed=None,
    num_workers=1,
    fft_method="numpy",
    domain="spatial",
    extrap_kwargs=None,
    filter_kwargs=None,
    noise_kwargs=None,
    vel_pert_kwargs=None,
    mask_kwargs=None,
    measure_time=False,
):
    """
    Generate a nowcast ensemble by using the Short-Term Ensemble Prediction
    System (STEPS) method.
    Parameters
    ----------
    R: array-like
      Array of shape (ar_order+1,m,n) containing the input precipitation fields
      ordered by timestamp from oldest to newest. The time steps between the
      inputs are assumed to be regular.
    V: array-like
      Array of shape (2,m,n) containing the x- and y-components of the advection
      field. The velocities are assumed to represent one time step between the
      inputs. All values are required to be finite.
    timesteps: int or list of floats
      Number of time steps to forecast or a list of time steps for which the
      forecasts are computed (relative to the input time step). The elements of
      the list are required to be in ascending order.
    n_ens_members: int, optional
      The number of ensemble members to generate.
    n_cascade_levels: int, optional
      The number of cascade levels to use.
    R_thr: float, optional
      Specifies the threshold value for minimum observable precipitation
      intensity. Required if mask_method is not None or conditional is True.
    kmperpixel: float, optional
      Spatial resolution of the input data (kilometers/pixel). Required if
      vel_pert_method is not None or mask_method is 'incremental'.
    timestep: float, optional
      Time step of the motion vectors (minutes). Required if vel_pert_method is
      not None or mask_method is 'incremental'.
    extrap_method: str, optional
      Name of the extrapolation method to use. See the documentation of
      pysteps.extrapolation.interface.
    decomp_method: {'fft'}, optional
      Name of the cascade decomposition method to use. See the documentation
      of pysteps.cascade.interface.
    bandpass_filter_method: {'gaussian', 'uniform'}, optional
      Name of the bandpass filter method to use with the cascade decomposition.
      See the documentation of pysteps.cascade.interface.
    noise_method: {'parametric','nonparametric','ssft','nested',None}, optional
      Name of the noise generator to use for perturbating the precipitation
      field. See the documentation of pysteps.noise.interface. If set to None,
      no noise is generated.
    noise_stddev_adj: {'auto','fixed',None}, optional
      Optional adjustment for the standard deviations of the noise fields added
      to each cascade level. This is done to compensate incorrect std. dev.
      estimates of casace levels due to presence of no-rain areas. 'auto'=use
      the method implemented in pysteps.noise.utils.compute_noise_stddev_adjs.
      'fixed'= use the formula given in :cite:`BPS2006` (eq. 6), None=disable
      noise std. dev adjustment.
    ar_order: int, optional
      The order of the autoregressive model to use. Must be >= 1.
    vel_pert_method: {'bps',None}, optional
      Name of the noise generator to use for perturbing the advection field. See
      the documentation of pysteps.noise.interface. If set to None, the advection
      field is not perturbed.
    conditional: bool, optional
      If set to True, compute the statistics of the precipitation field
      conditionally by excluding pixels where the values are below the threshold
      R_thr.
    mask_method: {'obs','sprog','incremental',None}, optional
      The method to use for masking no precipitation areas in the forecast field.
      The masked pixels are set to the minimum value of the observations.
      'obs' = apply R_thr to the most recently observed precipitation intensity
      field, 'sprog' = use the smoothed forecast field from S-PROG, where the
      AR(p) model has been applied, 'incremental' = iteratively buffer the mask
      with a certain rate (currently it is 1 km/min), None=no masking.
    probmatching_method: {'cdf','mean',None}, optional
      Method for matching the statistics of the forecast field with those of
      the most recently observed one. 'cdf'=map the forecast CDF to the observed
      one, 'mean'=adjust only the conditional mean value of the forecast field
      in precipitation areas, None=no matching applied. Using 'mean' requires
      that mask_method is not None.
    callback: function, optional
      Optional function that is called after computation of each time step of
      the nowcast. The function takes one argument: a three-dimensional array
      of shape (n_ens_members,h,w), where h and w are the height and width
      of the input field R, respectively. This can be used, for instance,
      writing the outputs into files.
    return_output: bool, optional
      Set to False to disable returning the outputs as numpy arrays. This can
      save memory if the intermediate results are written to output files using
      the callback function.
    seed: int, optional
      Optional seed number for the random generators.
    num_workers: int, optional
      The number of workers to use for parallel computation. Applicable if dask
      is enabled or pyFFTW is used for computing the FFT. When num_workers>1, it
      is advisable to disable OpenMP by setting the environment variable
      OMP_NUM_THREADS to 1. This avoids slowdown caused by too many simultaneous
      threads.
    fft_method: str, optional
      A string defining the FFT method to use (see utils.fft.get_method).
      Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
      the recommended method is 'pyfftw'.
    domain: {"spatial", "spectral"}
      If "spatial", all computations are done in the spatial domain (the
      classical STEPS model). If "spectral", the AR(2) models and stochastic
      perturbations are applied directly in the spectral domain to reduce
      memory footprint and improve performance :cite:`PCH2019b`.
    extrap_kwargs: dict, optional
      Optional dictionary containing keyword arguments for the extrapolation
      method. See the documentation of pysteps.extrapolation.
    filter_kwargs: dict, optional
      Optional dictionary containing keyword arguments for the filter method.
      See the documentation of pysteps.cascade.bandpass_filters.py.
    noise_kwargs: dict, optional
      Optional dictionary containing keyword arguments for the initializer of
      the noise generator. See the documentation of pysteps.noise.fftgenerators.
    vel_pert_kwargs: dict, optional
      Optional dictionary containing keyword arguments 'p_par' and 'p_perp' for
      the initializer of the velocity perturbator. The choice of the optimal
      parameters depends on the domain and the used optical flow method.
      Default parameters from :cite:`BPS2006`:
      p_par  = [10.88, 0.23, -7.68]
      p_perp = [5.76, 0.31, -2.72]
      Parameters fitted to the data (optical flow/domain):
      darts/fmi:
      p_par  = [13.71259667, 0.15658963, -16.24368207]
      p_perp = [8.26550355, 0.17820458, -9.54107834]
      darts/mch:
      p_par  = [24.27562298, 0.11297186, -27.30087471]
      p_perp = [-7.80797846e+01, -3.38641048e-02, 7.56715304e+01]
      darts/fmi+mch:
      p_par  = [16.55447057, 0.14160448, -19.24613059]
      p_perp = [14.75343395, 0.11785398, -16.26151612]
      lucaskanade/fmi:
      p_par  = [2.20837526, 0.33887032, -2.48995355]
      p_perp = [2.21722634, 0.32359621, -2.57402761]
      lucaskanade/mch:
      p_par  = [2.56338484, 0.3330941, -2.99714349]
      p_perp = [1.31204508, 0.3578426, -1.02499891]
      lucaskanade/fmi+mch:
      p_par  = [2.31970635, 0.33734287, -2.64972861]
      p_perp = [1.90769947, 0.33446594, -2.06603662]
      vet/fmi:
      p_par  = [0.25337388, 0.67542291, 11.04895538]
      p_perp = [0.02432118, 0.99613295, 7.40146505]
      vet/mch:
      p_par  = [0.5075159, 0.53895212, 7.90331791]
      p_perp = [0.68025501, 0.41761289, 4.73793581]
      vet/fmi+mch:
      p_par  = [0.29495222, 0.62429207, 8.6804131 ]
      p_perp = [0.23127377, 0.59010281, 5.98180004]
      fmi=Finland, mch=Switzerland, fmi+mch=both pooled into the same data set
      The above parameters have been fitten by using run_vel_pert_analysis.py
      and fit_vel_pert_params.py located in the scripts directory.
      See pysteps.noise.motion for additional documentation.
    mask_kwargs: dict
      Optional dictionary containing mask keyword arguments 'mask_f' and
      'mask_rim', the factor defining the the mask increment and the rim size,
      respectively.
      The mask increment is defined as mask_f*timestep/kmperpixel.
    measure_time: bool
      If set to True, measure, print and return the computation time.
    Returns
    -------
    out: ndarray
      If return_output is True, a four-dimensional array of shape
      (n_ens_members,num_timesteps,m,n) containing a time series of forecast
      precipitation fields for each ensemble member. Otherwise, a None value
      is returned. The time series starts from t0+timestep, where timestep is
      taken from the input precipitation fields R. If measure_time is True, the
      return value is a three-element tuple containing the nowcast array, the
      initialization time of the nowcast generator and the time used in the
      main loop (seconds).
    See also
    --------
    pysteps.extrapolation.interface, pysteps.cascade.interface,
    pysteps.noise.interface, pysteps.noise.utils.compute_noise_stddev_adjs
    References
    ----------
    :cite:`Seed2003`, :cite:`BPS2006`, :cite:`SPN2013`, :cite:`PCH2019b`
    """
    # validate array shapes and timestep ordering before any heavy work
    _check_inputs(R, V, timesteps, ar_order)
    # replace None keyword-argument containers with empty dicts
    if extrap_kwargs is None:
        extrap_kwargs = dict()
    if filter_kwargs is None:
        filter_kwargs = dict()
    if noise_kwargs is None:
        noise_kwargs = dict()
    if vel_pert_kwargs is None:
        vel_pert_kwargs = dict()
    if mask_kwargs is None:
        mask_kwargs = dict()
    # check mutually dependent options; several settings require R_thr,
    # kmperpixel or timestep to be given
    if np.any(~np.isfinite(V)):
        raise ValueError("V contains non-finite values")
    if mask_method not in ["obs", "sprog", "incremental", None]:
        raise ValueError(
            "unknown mask method %s: must be 'obs', 'sprog' or 'incremental' or None"
            % mask_method
        )
    if conditional and R_thr is None:
        raise ValueError("conditional=True but R_thr is not set")
    if mask_method is not None and R_thr is None:
        raise ValueError("mask_method!=None but R_thr=None")
    if noise_stddev_adj not in ["auto", "fixed", None]:
        raise ValueError(
            "unknown noise_std_dev_adj method %s: must be 'auto', 'fixed', or None"
            % noise_stddev_adj
        )
    if kmperpixel is None:
        if vel_pert_method is not None:
            raise ValueError("vel_pert_method is set but kmperpixel=None")
        if mask_method == "incremental":
            raise ValueError("mask_method='incremental' but kmperpixel=None")
    if timestep is None:
        if vel_pert_method is not None:
            raise ValueError("vel_pert_method is set but timestep=None")
        if mask_method == "incremental":
            raise ValueError("mask_method='incremental' but timestep=None")
    # print a summary of the configuration
    print("Computing STEPS nowcast:")
    print("------------------------")
    print("")
    print("Inputs:")
    print("-------")
    print("input dimensions: %dx%d" % (R.shape[1], R.shape[2]))
    if kmperpixel is not None:
        print("km/pixel:         %g" % kmperpixel)
    if timestep is not None:
        print("time step:        %d minutes" % timestep)
    print("")
    print("Methods:")
    print("--------")
    print("extrapolation:          %s" % extrap_method)
    print("bandpass filter:        %s" % bandpass_filter_method)
    print("decomposition:          %s" % decomp_method)
    print("noise generator:        %s" % noise_method)
    print("noise adjustment:       %s" % ("yes" if noise_stddev_adj else "no"))
    print("velocity perturbator:   %s" % vel_pert_method)
    print("conditional statistics: %s" % ("yes" if conditional else "no"))
    print("precip. mask method:    %s" % mask_method)
    print("probability matching:   %s" % probmatching_method)
    print("FFT method:             %s" % fft_method)
    print("domain:                 %s" % domain)
    print("")
    print("Parameters:")
    print("-----------")
    if isinstance(timesteps, int):
        print("number of time steps:     %d" % timesteps)
    else:
        print("time steps:               %s" % timesteps)
    print("ensemble size:            %d" % n_ens_members)
    print("parallel threads:         %d" % num_workers)
    print("number of cascade levels: %d" % n_cascade_levels)
    print("order of the AR(p) model: %d" % ar_order)
    # NOTE(review): vp_par/vp_perp are only bound in this branch, but they are
    # referenced later when vel_pert_method is not None — any non-'bps'
    # perturbation method would hit a NameError; confirm 'bps' is the only
    # supported value.
    if vel_pert_method == "bps":
        vp_par = vel_pert_kwargs.get("p_par", noise.motion.get_default_params_bps_par())
        vp_perp = vel_pert_kwargs.get(
            "p_perp", noise.motion.get_default_params_bps_perp()
        )
        print(
            "velocity perturbations, parallel:      %g,%g,%g"
            % (vp_par[0], vp_par[1], vp_par[2])
        )
        print(
            "velocity perturbations, perpendicular: %g,%g,%g"
            % (vp_perp[0], vp_perp[1], vp_perp[2])
        )
    if conditional or mask_method is not None:
        print("precip. intensity threshold: %g" % R_thr)
    # cap the number of dask workers at the ensemble size
    num_ensemble_workers = n_ens_members if num_workers > n_ens_members else num_workers
    if measure_time:
        starttime_init = time.time()
    fft = utils.get_method(fft_method, shape=R.shape[1:], n_threads=num_workers)
    M, N = R.shape[1:]
    # initialize the band-pass filter
    filter_method = cascade.get_method(bandpass_filter_method)
    # NOTE: 'filter' shadows the Python builtin of the same name
    filter = filter_method((M, N), n_cascade_levels, **filter_kwargs)
    decomp_method, recomp_method = cascade.get_method(decomp_method)
    extrapolator_method = extrapolation.get_method(extrap_method)
    x_values, y_values = np.meshgrid(np.arange(R.shape[2]), np.arange(R.shape[1]))
    xy_coords = np.stack([x_values, y_values])
    # keep only the ar_order+1 most recent input fields
    R = R[-(ar_order + 1) :, :, :].copy()
    # determine the domain mask from non-finite values
    domain_mask = np.logical_or.reduce(
        [~np.isfinite(R[i, :]) for i in range(R.shape[0])]
    )
    # determine the precipitation threshold mask
    if conditional:
        MASK_thr = np.logical_and.reduce(
            [R[i, :, :] >= R_thr for i in range(R.shape[0])]
        )
    else:
        MASK_thr = None
    # advect the previous precipitation fields to the same position with the
    # most recent one (i.e. transform them into the Lagrangian coordinates)
    extrap_kwargs = extrap_kwargs.copy()
    extrap_kwargs["xy_coords"] = xy_coords
    extrap_kwargs["allow_nonfinite_values"] = True
    res = list()
    # helper: extrapolate field i forward (ar_order - i) steps so that all
    # inputs line up with the most recent observation
    def f(R, i):
        return extrapolator_method(R[i, :, :], V, ar_order - i, "min", **extrap_kwargs)[
            -1
        ]
    for i in range(ar_order):
        if not DASK_IMPORTED:
            R[i, :, :] = f(R, i)
        else:
            res.append(dask.delayed(f)(R, i))
    if DASK_IMPORTED:
        num_workers_ = len(res) if num_workers > len(res) else num_workers
        R = np.stack(list(dask.compute(*res, num_workers=num_workers_)) + [R[-1, :, :]])
    # replace non-finite values with the minimum value
    R = R.copy()
    for i in range(R.shape[0]):
        R[i, ~np.isfinite(R[i, :])] = np.nanmin(R[i, :])
    if noise_method is not None:
        # get methods for perturbations
        init_noise, generate_noise = noise.get_method(noise_method)
        # initialize the perturbation generator for the precipitation field
        pp = init_noise(R, fft_method=fft, **noise_kwargs)
        if noise_stddev_adj == "auto":
            print("Computing noise adjustment coefficients... ", end="", flush=True)
            if measure_time:
                starttime = time.time()
            R_min = np.min(R)
            noise_std_coeffs = noise.utils.compute_noise_stddev_adjs(
                R[-1, :, :],
                R_thr,
                R_min,
                filter,
                decomp_method,
                pp,
                generate_noise,
                20,
                conditional=True,
                num_workers=num_workers,
            )
            if measure_time:
                print("%.2f seconds." % (time.time() - starttime))
            else:
                print("done.")
        elif noise_stddev_adj == "fixed":
            # fixed adjustment from BPS2006, eq. 6
            f = lambda k: 1.0 / (0.75 + 0.09 * k)
            noise_std_coeffs = [f(k) for k in range(1, n_cascade_levels + 1)]
        else:
            noise_std_coeffs = np.ones(n_cascade_levels)
        if noise_stddev_adj is not None:
            print("noise std. dev. coeffs:   %s" % str(noise_std_coeffs))
    # compute the cascade decompositions of the input precipitation fields
    R_d = []
    for i in range(ar_order + 1):
        R_ = decomp_method(
            R[i, :, :],
            filter,
            mask=MASK_thr,
            fft_method=fft,
            output_domain=domain,
            normalize=True,
            compute_stats=True,
            compact_output=True,
        )
        R_d.append(R_)
    # normalize the cascades and rearrange them into a four-dimensional array
    # of shape (n_cascade_levels,ar_order+1,m,n) for the autoregressive model
    R_c = nowcast_utils.stack_cascades(R_d, n_cascade_levels)
    # keep one decomposition dict per ensemble member for later recomposition
    R_d = R_d[-1]
    R_d = [R_d.copy() for j in range(n_ens_members)]
    # compute lag-l temporal autocorrelation coefficients for each cascade level
    GAMMA = np.empty((n_cascade_levels, ar_order))
    for i in range(n_cascade_levels):
        GAMMA[i, :] = correlation.temporal_autocorrelation(R_c[i], mask=MASK_thr)
    nowcast_utils.print_corrcoefs(GAMMA)
    if ar_order == 2:
        # adjust the lag-2 correlation coefficient to ensure that the AR(p)
        # process is stationary
        for i in range(n_cascade_levels):
            GAMMA[i, 1] = autoregression.adjust_lag2_corrcoef2(GAMMA[i, 0], GAMMA[i, 1])
    # estimate the parameters of the AR(p) model from the autocorrelation
    # coefficients
    PHI = np.empty((n_cascade_levels, ar_order + 1))
    for i in range(n_cascade_levels):
        PHI[i, :] = autoregression.estimate_ar_params_yw(GAMMA[i, :])
    nowcast_utils.print_ar_params(PHI)
    # discard all except the p-1 last cascades because they are not needed for
    # the AR(p) model
    R_c = [R_c[i][-ar_order:] for i in range(n_cascade_levels)]
    # stack the cascades into a list containing all ensemble members
    R_c = [
        [R_c[j].copy() for j in range(n_cascade_levels)] for i in range(n_ens_members)
    ]
    # initialize the random generators; each member gets its own seeded
    # RandomState for precipitation and motion perturbations so that results
    # are reproducible for a given seed
    if noise_method is not None:
        randgen_prec = []
        randgen_motion = []
        np.random.seed(seed)
        for j in range(n_ens_members):
            rs = np.random.RandomState(seed)
            randgen_prec.append(rs)
            seed = rs.randint(0, high=1e9)
            rs = np.random.RandomState(seed)
            randgen_motion.append(rs)
            seed = rs.randint(0, high=1e9)
    if vel_pert_method is not None:
        init_vel_noise, generate_vel_noise = noise.get_method(vel_pert_method)
        # initialize the perturbation generators for the motion field
        vps = []
        for j in range(n_ens_members):
            kwargs = {
                "randstate": randgen_motion[j],
                "p_par": vp_par,
                "p_perp": vp_perp,
            }
            vp_ = init_vel_noise(V, 1.0 / kmperpixel, timestep, **kwargs)
            vps.append(vp_)
    # D: per-member displacement state; R_f: per-member output time series
    D = [None for j in range(n_ens_members)]
    R_f = [[] for j in range(n_ens_members)]
    if probmatching_method == "mean":
        mu_0 = np.mean(R[-1, :, :][R[-1, :, :] >= R_thr])
    R_m = None
    if mask_method is not None:
        MASK_prec = R[-1, :, :] >= R_thr
        if mask_method == "obs":
            pass
        elif mask_method == "sprog":
            # compute the wet area ratio and the precipitation mask
            war = 1.0 * np.sum(MASK_prec) / (R.shape[1] * R.shape[2])
            R_m = [R_c[0][i].copy() for i in range(n_cascade_levels)]
            R_m_d = R_d[0].copy()
        elif mask_method == "incremental":
            # get mask parameters
            mask_rim = mask_kwargs.get("mask_rim", 10)
            mask_f = mask_kwargs.get("mask_f", 1.0)
            # initialize the structuring element
            struct = scipy.ndimage.generate_binary_structure(2, 1)
            # iterate it to expand it nxn
            n = mask_f * timestep / kmperpixel
            struct = scipy.ndimage.iterate_structure(struct, int((n - 1) / 2.0))
            # initialize precip mask for each member
            MASK_prec = _compute_incremental_mask(MASK_prec, struct, mask_rim)
            MASK_prec = [MASK_prec.copy() for j in range(n_ens_members)]
    if noise_method is None and R_m is None:
        R_m = [R_c[0][i].copy() for i in range(n_cascade_levels)]
    # one FFT object per member so the workers do not share state
    fft_objs = []
    for i in range(n_ens_members):
        fft_objs.append(utils.get_method(fft_method, shape=R.shape[1:]))
    if measure_time:
        init_time = time.time() - starttime_init
    # from here on, R refers to the most recent observed field only
    R = R[-1, :, :]
    print("Starting nowcast computation.")
    if measure_time:
        starttime_mainloop = time.time()
    if isinstance(timesteps, int):
        timesteps = range(timesteps + 1)
        timestep_type = "int"
    else:
        # bin the (possibly fractional) output times into unit intervals
        original_timesteps = [0] + list(timesteps)
        timesteps = nowcast_utils.binned_timesteps(original_timesteps)
        timestep_type = "list"
    extrap_kwargs["return_displacement"] = True
    R_f_prev = [R for i in range(n_ens_members)]
    t_prev = [0.0 for j in range(n_ens_members)]
    t_total = [0.0 for j in range(n_ens_members)]
    # iterate each time step
    for t, subtimestep_idx in enumerate(timesteps):
        if timestep_type == "list":
            subtimesteps = [original_timesteps[t_] for t_ in subtimestep_idx]
        else:
            subtimesteps = [t]
        if (timestep_type == "list" and subtimesteps) or (
            timestep_type == "int" and t > 0
        ):
            is_nowcast_time_step = True
        else:
            is_nowcast_time_step = False
        if is_nowcast_time_step:
            print(
                "Computing nowcast for time step %d... " % t,
                end="",
                flush=True,
            )
            if measure_time:
                starttime = time.time()
        if noise_method is None or mask_method == "sprog":
            for i in range(n_cascade_levels):
                # use a separate AR(p) model for the non-perturbed forecast,
                # from which the mask is obtained
                R_m[i] = autoregression.iterate_ar_model(R_m[i], PHI[i, :])
            R_m_d["cascade_levels"] = [R_m[i][-1] for i in range(n_cascade_levels)]
            if domain == "spatial":
                R_m_d["cascade_levels"] = np.stack(R_m_d["cascade_levels"])
            R_m_ = recomp_method(R_m_d)
            if domain == "spectral":
                R_m_ = fft.irfft2(R_m_)
            if mask_method == "sprog":
                MASK_prec = _compute_sprog_mask(R_m_, war)
        # the nowcast iteration for each ensemble member
        def worker(j):
            # advance ensemble member j by one time step and return the
            # forecast field(s) for the subtimesteps of this interval
            if noise_method is not None:
                # generate noise field
                EPS = generate_noise(
                    pp, randstate=randgen_prec[j], fft_method=fft_objs[j], domain=domain
                )
                # decompose the noise field into a cascade
                EPS = decomp_method(
                    EPS,
                    filter,
                    fft_method=fft_objs[j],
                    input_domain=domain,
                    output_domain=domain,
                    compute_stats=True,
                    normalize=True,
                    compact_output=True,
                )
            else:
                EPS = None
            # iterate the AR(p) model for each cascade level
            for i in range(n_cascade_levels):
                # normalize the noise cascade
                if EPS is not None:
                    EPS_ = EPS["cascade_levels"][i]
                    EPS_ *= noise_std_coeffs[i]
                else:
                    EPS_ = None
                # apply AR(p) process to cascade level
                if EPS is not None or vel_pert_method is not None:
                    R_c[j][i] = autoregression.iterate_ar_model(
                        R_c[j][i], PHI[i, :], eps=EPS_
                    )
                else:
                    # use the deterministic AR(p) model computed above if
                    # perturbations are disabled
                    R_c[j][i] = R_m[i]
            EPS = None
            EPS_ = None
            # compute the recomposed precipitation field(s) from the cascades
            # obtained from the AR(p) model(s)
            R_d[j]["cascade_levels"] = [
                R_c[j][i][-1, :] for i in range(n_cascade_levels)
            ]
            if domain == "spatial":
                R_d[j]["cascade_levels"] = np.stack(R_d[j]["cascade_levels"])
            R_f_new = recomp_method(R_d[j])
            if domain == "spectral":
                R_f_new = fft_objs[j].irfft2(R_f_new)
            if mask_method is not None:
                # apply the precipitation mask to prevent generation of new
                # precipitation into areas where it was not originally
                # observed
                R_cmin = R_f_new.min()
                if mask_method == "incremental":
                    R_f_new = R_cmin + (R_f_new - R_cmin) * MASK_prec[j]
                    MASK_prec_ = R_f_new > R_cmin
                else:
                    MASK_prec_ = MASK_prec
                # Set to min value outside of mask
                R_f_new[~MASK_prec_] = R_cmin
            if probmatching_method == "cdf":
                # adjust the CDF of the forecast to match the most recently
                # observed precipitation field
                R_f_new = probmatching.nonparam_match_empirical_cdf(R_f_new, R)
            elif probmatching_method == "mean":
                MASK = R_f_new >= R_thr
                mu_fct = np.mean(R_f_new[MASK])
                R_f_new[MASK] = R_f_new[MASK] - mu_fct + mu_0
            if mask_method == "incremental":
                MASK_prec[j] = _compute_incremental_mask(
                    R_f_new >= R_thr, struct, mask_rim
                )
            # restore non-finite pixels of the input domain
            R_f_new[domain_mask] = np.nan
            R_f_out = []
            extrap_kwargs_ = extrap_kwargs.copy()
            V_pert = V
            # advect the recomposed precipitation field to obtain the forecast for
            # the current time step (or subtimesteps if non-integer time steps are
            # given)
            for t_sub in subtimesteps:
                if t_sub > 0:
                    t_diff_prev_int = t_sub - int(t_sub)
                    if t_diff_prev_int > 0.0:
                        # interpolate linearly between the previous and the
                        # new forecast field for fractional time steps
                        R_f_ip = (1.0 - t_diff_prev_int) * R_f_prev[
                            j
                        ] + t_diff_prev_int * R_f_new
                    else:
                        R_f_ip = R_f_prev[j]
                    t_diff_prev = t_sub - t_prev[j]
                    t_total[j] += t_diff_prev
                    # compute the perturbed motion field
                    if vel_pert_method is not None:
                        V_pert = V + generate_vel_noise(vps[j], t_total[j] * timestep)
                    extrap_kwargs_["displacement_prev"] = D[j]
                    R_f_ep, D[j] = extrapolator_method(
                        R_f_ip,
                        V_pert,
                        [t_diff_prev],
                        **extrap_kwargs_,
                    )
                    R_f_out.append(R_f_ep[0])
                    t_prev[j] = t_sub
            # advect the forecast field by one time step if no subtimesteps in the
            # current interval were found
            if not subtimesteps:
                t_diff_prev = t + 1 - t_prev[j]
                t_total[j] += t_diff_prev
                # compute the perturbed motion field
                if vel_pert_method is not None:
                    V_pert = V + generate_vel_noise(vps[j], t_total[j] * timestep)
                extrap_kwargs_["displacement_prev"] = D[j]
                _, D[j] = extrapolator_method(
                    None,
                    V_pert,
                    [t_diff_prev],
                    **extrap_kwargs_,
                )
                t_prev[j] = t + 1
            R_f_prev[j] = R_f_new
            return R_f_out
        res = []
        for j in range(n_ens_members):
            if not DASK_IMPORTED or n_ens_members == 1:
                res.append(worker(j))
            else:
                res.append(dask.delayed(worker)(j))
        R_f_ = (
            dask.compute(*res, num_workers=num_ensemble_workers)
            if DASK_IMPORTED and n_ens_members > 1
            else res
        )
        res = None
        if is_nowcast_time_step:
            if measure_time:
                print("%.2f seconds." % (time.time() - starttime))
            else:
                print("done.")
        if callback is not None:
            R_f_stacked = np.stack(R_f_)
            if R_f_stacked.shape[1] > 0:
                callback(R_f_stacked.squeeze())
        if return_output:
            for j in range(n_ens_members):
                R_f[j].extend(R_f_[j])
        R_f_ = None
    if measure_time:
        mainloop_time = time.time() - starttime_mainloop
    if return_output:
        outarr = np.stack([np.stack(R_f[j]) for j in range(n_ens_members)])
        if measure_time:
            return outarr, init_time, mainloop_time
        else:
            return outarr
    else:
        return None
def _check_inputs(R, V, timesteps, ar_order):
if R.ndim != 3:
raise ValueError("R must be a three-dimensional array")
if R.shape[0] < ar_order + 1:
raise ValueError("R.shape[0] < ar_order+1")
if V.ndim != 3:
raise ValueError("V must be a three-dimensional array")
if R.shape[1:3] != V.shape[1:3]:
raise ValueError(
"dimension mismatch between R and V: shape(R)=%s, shape(V)=%s"
% (str(R.shape), str(V.shape))
)
if isinstance(timesteps, list) and not sorted(timesteps) == timesteps:
raise ValueError("timesteps is not in ascending order")
def _compute_incremental_mask(Rbin, kr, r):
# buffer the observation mask Rbin using the kernel kr
# add a grayscale rim r (for smooth rain/no-rain transition)
# buffer observation mask
Rbin = np.ndarray.astype(Rbin.copy(), "uint8")
Rd = scipy.ndimage.morphology.binary_dilation(Rbin, kr)
# add grayscale rim
kr1 = scipy.ndimage.generate_binary_structure(2, 1)
mask = Rd.astype(float)
for n in range(r):
Rd = scipy.ndimage.morphology.binary_dilation(Rd, kr1)
mask += Rd
# normalize between 0 and 1
return mask / mask.max()
def _compute_sprog_mask(R, war):
# obtain the CDF from the non-perturbed forecast that is
# scale-filtered by the AR(p) model
R_s = R.flatten()
# compute the threshold value R_pct_thr corresponding to the
# same fraction of precipitation pixels (forecast values above
# R_thr) as in the most recently observed precipitation field
R_s.sort(kind="quicksort")
x = 1.0 * np.arange(1, len(R_s) + 1)[::-1] / len(R_s)
i = np.argmin(abs(x - war))
# handle ties
if R_s[i] == R_s[i + 1]:
i = np.where(R_s == R_s[i])[0][-1] + 1
R_pct_thr = R_s[i]
# determine a mask using the above threshold value to preserve the
# wet-area ratio
return R >= R_pct_thr
|
pySTEPS/pysteps
|
pysteps/nowcasts/steps.py
|
Python
|
bsd-3-clause
| 33,095
|
[
"Gaussian"
] |
23887325f7c57a54f6e7417c14dfcf6e8a1f66911161b3e833e1fa9afe4c4cae
|
"""Test case for autocomplete implementations."""
import os
import uuid
from django import VERSION
from django.contrib.contenttypes.models import ContentType
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from django.utils import six
from splinter import Browser
GLOBAL_BROWSER = None
class AutocompleteTestCase(StaticLiveServerTestCase):
    """Provide a class-persistent selenium instance and assertions."""
    @classmethod
    def setUpClass(cls):
        """Instanciate a browser for the whole test session."""
        # reuse one splinter Browser across all test classes; the driver is
        # chosen via the BROWSER environment variable (default: firefox)
        global GLOBAL_BROWSER
        if GLOBAL_BROWSER is None:
            GLOBAL_BROWSER = Browser(os.environ.get('BROWSER', 'firefox'))
        cls.browser = GLOBAL_BROWSER
        super(AutocompleteTestCase, cls).setUpClass()
    def get(self, url):
        """Open a URL."""
        # navigate relative to the live test server
        self.browser.visit('%s%s' % (
            self.live_server_url,
            url
        ))
        # if redirected to the admin login page, submit the login form;
        # credentials are expected to be pre-filled by the HTML template
        if '/admin/login/' in self.browser.url:
            # Should be pre-filled by HTML template
            # self.browser.fill('username', 'test')
            # self.browser.fill('password', 'test')
            self.browser.find_by_value('Log in').first.click()
            # NOTE(review): wait_script is not defined in this file — it is
            # presumably provided by a subclass or mixin; confirm.
            self.wait_script()
    def click(self, selector):
        """Click an element by css selector."""
        self.browser.find_by_css(selector).first.click()
    def enter_text(self, selector, text):
        """Enter text in an element by css selector."""
        # clear the field first, then type so key events fire
        self.browser.find_by_css(selector).first.value = ''
        self.browser.find_by_css(selector).first.type(text)
    def assert_not_visible(self, selector):
        """Assert an element is not visible by css selector."""
        # passes when the element is absent entirely or present but hidden
        e = self.browser.find_by_css(selector)
        assert not e or e.first.visible is False
    def assert_visible(self, selector):
        """Assert an element is visible by css selector."""
        e = self.browser.find_by_css(selector).first
        assert e.visible is True
class AdminMixin(object):
    """Mixin for tests that should happen in ModelAdmin."""
    def get_modeladmin_url(self, action, **kwargs):
        """Return a modeladmin url for self.model and the given action."""
        return reverse('admin:%s_%s_%s' % (
            self.model._meta.app_label,
            self.model._meta.model_name,
            action
        ), kwargs=kwargs)
    def fill_name(self):
        """Fill the name input with a value derived from the test id.

        The two halves of the id are swapped so the filled value is
        related to, but different from, the id itself.
        """
        i = self.id()
        # bug fix: int(len(i)) was the full length, which made
        # not_id == i; use half the length so the halves actually swap
        half = len(i) // 2
        not_id = i[half:] + i[:half]
        self.browser.fill('name', not_id)
class OptionMixin(object):
    """Mixin providing a fresh, uniquely named option for each test."""
    def create_option(self):
        """Create and return a uniquely named instance of self.model."""
        name = six.text_type(uuid.uuid1())
        # Django < 1.10: support for the name to be changed through a
        # popup in the admin requires stripping the dashes.
        if VERSION < (1, 10):
            name = name.replace('-', '')
        option = self.model.objects.get_or_create(name=name)[0]
        return option
class ContentTypeOptionMixin(OptionMixin):
    """Same as OptionMixin, but also resolves the option's content type."""
    def create_option(self):
        """Return option, content type."""
        instance = super(ContentTypeOptionMixin, self).create_option()
        return instance, ContentType.objects.get_for_model(instance)
|
shubhamdipt/django-autocomplete-light
|
src/dal/test/case.py
|
Python
|
mit
| 3,484
|
[
"VisIt"
] |
e639991e7b98ba1b97a9b57447a3901bc7a82343fb849794271ed3f4f8a074c1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.