text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
import warnings
from chempy.units import allclose
from ..water_density_tanaka_2001 import water_density
def test_water_density():
warnings.filterwarnings("error")
assert abs(water_density(273.15 + 0) - 999.8395) < 0.004
assert abs(water_density(273.15 + 4) - 999.9720) < 0.003
assert abs(water_density(273.15 + 10) - 999.7026) < 0.0003
assert abs(water_density(273.15 + 15) - 999.1026) < 0.0001
assert abs(water_density(273.15 + 20) - 998.2071) < 0.0005
assert abs(water_density(273.15 + 22) - 997.7735) < 0.0007
assert abs(water_density(273.15 + 25) - 997.0479) < 0.0009
assert abs(water_density(273.15 + 30) - 995.6502) < 0.0016
assert abs(water_density(273.15 + 40) - 992.2) < 0.02
try:
water_density(1)
except UserWarning:
pass # good warning raised
else:
raise
warnings.resetwarnings()
try:
import quantities as pq
import numpy as np
unit = pq.kg / pq.m ** 3
assert allclose(
water_density(298.15 * pq.K, units=pq),
997.047021671824 * unit,
atol=1e-8 * unit,
)
assert allclose(
water_density(np.linspace(297, 299) * pq.K, units=pq),
997 * unit,
rtol=1e-3,
atol=1e-3 * unit,
)
except ImportError:
pass
| bjodah/aqchem | chempy/properties/tests/test_water_density_tanaka_2001.py | Python | bsd-2-clause | 1,349 | [
"ChemPy"
] | a3fb1bdb0695fdf364f98b709fb17c0a03d10b38de618c0f3d223ea9d6fccbb2 |
"""Collection of useful data transforms."""
# Imports
import numpy as np
from McNeuron import Neuron
import scipy
import scipy.linalg # SciPy Linear Algebra Library
from numpy.linalg import inv
def get_leaves(nodes, parents):
"""
Compute the list of leaf nodes.
Parameters
----------
nodes: list
list of all nodes in the tree
parents: list
list of parents for each node
Returns
-------
leaves: list
sorted list of leaf nodes
"""
leaves = np.sort(list(set(nodes) - set(parents)))
return leaves
def encode_prufer(parents, verbose=0):
"""
Convert the parents sequence to a prufer sequence.
Parameters
----------
parents: list
list of parents for each node
verbose: bool
default is False
Returns
-------
prufer: list
corresponding prufer sequence
"""
n_nodes = len(parents)
nodes = range(n_nodes)
prufer = list()
for n in range(n_nodes - 2):
# Recalculate all the leaves
leaves = get_leaves(nodes, parents)
if verbose:
print 'leaves', leaves
# Add the parent of the lowest numbered leaf to the sequence
leaf_idx = np.where(nodes == leaves[0])[0][0]
prufer.append(parents[leaf_idx])
if verbose:
print 'prufer', prufer
# Remove the lowest numbered leaf and its corresponding parent
del nodes[leaf_idx]
del parents[leaf_idx]
if verbose:
print 'nodes', nodes
print 'parents', parents
print 60*'-'
return prufer
def decode_prufer(prufer, verbose=0):
"""
Convert the prufer sequence to a parents sequence.
Parameters
----------
prufer: list
prufer sequence
verbose: bool
default is False
Returns
-------
parents: list
corresponding list of parents for each node
"""
n_nodes = len(prufer) + 2
n_prufer = len(prufer)
nodes = range(n_nodes)
parents = -1 * np.ones(n_nodes)
for n in range(n_prufer):
if verbose:
print nodes
print prufer
leaves = list(get_leaves(nodes, prufer))
k = leaves[0]
j = prufer[0]
if k == 0:
k = leaves[1]
if verbose:
print k, j
parents[k] = j
leaf_idx = np.where(nodes == k)[0][0]
del nodes[leaf_idx]
del prufer[0]
if verbose:
print 60*'-'
parents[nodes[1]] = nodes[0]
return list(parents.astype(int))
def reordering_prufer(parents, locations):
"""
Reorder a given parents sequence.
Parent labels < children labels.
Parameters
----------
parents: numpy array
sequence of parents indices
starts with -1
locations: numpy array
n - 1 x 3
Returns
-------
parents_reordered: numpy array
sequence of parents indices
locations_reordered: numpy array
n - 1 x 3
"""
length = len(parents)
# Construct the adjacency matrix
adjacency = np.zeros([length, length])
adjacency[parents[1:], range(1, length)] = 1
# Discover the permutation with Schur decomposition
full_adjacency = np.linalg.inv(np.eye(length) - adjacency)
full_adjacency_permuted, permutation_matrix = \
scipy.linalg.schur(full_adjacency)
# Reorder the parents
parents_reordered = \
np.argmax(np.eye(length) - np.linalg.inv(full_adjacency_permuted),
axis=0)
parents_reordered[0] = -1
# Reorder the locations
locations = np.append([[0., 0., 0.]], locations, axis=0)
locations_reordered = np.dot(permutation_matrix, locations)
return parents_reordered, locations_reordered[1:, :]
def swc_to_neuron(matrix):
"""
Return the Neuron object from swc matrix.
Parameters
----------
matrix: numpy array
numpy array of the size n_nodes*7.
Return
------
Neuron: Neuron
a neuron obj with the given swc format.
"""
return Neuron(file_format='Matrix of swc', input_file=matrix)
def downsample_neuron(neuron,
method='random',
number=30):
"""
Downsampling neuron with different methods.
Parameters
----------
neuron: Neuron
given neuron to subsample.
number: int
the number of subsamling.
method: str
the methods to subsample. It can be: 'random', 'regularize','prune',
'strighten', 'strighten-prune'.
Return
------
Neuron: Neuron
a subsampled neuron with given number of nodes.
"""
if(method == 'random'):
return subsample.random_subsample(neuron, number)
def get_data(neuron_database, method, subsampling_numbers):
"""
Preparing data for the learning.
Parameters
----------
neuron_database: list
the elements of the list are Neuron obj.
method: str
the method to subsample.
subsampling_numbers: array of int
The range of number to subsample.
Returns
-------
data: dic
a dic of two classes: 'morphology' and 'geometry'.
'geometry' is a list of size sampling_division. The i-th element of the
list is an array of size (datasize* n_nodes - 1*3).
'morphology' is a list of size sampling_division. The i-th element of
the list is an array of size (datasize* n_nodes* n_nodes -2).
"""
l = len(neuron_database)
morph = np.zeros([l, subsampling_numbers - 2])
geo = np.zeros([l, subsampling_numbers - 1, 3])
data = dict()
for i in range(l):
sub_neuron = downsample_neuron(neuron=neuron_database[i],
method=method,
number=subsampling_numbers)
par = sub_neuron.parent_index
par[0] = -1
morph[i, :] = encode_prufer(par.tolist())
geo[i, :, :] = sub_neuron.location[:, 1:].T
data['morphology'] = dict()
data['morphology']['n'+str(subsampling_numbers)] = morph
data['geometry'] = dict()
data['geometry']['n'+str(subsampling_numbers)] = geo
return data
def make_swc_from_prufer_and_locations(data):
# the prufer code and the location are given.
parents_code = np.array(decode_prufer(list(data['morphology'])))
location = data['geometry']
M = np.zeros([len(parents_code), 7])
M[:, 0] = np.arange(1, len(parents_code)+1)
M[0, 1] = 1
M[1:, 1] = 2
M[1:, 2:5] = location
parents_code[1:] = parents_code[1:] + 1
M[:, 6] = parents_code
return Neuron(file_format='Matrix of swc', input_file=M)
| RoozbehFarhoodi/McNeuron | data_transforms.py | Python | mit | 6,699 | [
"NEURON"
] | 17f363a2fff60b9350218f94554bf030c198f6716c95d9b49b23a9b0704c61a0 |
#
# Copyright (c) 2017 Brian J. Kidney
# Copyright (c) 2017 Jonathan Anderson
# All rights reserved.
#
# This software was developed by BAE Systems, the University of Cambridge
# Computer Laboratory, and Memorial University under DARPA/AFRL contract
# FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent Computing
# (TC) research program.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def is_simple_node(graph, node):
"""A node is "Simple" if none of the following is true
- it has multiple inputs (it joins chains together)
- it has no inputs (it's a root node)
- it has multiple outputs (it splits chains apart)
- it has no outputs (it's a leaf node)
Keyword arguments:
node -- A networkx DiGraph Node
"""
return graph.in_degree(node) == 1 and graph.out_degree(node) == 1
def simplified(graph):
"""Simplify a CallGraph by coalescing call chains and dropping
any unreferenced calls.
Keyword arguments:
graph -- A networkx DiGraph
"""
g = graph.full_copy()
for n in graph:
if is_simple_node(graph, n):
(pre,) = g.predecessors(n)
(suc,) = g.successors(n)
g.add_edge(pre, suc)
g.remove_node(n)
return g
| musec/py-cdg | cdg/simplify.py | Python | apache-2.0 | 1,742 | [
"Brian"
] | 1f6971467bf2a6c9ece0111647bbe195a2a1b0ad34bbe646bcf4cbdc71c80d72 |
import json
from coalib.bearlib.abstractions.Linter import linter
from coalib.bears.requirements.NpmRequirement import NpmRequirement
def bool_or_str(value):
try:
return bool(value)
except:
return str(value)
def bool_or_int(value):
try:
return bool(value)
except:
return int(value)
@linter(executable='jshint',
output_format='regex',
output_regex=r'.+?: line (?P<line>\d+), col (?P<column>\d+), '
r'(?P<message>.+) \((?P<severity>[EWI])\d+\)')
class JSHintBear:
"""
Detect errors and potential problems in JavaScript code and to enforce
appropriate coding conventions. For example, problems like syntax errors,
bugs due to implicit type conversion, leaking variables and much more
can be detected.
For more information on the analysis visit <http://jshint.com/>
"""
LANGUAGES = {"JavaScript"}
REQUIREMENTS = {NpmRequirement('jshint', '2')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Formatting', 'Syntax', 'Complexity', 'Unused Code'}
@staticmethod
def generate_config(filename, file,
allow_bitwise_operators: bool=False,
allow_prototype_overwrite: bool=False,
force_braces: bool=True,
allow_type_coercion: bool=False,
allow_future_identifiers: bool=True,
allow_typeof: bool=True,
allow_filter_in_forin: bool=True,
allow_funcscope: bool=False,
allow_iterator_property: bool=False,
allow_argument_caller_and_callee: bool=False,
allow_comma_operator: bool=True,
allow_non_breaking_whitespace: bool=False,
allow_constructor_functions: bool=True,
allow_grouping_operator: bool=True,
allow_var_statement: bool=True,
allow_missing_semicolon: bool=False,
allow_debugger: bool=False,
allow_assignment_comparisions: bool=False,
allow_eval: bool=False,
allow_global_strict: bool=False,
allow_increment: bool=False,
allow_proto: bool=False,
allow_scripturls: bool=False,
allow_singleton: bool=False,
allow_this_statements: bool=False,
allow_with_statements: bool=False,
use_mozilla_extension: bool=False,
allow_noyield: bool=False,
allow_eqnull: bool=False,
allow_last_semicolon: bool=False,
allow_func_in_loop: bool=False,
allow_expr_in_assignments: bool=False,
use_es6_syntax: bool=False,
use_es3_array: bool=False,
environment_mootools: bool=False,
environment_couch: bool=False,
environment_jasmine: bool=False,
environment_jquery: bool=False,
environment_node: bool=False,
environment_qunit: bool=False,
environment_rhino: bool=False,
environment_shelljs: bool=False,
environment_prototypejs: bool=False,
environment_yui: bool=False,
environment_mocha: bool=True,
environment_module: bool=False,
environment_wsh: bool=False,
environment_worker: bool=False,
environment_nonstandard: bool=False,
environment_browser: bool=True,
environment_browserify: bool=False,
environment_devel: bool=True,
environment_dojo: bool=False,
environment_typed: bool=False,
environment_phantom: bool=False,
max_statements: bool_or_int=False,
max_depth: bool_or_int=False,
max_parameters: bool_or_int=False,
cyclomatic_complexity: bool_or_int=False,
allow_variable_shadowing: bool_or_str=False,
allow_unused_variables: bool_or_str=False,
allow_latedef: bool_or_str=False,
es_version: int=5,
jshint_config: str=""):
"""
:param allow_bitwise_operators:
Allows the use of bitwise operators.
:param allow_prototype_overwrite:
This options allows overwriting prototypes of native objects such
as ``Array``.
:param force_braces:
This option requires you to always put curly braces around blocks
in loops and conditionals.
:param allow_type_coercion:
This options allows the use of ``==`` and ``!=``.
:param allow_future_identifiers:
This option allows the use of identifiers which are defined in
future versions of JavaScript.
:param allow_typeof:
This option enables warnings about invalid ``typeof`` operator
values.
:param allow_filter_in_forin:
This option requires all ``for in`` loops to filter object's items.
:param allow_iterator_property:
This option suppresses warnings about the ``__iterator__``
property.
:param allow_funcscope:
This option suppresses warnings about declaring variables inside of
control structures while accessing them later from outside.
:param allow_argument_caller_and_callee:
This option allows the use of ``arguments.caller`` and
``arguments.callee``.
:param allow_comma_operator:
This option allows the use of the comma operator.
:param allow_non_breaking_whitespace:
Allows "non-breaking whitespace characters".
:param allow_constructor_functions:
Allows the use of constructor functions.
:param allow_grouping_operator:
This option allows the use of the grouping operator when it is
not strictly required.
:param allow_var_statement:
Allows the use of the ``var`` statement while declaring a variable.
Should use ``let`` or ``const`` while it is set to ``False``.
:param allow_missing_semicolon:
This option suppresses warnings about missing semicolons.
:param allow_debugger:
This option suppresses warnings about the ``debugger`` statements.
:param allow_assignment_comparisions:
This option suppresses warnings about the use of assignments in
cases where comparisons are expected.
:param allow_eval:
This options suppresses warnings about the use of ``eval``
function.
:param allow_global_strict:
This option suppresses warnings about the use of global strict
mode.
:param allow_increment:
This option suppresses warnings about the use of unary increment
and decrement operators.
:param allow_proto:
This option suppresses warnings about the ``__proto__`` property.
:param allow_scripturls:
This option suppresses warnings about the use of script-targeted
URLs.
:param allow_singleton:
This option suppresses warnings about constructions like
``new function () { ... }`` and ``new Object;`` sometimes used to
produce singletons.
:param allow_this_statements:
This option suppresses warnings about possible strict violations
when the code is running in strict mode and ``this`` is used in a
non-constructor function.
:param allow_with_statements:
This option suppresses warnings about the use of the ``with``
statement.
:param use_mozilla_extension:
This options tells JSHint that your code uses Mozilla JavaScript
extensions.
:param allow_noyield:
This option suppresses warnings about generator functions with no
``yield`` statement in them.
:param allow_eqnull:
This option suppresses warnings about ``== null`` comparisons.
:param allow_last_semicolon:
This option suppresses warnings about missing semicolons for the
last statement.
:param allow_func_in_loop:
This option suppresses warnings about functions inside of loops.
:param allow_expr_in_assignments:
This option suppresses warnings about the use of expressions where
normally assignments or function calls are expected.
:param use_es3_array:
This option tells JSHintBear ES3 array elision elements, or empty
elements are used.
:param use_es3_array:
This option tells JSHint ECMAScript 6 specific syntax is used.
:param environment_mootools:
This option defines globals exposed by the Mootools.
:param environment_couch:
This option defines globals exposed by CouchDB.
:param environment_jasmine:
This option defines globals exposed by Jasmine.
:param environment_jquery:
This option defines globals exposed by Jquery.
:param environment_node:
This option defines globals exposed by Node.
:param environment_qunit:
This option defines globals exposed by Qunit.
:param environment_rhino:
This option defines globals exposed when the code is running inside
rhino runtime environment.
:param environment_shelljs:
This option defines globals exposed by the ShellJS.
:param environment_prototypejs:
This option defines globals exposed by the Prototype.
:param environment_yui:
This option defines globals exposed by the YUI JavaScript
Framework.
:param environment_mocha:
This option defines globals exposed by the "BDD" and "TDD" UIs of
the Mocha unit testing framework.
:param environment_module:
This option informs JSHintBear that the input code describes an
ECMAScript 6 module.
:param environment_wsh:
This option defines globals available when the code is running as a
script for the Windows Script Host.
:param environment_worker:
This option defines globals available when the code is running
inside of a Web Worker.
:param environment_nonstandard:
This option defines non- standard but widely adopted globals such
as ``escape`` and ``unescape``.
:param environment_browser:
This option defines globals exposed by modern browsers.
:param environment_browserify:
This option defines globals available when using the Browserify.
:param environment_devel:
This option defines globals that are usually used for debugging:
``console``, ``alert``, etc.
:param environment_dojo:
This option defines globals exposed by the Dojo Toolkit.
:param environment_typed:
This option defines globals for typed array constructors.
:param environment_phantom:
This option defines globals available when your core is running
inside of the PhantomJS runtime environment.
:param max_statements:
Maximum number of statements allowed per function.
:param max_depth:
This option lets you control how nested do you want your blocks to
be.
:param max_parameters:
Maximum number of parameters allowed per function.
:param cyclomatic_complexity:
Maximum cyclomatic complexity in the code.
:param allow_variable_shadowing:
This option suppresses warnings about variable shadowing i.e.
declaring a variable that had been already declared somewhere in
the outer scope.
- "inner" - check for variables defined in the same scope only
- "outer" - check for variables defined in outer scopes as well
- False - same as inner
- True - allow variable shadowing
:param allow_unused_variables:
Allows when variables are defined but never used. This can be set
to ""vars"" to only check for variables, not function parameters,
or ""strict"" to check all variables and parameters.
:param allow_latedef:
This option allows the use of a variable before it was defined.
Setting this option to "nofunc" will allow function declarations to
be ignored.
:param es_version:
This option is used to specify the ECMAScript version to which the
code must adhere to.
"""
if not jshint_config:
options = {"bitwise": allow_bitwise_operators,
"freeze": not allow_prototype_overwrite,
"curly": force_braces,
"eqeqeq": not allow_type_coercion,
"futurehostile": not allow_future_identifiers,
"notypeof": not allow_typeof,
"forin": allow_filter_in_forin,
"funcscope": allow_funcscope,
"iterator": allow_iterator_property,
"noarg": not allow_argument_caller_and_callee,
"nocomma": not allow_comma_operator,
"nonbsp": not allow_non_breaking_whitespace,
"nonew": not allow_constructor_functions,
"undef": True,
"singleGroups": not allow_grouping_operator,
"varstmt": not allow_var_statement,
"asi": allow_missing_semicolon,
"debug": allow_debugger,
"boss": allow_assignment_comparisions,
"evil": allow_eval,
"globalstrict": allow_global_strict,
"plusplus": allow_increment,
"proto": allow_proto,
"scripturl": allow_scripturls,
"supernew": allow_singleton,
"validthis": allow_this_statements,
"withstmt": allow_with_statements,
"moz": use_mozilla_extension,
"noyield": allow_noyield,
"eqnull": allow_eqnull,
"lastsemic": allow_last_semicolon,
"loopfunc": allow_func_in_loop,
"expr": allow_expr_in_assignments,
"esnext": use_es6_syntax,
"elision": use_es3_array,
"mootools": environment_mootools,
"couch": environment_couch,
"jasmine": environment_jasmine,
"jquery": environment_jquery,
"node": environment_node,
"qunit": environment_qunit,
"rhino": environment_rhino,
"shelljs": environment_shelljs,
"prototypejs": environment_prototypejs,
"yui": environment_yui,
"mocha": environment_mocha,
"module": environment_module,
"wsh": environment_wsh,
"worker": environment_worker,
"nonstandard": environment_nonstandard,
"browser": environment_browser,
"browserify": environment_browserify,
"devel": environment_devel,
"dojo": environment_dojo,
"typed": environment_typed,
"phantom": environment_phantom,
"maxerr": 99999,
"maxcomplexity": cyclomatic_complexity,
"maxdepth": max_depth,
"maxparams": max_parameters,
"maxstatements": max_statements,
"shadow": allow_variable_shadowing,
"unused": not allow_unused_variables,
"latedef": allow_latedef,
"esversion": es_version}
return json.dumps(options)
else:
return None
@staticmethod
def create_arguments(filename, file, config_file, jshint_config: str=""):
"""
:param jshint_config:
The location of the jshintrc config file. If this option is present
all the above options are not used. Instead the .jshintrc file is
used as the configuration file.
"""
args = ('--verbose', filename, '--config')
if jshint_config:
args += (jshint_config,)
else:
args += (config_file,)
return args
| mr-karan/coala-bears | bears/js/JSHintBear.py | Python | agpl-3.0 | 17,767 | [
"VisIt"
] | b5c0690dddb5e5039231c282325e971a9c38cc20575cbbc87c09434a930aa8d7 |
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import time
import mock
from mock import ANY
from nose.tools import eq_
from datetime import datetime, timedelta
from webtest import TestApp
import bodhi.tests.functional.base
from bodhi import main
from bodhi.config import config
from bodhi.models import (
Build,
DBSession,
Group,
Package,
Update,
User,
UpdateStatus,
UpdateRequest,
Release,
)
YEAR = time.localtime().tm_year
mock_valid_requirements = {
'target': 'bodhi.validators._get_valid_requirements',
'return_value': ['rpmlint', 'upgradepath'],
}
mock_taskotron_results = {
'target': 'bodhi.util.taskotron_results',
'return_value': [{
"outcome": "PASSED",
"result_data": {},
"testcase": { "name": "rpmlint", }
}],
}
mock_failed_taskotron_results = {
'target': 'bodhi.util.taskotron_results',
'return_value': [{
"outcome": "FAILED",
"result_data": {},
"testcase": { "name": "rpmlint", }
}],
}
mock_absent_taskotron_results = {
'target': 'bodhi.util.taskotron_results',
'return_value': [],
}
class TestUpdatesService(bodhi.tests.functional.base.BaseWSGICase):
def test_home_html(self):
resp = self.app.get('/', headers={'Accept': 'text/html'})
self.assertIn('Fedora Updates System', resp)
self.assertIn('©', resp)
@mock.patch(**mock_valid_requirements)
def test_invalid_build_name(self, *args):
res = self.app.post_json('/updates/', self.get_update(u'bodhi-2.0-1.fc17,invalidbuild-1.0'),
status=400)
assert 'Build not in name-version-release format' in res, res
@mock.patch(**mock_valid_requirements)
def test_empty_build_name(self, *args):
res = self.app.post_json('/updates/', self.get_update([u'']), status=400)
self.assertEquals(res.json_body['errors'][0]['name'], 'builds.0')
self.assertEquals(res.json_body['errors'][0]['description'], 'Required')
@mock.patch(**mock_valid_requirements)
def test_fail_on_edit_with_empty_build_list(self, *args):
update = self.get_update()
update['edited'] = update['builds'] # the update title..
update['builds'] = []
res = self.app.post_json('/updates/', update, status=400)
self.assertEquals(len(res.json_body['errors']), 2)
self.assertEquals(res.json_body['errors'][0]['name'], 'builds')
self.assertEquals(
res.json_body['errors'][0]['description'],
'You may not specify an empty list of builds.')
self.assertEquals(res.json_body['errors'][1]['name'], 'builds')
self.assertEquals(
res.json_body['errors'][1]['description'],
'ACL validation mechanism was unable to determine ACLs.')
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_unicode_description(self, publish, *args):
update = self.get_update('bodhi-2.0.0-2.fc17')
update['notes'] = u'This is wünderfül'
r = self.app.post_json('/updates/', update)
up = r.json_body
self.assertEquals(up['title'], u'bodhi-2.0.0-2.fc17')
self.assertEquals(up['notes'], u'This is wünderfül')
self.assertIsNotNone(up['date_submitted'])
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
# FIXME: make it easy to tweak the tag of an update in our buildsys during unit tests
#def test_invalid_tag(self):
# session = DBSession()
# map(session.delete, session.query(Update).all())
# map(session.delete, session.query(Build).all())
# num = session.query(Update).count()
# assert num == 0, num
# res = self.app.post_json('/updates/', self.get_update(u'bodhi-1.0-1.fc17'),
# status=400)
# assert 'Invalid tag' in res, res
@mock.patch(**mock_valid_requirements)
def test_duplicate_build(self, *args):
res = self.app.post_json('/updates/',
self.get_update([u'bodhi-2.0-2.fc17', u'bodhi-2.0-2.fc17']),
status=400)
assert 'Duplicate builds' in res, res
@mock.patch(**mock_valid_requirements)
def test_multiple_builds_of_same_package(self, *args):
res = self.app.post_json('/updates/', self.get_update([u'bodhi-2.0-2.fc17',
u'bodhi-2.0-3.fc17']),
status=400)
assert 'Multiple bodhi builds specified' in res, res
@mock.patch(**mock_valid_requirements)
def test_invalid_autokarma(self, *args):
res = self.app.post_json('/updates/', self.get_update(stable_karma=-1),
status=400)
assert '-1 is less than minimum value 1' in res, res
res = self.app.post_json('/updates/', self.get_update(unstable_karma=1),
status=400)
assert '1 is greater than maximum value -1' in res, res
@mock.patch(**mock_valid_requirements)
def test_duplicate_update(self, *args):
res = self.app.post_json('/updates/', self.get_update(u'bodhi-2.0-1.fc17'),
status=400)
assert 'Update for bodhi-2.0-1.fc17 already exists' in res, res
@mock.patch(**mock_valid_requirements)
def test_invalid_requirements(self, *args):
update = self.get_update()
update['requirements'] = 'rpmlint silly-dilly'
res = self.app.post_json('/updates/', update, status=400)
assert 'Invalid requirement' in res, res
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_no_privs(self, publish, *args):
session = DBSession()
user = User(name=u'bodhi')
session.add(user)
session.flush()
app = TestApp(main({}, testing=u'bodhi', **self.app_settings))
res = app.post_json('/updates/', self.get_update(u'bodhi-2.1-1.fc17'),
status=400)
assert 'bodhi does not have commit access to bodhi' in res, res
self.assertEquals(publish.call_args_list, [])
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_provenpackager_privs(self, publish, *args):
"Ensure provenpackagers can push updates for any package"
session = DBSession()
user = User(name=u'bodhi')
session.add(user)
session.flush()
group = session.query(Group).filter_by(name=u'provenpackager').one()
user.groups.append(group)
app = TestApp(main({}, testing=u'bodhi', **self.app_settings))
update = self.get_update(u'bodhi-2.1-1.fc17')
update['csrf_token'] = app.get('/csrf').json_body['csrf_token']
res = app.post_json('/updates/', update)
assert 'bodhi does not have commit access to bodhi' not in res, res
build = session.query(Build).filter_by(nvr=u'bodhi-2.1-1.fc17').one()
assert build.update is not None
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_provenpackager_edit_anything(self, publish, *args):
"Ensure provenpackagers can edit updates for any package"
nvr = u'bodhi-2.1-1.fc17'
session = DBSession()
user = User(name=u'lloyd')
session.add(user)
session.add(User(name=u'ralph')) # Add a non proventester
session.flush()
group = session.query(Group).filter_by(name=u'provenpackager').one()
user.groups.append(group)
app = TestApp(main({}, testing=u'ralph', **self.app_settings))
up_data = self.get_update(nvr)
up_data['csrf_token'] = app.get('/csrf').json_body['csrf_token']
res = app.post_json('/updates/', up_data)
assert 'does not have commit access to bodhi' not in res, res
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
app = TestApp(main({}, testing=u'lloyd', **self.app_settings))
update = self.get_update(nvr)
update['csrf_token'] = app.get('/csrf').json_body['csrf_token']
update['notes'] = u'testing!!!'
update['edited'] = nvr
res = app.post_json('/updates/', update)
assert 'bodhi does not have commit access to bodhi' not in res, res
build = session.query(Build).filter_by(nvr=nvr).one()
assert build.update is not None
self.assertEquals(build.update.notes, u'testing!!!')
#publish.assert_called_once_with(
# topic='update.request.testing', msg=mock.ANY)
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_provenpackager_request_privs(self, publish, *args):
"Ensure provenpackagers can change the request for any update"
nvr = u'bodhi-2.1-1.fc17'
session = DBSession()
user = User(name=u'bob')
session.add(user)
session.add(User(name=u'ralph')) # Add a non proventester
session.flush()
group = session.query(Group).filter_by(name=u'provenpackager').one()
user.groups.append(group)
app = TestApp(main({}, testing=u'ralph', **self.app_settings))
up_data = self.get_update(nvr)
up_data['csrf_token'] = app.get('/csrf').json_body['csrf_token']
res = app.post_json('/updates/', up_data)
assert 'does not have commit access to bodhi' not in res, res
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
build = session.query(Build).filter_by(nvr=nvr).one()
eq_(build.update.request, UpdateRequest.testing)
# Try and submit the update to stable as a non-provenpackager
app = TestApp(main({}, testing=u'ralph', **self.app_settings))
post_data = dict(update=nvr, request='stable',
csrf_token=app.get('/csrf').json_body['csrf_token'])
res = app.post_json('/updates/%s/request' % nvr, post_data, status=400)
# Ensure we can't push it until it meets the requirements
eq_(res.json_body['status'], 'error')
eq_(res.json_body['errors'][0]['description'], config.get('not_yet_tested_msg'))
update = session.query(Update).filter_by(title=nvr).one()
eq_(update.stable_karma, 3)
eq_(update.locked, False)
eq_(update.request, UpdateRequest.testing)
# Pretend it was pushed to testing
update.request = None
update.status = UpdateStatus.testing
update.pushed = True
session.flush()
eq_(update.karma, 0)
update.comment(u"foo", 1, u'foo')
update = session.query(Update).filter_by(title=nvr).one()
eq_(update.karma, 1)
eq_(update.request, None)
update.comment(u"foo", 1, u'bar')
update = session.query(Update).filter_by(title=nvr).one()
eq_(update.karma, 2)
eq_(update.request, None)
update.comment(u"foo", 1, u'biz')
update = session.query(Update).filter_by(title=nvr).one()
eq_(update.karma, 3)
eq_(update.request, UpdateRequest.stable)
# Set it back to testing
update.request = UpdateRequest.testing
# Try and submit the update to stable as a proventester
app = TestApp(main({}, testing=u'bob', **self.app_settings))
res = app.post_json('/updates/%s/request' % nvr,
dict(update=nvr, request='stable',
csrf_token=app.get('/csrf').json_body['csrf_token']),
status=200)
eq_(res.json_body['update']['request'], 'stable')
app = TestApp(main({}, testing=u'bob', **self.app_settings))
res = app.post_json('/updates/%s/request' % nvr,
dict(update=nvr, request='obsolete',
csrf_token=app.get('/csrf').json_body['csrf_token']),
status=200)
eq_(res.json_body['update']['request'], None)
eq_(update.request, None)
eq_(update.status, UpdateStatus.obsolete)
@mock.patch(**mock_valid_requirements)
def test_pkgdb_outage(self, *args):
    """An unreachable pkgdb URL should yield a 400 with a helpful message."""
    overrides = self.app_settings.copy()
    overrides['acl_system'] = 'pkgdb'
    overrides['pkgdb_url'] = 'invalidurl'
    testapp = TestApp(main({}, testing=u'guest', **overrides))
    payload = self.get_update(u'bodhi-2.0-2.fc17')
    payload['csrf_token'] = testapp.get('/csrf').json_body['csrf_token']
    response = testapp.post_json('/updates/', payload, status=400)
    assert "Unable to access the Package Database" in response, response
@mock.patch(**mock_valid_requirements)
def test_invalid_acl_system(self, *args):
    """An unknown ACL backend grants nobody commit access."""
    overrides = self.app_settings.copy()
    overrides['acl_system'] = 'null'
    testapp = TestApp(main({}, testing=u'guest', **overrides))
    response = testapp.post_json(
        '/updates/', self.get_update(u'bodhi-2.0-2.fc17'), status=400)
    assert "guest does not have commit access to bodhi" in response, response
def test_404(self):
    """Unknown paths return HTTP 404."""
    self.app.get('/a', status=404)
def test_get_single_update(self):
    """A single update is retrievable as JSON by its title."""
    response = self.app.get('/updates/bodhi-2.0-1.fc17')
    self.assertIn('application/json', response.headers['Content-Type'])
    self.assertEquals(response.json_body['update']['title'],
                      'bodhi-2.0-1.fc17')
def test_get_single_update_jsonp(self):
    """Requesting JavaScript yields a JSONP-wrapped single update."""
    response = self.app.get('/updates/bodhi-2.0-1.fc17',
                            {'callback': 'callback'},
                            headers={'Accept': 'application/javascript'})
    self.assertIn('application/javascript',
                  response.headers['Content-Type'])
    for fragment in ('callback', 'bodhi-2.0-1.fc17'):
        self.assertIn(fragment, response)
def test_get_single_update_rss(self):
    """Atom is not an acceptable representation of a single update (406)."""
    self.app.get('/updates/bodhi-2.0-1.fc17', status=406,
                 headers={'Accept': 'application/atom+xml'})
def test_get_single_update_html(self):
    """A single update renders as HTML, including the page footer."""
    title = 'bodhi-2.0-1.fc17'
    response = self.app.get('/updates/%s' % title,
                            headers={'Accept': 'text/html'})
    self.assertIn('text/html', response.headers['Content-Type'])
    for fragment in (title, '©'):
        self.assertIn(fragment, response)
def test_list_updates(self):
    """The update list contains the fixture update with all its fields."""
    body = self.app.get('/updates/').json_body
    self.assertEquals(len(body['updates']), 1)
    up = body['updates'][0]
    # Nested objects checked individually; flat fields via a table.
    self.assertEquals(up['user']['name'], u'guest')
    self.assertEquals(up['release']['name'], u'F17')
    expected = {
        'title': u'bodhi-2.0-1.fc17',
        'status': u'pending',
        'request': u'testing',
        'submitter': u'guest',
        'type': u'bugfix',
        'severity': u'unspecified',
        'suggest': u'unspecified',
        'close_bugs': True,
        'notes': u'Useful details!',
        'date_submitted': u'1984-11-02 00:00:00',
        'date_modified': None,
        'date_pushed': None,
        'locked': False,
        'alias': u'FEDORA-%s-0001' % YEAR,
        'karma': 1,
    }
    for field, value in expected.items():
        self.assertEquals(up[field], value)
def test_list_updates_jsonp(self):
    """The update list supports JSONP output."""
    response = self.app.get('/updates/',
                            {'callback': 'callback'},
                            headers={'Accept': 'application/javascript'})
    self.assertIn('application/javascript',
                  response.headers['Content-Type'])
    for fragment in ('callback', 'bodhi-2.0-1.fc17'):
        self.assertIn(fragment, response)
def test_list_updates_rss(self):
    """Asking for Atom serves the update list as an RSS feed."""
    response = self.app.get('/updates/',
                            headers={'Accept': 'application/atom+xml'})
    self.assertIn('application/rss+xml', response.headers['Content-Type'])
    self.assertIn('bodhi-2.0-1.fc17', response)
def test_list_updates_html(self):
    """The update list renders as HTML, including the page footer."""
    response = self.app.get('/updates/', headers={'Accept': 'text/html'})
    self.assertIn('text/html', response.headers['Content-Type'])
    for fragment in ('bodhi-2.0-1.fc17', '©'):
        self.assertIn(fragment, response)
def test_search_updates(self):
    """Substring search matches titles; non-matching terms find nothing."""
    hits = self.app.get('/updates/', {'like': 'odh'}).json_body
    self.assertEquals(len(hits['updates']), 1)
    self.assertEquals(hits['updates'][0]['title'], u'bodhi-2.0-1.fc17')
    misses = self.app.get('/updates/', {'like': 'wat'}).json_body
    self.assertEquals(len(misses['updates']), 0)
def test_list_updates_pagination(self):
    """Paging through one-row pages yields distinct updates."""
    # Create a second update so there are two pages to walk.
    self.test_new_update()
    first = self.app.get('/updates/',
                         {"rows_per_page": 1}).json_body
    self.assertEquals(len(first['updates']), 1)
    second = self.app.get('/updates/',
                          {"rows_per_page": 1, "page": 2}).json_body
    self.assertEquals(len(second['updates']), 1)
    self.assertNotEquals(first['updates'][0], second['updates'][0])
def test_list_updates_by_approved_since(self):
    """approved_since filters on the approval timestamp."""
    now = datetime.utcnow()
    date_arg = {"approved_since": now.strftime("%Y-%m-%d")}
    # With nothing approved, the filter matches no updates.
    body = self.app.get('/updates/', date_arg).json_body
    self.assertEquals(len(body['updates']), 0)
    # Mark the fixture update approved in the database.
    session = DBSession()
    session.query(Update).first().date_approved = now
    session.flush()
    # The same query now finds it.
    body = self.app.get('/updates/', date_arg).json_body
    self.assertEquals(len(body['updates']), 1)
    up = body['updates'][0]
    self.assertEquals(up['user']['name'], u'guest')
    self.assertEquals(up['release']['name'], u'F17')
    expected = {
        'title': u'bodhi-2.0-1.fc17',
        'status': u'pending',
        'request': u'testing',
        'type': u'bugfix',
        'severity': u'unspecified',
        'suggest': u'unspecified',
        'close_bugs': True,
        'notes': u'Useful details!',
        'date_submitted': u'1984-11-02 00:00:00',
        'date_approved': now.strftime("%Y-%m-%d %H:%M:%S"),
        'date_pushed': None,
        'locked': False,
        'alias': u'FEDORA-%s-0001' % YEAR,
        'karma': 1,
    }
    for field, value in expected.items():
        self.assertEquals(up[field], value)
    self.assertEquals(len(up['bugs']), 1)
    self.assertEquals(up['bugs'][0]['bug_id'], 12345)
    # https://github.com/fedora-infra/bodhi/issues/270
    self.assertEquals(len(up['test_cases']), 1)
    self.assertEquals(up['test_cases'][0]['name'], u'Wat')
def test_list_updates_by_invalid_approved_since(self):
    """A malformed approved_since value is rejected with 400."""
    body = self.app.get('/updates/', {"approved_since": "forever"},
                        status=400).json_body
    self.assertEquals(len(body.get('updates', [])), 0)
    error = body['errors'][0]
    self.assertEquals(error['name'], 'approved_since')
    self.assertEquals(error['description'], 'Invalid date')
def test_list_updates_by_bugs(self):
    """Filtering on a bug number returns the associated update."""
    body = self.app.get('/updates/', {"bugs": '12345'}).json_body
    self.assertEquals(len(body['updates']), 1)
    up = body['updates'][0]
    self.assertEquals(up['user']['name'], u'guest')
    self.assertEquals(up['release']['name'], u'F17')
    expected = {
        'title': u'bodhi-2.0-1.fc17',
        'status': u'pending',
        'request': u'testing',
        'type': u'bugfix',
        'severity': u'unspecified',
        'suggest': u'unspecified',
        'close_bugs': True,
        'notes': u'Useful details!',
        'date_submitted': u'1984-11-02 00:00:00',
        'date_modified': None,
        'date_approved': None,
        'date_pushed': None,
        'locked': False,
        'alias': u'FEDORA-%s-0001' % YEAR,
        'karma': 1,
    }
    for field, value in expected.items():
        self.assertEquals(up[field], value)
    self.assertEquals(len(up['bugs']), 1)
    self.assertEquals(up['bugs'][0]['bug_id'], 12345)
def test_list_updates_by_invalid_bug(self):
    """A non-numeric bug ID is rejected with 400."""
    body = self.app.get('/updates/', {"bugs": "cockroaches"},
                        status=400).json_body
    self.assertEquals(len(body.get('updates', [])), 0)
    error = body['errors'][0]
    self.assertEquals(error['name'], 'bugs')
    self.assertEquals(error['description'],
                      "Invalid bug ID specified: [u'cockroaches']")
def test_list_updates_by_unexisting_bug(self):
    """A valid but unknown bug number matches nothing."""
    body = self.app.get('/updates/', {"bugs": "19850110"}).json_body
    self.assertEquals(len(body['updates']), 0)
def test_list_updates_by_critpath(self):
    """critpath=false matches the non-critpath fixture update."""
    body = self.app.get('/updates/', {"critpath": "false"}).json_body
    self.assertEquals(len(body['updates']), 1)
    up = body['updates'][0]
    self.assertEquals(up['user']['name'], u'guest')
    self.assertEquals(up['release']['name'], u'F17')
    expected = {
        'title': u'bodhi-2.0-1.fc17',
        'status': u'pending',
        'request': u'testing',
        'type': u'bugfix',
        'severity': u'unspecified',
        'suggest': u'unspecified',
        'close_bugs': True,
        'notes': u'Useful details!',
        'date_submitted': u'1984-11-02 00:00:00',
        'date_modified': None,
        'date_approved': None,
        'date_pushed': None,
        'locked': False,
        'alias': u'FEDORA-%s-0001' % YEAR,
        'karma': 1,
    }
    for field, value in expected.items():
        self.assertEquals(up[field], value)
def test_list_updates_by_invalid_critpath(self):
    """A non-boolean critpath value is rejected with 400."""
    body = self.app.get('/updates/', {"critpath": "lalala"},
                        status=400).json_body
    self.assertEquals(len(body.get('updates', [])), 0)
    error = body['errors'][0]
    self.assertEquals(error['name'], 'critpath')
    self.assertEquals(
        error['description'],
        '"lalala" is neither in (\'false\', \'0\') nor in (\'true\', \'1\')')
def test_list_updates_by_cves(self):
    """Filtering on a CVE ID returns the associated update."""
    body = self.app.get("/updates/", {"cves": "CVE-1985-0110"}).json_body
    self.assertEquals(len(body['updates']), 1)
    up = body['updates'][0]
    self.assertEquals(up['user']['name'], u'guest')
    self.assertEquals(up['release']['name'], u'F17')
    expected = {
        'title': u'bodhi-2.0-1.fc17',
        'status': u'pending',
        'request': u'testing',
        'type': u'bugfix',
        'severity': u'unspecified',
        'suggest': u'unspecified',
        'close_bugs': True,
        'notes': u'Useful details!',
        'date_submitted': u'1984-11-02 00:00:00',
        'date_modified': None,
        'date_approved': None,
        'date_pushed': None,
        'locked': False,
        'alias': u'FEDORA-%s-0001' % YEAR,
        'karma': 1,
    }
    for field, value in expected.items():
        self.assertEquals(up[field], value)
    self.assertEquals(up['cves'][0]['cve_id'], "CVE-1985-0110")
def test_list_updates_by_unexisting_cve(self):
    """A well-formed but unknown CVE ID matches nothing."""
    body = self.app.get('/updates/', {"cves": "CVE-2013-1015"}).json_body
    self.assertEquals(len(body['updates']), 0)
def test_list_updates_by_invalid_cve(self):
    """A malformed CVE ID is rejected with 400."""
    body = self.app.get('/updates/', {"cves": "WTF-ZOMG-BBQ"},
                        status=400).json_body
    self.assertEquals(len(body.get('updates', [])), 0)
    error = body['errors'][0]
    self.assertEquals(error['name'], 'cves.0')
    self.assertEquals(error['description'],
                      '"WTF-ZOMG-BBQ" is not a valid CVE id')
def test_list_updates_by_date_submitted_invalid_date(self):
    """A non-ISO submitted_since date is rejected with 400."""
    body = self.app.get('/updates/', {"submitted_since": "11-01-1984"},
                        status=400).json_body
    self.assertEquals(len(body.get('updates', [])), 0)
    error = body['errors'][0]
    self.assertEquals(error['name'], 'submitted_since')
    self.assertEquals(error['description'], 'Invalid date')
def test_list_updates_by_date_submitted_future_date(self):
    """A submitted_since date in the future matches nothing."""
    tomorrow = (datetime.utcnow() +
                timedelta(days=1)).strftime("%Y-%m-%d")
    body = self.app.get('/updates/',
                        {"submitted_since": tomorrow}).json_body
    self.assertEquals(len(body['updates']), 0)
def test_list_updates_by_date_submitted_valid(self):
    """A submitted_since date before the fixture's matches it."""
    body = self.app.get('/updates/',
                        {"submitted_since": "1984-11-01"}).json_body
    self.assertEquals(len(body['updates']), 1)
    up = body['updates'][0]
    self.assertEquals(up['user']['name'], u'guest')
    self.assertEquals(up['release']['name'], u'F17')
    expected = {
        'title': u'bodhi-2.0-1.fc17',
        'status': u'pending',
        'request': u'testing',
        'type': u'bugfix',
        'severity': u'unspecified',
        'suggest': u'unspecified',
        'close_bugs': True,
        'notes': u'Useful details!',
        'date_submitted': u'1984-11-02 00:00:00',
        'date_modified': None,
        'date_approved': None,
        'date_pushed': None,
        'locked': False,
        'alias': u'FEDORA-%s-0001' % YEAR,
        'karma': 1,
    }
    for field, value in expected.items():
        self.assertEquals(up[field], value)
def test_list_updates_by_locked(self):
    """locked=false matches the unlocked fixture update."""
    body = self.app.get('/updates/', {"locked": "false"}).json_body
    self.assertEquals(len(body['updates']), 1)
    up = body['updates'][0]
    self.assertEquals(up['user']['name'], u'guest')
    self.assertEquals(up['release']['name'], u'F17')
    expected = {
        'title': u'bodhi-2.0-1.fc17',
        'status': u'pending',
        'request': u'testing',
        'type': u'bugfix',
        'severity': u'unspecified',
        'suggest': u'unspecified',
        'close_bugs': True,
        'notes': u'Useful details!',
        'date_submitted': u'1984-11-02 00:00:00',
        'date_modified': None,
        'date_approved': None,
        'date_pushed': None,
        'locked': False,
        'alias': u'FEDORA-%s-0001' % YEAR,
        'karma': 1,
    }
    for field, value in expected.items():
        self.assertEquals(up[field], value)
def test_list_updates_by_invalid_locked(self):
    """A non-boolean locked value is rejected with 400."""
    body = self.app.get('/updates/', {"locked": "maybe"},
                        status=400).json_body
    self.assertEquals(len(body.get('updates', [])), 0)
    error = body['errors'][0]
    self.assertEquals(error['name'], 'locked')
    self.assertEquals(
        error['description'],
        '"maybe" is neither in (\'false\', \'0\') nor in (\'true\', \'1\')')
def test_list_updates_by_modified_since(self):
    """modified_since filters on the modification timestamp."""
    now = datetime.utcnow()
    date_arg = {"modified_since": now.strftime("%Y-%m-%d")}
    # With nothing modified, the filter matches no updates.
    body = self.app.get('/updates/', date_arg).json_body
    self.assertEquals(len(body['updates']), 0)
    # Stamp the fixture update as modified now.
    session = DBSession()
    session.query(Update).first().date_modified = now
    session.flush()
    # The same query now finds it.
    body = self.app.get('/updates/', date_arg).json_body
    self.assertEquals(len(body['updates']), 1)
    up = body['updates'][0]
    self.assertEquals(up['user']['name'], u'guest')
    self.assertEquals(up['release']['name'], u'F17')
    expected = {
        'title': u'bodhi-2.0-1.fc17',
        'status': u'pending',
        'request': u'testing',
        'type': u'bugfix',
        'severity': u'unspecified',
        'suggest': u'unspecified',
        'close_bugs': True,
        'notes': u'Useful details!',
        'date_submitted': u'1984-11-02 00:00:00',
        'date_modified': now.strftime("%Y-%m-%d %H:%M:%S"),
        'date_approved': None,
        'date_pushed': None,
        'locked': False,
        'alias': u'FEDORA-%s-0001' % YEAR,
        'karma': 1,
    }
    for field, value in expected.items():
        self.assertEquals(up[field], value)
    self.assertEquals(len(up['bugs']), 1)
    self.assertEquals(up['bugs'][0]['bug_id'], 12345)
def test_list_updates_by_invalid_modified_since(self):
    """A malformed modified_since value is rejected with 400."""
    body = self.app.get('/updates/', {"modified_since": "the dawn of time"},
                        status=400).json_body
    self.assertEquals(len(body.get('updates', [])), 0)
    error = body['errors'][0]
    self.assertEquals(error['name'], 'modified_since')
    self.assertEquals(error['description'], 'Invalid date')
def test_list_updates_by_package(self):
    """Filtering by package name returns the matching update."""
    body = self.app.get('/updates/', {"packages": "bodhi"}).json_body
    self.assertEquals(len(body['updates']), 1)
    up = body['updates'][0]
    self.assertEquals(up['user']['name'], u'guest')
    self.assertEquals(up['release']['name'], u'F17')
    expected = {
        'title': u'bodhi-2.0-1.fc17',
        'status': u'pending',
        'request': u'testing',
        'type': u'bugfix',
        'severity': u'unspecified',
        'suggest': u'unspecified',
        'close_bugs': True,
        'notes': u'Useful details!',
        'date_submitted': u'1984-11-02 00:00:00',
        'date_modified': None,
        'date_approved': None,
        'date_pushed': None,
        'locked': False,
        'alias': u'FEDORA-%s-0001' % YEAR,
        'karma': 1,
    }
    for field, value in expected.items():
        self.assertEquals(up[field], value)
def test_list_updates_by_builds(self):
    """Filtering by build NVR matches exactly; wrong NVRs find nothing."""
    body = self.app.get('/updates/', {"builds": "bodhi-3.0-1.fc17"}).json_body
    self.assertEquals(len(body['updates']), 0)
    body = self.app.get('/updates/', {"builds": "bodhi-2.0-1.fc17"}).json_body
    self.assertEquals(len(body['updates']), 1)
    up = body['updates'][0]
    self.assertEquals(up['user']['name'], u'guest')
    self.assertEquals(up['release']['name'], u'F17')
    expected = {
        'title': u'bodhi-2.0-1.fc17',
        'status': u'pending',
        'request': u'testing',
        'type': u'bugfix',
        'severity': u'unspecified',
        'suggest': u'unspecified',
        'close_bugs': True,
        'notes': u'Useful details!',
        'date_submitted': u'1984-11-02 00:00:00',
        'date_modified': None,
        'date_approved': None,
        'date_pushed': None,
        'locked': False,
        'alias': u'FEDORA-%s-0001' % YEAR,
        'karma': 1,
    }
    for field, value in expected.items():
        self.assertEquals(up[field], value)
def test_list_updates_by_unexisting_package(self):
    """A package with no updates matches nothing."""
    body = self.app.get('/updates/', {"packages": "flash-player"}).json_body
    self.assertEquals(len(body['updates']), 0)
def test_list_updates_by_pushed(self):
    """pushed=false matches the unpushed fixture update."""
    body = self.app.get('/updates/', {"pushed": "false"}).json_body
    self.assertEquals(len(body['updates']), 1)
    up = body['updates'][0]
    self.assertEquals(up['user']['name'], u'guest')
    self.assertEquals(up['release']['name'], u'F17')
    expected = {
        'title': u'bodhi-2.0-1.fc17',
        'status': u'pending',
        'request': u'testing',
        'type': u'bugfix',
        'severity': u'unspecified',
        'suggest': u'unspecified',
        'close_bugs': True,
        'notes': u'Useful details!',
        'date_submitted': u'1984-11-02 00:00:00',
        'date_modified': None,
        'date_approved': None,
        'date_pushed': None,
        'locked': False,
        'alias': u'FEDORA-%s-0001' % YEAR,
        'karma': 1,
        'pushed': False,
    }
    for field, value in expected.items():
        self.assertEquals(up[field], value)
def test_list_updates_by_invalid_pushed(self):
    """A non-boolean pushed value is rejected with 400."""
    body = self.app.get('/updates/', {"pushed": "who knows?"},
                        status=400).json_body
    self.assertEquals(len(body.get('updates', [])), 0)
    error = body['errors'][0]
    self.assertEquals(error['name'], 'pushed')
    self.assertEquals(
        error['description'],
        '"who knows?" is neither in (\'false\', \'0\') nor in (\'true\', \'1\')')
def test_list_updates_by_pushed_since(self):
    """pushed_since filters on the push timestamp."""
    now = datetime.utcnow()
    date_arg = {"pushed_since": now.strftime("%Y-%m-%d")}
    # With nothing pushed, the filter matches no updates.
    body = self.app.get('/updates/', date_arg).json_body
    self.assertEquals(len(body['updates']), 0)
    # Stamp the fixture update as pushed now.
    session = DBSession()
    session.query(Update).first().date_pushed = now
    session.flush()
    # The same query now finds it.
    body = self.app.get('/updates/', date_arg).json_body
    self.assertEquals(len(body['updates']), 1)
    up = body['updates'][0]
    self.assertEquals(up['user']['name'], u'guest')
    self.assertEquals(up['release']['name'], u'F17')
    expected = {
        'title': u'bodhi-2.0-1.fc17',
        'status': u'pending',
        'request': u'testing',
        'type': u'bugfix',
        'severity': u'unspecified',
        'suggest': u'unspecified',
        'close_bugs': True,
        'notes': u'Useful details!',
        'date_submitted': u'1984-11-02 00:00:00',
        'date_approved': None,
        'date_pushed': now.strftime("%Y-%m-%d %H:%M:%S"),
        'locked': False,
        'alias': u'FEDORA-%s-0001' % YEAR,
        'karma': 1,
    }
    for field, value in expected.items():
        self.assertEquals(up[field], value)
    self.assertEquals(len(up['bugs']), 1)
    self.assertEquals(up['bugs'][0]['bug_id'], 12345)
def test_list_updates_by_invalid_pushed_since(self):
    """A malformed pushed_since value is rejected with 400."""
    body = self.app.get('/updates/', {"pushed_since": "a while ago"},
                        status=400).json_body
    self.assertEquals(len(body.get('updates', [])), 0)
    error = body['errors'][0]
    self.assertEquals(error['name'], 'pushed_since')
    self.assertEquals(error['description'], 'Invalid date')
def test_list_updates_by_release_name(self):
    """Filtering by release name (F17) returns the fixture update."""
    body = self.app.get('/updates/', {"releases": "F17"}).json_body
    self.assertEquals(len(body['updates']), 1)
    up = body['updates'][0]
    self.assertEquals(up['user']['name'], u'guest')
    self.assertEquals(up['release']['name'], u'F17')
    expected = {
        'title': u'bodhi-2.0-1.fc17',
        'status': u'pending',
        'request': u'testing',
        'type': u'bugfix',
        'severity': u'unspecified',
        'suggest': u'unspecified',
        'close_bugs': True,
        'notes': u'Useful details!',
        'date_submitted': u'1984-11-02 00:00:00',
        'date_modified': None,
        'date_approved': None,
        'date_pushed': None,
        'locked': False,
        'alias': u'FEDORA-%s-0001' % YEAR,
        'karma': 1,
    }
    for field, value in expected.items():
        self.assertEquals(up[field], value)
def test_list_updates_by_release_version(self):
    """Filtering by release version (17) returns the fixture update."""
    body = self.app.get('/updates/', {"releases": "17"}).json_body
    self.assertEquals(len(body['updates']), 1)
    up = body['updates'][0]
    self.assertEquals(up['user']['name'], u'guest')
    self.assertEquals(up['release']['name'], u'F17')
    expected = {
        'title': u'bodhi-2.0-1.fc17',
        'status': u'pending',
        'request': u'testing',
        'type': u'bugfix',
        'severity': u'unspecified',
        'suggest': u'unspecified',
        'close_bugs': True,
        'notes': u'Useful details!',
        'date_submitted': u'1984-11-02 00:00:00',
        'date_modified': None,
        'date_approved': None,
        'date_pushed': None,
        'locked': False,
        'alias': u'FEDORA-%s-0001' % YEAR,
        'karma': 1,
    }
    for field, value in expected.items():
        self.assertEquals(up[field], value)
def test_list_updates_by_unexisting_release(self):
    """An unknown release is rejected with 400."""
    body = self.app.get('/updates/', {"releases": "WinXP"},
                        status=400).json_body
    self.assertEquals(len(body.get('updates', [])), 0)
    error = body['errors'][0]
    self.assertEquals(error['name'], 'releases')
    self.assertEquals(error['description'],
                      'Invalid releases specified: WinXP')
def test_list_updates_by_request(self):
    """Filtering on request=testing returns the pending fixture update."""
    body = self.app.get('/updates/', {'request': "testing"}).json_body
    self.assertEquals(len(body['updates']), 1)
    up = body['updates'][0]
    self.assertEquals(up['user']['name'], u'guest')
    self.assertEquals(up['release']['name'], u'F17')
    expected = {
        'title': u'bodhi-2.0-1.fc17',
        'status': u'pending',
        'request': u'testing',
        'type': u'bugfix',
        'severity': u'unspecified',
        'suggest': u'unspecified',
        'close_bugs': True,
        'notes': u'Useful details!',
        'date_submitted': u'1984-11-02 00:00:00',
        'date_modified': None,
        'date_approved': None,
        'date_pushed': None,
        'locked': False,
        'alias': u'FEDORA-%s-0001' % YEAR,
        'karma': 1,
    }
    for field, value in expected.items():
        self.assertEquals(up[field], value)
def test_list_updates_by_unexisting_request(self):
    """An unknown request value is rejected with 400."""
    body = self.app.get('/updates/', {"request": "impossible"},
                        status=400).json_body
    self.assertEquals(len(body.get('updates', [])), 0)
    error = body['errors'][0]
    self.assertEquals(error['name'], 'request')
    self.assertEquals(error['description'],
                      '"impossible" is not one of unpush, testing, revoke,'
                      ' obsolete, stable')
def test_list_updates_by_severity(self):
    """Filtering on severity=unspecified returns the fixture update."""
    body = self.app.get('/updates/', {"severity": "unspecified"}).json_body
    self.assertEquals(len(body['updates']), 1)
    up = body['updates'][0]
    self.assertEquals(up['user']['name'], u'guest')
    self.assertEquals(up['release']['name'], u'F17')
    expected = {
        'title': u'bodhi-2.0-1.fc17',
        'status': u'pending',
        'request': u'testing',
        'type': u'bugfix',
        'severity': u'unspecified',
        'suggest': u'unspecified',
        'close_bugs': True,
        'notes': u'Useful details!',
        'date_submitted': u'1984-11-02 00:00:00',
        'date_modified': None,
        'date_approved': None,
        'date_pushed': None,
        'locked': False,
        'alias': u'FEDORA-%s-0001' % YEAR,
        'karma': 1,
    }
    for field, value in expected.items():
        self.assertEquals(up[field], value)
def test_list_updates_by_unexisting_severity(self):
    """An unknown severity value is rejected with 400."""
    body = self.app.get('/updates/', {"severity": "schoolmaster"},
                        status=400).json_body
    self.assertEquals(len(body.get('updates', [])), 0)
    error = body['errors'][0]
    self.assertEquals(error['name'], 'severity')
    self.assertEquals(
        error['description'],
        '"schoolmaster" is not one of high, urgent, medium, low, unspecified')
def test_list_updates_by_status(self):
    """Filtering on status=pending returns the fixture update."""
    body = self.app.get('/updates/', {"status": "pending"}).json_body
    self.assertEquals(len(body['updates']), 1)
    up = body['updates'][0]
    self.assertEquals(up['user']['name'], u'guest')
    self.assertEquals(up['release']['name'], u'F17')
    expected = {
        'title': u'bodhi-2.0-1.fc17',
        'status': u'pending',
        'request': u'testing',
        'type': u'bugfix',
        'severity': u'unspecified',
        'suggest': u'unspecified',
        'close_bugs': True,
        'notes': u'Useful details!',
        'date_submitted': u'1984-11-02 00:00:00',
        'date_modified': None,
        'date_approved': None,
        'date_pushed': None,
        'locked': False,
        'alias': u'FEDORA-%s-0001' % YEAR,
        'karma': 1,
    }
    for field, value in expected.items():
        self.assertEquals(up[field], value)
def test_list_updates_by_unexisting_status(self):
    """An unknown status value is rejected with 400."""
    body = self.app.get('/updates/', {"status": "single"},
                        status=400).json_body
    self.assertEquals(len(body.get('updates', [])), 0)
    error = body['errors'][0]
    self.assertEquals(error['name'], 'status')
    self.assertEquals(
        error['description'],
        '"single" is not one of testing, processing, obsolete, stable, unpushed, pending')
def test_list_updates_by_suggest(self):
    """Filtering on suggest=unspecified returns the fixture update."""
    body = self.app.get('/updates/', {"suggest": "unspecified"}).json_body
    self.assertEquals(len(body['updates']), 1)
    up = body['updates'][0]
    self.assertEquals(up['user']['name'], u'guest')
    self.assertEquals(up['release']['name'], u'F17')
    expected = {
        'title': u'bodhi-2.0-1.fc17',
        'status': u'pending',
        'request': u'testing',
        'type': u'bugfix',
        'severity': u'unspecified',
        'suggest': u'unspecified',
        'close_bugs': True,
        'notes': u'Useful details!',
        'date_submitted': u'1984-11-02 00:00:00',
        'date_modified': None,
        'date_approved': None,
        'date_pushed': None,
        'locked': False,
        'alias': u'FEDORA-%s-0001' % YEAR,
        'karma': 1,
    }
    for field, value in expected.items():
        self.assertEquals(up[field], value)
def test_list_updates_by_unexisting_suggest(self):
    """An unknown suggest value is rejected with 400."""
    body = self.app.get('/updates/', {"suggest": "no idea"},
                        status=400).json_body
    self.assertEquals(len(body.get('updates', [])), 0)
    error = body['errors'][0]
    self.assertEquals(error['name'], 'suggest')
    self.assertEquals(error['description'],
                      '"no idea" is not one of logout, reboot, unspecified')
def test_list_updates_by_type(self):
    """Filtering on type=bugfix returns the fixture update."""
    body = self.app.get('/updates/', {"type": "bugfix"}).json_body
    self.assertEquals(len(body['updates']), 1)
    up = body['updates'][0]
    self.assertEquals(up['user']['name'], u'guest')
    self.assertEquals(up['release']['name'], u'F17')
    expected = {
        'title': u'bodhi-2.0-1.fc17',
        'status': u'pending',
        'request': u'testing',
        'type': u'bugfix',
        'severity': u'unspecified',
        'suggest': u'unspecified',
        'close_bugs': True,
        'notes': u'Useful details!',
        'date_submitted': u'1984-11-02 00:00:00',
        'date_modified': None,
        'date_approved': None,
        'date_pushed': None,
        'locked': False,
        'alias': u'FEDORA-%s-0001' % YEAR,
        'karma': 1,
    }
    for field, value in expected.items():
        self.assertEquals(up[field], value)
def test_list_updates_by_unexisting_type(self):
    """An unknown update type is rejected with 400."""
    body = self.app.get('/updates/', {"type": "not_my"},
                        status=400).json_body
    self.assertEquals(len(body.get('updates', [])), 0)
    error = body['errors'][0]
    self.assertEquals(error['name'], 'type')
    self.assertEquals(
        error['description'],
        '"not_my" is not one of newpackage, bugfix, security, enhancement')
def test_list_updates_by_username(self):
    """Filtering on user=guest returns the fixture update."""
    body = self.app.get('/updates/', {"user": "guest"}).json_body
    self.assertEquals(len(body['updates']), 1)
    up = body['updates'][0]
    self.assertEquals(up['user']['name'], u'guest')
    self.assertEquals(up['release']['name'], u'F17')
    expected = {
        'title': u'bodhi-2.0-1.fc17',
        'status': u'pending',
        'request': u'testing',
        'type': u'bugfix',
        'severity': u'unspecified',
        'suggest': u'unspecified',
        'close_bugs': True,
        'notes': u'Useful details!',
        'date_submitted': u'1984-11-02 00:00:00',
        'date_modified': None,
        'date_approved': None,
        'date_pushed': None,
        'locked': False,
        'alias': u'FEDORA-%s-0001' % YEAR,
        'karma': 1,
    }
    for field, value in expected.items():
        self.assertEquals(up[field], value)
def test_list_updates_by_unexisting_username(self):
    """An unknown username is rejected with 400."""
    body = self.app.get('/updates/', {"user": "santa"},
                        status=400).json_body
    self.assertEquals(len(body.get('updates', [])), 0)
    error = body['errors'][0]
    self.assertEquals(error['name'], 'user')
    self.assertEquals(error['description'],
                      "Invalid user specified: santa")
def test_put_json_update(self):
    """PUT is not an allowed method on the updates collection (405)."""
    self.app.put_json('/updates/', self.get_update(), status=405)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_post_json_update(self, publish, *args):
    """POSTing a new update publishes an update.request.testing message."""
    self.app.post_json('/updates/', self.get_update('bodhi-2.0.0-1.fc17'))
    publish.assert_called_once_with(
        topic='update.request.testing', msg=mock.ANY)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_new_update(self, publish, *args):
    """Submitting a new update returns its JSON and fires a fedmsg."""
    response = self.app.post_json(
        '/updates/', self.get_update('bodhi-2.0.0-2.fc17'))
    up = response.json_body
    self.assertEquals(up['user']['name'], u'guest')
    self.assertEquals(up['release']['name'], u'F17')
    expected = {
        'title': u'bodhi-2.0.0-2.fc17',
        'status': u'pending',
        'request': u'testing',
        'type': u'bugfix',
        'severity': u'unspecified',
        'suggest': u'unspecified',
        'close_bugs': True,
        'notes': u'this is a test update',
        'date_modified': None,
        'date_approved': None,
        'date_pushed': None,
        'locked': False,
        'alias': u'FEDORA-%s-0002' % YEAR,
        'karma': 0,
        'requirements': 'rpmlint',
    }
    for field, value in expected.items():
        self.assertEquals(up[field], value)
    self.assertIsNotNone(up['date_submitted'])
    publish.assert_called_once_with(
        topic='update.request.testing', msg=mock.ANY)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_new_update_with_multiple_bugs(self, publish, *args):
update = self.get_update('bodhi-2.0.0-2.fc17')
update['bugs'] = ['1234', '5678']
r = self.app.post_json('/updates/', update)
up = r.json_body
self.assertEquals(len(up['bugs']), 2)
self.assertEquals(up['bugs'][0]['bug_id'], 1234)
self.assertEquals(up['bugs'][1]['bug_id'], 5678)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_new_update_with_multiple_bugs_as_str(self, publish, *args):
update = self.get_update('bodhi-2.0.0-2.fc17')
update['bugs'] = '1234, 5678'
r = self.app.post_json('/updates/', update)
up = r.json_body
self.assertEquals(len(up['bugs']), 2)
self.assertEquals(up['bugs'][0]['bug_id'], 1234)
self.assertEquals(up['bugs'][1]['bug_id'], 5678)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_new_update_with_invalid_bugs_as_str(self, publish, *args):
update = self.get_update('bodhi-2.0.0-2.fc17')
update['bugs'] = '1234, blargh'
r = self.app.post_json('/updates/', update, status=400)
up = r.json_body
self.assertEquals(up['status'], 'error')
self.assertEquals(up['errors'][0]['description'],
"Invalid bug ID specified: [u'1234', u'blargh']")
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_edit_update(self, publish, *args):
args = self.get_update('bodhi-2.0.0-2.fc17')
r = self.app.post_json('/updates/', args)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
args['edited'] = args['builds']
args['builds'] = 'bodhi-2.0.0-3.fc17'
args['requirements'] = 'upgradepath'
r = self.app.post_json('/updates/', args)
up = r.json_body
self.assertEquals(up['title'], u'bodhi-2.0.0-3.fc17')
self.assertEquals(up['status'], u'pending')
self.assertEquals(up['request'], u'testing')
self.assertEquals(up['user']['name'], u'guest')
self.assertEquals(up['release']['name'], u'F17')
self.assertEquals(up['type'], u'bugfix')
self.assertEquals(up['severity'], u'unspecified')
self.assertEquals(up['suggest'], u'unspecified')
self.assertEquals(up['close_bugs'], True)
self.assertEquals(up['notes'], u'this is a test update')
self.assertIsNotNone(up['date_submitted'])
self.assertIsNotNone(up['date_modified'], None)
self.assertEquals(up['date_approved'], None)
self.assertEquals(up['date_pushed'], None)
self.assertEquals(up['locked'], False)
self.assertEquals(up['alias'], u'FEDORA-%s-0002' % YEAR)
self.assertEquals(up['karma'], 0)
self.assertEquals(up['requirements'], 'upgradepath')
self.assertEquals(up['comments'][-1]['text'],
u'guest edited this update. New build(s): ' +
u'bodhi-2.0.0-3.fc17. Removed build(s): bodhi-2.0.0-2.fc17.')
self.assertEquals(len(up['builds']), 1)
self.assertEquals(up['builds'][0]['nvr'], u'bodhi-2.0.0-3.fc17')
self.assertEquals(DBSession.query(Build).filter_by(nvr=u'bodhi-2.0.0-2.fc17').first(), None)
self.assertEquals(len(publish.call_args_list), 2)
publish.assert_called_with(topic='update.edit', msg=ANY)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_edit_update_with_different_release(self, publish, *args):
"""Test editing an update for one release with builds from another."""
nvr = 'bodhi-2.0.0-2.fc17'
args = self.get_update('bodhi-2.0.0-2.fc17')
r = self.app.post_json('/updates/', args)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
# Add another release and package
Release._tag_cache = None
release = Release(
name=u'F18', long_name=u'Fedora 18',
id_prefix=u'FEDORA', version=u'18',
dist_tag=u'f18', stable_tag=u'f18-updates',
testing_tag=u'f18-updates-testing',
candidate_tag=u'f18-updates-candidate',
pending_testing_tag=u'f18-updates-testing-pending',
pending_stable_tag=u'f18-updates-pending',
override_tag=u'f18-override',
branch=u'f18')
DBSession.add(release)
pkg = Package(name=u'nethack')
DBSession.add(pkg)
args = self.get_update('bodhi-2.0.0-2.fc17,nethack-4.0.0-1.fc18')
args['edited'] = nvr
r = self.app.post_json('/updates/', args, status=400)
up = r.json_body
self.assertEquals(up['status'], 'error')
self.assertEquals(up['errors'][0]['description'],
'Cannot add a F18 build to an F17 update')
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_cascade_package_requirements_to_update(self, publish, *args):
package = DBSession.query(Package).filter_by(name=u'bodhi').one()
package.requirements = u'upgradepath rpmlint'
DBSession.flush()
args = self.get_update(u'bodhi-2.0.0-3.fc17')
# Don't specify any requirements so that they cascade from the package
del args['requirements']
r = self.app.post_json('/updates/', args)
up = r.json_body
self.assertEquals(up['title'], u'bodhi-2.0.0-3.fc17')
self.assertEquals(up['requirements'], 'upgradepath rpmlint')
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_edit_stable_update(self, publish, *args):
"""Make sure we can't edit stable updates"""
self.assertEquals(publish.call_args_list, [])
# First, create a testing update
nvr = 'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
r = self.app.post_json('/updates/', args, status=200)
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
# Then, switch it to stable behind the scenes
up = DBSession.query(Update).filter_by(title=nvr).one()
up.status = UpdateStatus.stable
# Then, try to edit it through the api again
args['edited'] = args['builds']
args['builds'] = 'bodhi-2.0.0-3.fc17'
r = self.app.post_json('/updates/', args, status=400)
up = r.json_body
self.assertEquals(up['status'], 'error')
self.assertEquals(up['errors'][0]['description'], "Cannot edit stable updates")
self.assertEquals(len(publish.call_args_list), 1)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_edit_locked_update(self, publish, *args):
"""Make sure some changes are prevented"""
nvr = 'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
r = self.app.post_json('/updates/', args, status=200)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
up = DBSession.query(Update).filter_by(title=nvr).one()
up.locked = True
up.status = UpdateRequest.testing
up.request = None
up_id = up.id
build = DBSession.query(Build).filter_by(nvr=nvr).one()
# Changing the notes should work
args['edited'] = args['builds']
args['notes'] = 'Some new notes'
up = self.app.post_json('/updates/', args, status=200).json_body
self.assertEquals(up['notes'], 'Some new notes')
# Changing the builds should fail
args['notes'] = 'And yet some other notes'
args['builds'] = 'bodhi-2.0.0-3.fc17'
r = self.app.post_json('/updates/', args, status=400).json_body
self.assertEquals(r['status'], 'error')
self.assertIn('errors', r)
self.assertIn({u'description': u"Can't add builds to a locked update",
u'location': u'body', u'name': u'builds'},
r['errors'])
up = DBSession.query(Update).get(up_id)
self.assertEquals(up.notes, 'Some new notes')
self.assertEquals(up.builds, [build])
# Changing the request should fail
args['notes'] = 'Still new notes'
args['builds'] = args['edited']
args['request'] = 'stable'
r = self.app.post_json('/updates/', args, status=400).json_body
self.assertEquals(r['status'], 'error')
self.assertIn('errors', r)
self.assertIn({u'description': u"Can't change the request on a "
"locked update",
u'location': u'body', u'name': u'builds'},
r['errors'])
up = DBSession.query(Update).get(up_id)
self.assertEquals(up.notes, 'Some new notes')
self.assertEquals(up.builds, [build])
self.assertEquals(up.request, None)
# At the end of the day, two fedmsg messages should have gone out.
self.assertEquals(len(publish.call_args_list), 2)
publish.assert_called_with(topic='update.edit', msg=ANY)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_push_untested_critpath_to_release(self, publish, *args):
"""
Ensure that we cannot push an untested critpath update directly to
stable.
"""
args = self.get_update('kernel-3.11.5-300.fc17')
args['request'] = 'stable'
up = self.app.post_json('/updates/', args).json_body
self.assertTrue(up['critpath'])
self.assertEquals(up['request'], 'testing')
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_obsoletion(self, publish, *args):
nvr = 'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
self.app.post_json('/updates/', args)
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
publish.call_args_list = []
up = DBSession.query(Update).filter_by(title=nvr).one()
up.status = UpdateStatus.testing
up.request = None
args = self.get_update('bodhi-2.0.0-3.fc17')
r = self.app.post_json('/updates/', args).json_body
self.assertEquals(r['request'], 'testing')
self.assertEquals(r['comments'][-1]['text'],
u'This update has obsoleted bodhi-2.0.0-2.fc17, '
'and has inherited its bugs and notes.')
publish.assert_called_with(
topic='update.request.testing', msg=mock.ANY)
up = DBSession.query(Update).filter_by(title=nvr).one()
self.assertEquals(up.status, UpdateStatus.obsolete)
self.assertEquals(up.comments[-1].text,
u'This update has been obsoleted by bodhi-2.0.0-3.fc17')
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_obsoletion_with_open_request(self, publish, *args):
nvr = 'bodhi-2.0.0-2.fc17'
args = self.get_update(nvr)
self.app.post_json('/updates/', args)
args = self.get_update('bodhi-2.0.0-3.fc17')
r = self.app.post_json('/updates/', args).json_body
self.assertEquals(r['request'], 'testing')
up = DBSession.query(Update).filter_by(title=nvr).one()
self.assertEquals(up.status, UpdateStatus.pending)
self.assertEquals(up.request, UpdateRequest.testing)
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
def test_invalid_request(self, *args):
"""Test submitting an invalid request"""
args = self.get_update()
resp = self.app.post_json(
'/updates/%s/request' % args['builds'],
{'request': 'foo','csrf_token': self.get_csrf_token()}, status=400)
resp = resp.json_body
eq_(resp['status'], 'error')
eq_(resp['errors'][0]['description'], u'"foo" is not one of unpush, testing, revoke, obsolete, stable')
# Now try with None
resp = self.app.post_json(
'/updates/%s/request' % args['builds'],
{'request': None, 'csrf_token': self.get_csrf_token()}, status=400)
resp = resp.json_body
eq_(resp['status'], 'error')
eq_(resp['errors'][0]['name'], 'request')
eq_(resp['errors'][0]['description'], 'Required')
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_testing_request(self, publish, *args):
"""Test submitting a valid testing request"""
args = self.get_update()
args['request'] = None
resp = self.app.post_json(
'/updates/%s/request' % args['builds'],
{'request': 'testing', 'csrf_token': self.get_csrf_token()})
eq_(resp.json['update']['request'], 'testing')
self.assertEquals(publish.call_args_list, [])
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
def test_invalid_stable_request(self, *args):
"""Test submitting a stable request for an update that has yet to meet the stable requirements"""
args = self.get_update()
resp = self.app.post_json(
'/updates/%s/request' % args['builds'],
{'request': 'stable', 'csrf_token': self.get_csrf_token()},
status=400)
eq_(resp.json['status'], 'error')
eq_(resp.json['errors'][0]['description'],
config.get('not_yet_tested_msg'))
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_stable_request_after_testing(self, publish, *args):
"""Test submitting a stable request to an update that has met the minimum amount of time in testing"""
args = self.get_update('bodhi-2.0.0-3.fc17')
resp = self.app.post_json('/updates/', args)
up = DBSession.query(Update).filter_by(title=resp.json['title']).one()
up.status = UpdateStatus.testing
up.request = None
up.comment('This update has been pushed to testing', author='bodhi')
up.comments[-1].timestamp -= timedelta(days=7)
DBSession.flush()
eq_(up.days_in_testing, 7)
eq_(up.meets_testing_requirements, True)
resp = self.app.post_json(
'/updates/%s/request' % args['builds'],
{'request': 'stable', 'csrf_token': self.get_csrf_token()})
eq_(resp.json['update']['request'], 'stable')
publish.assert_called_with(
topic='update.request.stable', msg=mock.ANY)
@mock.patch(**mock_failed_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_stable_request_failed_taskotron_results(self, publish, *args):
"""Test submitting a stable request, but with bad taskotron results"""
args = self.get_update('bodhi-2.0.0-3.fc17')
resp = self.app.post_json('/updates/', args)
up = DBSession.query(Update).filter_by(title=resp.json['title']).one()
up.status = UpdateStatus.testing
up.request = None
up.comment('This update has been pushed to testing', author='bodhi')
up.comments[-1].timestamp -= timedelta(days=7)
DBSession.flush()
eq_(up.days_in_testing, 7)
eq_(up.meets_testing_requirements, True)
resp = self.app.post_json(
'/updates/%s/request' % args['builds'],
{'request': 'stable', 'csrf_token': self.get_csrf_token()},
status=400)
self.assertIn('errors', resp)
self.assertIn('Required task', resp)
@mock.patch(**mock_absent_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_stable_request_absent_taskotron_results(self, publish, *args):
"""Test submitting a stable request, but with absent task results"""
args = self.get_update('bodhi-2.0.0-3.fc17')
resp = self.app.post_json('/updates/', args)
up = DBSession.query(Update).filter_by(title=resp.json['title']).one()
up.status = UpdateStatus.testing
up.request = None
up.comment('This update has been pushed to testing', author='bodhi')
up.comments[-1].timestamp -= timedelta(days=7)
DBSession.flush()
eq_(up.days_in_testing, 7)
eq_(up.meets_testing_requirements, True)
resp = self.app.post_json(
'/updates/%s/request' % args['builds'],
{'request': 'stable', 'csrf_token': self.get_csrf_token()},
status=400)
self.assertIn('errors', resp)
self.assertIn('No result found for', resp)
@mock.patch(**mock_valid_requirements)
def test_new_update_with_existing_build(self, *args):
"""Test submitting a new update with a build already in the database"""
session = DBSession()
package = Package.get('bodhi', session)
session.add(Build(nvr=u'bodhi-2.0.0-3.fc17', package=package))
session.flush()
args = self.get_update(u'bodhi-2.0.0-3.fc17')
resp = self.app.post_json('/updates/', args)
eq_(resp.json['title'], 'bodhi-2.0.0-3.fc17')
@mock.patch(**mock_valid_requirements)
def test_update_with_older_build_in_testing_from_diff_user(self, r):
"""
Test submitting an update for a package that has an older build within
a multi-build update currently in testing submitted by a different
maintainer.
https://github.com/fedora-infra/bodhi/issues/78
"""
session = DBSession()
title = u'bodhi-2.0-2.fc17 python-3.0-1.fc17'
args = self.get_update(title)
resp = self.app.post_json('/updates/', args)
newuser = User(name=u'bob')
session.add(newuser)
up = session.query(Update).filter_by(title=title).one()
up.status = UpdateStatus.testing
up.request = None
up.user = newuser
session.flush()
newtitle = u'bodhi-2.0-3.fc17'
args = self.get_update(newtitle)
resp = self.app.post_json('/updates/', args)
# The TestResponse doesn't track the session, so we can't peek at the
# flash messages. So, let's just make sure the session cookie was set.
assert resp.headers.get('Set-Cookie', '').startswith('session='), '%s\n%s' % (resp.headers, resp)
# Ensure the second update was created successfully
session.query(Update).filter_by(title=newtitle).one()
    @mock.patch(**mock_valid_requirements)
    def test_updateid_alias(self, *args):
        """A newly created update's 'updateid' field mirrors its alias."""
        res = self.app.post_json('/updates/', self.get_update(u'bodhi-2.0.0-3.fc17'))
        json = res.json_body
        self.assertEquals(json['alias'], json['updateid'])
def test_list_updates_by_lowercase_release_name(self):
res = self.app.get('/updates/', {"releases": "f17"})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], u'bodhi-2.0-1.fc17')
def test_redirect_to_package(self):
"When you visit /updates/package, redirect to /updates/?packages=..."
res = self.app.get('/updates/bodhi', status=302)
target = 'http://localhost/updates/?packages=bodhi'
self.assertEquals(res.headers['Location'], target)
# But be sure that we don't redirect if the package doesn't exist
res = self.app.get('/updates/non-existant', status=404)
def test_list_updates_by_alias_and_updateid(self):
upd = self.db.query(Update).filter(Update.alias != None).first()
res = self.app.get('/updates/', {"alias": upd.alias})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], upd.title)
self.assertEquals(up['alias'], upd.alias)
res = self.app.get('/updates/', {"updateid": upd.alias})
body = res.json_body
self.assertEquals(len(body['updates']), 1)
up = body['updates'][0]
self.assertEquals(up['title'], upd.title)
res = self.app.get('/updates/', {"updateid": 'BLARG'})
body = res.json_body
self.assertEquals(len(body['updates']), 0)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.notifications.publish')
def test_submitting_multi_release_updates(self, publish, *args):
""" https://github.com/fedora-infra/bodhi/issues/219 """
# Add another release and package
Release._tag_cache = None
release = Release(
name=u'F18', long_name=u'Fedora 18',
id_prefix=u'FEDORA', version=u'18',
dist_tag=u'f18', stable_tag=u'f18-updates',
testing_tag=u'f18-updates-testing',
candidate_tag=u'f18-updates-candidate',
pending_testing_tag=u'f18-updates-testing-pending',
pending_stable_tag=u'f18-updates-pending',
override_tag=u'f18-override',
branch=u'f18')
DBSession.add(release)
pkg = Package(name=u'nethack')
DBSession.add(pkg)
# A multi-release submission!!! This should create *two* updates
args = self.get_update('bodhi-2.0.0-2.fc17,bodhi-2.0.0-2.fc18')
r = self.app.post_json('/updates/', args)
data = r.json_body
self.assertIn('caveats', data)
import pprint; pprint.pprint(data['caveats'])
self.assertEquals(len(data['caveats']), 1)
self.assertEquals(data['caveats'][0]['description'], "Your update is being split into 2, one for each release.")
self.assertIn('updates', data)
self.assertEquals(len(data['updates']), 2)
publish.assert_called_with(topic='update.request.testing', msg=ANY)
# Make sure two fedmsg messages were published
self.assertEquals(len(publish.call_args_list), 2)
| mathstuf/bodhi | bodhi/tests/functional/test_updates.py | Python | gpl-2.0 | 75,491 | [
"VisIt"
] | bea6c8e513568408ab9415529cd477373f8d46098321a65eee5b7913b6489f64 |
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Visualization sample for particle dumbbells in the constant-temperature, constant-pressure ensemble.
"""
from __future__ import print_function
import numpy as np
from threading import Thread
import espressomd
from espressomd import thermostat
from espressomd.interactions import HarmonicBond
import espressomd.visualization_opengl
required_features = ["NPT", "LENNARD_JONES"]
espressomd.assert_features(required_features)
box_l = 10
# Cubic box; the NpT barostat rescales it during the run.
system = espressomd.System(box_l=[box_l] * 3)
system.set_random_state_PRNG()
np.random.seed(seed=system.seed)
# Live OpenGL view of the system, updated step-by-step from main().
visualizer = espressomd.visualization_opengl.openGLLive(
    system, background_color=[1, 1, 1], bond_type_radius=[0.2])
system.time_step = 0.0005
system.cell_system.skin = 0.1
system.box_l = [box_l, box_l, box_l]
# Lennard-Jones between all particles (single type 0).
system.non_bonded_inter[0, 0].lennard_jones.set_params(
    epsilon=2, sigma=1,
    cutoff=3, shift="auto")
# Harmonic spring used as the dumbbell bond.
system.bonded_inter[0] = HarmonicBond(k=5.0, r_0=1.0)
n_part = 200
# Place particles at random positions ...
for i in range(n_part):
    system.part.add(id=i, pos=np.random.random(3) * system.box_l)
# ... then bond consecutive pairs (0-1, 2-3, ...) into dumbbells.
for i in range(0, n_part - 1, 2):
    system.part[i].add_bond((system.bonded_inter[0], system.part[i + 1].id))
print("E before minimization:", system.analysis.energy()["total"])
# Relax overlaps from the random placement before switching on the thermostat.
system.minimize_energy.init(f_max=0.0, gamma=30.0,
                            max_steps=10000, max_displacement=0.1)
system.minimize_energy.minimize()
print("E after minimization:", system.analysis.energy()["total"])
# NpT ensemble: thermostat at kT=2.0 plus isotropic barostat at P=1.0.
system.thermostat.set_npt(kT=2.0, gamma0=1.0, gammav=0.01)
system.integrator.set_isotropic_npt(ext_pressure=1.0, piston=0.01)
def main():
    """Integration loop, run in a worker thread.

    Advances the integrator one step at a time, accumulates the
    instantaneous total pressure, and refreshes the visualizer after every
    step.  After more than 10000 steps the running mean pressure and the
    current (fluctuating) box size are printed and the accumulators reset.
    """
    cnt = 0
    P = 0
    while True:
        system.integrator.run(1)
        P += system.analysis.pressure()['total']
        if cnt > 10000:
            print("Pressure:", P / cnt, "Box:", system.box_l)
            cnt = 0
            P = 0
        visualizer.update()
        cnt += 1
# Start simulation in a separate daemon thread (dies with the main thread);
# the OpenGL event loop must own the main thread.
t = Thread(target=main)
t.daemon = True
t.start()
visualizer.start()
| hmenke/espresso | samples/visualization_npt.py | Python | gpl-3.0 | 2,684 | [
"ESPResSo"
] | 18768199e9f0a046098db8ec7c107596ba1828095cb100cd83bc7d0d1d7ef5d6 |
""" DIRAC Workload Management System utility module to get available memory and processors from mjf
"""
__RCSID__ = "$Id$"
import os
import re
import multiprocessing
import urllib2
from DIRAC import gLogger, gConfig
from DIRAC.Core.Utilities.List import fromChar
def getJobFeatures():
  """ Retrieve the Machine/Job Features of the current job.

      Each known feature is read from the location pointed at by the
      JOBFEATURES environment variable.

      :returns: dict mapping feature name to the raw value read; a feature
                that cannot be read is reported as 0. Empty dict when
                JOBFEATURES is not set.
  """
  features = {}
  if 'JOBFEATURES' not in os.environ:
    return features
  for item in ('allocated_cpu', 'hs06_job', 'shutdowntime_job', 'grace_secs_job', 'jobstart_secs',
               'job_id', 'wall_limit_secs',
               'cpu_limit_secs', 'max_rss_bytes', 'max_swap_bytes', 'scratch_limit_bytes'):
    fname = os.path.join(os.environ['JOBFEATURES'], item)
    try:
      fd = urllib2.urlopen(fname)
      try:
        val = fd.read()
      finally:
        fd.close()  # the original leaked the handle
    except Exception:  # narrowed from BaseException, which also swallowed KeyboardInterrupt/SystemExit
      val = 0
    features[item] = val
  return features
def getProcessorFromMJF():
  """ Number of processors allocated to this job according to MJF (None when unknown). """
  jobFeatures = getJobFeatures()
  return jobFeatures.get('allocated_cpu')
def getMemoryFromMJF():
  """ Maximum RSS (bytes) granted to this job according to MJF (None when unknown). """
  jobFeatures = getJobFeatures()
  return jobFeatures.get('max_rss_bytes')
def getMemoryFromProc():
meminfo = dict((i.split()[0].rstrip(':'), int(i.split()[1])) for i in open('/proc/meminfo').readlines())
maxRAM = meminfo['MemTotal']
if maxRAM:
return maxRAM / 1024
def getNumberOfProcessors(siteName=None, gridCE=None, queue=None):
  """ gets the number of processors on a certain CE/queue/node (what the pilot administers)

      The siteName/gridCE/queue parameters are normally not necessary.

      Tries to find it in this order:
      1) from the /Resources/Computing/CEDefaults/NumberOfProcessors (which is what the pilot fills up)
      2) if not present from JobFeatures
      3) if not present looks in CS for "NumberOfProcessors" Queue or CE option
      4) if not present looks in CS for "%dProcessors" Queue or CE Tag
      5) if not present but there's WholeNode tag, look what the WN provides using multiprocessing.cpu_count()
      6) return 1
  """
  # 1) from /Resources/Computing/CEDefaults/NumberOfProcessors (filled by the pilot)
  gLogger.info("Getting numberOfProcessors from /Resources/Computing/CEDefaults/NumberOfProcessors")
  numberOfProcessors = gConfig.getValue('/Resources/Computing/CEDefaults/NumberOfProcessors')
  if numberOfProcessors:
    return numberOfProcessors

  # 2) from MJF
  gLogger.info("Getting numberOfProcessors from MJF")
  numberOfProcessors = getProcessorFromMJF()
  if numberOfProcessors:
    return numberOfProcessors

  # Bug fix: with no siteName (the default) the CS lookups below crashed on
  # siteName.split -- fall back to the conservative default instead.
  if not siteName:
    gLogger.info("No site name available, defaulting to 1 processor")
    return 1

  # 3) looks in CS for "NumberOfProcessors" Queue or CE or site option
  grid = siteName.split('.')[0]
  gLogger.info("NumberOfProcessors could not be found in MJF, trying from CS (queue definition)")
  numberOfProcessors = gConfig.getValue('/Resources/Sites/%s/%s/CEs/%s/Queues/%s/NumberOfProcessors' % (grid,
                                                                                                        siteName,
                                                                                                        gridCE,
                                                                                                        queue))
  if numberOfProcessors:
    return numberOfProcessors
  gLogger.info("NumberOfProcessors could not be found in CS queue definition, ",
               "trying from /Resources/Sites/%s/%s/CEs/%s/NumberOfProcessors" % (grid, siteName, gridCE))
  numberOfProcessors = gConfig.getValue('/Resources/Sites/%s/%s/CEs/%s/NumberOfProcessors' % (grid,
                                                                                              siteName,
                                                                                              gridCE))
  if numberOfProcessors:
    return numberOfProcessors
  gLogger.info("NumberOfProcessors could not be found in CS CE definition, ",
               "trying from /Resources/Sites/%s/%s/NumberOfProcessors" % (grid, siteName))
  numberOfProcessors = gConfig.getValue('/Resources/Sites/%s/%s/NumberOfProcessors' % (grid, siteName))
  if numberOfProcessors:
    return numberOfProcessors

  # 4) looks in CS for "<n>Processors" tags on the CE or the Queue
  # Bug fix: the original log line concatenated two adjacent string literals
  # without a separating space ("...processorsfrom tags...").
  gLogger.info("Getting number of processors from tags for %s: %s: %s" % (siteName, gridCE, queue))
  # Tags of the CE
  tags = fromChar(gConfig.getValue('/Resources/Sites/%s/%s/CEs/%s/Tag' % (grid, siteName, gridCE), ''))
  # Tags of the Queue
  tags += fromChar(gConfig.getValue('/Resources/Sites/%s/%s/CEs/%s/Queues/%s/Tag' % (grid, siteName, gridCE, queue),
                                    ''))
  for tag in tags:
    numberOfProcessorsTag = re.search('[0-9]Processors', tag)
    if numberOfProcessorsTag:
      # .string is the whole tag, e.g. "16Processors" -> 16
      gLogger.info("Number of processors from tags", numberOfProcessorsTag.string)
      return int(numberOfProcessorsTag.string.replace('Processors', ''))

  # 5) WholeNode tag: use whatever the worker node provides
  gLogger.info("NumberOfProcessors could not be found in CS")
  if 'WholeNode' in tags:
    gLogger.info("Found WholeNode tag, using multiprocessing.cpu_count()")
    return multiprocessing.cpu_count()

  # 6) conservative default
  return 1
return 1
def getNumberOfPayloadProcessors(siteName=None, gridCE=None, queue=None):
  """ Gets the number of processors allowed for a single JobAgent (so for a "inner" CE).
      (NB: this does not refer to the job processors).
      This is normally used ONLY when a pilot instantiates more than one JobAgent (MultiLaunchAgent pilot command).

      The siteName/gridCE/queue parameters are normally not necessary.

      Tries to find it in this order:
      1) from the /Resources/Computing/CEDefaults/NumberOfPayloadProcessors (which is what pilot 3 fills up)
      2) if not present but there's WholeNode tag, use the getNumberOfProcessors function above
      3) otherwise returns 1
  """
  # 1) the pilot may have written the value to the local configuration
  gLogger.info("Getting NumberOfPayloadProcessors from /Resources/Computing/CEDefaults/NumberOfPayloadProcessors")
  payloadProcessors = gConfig.getValue('/Resources/Computing/CEDefaults/NumberOfPayloadProcessors')
  if payloadProcessors:
    return payloadProcessors

  # 2) a 'WholeNode' tag on the CE or on the queue means the node is entirely ours
  grid = siteName.split('.')[0]
  ceTags = fromChar(gConfig.getValue('/Resources/Sites/%s/%s/CEs/%s/Tag' % (grid, siteName, gridCE), ''))
  queueTags = fromChar(gConfig.getValue('/Resources/Sites/%s/%s/CEs/%s/Queues/%s/Tag' % (grid, siteName,
                                                                                         gridCE, queue), ''))
  if 'WholeNode' in ceTags + queueTags:
    return getNumberOfProcessors()

  # 3) Just returns a conservative "1"
  return 1
def getNumberOfJobProcessors(jobID):
  """ Gets the number of processors allowed for the job.
      This can be used to communicate to your job payload the number of processors it's allowed to use,
      so this function should be called from your extension.

      If the JobAgent is using "InProcess" CE (which is the default),
      then what's returned will basically be the same of what's returned by the getNumberOfProcessors() function above

      :param jobID: job identifier used to build the JobLimits CS path
      :returns: per-job processor limit when set, otherwise the pilot-level value
  """
  # from /Resources/Computing/JobLimits/jobID/NumberOfProcessors (set by PoolComputingElement)
  numberOfProcessors = gConfig.getValue('Resources/Computing/JobLimits/%s/NumberOfProcessors' % jobID)
  if numberOfProcessors:
    return numberOfProcessors
  # No per-job limit recorded: fall back to what the pilot administers
  return getNumberOfProcessors()
| fstagni/DIRAC | WorkloadManagementSystem/Utilities/JobParameters.py | Python | gpl-3.0 | 7,507 | [
"DIRAC"
] | c3cd79fca7216e6e325c9c79af701c1fcfd7361c013527f78798816cf5f6d251 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2013 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
"""Attachment domain class
Allows other tables to have any kinds of attachments."""
# pylint: enable=E1101
from zope.interface import implementer
from stoqlib.database.properties import BLOBCol, UnicodeCol
from stoqlib.domain.base import Domain
from stoqlib.domain.interfaces import IDescribable
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
@implementer(IDescribable)
class Attachment(Domain):
    """A generic file attachment stored in the database.

    Other tables reference this one to attach an arbitrary file
    (identified by name and MIME type) to their records.
    """

    __storm_table__ = 'attachment'
    #: the attachment name
    name = UnicodeCol(default=u'')
    #: MIME for the filetype attached
    mimetype = UnicodeCol(default=u'')
    #: blob that contains the file
    blob = BLOBCol(default=None)
    #
    # IDescribable implementation
    #
    def get_description(self):
        """Return the attachment's name (IDescribable contract)."""
        return self.name
| tiagocardosos/stoq | stoqlib/domain/attachment.py | Python | gpl-2.0 | 1,691 | [
"VisIt"
] | 46ab0f4906225e22d36fda57045ac30d93a2bfc953b2df7ca54d1a4f7a53057f |
#!/usr/bin/env python2
# * **************************************************************** **
# File: util.py
# Requires: Python 2.7+ (but not Python 3.0+)
# Note: For history, changes and dates for this file, consult git.
# Author: Brian Danilko, Likeable Software (brian@likeablesoftware.com)
# Copyright 2015-2017 Microbric Pty Ltd.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License (in the doc/licenses directory)
# for more details.
#
# * **************************************************************** */
""" Module providing utility functions. """
from __future__ import print_function
from __future__ import absolute_import
import sys
import datetime
import os
import os.path
class Enum(object):
    """ Provides a 'C'-like enumeration for python

        e.g. ERRORS = Enum("OVERFLOW", "DIV_BY_ZERO")

        Keys are bound, in order, to the integers 0, 1, 2, ...
    """

    def __init__(self, *keys):
        for index, key in enumerate(keys):
            self.__dict__[key] = index

    def len(self):
        """Number of enumeration members."""
        return len(self.__dict__)

    def isValid(self, value):
        """True when value is one of the member values."""
        return 0 <= value < self.len()
class Mask(object):
    """'C'-style bit-mask enumeration.

    Each name passed to the constructor becomes an attribute whose value
    is a distinct power of two, so members can be or-ed together,
    e.g. DUMP = Mask("PARSER", "OPTIMISER") gives DUMP.PARSER == 1 and
    DUMP.OPTIMISER == 2.
    """

    def __init__(self, *keys):
        for index, key in enumerate(keys):
            self.__dict__[key] = 1 << index

    def len(self):
        # Number of mask members defined on this instance.
        return len(self.__dict__)

    def isValid(self, value):
        """value can be an 'or' of possible keys"""
        # Any combination of member bits is valid: [0, 2**len()).
        return 0 <= value < (1 << self.len())
class SimpleLog(object):
    """Provide a VERY SIMPLE log of execution. Just start, and end with timestamps.

    So if something crashes then should be able to see that it happened.
    The log is append-only; when it grows past maxBytes it is rotated once to
    <fileName>.old. File-system errors are deliberately swallowed so that
    logging can never break the program being logged.
    """

    def __init__(self, use=True, fileName="EdPy.log", maxBytes=2000000):
        # fh stays None until the first log() call opens the file lazily.
        self.fh = None
        self.use = use
        self.start = datetime.datetime.now()
        self.fileName = fileName
        self.maxBytes = maxBytes

    def formatTimestamp(self, ts=None):
        """Return ts (or the current time if ts is None) as 'YYYY-MM-DD HH:MM:SS[.ffffff]'."""
        if (ts is None):
            # BUG FIX: datetime.datetime.now is a function and must be called.
            # The previous code read ".isoformat" off the function object itself,
            # which raised AttributeError whenever ts was None.
            return datetime.datetime.now().isoformat(' ')
        else:
            return ts.isoformat(' ')

    def formatDelta(self, delta):
        """Format a datetime.timedelta as '+<seconds>.<microseconds>s'."""
        return "+{:d}.{:06d}s".format(delta.seconds, delta.microseconds)

    def open(self):
        """Open (and possibly rotate) the log file. A no-op when logging is disabled."""
        if (not self.use):
            return

        # try to rename if it's too large. But if there is an error then just
        # continue on.
        try:
            # if there is an existing file then see if it's too large
            if (os.path.exists(self.fileName)):
                bytes = os.path.getsize(self.fileName)
                if (bytes >= self.maxBytes):
                    os.rename(self.fileName, self.fileName + ".old")
        except Exception:
            # Best-effort rotation only. Narrowed from a bare "except:" so that
            # KeyboardInterrupt/SystemExit are no longer swallowed here.
            pass

        try:
            # Non-buffered appending log (this file targets Python 2; on
            # Python 3 unbuffered text mode raises and fh stays None).
            self.fh = open(self.fileName, "a", buffering=0)
        except Exception:
            # Leave fh as None; log() then silently does nothing.
            self.fh = None

    def log(self, line):
        """Append one timestamped line to the log, opening the file on first use."""
        if (not self.use):
            return

        if (self.fh is None):
            self.open()

        if (self.fh is not None):
            now = datetime.datetime.now()
            delta = now - self.start
            print("{:s} dur:{:s} pid:{} msg:{:s}".format(self.formatTimestamp(now),
                                                         self.formatDelta(delta),
                                                         os.getpid(), line), file=self.fh)
            self.fh.flush()

    def close(self):
        """Close the log file if it was opened; safe to call repeatedly."""
        if (not self.use):
            return

        if (self.fh is not None):
            self.fh.close()
            self.fh = None
def LowerStr(inString):
    """Return *inString* converted to lower case.

    Handy as a ``type=`` callable for argparse arguments.
    """
    return inString.lower()
def CheckPythonVersion():
    """Exit with status 1 (printing a FATAL message) unless Python >= 2.7."""
    major, minor = sys.version_info[0], sys.version_info[1]
    # Only 2.0-era fields of version_info are assumed available here.
    if (major, minor) >= (2, 7):
        return
    rawText = ("Python version must be 2.7 or greater,"
               " this Python version is %d.%d." % (major, minor))
    print("FATAL: " + rawText, file=sys.stderr)
    sys.exit(1)


# do this check on every import of util!
CheckPythonVersion()
| Bdanilko/EdPy | src/lib/util.py | Python | gpl-2.0 | 4,629 | [
"Brian"
] | 98b95d0f5946038d7fc228890806317bcd7ee4afdd6551de35ef2ea2d0e42cec |
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
"""
Management utility to create superusers.
"""
import getpass
import os
import re
import sys
import django.db.transaction
import tldap.transaction
from django.conf import settings
from django.core import exceptions
from django.core.management.base import BaseCommand
from django.core.validators import validate_email
from karaage.institutes.models import Institute
from karaage.people.models import Group, Person
from karaage.people.utils import (
UsernameException,
validate_username_for_new_person,
)
# Python 2/3 compatibility shim: on Python 2 rebind the safe raw_input
# under the name "input"; on Python 3 raw_input does not exist (NameError)
# and the builtin input already has the desired behaviour.
try:
    input = raw_input
except NameError:
    pass
class Command(BaseCommand):
    """Interactive management command that creates a Karaage superuser."""

    help = 'Used to create a karaage superuser.'

    def add_arguments(self, parser):
        # Every option is optional; any value not supplied on the command
        # line is prompted for interactively in handle().
        # NOTE(review): the trailing commas below create throwaway 1-tuples;
        # harmless, but presumably unintentional.
        parser.add_argument('--username', '-u', help='Username'),
        parser.add_argument('--email', '-e', help='E-Mail'),
        parser.add_argument('--short_name', '-f', help='Short Name'),
        parser.add_argument('--full_name', '-l', help='Full Name'),
        parser.add_argument('--password', '-p', help='Password'),
        parser.add_argument('--institute', '-i', help='Institute Name'),

    # Both the SQL database and the LDAP backend are updated atomically:
    # nothing is committed unless the whole command succeeds.
    @django.db.transaction.atomic
    @tldap.transaction.commit_on_success
    def handle(self, *args, **options):
        username = options['username']
        email = options['email']
        short_name = options['short_name']
        full_name = options['full_name']
        password = options['password']
        institute_name = options['institute']

        # Try to determine the current system user's username to use as a
        # default.
        try:
            import pwd
            unix_uid = os.getuid()
            unix_username = pwd.getpwuid(unix_uid)[0]
            default_username = unix_username.replace(' ', '').lower()
            if default_username == 'root':
                default_username = ''
        except (ImportError, KeyError):
            # KeyError will be raised by getpwuid() if there is no
            # corresponding entry in the /etc/passwd file (a very restricted
            # chroot environment, for example).
            default_username = ''

        # Determine whether the default username is taken, so we don't display
        # it as an option.
        if default_username:
            try:
                Person.objects.get(username=default_username)
            except Person.DoesNotExist:
                pass
            else:
                default_username = ''

        # Prompt for username/email/password. Enclose this whole thing in a
        # try/except to trap for a keyboard interrupt and exit gracefully.
        try:
            # Get a username: loop until it passes project validation.
            while 1:
                if not username:
                    input_msg = 'Username'
                    if default_username:
                        input_msg += \
                            ' (Leave blank to use %r)' % default_username
                    username = input(input_msg + ': ')
                if default_username and username == '':
                    username = default_username
                try:
                    validate_username_for_new_person(username)
                    break
                except UsernameException as e:
                    sys.stderr.write("%s\n" % e)
                    username = None
                    print('')
                    continue

            # Get an email: loop until it is syntactically valid.
            while 1:
                if not email:
                    email = input('E-mail address: ')
                try:
                    validate_email(email)
                except exceptions.ValidationError:
                    sys.stderr.write(
                        "Error: That e-mail address is invalid.\n")
                    print('')
                    email = None
                else:
                    break

            # Get a password: must be entered twice, non-blank.
            while 1:
                if not password:
                    password = getpass.getpass()
                    password2 = getpass.getpass('Password (again): ')
                    if password != password2:
                        sys.stderr.write(
                            "Error: Your passwords didn't match.\n")
                        password = None
                        print('')
                        continue
                if password.strip() == '':
                    sys.stderr.write(
                        "Error: Blank passwords aren't allowed.\n")
                    password = None
                    print('')
                    continue
                break

            # Names only need to be non-empty.
            while 1:
                if not short_name:
                    short_name = input('Short Name: ')
                else:
                    break

            while 1:
                if not full_name:
                    full_name = input('Full Name: ')
                else:
                    break

            # Institute names must satisfy the project-wide group name regex.
            group_re = re.compile(r'^%s$' % settings.GROUP_VALIDATION_RE)
            while 1:
                if not institute_name:
                    if Institute.objects.count() > 0:
                        print(
                            "Choose an existing institute "
                            "for new superuser.")
                        print("Alternatively enter a new name to create one.")
                        print("")
                        print("Valid choices are:")
                        print("")
                        for i in Institute.active.all():
                            print("* %s" % i)
                        # NOTE(review): bare "print" statement below is a
                        # Python 2 leftover; on Python 3 it is a no-op
                        # expression referencing the print function.
                        print
                    else:
                        print("No Institutes in system, will create one now.")
                        print('')
                    institute_name = input('Institute Name: ')
                if not re.search(group_re, institute_name):
                    sys.stderr.write(
                        "%s\n" % settings.GROUP_VALIDATION_ERROR_MSG)
                    institute_name = None
                    print('')
                    continue
                else:
                    break

            # Reuse an existing institute, otherwise create group+institute.
            try:
                institute = Institute.objects.get(name=institute_name)
                print("Using existing institute %s." % institute)
            except Institute.DoesNotExist:
                group, c = Group.objects.get_or_create(name=institute_name)
                if c:
                    print("Created new group %s." % group)
                else:
                    print("Using existing group %s." % group)
                institute = Institute.objects.create(
                    name=institute_name, group=group, is_active=True)
                print("Created new institute %s." % institute)

        except KeyboardInterrupt:
            sys.stderr.write("\nOperation cancelled.\n")
            sys.exit(1)

        data = {
            'username': username,
            'email': email,
            'password': password,
            'short_name': short_name,
            'full_name': full_name,
            'institute': institute,
        }
        Person.objects.create_superuser(**data)
        print("Karaage Superuser created successfully.")
| brianmay/karaage | karaage/management/commands/kgcreatesuperuser.py | Python | gpl-3.0 | 7,851 | [
"Brian"
] | bb52ff0b806445904135d6a15456b31f8a475f2f86057b0e980ef3a3ce63acd1 |
from abc import ABC, abstractmethod
from forte.core import flog
from forte.molecule import Molecule
from forte.basis import Basis
from ._forte import StateInfo
from ._forte import Symmetry
from ._forte import make_ints_from_psi4
class Model(ABC):
    """
    Abstract interface shared by all models.

    A model bundles the information required to build the Hamiltonian of a
    system (number of orbitals, symmetry information, basis set), so that
    molecules, model Hamiltonians (e.g. Hubbard), and effective Hamiltonians
    can all be handled uniformly.
    """

    @abstractmethod
    def point_group(self) -> str:
        """The model point group"""

    @abstractmethod
    def ints(self, mo_space_info):
        """Make a ForteIntegral object"""
class MolecularModel(Model):
    """
    A class used to handle molecules

    This class knows the molecular structure and the basis,
    and is responsible for providing integrals over molecular orbitals.
    """

    def __init__(
        self,
        molecule: Molecule,
        basis: Basis,
        int_type: str = None,
        scf_aux_basis: Basis = None,
        corr_aux_basis: Basis = None
    ):
        """
        Initialize a MolecularModel object

        Parameters
        ----------
        molecule: Molecule
            the molecule information
        int_type: {'CONVENTIONAL', 'DF, 'CD', 'DISKDF'}
            the type of integrals used
        basis: Basis
            the computational basis
        scf_aux_basis: Basis
            the auxiliary basis set used in density-fitted SCF computations
        corr_aux_basis: Basis
            the auxiliary basis set used in density-fitted correlated computations
        """
        self._molecule = molecule
        self._basis = basis
        # Integral type defaults to conventional and is stored upper-cased.
        self._int_type = 'CONVENTIONAL' if int_type is None else int_type.upper()
        self._scf_aux_basis = scf_aux_basis
        self._corr_aux_basis = corr_aux_basis
        # Cache the symmetry object built from psi4's point-group label.
        self.symmetry = Symmetry(molecule.molecule.point_group().symbol().capitalize())

    def __repr__(self):
        """
        return a string representation of this object
        """
        return f"MolecularModel(\n{repr(self._molecule)},\n{repr(self._basis)},\n{self._int_type})"

    def __str__(self):
        """
        return a string representation of this object
        """
        return self.__repr__()

    @property
    def molecule(self):
        # The underlying psi4 molecule object (not the Molecule wrapper).
        return self._molecule.molecule

    @property
    def int_type(self):
        return self._int_type

    @property
    def basis(self):
        return self._basis.__str__()

    @property
    def scf_aux_basis(self):
        if self._scf_aux_basis is None:
            return None
        return self._scf_aux_basis.__str__()

    @property
    def corr_aux_basis(self):
        if self._corr_aux_basis is None:
            return None
        return self._corr_aux_basis.__str__()

    @property
    def point_group(self) -> str:
        return self.symmetry.point_group_label()

    def state(self, charge: int, multiplicity: int, ms: float = None, sym: str = None, gasmin=None, gasmax=None):
        """This function is used to create a StateInfo object.

        It checks for potential errors.

        Parameters
        ----------
        charge: int
            total charge
        multiplicity: int
            the spin multiplicity of the state
        ms: float
            projection of spin on the z axis (e.g. 0.5, 2.0,).
            (default = lowest value consistent with multiplicity)
        sym: str
            the state irrep label (e.g., 'A1' for a C2v molecule)
        gasmin: list(int)
            the minimum number of electrons in each GAS space
        gasmax: list(int)
            the maximum number of electrons in each GAS space
        """
        if ms is None:
            # If ms = None take the lowest value consistent with multiplicity
            # For example:
            #   singlet: multiplicity = 1 -> twice_ms = 0 (ms = 0)
            #   doublet: multiplicity = 2 -> twice_ms = 1 (ms = 1/2)
            #   triplet: multiplicity = 3 -> twice_ms = 0 (ms = 0)
            twice_ms = (multiplicity + 1) % 2
        else:
            twice_ms = round(2.0 * ms)

        # compute the number of electrons from the nuclear charges
        molecule = self.molecule
        natom = molecule.natom()
        nel = round(sum([molecule.Z(i) for i in range(natom)])) - charge

        # N and 2*Ms must have the same parity, otherwise na/nb are not integers.
        if (nel - twice_ms) % 2 != 0:
            raise ValueError(
                f'(MolecularModel) The value of M_S ({twice_ms / 2.0}) is incompatible with the number of electrons ({nel})'
            )

        # compute the number of alpha/beta electrons
        na = (nel + twice_ms) // 2  # from ms = (na - nb) / 2
        nb = nel - na

        # compute the irrep index and produce a standard label
        if sym is None:
            if self.symmetry.nirrep() == 1:
                # in this case there is only one possible choice
                sym = 'A'
            else:
                raise ValueError(
                    f'(MolecularModel) The value of sym ({sym}) is invalid.'
                    f' Please specify a valid symmetry label.'
                )

        # get the irrep index from the symbol
        irrep = self.symmetry.irrep_label_to_index(sym)

        gasmin = [] if gasmin is None else gasmin
        gasmax = [] if gasmax is None else gasmax

        return StateInfo(na, nb, multiplicity, twice_ms, irrep, sym, gasmin, gasmax)

    def ints(self, data, options):
        """Build a ForteIntegral object from the psi4 wavefunction in *data*."""
        flog('info', 'MolecularModel: preparing integrals from psi4')

        # if we do DF, we need to make sure that psi4's wavefunction object
        # has a DF_BASIS_MP2 basis registered
        if self.int_type == 'DF':
            import psi4
            aux_basis = psi4.core.BasisSet.build(
                self.molecule, 'DF_BASIS_MP2', self.corr_aux_basis, 'RIFIT', self.basis
            )
            data.psi_wfn.set_basisset('DF_BASIS_MP2', aux_basis)

        # get the appropriate integral object
        return make_ints_from_psi4(data.psi_wfn, options, data.mo_space_info, self._int_type)
| evangelistalab/forte | forte/model.py | Python | lgpl-3.0 | 6,126 | [
"Psi4"
] | 3eabc90f444234b51e4d60faeb2980a541acf0cc89e8251444be8bc727011864 |
# coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains functions used for polling, training and saving jobs."""
from core.classifiers import algorithm_registry
from core.classifiers import classifier_utils
from core.domain import remote_access_services
# pylint: disable=too-many-branches
def _validate_job_data(job_data):
if not isinstance(job_data, dict):
raise Exception('Invalid format of job data')
if 'job_id' not in job_data:
raise Exception('job data should contain job id')
if 'training_data' not in job_data:
raise Exception('job data should contain training data')
if 'algorithm_id' not in job_data:
raise Exception('job data should contain algorithm id')
if not isinstance(job_data['job_id'], unicode):
raise Exception(
'Expected job id to be unicode, received %s' %
job_data['job_id'])
if not isinstance(job_data['algorithm_id'], unicode):
raise Exception(
'Expected algorithm id to be unicode, received %s' %
job_data['algorithm_id'])
if not isinstance(job_data['training_data'], list):
raise Exception(
'Expected training data to be a list, received %s' %
job_data['training_data'])
algorithm_ids = (
algorithm_registry.Registry.get_all_classifier_algorithm_ids())
if job_data['algorithm_id'] not in algorithm_ids:
raise Exception('Invalid algorithm id %s' % job_data['algorithm_id'])
for grouped_answers in job_data['training_data']:
if 'answer_group_index' not in grouped_answers:
raise Exception(
'Expected answer_group_index to be a key in training_data',
' list item')
if 'answers' not in grouped_answers:
raise Exception(
'Expected answers to be a key in training_data list item')
if not isinstance(grouped_answers['answer_group_index'], int):
raise Exception(
'Expected answer_group_index to be an int, received %s' %
grouped_answers['answer_group_index'])
if not isinstance(grouped_answers['answers'], list):
raise Exception(
'Expected answers to be a list, received %s' %
grouped_answers['answers'])
def get_next_job():
    """Fetch the next pending job request from the remote service.

    Returns: dict. A dictionary containing job data (validated), or a
    falsy value when no job is currently available.
    """
    fetched = remote_access_services.fetch_next_job_request()
    if not fetched:
        return fetched
    _validate_job_data(fetched)
    return fetched
def train_classifier(algorithm_id, training_data):
    """Train classifier associated with 'algorithm_id' using 'training_data'.

    Args:
        algorithm_id: str. ID of classifier algorithm.
        training_data: list(dict). A list containing training data. Each dict
            stores 'answer_group_index' and 'answers'.

    Returns:
        dict. Result of trained classifier algorithm.
    """
    algorithm = algorithm_registry.Registry.get_classifier_by_algorithm_id(
        algorithm_id)
    algorithm.train(training_data)
    trained_model_dict = algorithm.to_dict()
    # Sanity-check the serialized form before handing it back.
    algorithm.validate(trained_model_dict)
    return trained_model_dict
def store_job_result(job_id, classifier_data):
    """Store result of job in the Oppia server.

    Args:
        job_id: str. ID of the job whose result is to be stored.
        classifier_data: dict. A dictionary representing result of the job.

    Returns:
        int. Status code of response.
    """
    # The classifier data to be sent in the payload should have all
    # floating point values stored as strings. This is because floating point
    # numbers are represented differently on GAE(Oppia) and GCE(Oppia-ml).
    # Therefore, converting all floating point numbers to string keeps
    # signature consistent on both Oppia and Oppia-ml.
    # For more info visit: https://stackoverflow.com/q/40173295
    stringified = classifier_utils.encode_floats_in_classifier_data(
        classifier_data)
    payload = {
        'job_id': job_id,
        'classifier_data_with_floats_stringified': stringified,
    }
    return remote_access_services.store_trained_classifier_model(payload)
| prasanna08/oppia-ml | core/domain/job_services.py | Python | apache-2.0 | 4,935 | [
"VisIt"
] | 95f72fb814c2add1e541593c45b281f692a69c2f5d1d343999986eb0eb01874d |
# coding=utf-8
# Copyright 2022 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for weight_symmetry.shuffled_mask."""
import glob
from os import path
import tempfile
from absl.testing import absltest
from absl.testing import flagsaver
from rigl.experimental.jax import shuffled_mask
class ShuffledMaskTest(absltest.TestCase):
  """Smoke tests for the shuffled_mask training driver.

  Each test runs one epoch of shuffled_mask.main with a different flag
  combination and checks that exactly one TF events file is written.
  """

  def _run_and_check_events(self, **extra_flags):
    """Runs the driver with the given flags and checks its events output.

    Args:
      **extra_flags: additional absl flag overrides merged on top of the
        common one-epoch / temp-directory configuration.
    """
    experiment_dir = tempfile.mkdtemp()
    eval_flags = dict(epochs=1, experiment_dir=experiment_dir)
    eval_flags.update(extra_flags)
    with flagsaver.flagsaver(**eval_flags):
      shuffled_mask.main([])
    # Exactly one events file must exist under the experiment directory.
    outfile = path.join(experiment_dir, '*', 'events.out.tfevents.*')
    files = glob.glob(outfile)
    self.assertTrue(len(files) == 1 and path.exists(files[0]))

  def test_run_fc(self):
    """Tests if the driver for shuffled training runs correctly with FC NN."""
    self._run_and_check_events(model='MNIST_FC')

  def test_run_conv(self):
    """Tests if the driver for shuffled training runs correctly with CNN."""
    self._run_and_check_events(model='MNIST_CNN')

  def test_run_random(self):
    """Tests the driver with a random mask."""
    self._run_and_check_events(mask_type='random')

  def test_run_per_neuron(self):
    """Tests the driver with per-neuron sparsity."""
    self._run_and_check_events(mask_type='per_neuron')

  def test_run_symmetric(self):
    """Tests the driver with a symmetric mask."""
    self._run_and_check_events(mask_type='symmetric')


if __name__ == '__main__':
  absltest.main()
| google-research/rigl | rigl/experimental/jax/shuffled_mask_test.py | Python | apache-2.0 | 3,413 | [
"NEURON"
] | 6d9348073e9496de6917f606b2a2d7c5216dd9af24bdde25c0c3cb8458ce2f3b |
# -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS.
"""
from __future__ import absolute_import
import pytest
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.xblock.acid import AcidView
from common.test.acceptance.tests.helpers import UniqueCourseTest
class XBlockAcidBase(UniqueCourseTest):
    """
    Base class for tests that verify that XBlock integration is working correctly
    """
    __test__ = False

    def setUp(self):
        """
        Create a unique identifier for the course used in this test.
        """
        # Ensure that the superclass sets up
        super(XBlockAcidBase, self).setUp()

        self.setup_fixtures()

        AutoAuthPage(self.browser, course_id=self.course_id).visit()

        self.courseware_page = CoursewarePage(self.browser, self.course_id)

    def validate_acid_block_view(self, acid_block):
        """
        Verify that the LMS view for the Acid Block is correct
        """
        self.assertTrue(acid_block.init_fn_passed)
        self.assertTrue(acid_block.resource_url_passed)
        # Every storage scope exercised by the acid block must report success.
        for scope in ('user_state', 'user_state_summary', 'preferences', 'user_info'):
            self.assertTrue(acid_block.scope_passed(scope))
class XBlockAcidNoChildTest(XBlockAcidBase):
    """
    Tests of an AcidBlock with no children
    """
    shard = 20
    __test__ = True

    def setup_fixtures(self):
        # Build a minimal course: chapter -> sequential -> vertical -> acid.
        course_fix = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )

        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('acid', 'Acid Block')
                    )
                )
            )
        ).install()

    def test_acid_block(self):
        """
        Verify that all expected acid block tests pass in the lms.
        """
        self.courseware_page.visit()
        acid_block = AcidView(self.browser, '.xblock-student_view[data-block-type=acid]')
        self.validate_acid_block_view(acid_block)
class XBlockAcidChildTest(XBlockAcidBase):
    """
    Tests of an AcidBlock with children
    """
    shard = 20
    __test__ = True

    def setup_fixtures(self):
        # Build a course whose vertical holds an acid parent with two acid
        # children and one plain HTML child.
        course_fix = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )

        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('acid_parent', 'Acid Parent Block').add_children(
                            XBlockFixtureDesc('acid', 'First Acid Child', metadata={'name': 'first'}),
                            XBlockFixtureDesc('acid', 'Second Acid Child', metadata={'name': 'second'}),
                            XBlockFixtureDesc('html', 'Html Child', data="<html>Contents</html>"),
                        )
                    )
                )
            )
        ).install()

    def validate_acid_parent_block_view(self, acid_parent_block):
        """Verify the parent block view, including its child-related checks."""
        super(XBlockAcidChildTest, self).validate_acid_block_view(acid_parent_block)
        self.assertTrue(acid_parent_block.child_tests_passed)

    def test_acid_block(self):
        """
        Verify that all expected acid block tests pass in the lms.
        """
        self.courseware_page.visit()
        acid_parent_block = AcidView(self.browser, '.xblock-student_view[data-block-type=acid_parent]')
        self.validate_acid_parent_block_view(acid_parent_block)

        acid_block = AcidView(self.browser, '.xblock-student_view[data-block-type=acid]')
        self.validate_acid_block_view(acid_block)
@pytest.mark.xfail
class XBlockAcidAsideTest(XBlockAcidBase):
    """
    Tests of an AcidBlock with an acid aside.

    NOTE(review): marked xfail; the fixture below installs only the acid
    block, with no visible aside configuration -- presumably the aside is
    expected to attach itself. Confirm before relying on these tests.
    """
    shard = 20
    __test__ = True

    def setup_fixtures(self):
        course_fix = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )

        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('acid', 'Acid Block')
                    )
                )
            )
        ).install()

    def test_acid_block(self):
        """
        Verify that all expected acid block tests pass in the lms.
        """
        self.courseware_page.visit()
        acid_aside = AcidView(self.browser, '.xblock_asides-v1-student_view[data-block-type=acid_aside]')
        self.validate_acid_aside_view(acid_aside)

        acid_block = AcidView(self.browser, '.xblock-student_view[data-block-type=acid]')
        self.validate_acid_block_view(acid_block)

    def validate_acid_aside_view(self, acid_aside):
        # Asides share the same checks as regular acid blocks.
        self.validate_acid_block_view(acid_aside)
| jolyonb/edx-platform | common/test/acceptance/tests/lms/test_lms_acid_xblock.py | Python | agpl-3.0 | 5,768 | [
"VisIt"
] | 25fa795901f08cb3b881cab562c81da8e0438bcfc18b1517f2da8859ced09fe5 |
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2010 (ita)
"""
Classes and functions required for waf commands
"""
import os, imp, sys
from waflib import Utils, Errors, Logs
import waflib.Node
# the following 3 constants are updated on each new release (do not touch)
HEXVERSION=0x1060b00
"""Constant updated on new releases"""
WAFVERSION="1.6.11"
"""Constant updated on new releases"""
WAFREVISION="a7e69d6b81b04729804754c4d5214da063779a65"
"""Constant updated on new releases"""
ABI = 98
"""Version of the build data cache file format (used in :py:const:`waflib.Context.DBFILE`)"""
DBFILE = '.wafpickle-%d' % ABI
"""Name of the pickle file for storing the build data"""
APPNAME = 'APPNAME'
"""Default application name (used by ``waf dist``)"""
VERSION = 'VERSION'
"""Default application version (used by ``waf dist``)"""
TOP = 'top'
"""The variable name for the top-level directory in wscript files"""
OUT = 'out'
"""The variable name for the output directory in wscript files"""
WSCRIPT_FILE = 'wscript'
"""Name of the waf script files"""
launch_dir = ''
"""Directory from which waf has been called"""
run_dir = ''
"""Location of the wscript file to use as the entry point"""
top_dir = ''
"""Location of the project directory (top), if the project was configured"""
out_dir = ''
"""Location of the build directory (out), if the project was configured"""
waf_dir = ''
"""Directory containing the waf modules"""
local_repo = ''
"""Local repository containing additional Waf tools (plugins)"""
remote_repo = 'http://waf.googlecode.com/git/'
"""
Remote directory containing downloadable waf tools. The missing tools can be downloaded by using::
$ waf configure --download
"""
remote_locs = ['waflib/extras', 'waflib/Tools']
"""
Remote directories for use with :py:const:`waflib.Context.remote_repo`
"""
g_module = None
"""
Module representing the main wscript file (see :py:const:`waflib.Context.run_dir`)
"""
STDOUT = 1
STDERR = -1
BOTH = 0
classes = []
"""
List of :py:class:`waflib.Context.Context` subclasses that can be used as waf commands. The classes
are added automatically by a metaclass.
"""
def create_context(cmd_name, *k, **kw):
	"""
	Create a new :py:class:`waflib.Context.Context` instance corresponding to the given command.
	Used in particular by :py:func:`waflib.Scripting.run_command`

	:param cmd_name: command
	:type cmd_name: string
	:param k: arguments to give to the context class initializer
	:type k: list
	:param kw: keyword arguments to give to the context class initializer
	:type kw: dict
	"""
	global classes
	# Prefer a registered command class whose 'cmd' matches.
	for ctx_class in classes:
		if ctx_class.cmd == cmd_name:
			return ctx_class(*k, **kw)
	# No match: fall back to a plain Context tagged with the command name.
	fallback = Context(*k, **kw)
	fallback.fun = cmd_name
	return fallback
class store_context(type):
	"""
	Metaclass for storing the command classes into the list :py:const:`waflib.Context.classes`
	Context classes must provide an attribute 'cmd' representing the command to execute
	"""
	def __init__(cls, name, bases, dict):
		super(store_context, cls).__init__(name, bases, dict)
		name = cls.__name__

		# The two base classes themselves are not commands.
		if name == 'ctx' or name == 'Context':
			return

		try:
			cls.cmd
		except AttributeError:
			raise Errors.WafError('Missing command for the context class %r (cmd)' % name)

		# Default the bound function name to the command name.
		if not getattr(cls, 'fun', None):
			cls.fun = cls.cmd

		global classes
		# Prepend, so the most recently defined class wins in create_context.
		classes.insert(0, cls)

ctx = store_context('ctx', (object,), {})
"""Base class for the :py:class:`waflib.Context.Context` classes"""
class Context(ctx):
"""
Default context for waf commands, and base class for new command contexts.
Context objects are passed to top-level functions::
def foo(ctx):
print(ctx.__class__.__name__) # waflib.Context.Context
Subclasses must define the attribute 'cmd':
:param cmd: command to execute as in ``waf cmd``
:type cmd: string
:param fun: function name to execute when the command is called
:type fun: string
.. inheritance-diagram:: waflib.Context.Context waflib.Build.BuildContext waflib.Build.InstallContext waflib.Build.UninstallContext waflib.Build.StepContext waflib.Build.ListContext waflib.Configure.ConfigurationContext waflib.Scripting.Dist waflib.Scripting.DistCheck waflib.Build.CleanContext
"""
errors = Errors
"""
Shortcut to :py:mod:`waflib.Errors` provided for convenience
"""
tools = {}
"""
A cache for modules (wscript files) read by :py:meth:`Context.Context.load`
"""
def __init__(self, **kw):
try:
rd = kw['run_dir']
except KeyError:
global run_dir
rd = run_dir
# binds the context to the nodes in use to avoid a context singleton
class node_class(waflib.Node.Node):
pass
self.node_class = node_class
self.node_class.__module__ = "waflib.Node"
self.node_class.__name__ = "Nod3"
self.node_class.ctx = self
self.root = self.node_class('', None)
self.cur_script = None
self.path = self.root.find_dir(rd)
self.stack_path = []
self.exec_dict = {'ctx':self, 'conf':self, 'bld':self, 'opt':self}
self.logger = None
def __hash__(self):
"""
Return a hash value for storing context objects in dicts or sets. The value is not persistent.
:return: hash value
:rtype: int
"""
return id(self)
def load(self, tool_list, *k, **kw):
"""
Load a Waf tool as a module, and try calling the function named :py:const:`waflib.Context.Context.fun` from it.
A ``tooldir`` value may be provided as a list of module paths.
:type tool_list: list of string or space-separated string
:param tool_list: list of Waf tools to use
"""
tools = Utils.to_list(tool_list)
path = Utils.to_list(kw.get('tooldir', ''))
for t in tools:
module = load_tool(t, path)
fun = getattr(module, kw.get('name', self.fun), None)
if fun:
fun(self)
def execute(self):
"""
Execute the command. Redefine this method in subclasses.
"""
global g_module
self.recurse([os.path.dirname(g_module.root_path)])
def pre_recurse(self, node):
"""
Method executed immediately before a folder is read by :py:meth:`waflib.Context.Context.recurse`. The node given is set
as an attribute ``self.cur_script``, and as the current path ``self.path``
:param node: script
:type node: :py:class:`waflib.Node.Node`
"""
self.stack_path.append(self.cur_script)
self.cur_script = node
self.path = node.parent
def post_recurse(self, node):
"""
Restore ``self.cur_script`` and ``self.path`` right after :py:meth:`waflib.Context.Context.recurse` terminates.
:param node: script
:type node: :py:class:`waflib.Node.Node`
"""
self.cur_script = self.stack_path.pop()
if self.cur_script:
self.path = self.cur_script.parent
def recurse(self, dirs, name=None, mandatory=True, once=True):
"""
Run user code from the supplied list of directories.
The directories can be either absolute, or relative to the directory
of the wscript file. The methods :py:meth:`waflib.Context.Context.pre_recurse` and :py:meth:`waflib.Context.Context.post_recurse`
are called immediately before and after a script has been executed.
:param dirs: List of directories to visit
:type dirs: list of string or space-separated string
:param name: Name of function to invoke from the wscript
:type name: string
:param mandatory: whether sub wscript files are required to exist
:type mandatory: bool
:param once: read the script file once for a particular context
:type once: bool
"""
try:
cache = self.recurse_cache
except:
cache = self.recurse_cache = {}
for d in Utils.to_list(dirs):
if not os.path.isabs(d):
# absolute paths only
d = os.path.join(self.path.abspath(), d)
WSCRIPT = os.path.join(d, WSCRIPT_FILE)
WSCRIPT_FUN = WSCRIPT + '_' + (name or self.fun)
node = self.root.find_node(WSCRIPT_FUN)
if node and (not once or node not in cache):
cache[node] = True
self.pre_recurse(node)
try:
function_code = node.read('rU')
exec(compile(function_code, node.abspath(), 'exec'), self.exec_dict)
finally:
self.post_recurse(node)
elif not node:
node = self.root.find_node(WSCRIPT)
tup = (node, name or self.fun)
if node and (not once or tup not in cache):
cache[tup] = True
self.pre_recurse(node)
try:
wscript_module = load_module(node.abspath())
user_function = getattr(wscript_module, (name or self.fun), None)
if not user_function:
if not mandatory:
continue
raise Errors.WafError('No function %s defined in %s' % (name or self.fun, node.abspath()))
user_function(self)
finally:
self.post_recurse(node)
elif not node:
if not mandatory:
continue
raise Errors.WafError('No wscript file in directory %s' % d)
	def exec_command(self, cmd, **kw):
		"""
		Execute a command and return the exit status. If the context has the attribute 'log',
		capture and log the process stderr/stdout for logging purposes::

			def run(tsk):
				ret = tsk.generator.bld.exec_command('touch foo.txt')
				return ret

		Do not confuse this method with :py:meth:`waflib.Context.Context.cmd_and_log` which is used to
		return the standard output/error values.

		:param cmd: command argument for subprocess.Popen
		:param kw: keyword arguments for subprocess.Popen
		:return: process exit status, or -1 when the process could not be started
		"""
		subprocess = Utils.subprocess
		# a plain string implies shell execution; a list is executed directly
		kw['shell'] = isinstance(cmd, str)
		Logs.debug('runner: %r' % cmd)
		Logs.debug('runner_env: kw=%s' % kw)
		try:
			if self.logger:
				# warning: may deadlock with a lot of output (subprocess limitation)
				self.logger.info(cmd)
				kw['stdout'] = kw['stderr'] = subprocess.PIPE
				p = subprocess.Popen(cmd, **kw)
				(out, err) = p.communicate()
				if out:
					self.logger.debug('out: %s' % out.decode(sys.stdout.encoding or 'iso8859-1'))
				if err:
					self.logger.error('err: %s' % err.decode(sys.stdout.encoding or 'iso8859-1'))
				return p.returncode
			else:
				# no logger: let the child inherit stdout/stderr
				p = subprocess.Popen(cmd, **kw)
				return p.wait()
		except OSError:
			return -1
	def cmd_and_log(self, cmd, **kw):
		"""
		Execute a command and return stdout if the execution is successful.
		An exception is thrown when the exit status is non-0. In that case, both stderr and stdout
		will be bound to the WafError object::

			def configure(conf):
				out = conf.cmd_and_log(['echo', 'hello'], output=waflib.Context.STDOUT, quiet=waflib.Context.BOTH)
				(out, err) = conf.cmd_and_log(['echo', 'hello'], output=waflib.Context.BOTH)
				try:
					conf.cmd_and_log(['which', 'someapp'], output=waflib.Context.BOTH)
				except Exception as e:
					print(e.stdout, e.stderr)

		:param cmd: args for subprocess.Popen
		:param kw: keyword arguments for subprocess.Popen
		"""
		subprocess = Utils.subprocess
		kw['shell'] = isinstance(cmd, str)
		Logs.debug('runner: %r' % cmd)
		# 'quiet' and 'output' are waf-specific options; they must be removed
		# from kw before it is forwarded to subprocess.Popen
		if 'quiet' in kw:
			quiet = kw['quiet']
			del kw['quiet']
		else:
			quiet = None
		if 'output' in kw:
			to_ret = kw['output']
			del kw['output']
		else:
			to_ret = STDOUT
		kw['stdout'] = kw['stderr'] = subprocess.PIPE
		if quiet is None:
			self.to_log(cmd)
		try:
			p = subprocess.Popen(cmd, **kw)
			(out, err) = p.communicate()
		except Exception as e:
			raise Errors.WafError('Execution failure: %s' % str(e), ex=e)
		# normalize to text (python3 returns bytes here)
		if not isinstance(out, str):
			out = out.decode(sys.stdout.encoding or 'iso8859-1')
		if not isinstance(err, str):
			err = err.decode(sys.stdout.encoding or 'iso8859-1')
		if out and quiet != STDOUT and quiet != BOTH:
			self.to_log('out: %s' % out)
		if err and quiet != STDERR and quiet != BOTH:
			self.to_log('err: %s' % err)
		if p.returncode:
			# non-zero status: raise with the captured streams attached
			e = Errors.WafError('Command %r returned %r' % (cmd, p.returncode))
			e.returncode = p.returncode
			e.stderr = err
			e.stdout = out
			raise e
		if to_ret == BOTH:
			return (out, err)
		elif to_ret == STDERR:
			return err
		return out
def fatal(self, msg, ex=None):
"""
Raise a configuration error to interrupt the execution immediately::
def configure(conf):
conf.fatal('a requirement is missing')
:param msg: message to display
:type msg: string
:param ex: optional exception object
:type ex: exception
"""
if self.logger:
self.logger.info('from %s: %s' % (self.path.abspath(), msg))
try:
msg = '%s\n(complete log in %s)' % (msg, self.logger.handlers[0].baseFilename)
except:
pass
raise self.errors.ConfigurationError(msg, ex=ex)
def to_log(self, msg):
"""
Log some information to the logger (if present), or to stderr. If the message is empty,
it is not printed::
def build(bld):
bld.to_log('starting the build')
When in doubt, override this method, or provide a logger on the context class.
:param msg: message
:type msg: string
"""
if not msg:
return
if self.logger:
self.logger.info(msg)
else:
sys.stderr.write(str(msg))
sys.stderr.flush()
def msg(self, msg, result, color=None):
"""
Print a configuration message of the form ``msg: result``.
The second part of the message will be in colors. The output
can be disabled easly by setting ``in_msg`` to a positive value::
def configure(conf):
self.in_msg = 1
conf.msg('Checking for library foo', 'ok')
# no output
:param msg: message to display to the user
:type msg: string
:param result: result to display
:type result: string or boolean
:param color: color to use, see :py:const:`waflib.Logs.colors_lst`
:type color: string
"""
self.start_msg(msg)
if not isinstance(color, str):
color = result and 'GREEN' or 'YELLOW'
self.end_msg(result, color)
def start_msg(self, msg):
"""
Print the beginning of a 'Checking for xxx' message. See :py:meth:`waflib.Context.Context.msg`
"""
try:
if self.in_msg:
self.in_msg += 1
return
except:
self.in_msg = 0
self.in_msg += 1
try:
self.line_just = max(self.line_just, len(msg))
except AttributeError:
self.line_just = max(40, len(msg))
for x in (self.line_just * '-', msg):
self.to_log(x)
Logs.pprint('NORMAL', "%s :" % msg.ljust(self.line_just), sep='')
def end_msg(self, result, color=None):
"""Print the end of a 'Checking for' message. See :py:meth:`waflib.Context.Context.msg`"""
self.in_msg -= 1
if self.in_msg:
return
defcolor = 'GREEN'
if result == True:
msg = 'ok'
elif result == False:
msg = 'not found'
defcolor = 'YELLOW'
else:
msg = str(result)
self.to_log(msg)
Logs.pprint(color or defcolor, msg)
def load_special_tools(self, var, ban=[]):
global waf_dir
lst = self.root.find_node(waf_dir).find_node('waflib/extras').ant_glob(var)
for x in lst:
if not x.name in ban:
load_tool(x.name.replace('.py', ''))
# module cache shared by all contexts within this process (path -> module)
cache_modules = {}
"""
Dictionary holding already loaded modules, keyed by their absolute path.
The modules are added automatically by :py:func:`waflib.Context.load_module`
"""
def load_module(path):
	"""
	Load a source file as a python module. Loaded modules are cached in
	:py:const:`waflib.Context.cache_modules`, so each script is read once.

	:param path: file path
	:type path: string
	:return: Loaded Python module
	:rtype: module
	:raises: :py:class:`waflib.Errors.WafError` if the file cannot be read
	"""
	try:
		return cache_modules[path]
	except KeyError:
		pass
	module = imp.new_module(WSCRIPT_FILE)
	try:
		code = Utils.readf(path, m='rU')
	except (IOError, OSError):
		raise Errors.WafError('Could not read the file %r' % path)
	module_dir = os.path.dirname(path)
	sys.path.insert(0, module_dir)
	try:
		exec(compile(code, path, 'exec'), module.__dict__)
	finally:
		# remove the temporary entry even if the executed script raised,
		# so sys.path does not accumulate stale directories
		sys.path.remove(module_dir)
	cache_modules[path] = module
	return module
def load_tool(tool, tooldir=None):
	"""
	Import a Waf tool (python module), and store it in the dict :py:const:`waflib.Context.Context.tools`

	:type tool: string
	:param tool: Name of the tool
	:type tooldir: list
	:param tooldir: List of directories to search for the tool module
	:return: the loaded tool module
	"""
	# historical aliases kept for backward compatibility
	tool = tool.replace('++', 'xx')
	tool = tool.replace('java', 'javaw')
	tool = tool.replace('compiler_cc', 'compiler_c')
	if tooldir:
		assert isinstance(tooldir, list)
		sys.path = tooldir + sys.path
		try:
			__import__(tool)
			ret = sys.modules[tool]
			Context.tools[tool] = ret
			return ret
		finally:
			# always restore sys.path, even when the import fails
			for d in tooldir:
				sys.path.remove(d)
	else:
		global waf_dir
		try:
			# prefer extras, then the standard Tools folder
			os.stat(os.path.join(waf_dir, 'waflib', 'extras', tool + '.py'))
			d = 'waflib.extras.%s' % tool
		except OSError:
			try:
				os.stat(os.path.join(waf_dir, 'waflib', 'Tools', tool + '.py'))
				d = 'waflib.Tools.%s' % tool
			except OSError:
				d = tool # user has messed with sys.path
		__import__(d)
		ret = sys.modules[d]
		Context.tools[tool] = ret
		return ret
| Theragon/kupfer | waflib/Context.py | Python | gpl-3.0 | 16,308 | [
"VisIt"
] | b1ca13887cd82730784a3631e0b2b3d8efec906a1cdae221cbf185836e3e9673 |
"""Testing for Gaussian process regression """
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, ConstantKernel as C, WhiteKernel
from sklearn.gaussian_process.kernels import DotProduct
from sklearn.utils.testing \
import (assert_true, assert_greater, assert_array_less,
assert_almost_equal, assert_equal, assert_raise_message)
def f(x):
    """Target function used throughout the tests: x * sin(x)."""
    return np.sin(x) * x
# Six noise-free training observations of f; X2 holds held-out test locations.
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
# Kernels exercised by the tests below. fixed_kernel has no free
# hyperparameters, so optimization-related tests skip it.
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=1.0), fixed_kernel,
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
           C(1.0, (1e-2, 1e2)) *
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
           C(1.0, (1e-2, 1e2)) *
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
           C(1e-5, (1e-5, 1e2)),
           C(0.1, (1e-2, 1e2)) *
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
           C(1e-5, (1e-5, 1e2))]
def test_gpr_interpolation():
    """A noiseless GP must interpolate its training data exactly."""
    for krn in kernels:
        model = GaussianProcessRegressor(kernel=krn).fit(X, y)
        pred, cov = model.predict(X, return_cov=True)
        assert_almost_equal(pred, y)
        # zero predictive variance at the training points
        assert_almost_equal(np.diag(cov), 0.)
def test_lml_improving():
    """Hyperparameter tuning must increase the log-marginal likelihood."""
    for krn in kernels:
        if krn == fixed_kernel:
            continue  # nothing to optimize for a fixed kernel
        model = GaussianProcessRegressor(kernel=krn).fit(X, y)
        lml_opt = model.log_marginal_likelihood(model.kernel_.theta)
        lml_init = model.log_marginal_likelihood(krn.theta)
        assert_greater(lml_opt, lml_init)
def test_lml_precomputed():
    """log_marginal_likelihood() with no argument returns the stored optimum."""
    for krn in kernels:
        model = GaussianProcessRegressor(kernel=krn).fit(X, y)
        explicit = model.log_marginal_likelihood(model.kernel_.theta)
        assert_equal(explicit, model.log_marginal_likelihood())
def test_converged_to_local_maximum():
    """After fitting, the LML gradient vanishes unless a bound is active."""
    for krn in kernels:
        if krn == fixed_kernel:
            continue
        model = GaussianProcessRegressor(kernel=krn).fit(X, y)
        lml, grad = model.log_marginal_likelihood(model.kernel_.theta, True)
        at_lower = model.kernel_.theta == model.kernel_.bounds[:, 0]
        at_upper = model.kernel_.theta == model.kernel_.bounds[:, 1]
        assert_true(np.all((np.abs(grad) < 1e-4) | at_lower | at_upper))
def test_solution_inside_bounds():
    """Optimized hyperparameters must stay within their declared bounds."""
    for krn in kernels:
        if krn == fixed_kernel:
            continue
        model = GaussianProcessRegressor(kernel=krn).fit(X, y)
        bounds = model.kernel_.bounds
        max_ = np.finfo(model.kernel_.theta.dtype).max
        tiny = 1e-10
        # replace infinite upper bounds so the comparisons are well-defined
        bounds[~np.isfinite(bounds[:, 1]), 1] = max_
        assert_array_less(bounds[:, 0], model.kernel_.theta + tiny)
        assert_array_less(model.kernel_.theta, bounds[:, 1] + tiny)
def test_lml_gradient():
    """Analytic LML gradient must agree with a finite-difference estimate."""
    for krn in kernels:
        model = GaussianProcessRegressor(kernel=krn).fit(X, y)
        lml, grad = model.log_marginal_likelihood(krn.theta, True)
        grad_fd = approx_fprime(
            krn.theta,
            lambda theta: model.log_marginal_likelihood(theta, False),
            1e-10)
        assert_almost_equal(grad, grad_fd, 3)
def test_prior():
    """The GP prior has zero mean and kernel-determined variances."""
    for krn in kernels:
        model = GaussianProcessRegressor(kernel=krn)
        mean, cov = model.predict(X, return_cov=True)
        assert_almost_equal(mean, 0, 5)
        if len(model.kernel.theta) > 1:
            # XXX: quite hacky, works only for current kernels
            assert_almost_equal(np.diag(cov), np.exp(krn.theta[0]), 5)
        else:
            assert_almost_equal(np.diag(cov), 1, 5)
def test_sample_statistics():
    """Sample mean/variance of GP draws match the predictive moments."""
    for krn in kernels:
        model = GaussianProcessRegressor(kernel=krn).fit(X, y)
        mean, cov = model.predict(X2, return_cov=True)
        draws = model.sample_y(X2, 300000)
        # More digits accuracy would require many more samples
        assert_almost_equal(mean, np.mean(draws, 1), 1)
        scale = np.diag(cov).max()
        assert_almost_equal(np.diag(cov) / scale, np.var(draws, 1) / scale, 1)
def test_no_optimizer():
    """With optimizer=None the kernel hyperparameters are left untouched."""
    model = GaussianProcessRegressor(kernel=RBF(1.0), optimizer=None).fit(X, y)
    assert_equal(np.exp(model.kernel_.theta), 1.0)
def test_predict_cov_vs_std():
    """Predicted std.-devs. equal the sqrt of the covariance diagonal."""
    for krn in kernels:
        model = GaussianProcessRegressor(kernel=krn).fit(X, y)
        _, cov = model.predict(X2, return_cov=True)
        _, std = model.predict(X2, return_std=True)
        assert_almost_equal(np.sqrt(np.diag(cov)), std)
def test_anisotropic_kernel():
    """GPR recovers anisotropic length-scales for a direction-dependent target."""
    # The target varies ten times more slowly along the second feature, so the
    # learned length-scales should differ by at least a factor of 5.
    rng = np.random.RandomState(0)
    X_train = rng.uniform(-1, 1, (50, 2))
    y_train = X_train[:, 0] + 0.1 * X_train[:, 1]
    model = GaussianProcessRegressor(kernel=RBF([1.0, 1.0])).fit(X_train, y_train)
    assert_greater(np.exp(model.kernel_.theta[1]),
                   np.exp(model.kernel_.theta[0]) * 5)
def test_random_starts():
    # Test that an increasing number of random-starts of GP fitting only
    # increases the log marginal likelihood of the chosen theta.
    n_samples, n_features = 25, 2
    np.random.seed(0)
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features) * 2 - 1
    y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) \
        + rng.normal(scale=0.1, size=n_samples)
    kernel = C(1.0, (1e-2, 1e2)) \
        * RBF(length_scale=[1.0] * n_features,
              length_scale_bounds=[(1e-4, 1e+2)] * n_features) \
        + WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
    last_lml = -np.inf
    # each additional restart can only keep or improve the best LML found
    for n_restarts_optimizer in range(5):
        gp = GaussianProcessRegressor(
            kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
            random_state=0,).fit(X, y)
        lml = gp.log_marginal_likelihood(gp.kernel_.theta)
        # tolerance of one float32 eps absorbs round-off between runs
        assert_greater(lml, last_lml - np.finfo(np.float32).eps)
        last_lml = lml
def test_y_normalization():
    # Test normalization of the target values in GP
    # Fitting non-normalizing GP on normalized y and fitting normalizing GP
    # on unnormalized y should yield identical results
    y_mean = y.mean(0)
    y_norm = y - y_mean
    for kernel in kernels:
        # Fit non-normalizing GP on normalized y
        gpr = GaussianProcessRegressor(kernel=kernel)
        gpr.fit(X, y_norm)
        # Fit normalizing GP on unnormalized y
        gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr_norm.fit(X, y)
        # Compare predicted mean, std-devs and covariances
        y_pred, y_pred_std = gpr.predict(X2, return_std=True)
        y_pred = y_mean + y_pred  # undo the manual normalization
        y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)
        assert_almost_equal(y_pred, y_pred_norm)
        assert_almost_equal(y_pred_std, y_pred_std_norm)
        _, y_cov = gpr.predict(X2, return_cov=True)
        _, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
        assert_almost_equal(y_cov, y_cov_norm)
def test_y_multioutput():
    # Test that GPR can deal with multi-dimensional target values
    y_2d = np.vstack((y, y * 2)).T
    # Test for fixed kernel that first dimension of 2d GP equals the output
    # of 1d GP and that second dimension is twice as large
    kernel = RBF(length_scale=1.0)
    gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None,
                                   normalize_y=False)
    gpr.fit(X, y)
    gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None,
                                      normalize_y=False)
    gpr_2d.fit(X, y_2d)
    y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
    y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
    _, y_cov_1d = gpr.predict(X2, return_cov=True)
    _, y_cov_2d = gpr_2d.predict(X2, return_cov=True)
    assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
    assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)
    # Standard deviation and covariance do not depend on output
    assert_almost_equal(y_std_1d, y_std_2d)
    assert_almost_equal(y_cov_1d, y_cov_2d)
    y_sample_1d = gpr.sample_y(X2, n_samples=10)
    y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)
    assert_almost_equal(y_sample_1d, y_sample_2d[:, 0])
    # Test hyperparameter optimization
    # duplicating y must not change the optimized hyperparameters
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr.fit(X, y)
        gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr_2d.fit(X, np.vstack((y, y)).T)
        assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)
def test_custom_optimizer():
    # Test that GPR can use externally defined optimizers.
    # Define a dummy optimizer that simply tests 50 random hyperparameters
    def optimizer(obj_func, initial_theta, bounds):
        rng = np.random.RandomState(0)
        theta_opt, func_min = \
            initial_theta, obj_func(initial_theta, eval_gradient=False)
        for _ in range(50):
            # sample within bounds, clipped to [-2, 1] in log-space
            theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
                                              np.minimum(1, bounds[:, 1])))
            f = obj_func(theta, eval_gradient=False)
            if f < func_min:
                theta_opt, func_min = theta, f
        return theta_opt, func_min
    for kernel in kernels:
        if kernel == fixed_kernel:
            continue
        gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
        gpr.fit(X, y)
        # Checks that optimizer improved marginal likelihood
        assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
                       gpr.log_marginal_likelihood(gpr.kernel.theta))
def test_gpr_correct_error_message():
    """A non-PSD kernel matrix with alpha=0 raises an informative LinAlgError."""
    X_bad = np.arange(12).reshape(6, -1)
    y_bad = np.ones(6)
    kernel = DotProduct()
    model = GaussianProcessRegressor(kernel=kernel, alpha=0.0)
    expected = ("The kernel, %s, is not returning a "
                "positive definite matrix. Try gradually increasing "
                "the 'alpha' parameter of your "
                "GaussianProcessRegressor estimator." % kernel)
    assert_raise_message(np.linalg.LinAlgError, expected, model.fit, X_bad, y_bad)
def test_duplicate_input():
    # Test GPR can handle two different output-values for the same input.
    # alpha > 0 regularizes the kernel matrix so it stays invertible even
    # with (near-)duplicate rows in X.
    for kernel in kernels:
        gpr_equal_inputs = \
            GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
        gpr_similar_inputs = \
            GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
        X_ = np.vstack((X, X[0]))
        y_ = np.hstack((y, y[0] + 1))
        gpr_equal_inputs.fit(X_, y_)
        X_ = np.vstack((X, X[0] + 1e-15))
        y_ = np.hstack((y, y[0] + 1))
        gpr_similar_inputs.fit(X_, y_)
        X_test = np.linspace(0, 10, 100)[:, None]
        # exact duplicates and numerically-identical inputs must give the
        # same posterior
        y_pred_equal, y_std_equal = \
            gpr_equal_inputs.predict(X_test, return_std=True)
        y_pred_similar, y_std_similar = \
            gpr_similar_inputs.predict(X_test, return_std=True)
        assert_almost_equal(y_pred_equal, y_pred_similar)
        assert_almost_equal(y_std_equal, y_std_similar)
| Titan-C/scikit-learn | sklearn/gaussian_process/tests/test_gpr.py | Python | bsd-3-clause | 12,435 | [
"Gaussian"
] | 6c9a6fefd33e54244a6e893ee41216e71dcb5cae7b7c1b990951107d65f6f873 |
# Authors: Jonathan Anchell, Ram Balachandran.
# Center for Nanophase Materials Sciences, Oak Ridge National Laboratory, Oak Ridge, TN.
import sys
import math
from operator import itemgetter
import numpy as np
import pymatgen as mg
import copy
from collections import namedtuple
from enum import Enum
class densityOfStates:
    """Parse phonopy total/partial DOS output files.

    NOTE(review): Python 2 code (uses the ``print`` statement). The original
    author intended to reimplement this on top of the phonopy Python API.
    """
    #create a constructor with the structure information (pymatgen structure).
    # Everything needs to be reimplemented by importing phonopy into python
    def total_dos(self, tdosFileName, energyRange):
        """Read a phonopy total-DOS file and return rows inside energyRange.

        :param tdosFileName: path to the total DOS file (first header line is skipped)
        :param energyRange: (min, max) pair; inclusive filter on the energy column
        :return: numpy array of [energy, dos] rows
        """
        temp_list = []
        with open(tdosFileName, 'r') as f:
            next(f)
            for line in f:
                val = [float(i) for i in line.split()]
                temp_list.append(val)
        total_dos_list = []
        for cnt, val in enumerate(temp_list):
            if (energyRange[0]<=val[0]<=energyRange[1]):
                total_dos_list.append(val)
        return np.asarray(total_dos_list)
    # pass which types of atom we are interested. The index in turn can be obtained from the structure created in constructor
    def partial_dos(self, pdosFileName, atomIndexList, energyRange):
        """Sum the partial-DOS contributions of the atoms in atomIndexList,
        restricted to energyRange.

        NOTE(review): an out-of-range atom index only prints a warning and is
        skipped; the partial sum over the remaining indices is still returned.
        """
        temp_list = []
        energy_list = []
        with open(pdosFileName, 'r') as f:
            next(f)
            for line in f:
                lineVal = line.split()
                energyVal = float(lineVal[0])
                energy_list.append(energyVal)
                pdosVal = [float(i) for i in lineVal[1:]]
                temp_list.append(pdosVal)
        partial_dos_list=[]
        for cnt, val in enumerate(energy_list):
            pdosVal = 0
            if (energyRange[0]<= val <= energyRange[1]):
                for index in atomIndexList:
                    try:
                        pdosVal += temp_list[cnt][index]
                    except IndexError:
                        print 'Index does not exist'
                partial_dos_list.append([val,pdosVal])
        return np.asarray(partial_dos_list)
| ornl-abinitio/codesScripts | ramScripts/phonopyPostProcess.py | Python | lgpl-2.1 | 2,007 | [
"phonopy",
"pymatgen"
] | 47a2c8d4dd365ec4deee485103c677182dd462b70b86cf5711d115a891fd4c00 |
#!/usr/bin/env python
# (C) Copyright 2018 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
import cdsapi
c = cdsapi.Client(debug=True)
# Request one hour of ERA5 2-metre temperature, delivered as NetCDF.
request = {
    "variable": "2t",
    "product_type": "reanalysis",
    "date": "2012-12-01",
    "time": "14:00",
    "format": "netcdf",
}
r = c.retrieve("reanalysis-era5-single-levels", request)
r.download("test.nc")
"NetCDF"
] | d61e1633fd153c3e2f96465dc61e6bf602c38b8ccc59dc320e9b7b0346c9208c |
########################################################################
# $HeadURL$
# File : dirac-proxy-init.py
# Author : Adrian Casajus
########################################################################
__RCSID__ = "$Id$"
import sys
import getpass
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.NTP import getClockDeviation
class CLIParams:
  """Command-line parameters for dirac-proxy-init.

  Each ``set*`` method is registered as a switch callback through
  :py:meth:`registerCLISwitches` and follows the DIRAC convention of
  returning S_OK() / S_ERROR().
  """

  # Option defaults
  proxyLifeTime = 86400
  diracGroup = False
  proxyStrength = 1024
  limitedProxy = False
  strict = False
  summary = False
  certLoc = False
  keyLoc = False
  proxyLoc = False
  checkWithCS = True
  stdinPasswd = False
  userPasswd = ""
  checkClock = True
  embedDefaultGroup = True

  def setProxyLifeTime( self, arg ):
    """Parse a HH:MM string into the proxy lifetime in seconds."""
    try:
      fields = [ f.strip() for f in arg.split( ":" ) ]
      self.proxyLifeTime = int( fields[0] ) * 3600 + int( fields[1] ) * 60
    except ( ValueError, IndexError ):
      # int() failure -> ValueError; missing ":MM" part -> IndexError
      gLogger.error( "Can't parse %s time! Is it a HH:MM?" % arg )
      return S_ERROR( "Can't parse time argument" )
    return S_OK()

  def setProxyRemainingSecs( self, arg ):
    """Set the proxy lifetime directly in seconds."""
    self.proxyLifeTime = int( arg )
    return S_OK()

  def getProxyLifeTime( self ):
    """Return the lifetime as a HH:MM string (integer division, Python 2)."""
    hours = self.proxyLifeTime / 3600
    mins = self.proxyLifeTime / 60 - hours * 60
    return "%s:%s" % ( hours, mins )

  def getProxyRemainingSecs( self ):
    """Return the lifetime in seconds."""
    return self.proxyLifeTime

  def setDIRACGroup( self, arg ):
    """Set the DIRAC group to embed in the proxy."""
    self.diracGroup = arg
    return S_OK()

  def getDIRACGroup( self ):
    """Return the DIRAC group (False when not set)."""
    return self.diracGroup

  def setProxyStrength( self, arg ):
    """Set the proxy key strength in bits."""
    try:
      self.proxyStrength = int( arg )
    except ValueError:
      gLogger.error( "Can't parse %s bits! Is it a number?" % arg )
      return S_ERROR( "Can't parse strength argument" )
    return S_OK()

  def setProxyLimited( self, arg ):
    """Request a limited proxy."""
    self.limitedProxy = True
    return S_OK()

  def setSummary( self, arg ):
    """Enable summary output during proxy generation."""
    gLogger.info( "Enabling summary output" )
    self.summary = True
    return S_OK()

  def setCertLocation( self, arg ):
    """Set the path of the user certificate file."""
    self.certLoc = arg
    return S_OK()

  def setKeyLocation( self, arg ):
    """Set the path of the user key file."""
    self.keyLoc = arg
    return S_OK()

  def setProxyLocation( self, arg ):
    """Set the output path of the generated proxy."""
    self.proxyLoc = arg
    return S_OK()

  def setDisableCSCheck( self, arg ):
    """Skip validating the DN/group against the Configuration Service."""
    self.checkWithCS = False
    return S_OK()

  def setStdinPasswd( self, arg ):
    """Read the certificate password from stdin instead of prompting."""
    self.stdinPasswd = True
    return S_OK()

  def setStrict( self, arg ):
    """Fail on each error; treat warnings as errors."""
    self.strict = True
    return S_OK()

  def showVersion( self, arg ):
    """Print the version and exit immediately."""
    gLogger.always( "Version: %s" % __RCSID__ )
    sys.exit( 0 )
    return S_OK()

  def disableClockCheck( self, arg ):
    """Skip the host clock deviation check."""
    self.checkClock = False
    return S_OK()

  def registerCLISwitches( self ):
    """Register all command line switches with the Script helper."""
    Script.registerSwitch( "v:", "valid=", "Valid HH:MM for the proxy. By default is 24 hours", self.setProxyLifeTime )
    Script.registerSwitch( "g:", "group=", "DIRAC Group to embed in the proxy", self.setDIRACGroup )
    Script.registerSwitch( "b:", "strength=", "Set the proxy strength in bytes", self.setProxyStrength )
    Script.registerSwitch( "l", "limited", "Generate a limited proxy", self.setProxyLimited )
    Script.registerSwitch( "t", "strict", "Fail on each error. Treat warnings as errors.", self.setStrict )
    Script.registerSwitch( "S", "summary", "Enable summary output when generating proxy", self.setSummary )
    Script.registerSwitch( "C:", "Cert=", "File to use as user certificate", self.setCertLocation )
    Script.registerSwitch( "K:", "Key=", "File to use as user key", self.setKeyLocation )
    Script.registerSwitch( "u:", "out=", "File to write as proxy", self.setProxyLocation )
    Script.registerSwitch( "x", "nocs", "Disable CS check", self.setDisableCSCheck )
    Script.registerSwitch( "p", "pwstdin", "Get passwd from stdin", self.setStdinPasswd )
    Script.registerSwitch( "i", "version", "Print version", self.showVersion )
    Script.registerSwitch( "j", "noclockcheck", "Disable checking if time is ok", self.disableClockCheck )
from DIRAC.Core.Security.X509Chain import X509Chain
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.Core.Security import Locations
def generateProxy( params ):
  """Generate a local user proxy from the certificate/key in *params*.

  :param params: a populated :py:class:`CLIParams` instance
  :return: S_OK( proxyLocation ) or S_ERROR( message )
  """
  # Refuse (or warn) when the host clock deviates too much; a skewed clock
  # produces proxies with invalid validity windows.
  if params.checkClock:
    result = getClockDeviation()
    if result[ 'OK' ]:
      deviation = result[ 'Value' ]
      if deviation > 600:
        gLogger.error( "Your host clock seems to be off by more than TEN MINUTES! Thats really bad." )
        gLogger.error( "We're cowardly refusing to generate a proxy. Please fix your system time" )
        sys.exit( 1 )
      elif deviation > 180:
        gLogger.error( "Your host clock seems to be off by more than THREE minutes! Thats bad." )
        gLogger.notice( "We'll generate the proxy but please fix your system time" )
      elif deviation > 60:
        gLogger.error( "Your host clock seems to be off by more than a minute! Thats not good." )
        gLogger.notice( "We'll generate the proxy but please fix your system time" )
  # Fall back to the standard certificate/key locations when not given
  certLoc = params.certLoc
  keyLoc = params.keyLoc
  if not certLoc or not keyLoc:
    cakLoc = Locations.getCertificateAndKeyLocation()
    if not cakLoc:
      return S_ERROR( "Can't find user certificate and key" )
    if not certLoc:
      certLoc = cakLoc[0]
    if not keyLoc:
      keyLoc = cakLoc[1]
  params.certLoc = certLoc
  params.keyLoc = keyLoc
  #Load password
  # A throwaway chain is used to probe whether the key needs a password
  testChain = X509Chain()
  retVal = testChain.loadChainFromFile( params.certLoc )
  if not retVal[ 'OK' ]:
    return S_ERROR( "Cannot load certificate %s: %s" % ( params.certLoc, retVal[ 'Message' ] ) )
  timeLeft = testChain.getRemainingSecs()[ 'Value' ] / 86400
  if timeLeft < 30:
    gLogger.notice( "\nYour certificate will expire in %d days. Please renew it!\n" % timeLeft )
  retVal = testChain.loadKeyFromFile( params.keyLoc, password = params.userPasswd )
  if not retVal[ 'OK' ]:
    # key is encrypted: ask for the password (stdin or interactive prompt)
    passwdPrompt = "Enter Certificate password:"
    if params.stdinPasswd:
      userPasswd = sys.stdin.readline().strip( "\n" )
    else:
      userPasswd = getpass.getpass( passwdPrompt )
    params.userPasswd = userPasswd
  #Find location
  proxyLoc = params.proxyLoc
  if not proxyLoc:
    proxyLoc = Locations.getDefaultProxyLocation()
  chain = X509Chain()
  #Load user cert and key
  retVal = chain.loadChainFromFile( certLoc )
  if not retVal[ 'OK' ]:
    gLogger.warn( retVal[ 'Message' ] )
    return S_ERROR( "Can't load %s" % certLoc )
  retVal = chain.loadKeyFromFile( keyLoc, password = params.userPasswd )
  if not retVal[ 'OK' ]:
    gLogger.warn( retVal[ 'Message' ] )
    if 'bad decrypt' in retVal[ 'Message' ]:
      return S_ERROR( "Bad passphrase" )
    return S_ERROR( "Can't load %s" % keyLoc )
  if params.checkWithCS:
    # NOTE(review): a group-less proxy is written first, presumably so that
    # the CS connection below can authenticate with it -- confirm before
    # removing this apparent duplication of the final generateProxyToFile.
    retVal = chain.generateProxyToFile( proxyLoc,
                                        params.proxyLifeTime,
                                        strength = params.proxyStrength,
                                        limited = params.limitedProxy )

    gLogger.info( "Contacting CS..." )
    retVal = Script.enableCS()
    if not retVal[ 'OK' ]:
      gLogger.warn( retVal[ 'Message' ] )
      if 'Unauthorized query' in retVal[ 'Message' ]:
        # add hint for users
        return S_ERROR( "Can't contact DIRAC CS: %s (User possibly not registered with dirac server) "
                        % retVal[ 'Message' ] )
      return S_ERROR( "Can't contact DIRAC CS: %s" % retVal[ 'Message' ] )
    userDN = chain.getCertInChain( -1 )['Value'].getSubjectDN()['Value']
    if not params.diracGroup:
      # no group requested: use the default group registered for this DN
      result = Registry.findDefaultGroupForDN( userDN )
      if not result[ 'OK' ]:
        gLogger.warn( "Could not get a default group for DN %s: %s" % ( userDN, result[ 'Message' ] ) )
      else:
        params.diracGroup = result[ 'Value' ]
        gLogger.info( "Default discovered group is %s" % params.diracGroup )
    gLogger.info( "Checking DN %s" % userDN )
    retVal = Registry.getUsernameForDN( userDN )
    if not retVal[ 'OK' ]:
      gLogger.warn( retVal[ 'Message' ] )
      return S_ERROR( "DN %s is not registered" % userDN )
    username = retVal[ 'Value' ]
    gLogger.info( "Username is %s" % username )
    retVal = Registry.getGroupsForUser( username )
    if not retVal[ 'OK' ]:
      gLogger.warn( retVal[ 'Message' ] )
      return S_ERROR( "User %s has no groups defined" % username )
    groups = retVal[ 'Value' ]
    if params.diracGroup not in groups:
      return S_ERROR( "Requested group %s is not valid for DN %s" % ( params.diracGroup, userDN ) )
    gLogger.info( "Creating proxy for %s@%s (%s)" % ( username, params.diracGroup, userDN ) )
  if params.summary:
    h = int( params.proxyLifeTime / 3600 )
    m = int( params.proxyLifeTime / 60 ) - h * 60
    gLogger.notice( "Proxy lifetime will be %02d:%02d" % ( h, m ) )
    gLogger.notice( "User cert is %s" % certLoc )
    gLogger.notice( "User key is %s" % keyLoc )
    gLogger.notice( "Proxy will be written to %s" % proxyLoc )
    if params.diracGroup:
      gLogger.notice( "DIRAC Group will be set to %s" % params.diracGroup )
    else:
      gLogger.notice( "No DIRAC Group will be set" )
    gLogger.notice( "Proxy strength will be %s" % params.proxyStrength )
    if params.limitedProxy:
      gLogger.notice( "Proxy will be limited" )
  # Final proxy, this time embedding the (possibly discovered) DIRAC group
  retVal = chain.generateProxyToFile( proxyLoc,
                                      params.proxyLifeTime,
                                      params.diracGroup,
                                      strength = params.proxyStrength,
                                      limited = params.limitedProxy )

  if not retVal[ 'OK' ]:
    gLogger.warn( retVal[ 'Message' ] )
    return S_ERROR( "Couldn't generate proxy: %s" % retVal[ 'Message' ] )
  return S_OK( proxyLoc )
| rajanandakumar/DIRAC | FrameworkSystem/Client/ProxyGeneration.py | Python | gpl-3.0 | 9,636 | [
"DIRAC"
] | 0d9757437c6c345b4b7a49b4f513cfa9bab3b43fb63c6388735b9aed31fdd9fb |
from ase import Atoms
from gpaw import GPAW, PW
from gpaw.response.df import DielectricFunction
from gpaw.test import equal, findpeak
# Comparing the plasmon peaks found in bulk sodium for two different
# atomic structures. Testing for identical plasmon peaks. Not using
# physical sodium cell.
# Lattice parameter of the (artificial) cubic cell.
a = 4.23 / 2.0
# Primitive one-atom cell.
a1 = Atoms('Na',
           scaled_positions=[[0, 0, 0]],
           cell=(a, a, a),
           pbc=True)
# Expanding along x-direction: the same crystal described with a
# two-atom supercell doubled along x.  Physically equivalent to a1.
a2 = Atoms('Na2',
           scaled_positions=[[0, 0, 0], [0.5, 0, 0]],
           cell=(2 * a, a, a),
           pbc=True)
a1.calc = GPAW(gpts=(10, 10, 10),
               mode=PW(300),
               kpts={'size': (8, 8, 8), 'gamma': True},
               parallel={'band': 1},
               txt='small.txt')
# Kpoint sampling should be halved in the expanded direction.
a2.calc = GPAW(gpts=(20, 10, 10),
               mode=PW(300),
               kpts={'size': (4, 8, 8), 'gamma': True},
               parallel={'band': 1},
               txt='large.txt')
# Ground-state calculations for both representations.
a1.get_potential_energy()
a2.get_potential_energy()
# Use twice as many bands for expanded structure
a1.calc.diagonalize_full_hamiltonian(nbands=20)
a2.calc.diagonalize_full_hamiltonian(nbands=40)
a1.calc.write('gs_Na_small.gpw', 'all')
a2.calc.write('gs_Na_large.gpw', 'all')
# Calculate the dielectric functions
df1 = DielectricFunction('gs_Na_small.gpw',
                         omegamax=15,
                         domega0=0.05,
                         hilbert=True,
                         ecut=150)
df1NLFCx, df1LFCx = df1.get_dielectric_function(direction='x')
df1NLFCy, df1LFCy = df1.get_dielectric_function(direction='y')
df1NLFCz, df1LFCz = df1.get_dielectric_function(direction='z')
df2 = DielectricFunction('gs_Na_large.gpw',
                         omegamax=15,
                         domega0=0.05,
                         hilbert=True,
                         ecut=150)
df2NLFCx, df2LFCx = df2.get_dielectric_function(direction='x')
df2NLFCy, df2LFCy = df2.get_dielectric_function(direction='y')
df2NLFCz, df2LFCz = df2.get_dielectric_function(direction='z')
# Compare plasmon frequencies and intensities
w_w = df1.chi0.omega_w
# The plasmon peak is located from the loss function -Im(1/eps) with
# local-field corrections; peaks of the two equivalent structures must
# coincide in every Cartesian direction.
w1, I1 = findpeak(w_w, -(1. / df1LFCx).imag)
w2, I2 = findpeak(w_w, -(1. / df2LFCx).imag)
equal(w1, w2, 1e-2)
equal(I1, I2, 1e-3)
w1, I1 = findpeak(w_w, -(1. / df1LFCy).imag)
w2, I2 = findpeak(w_w, -(1. / df2LFCy).imag)
equal(w1, w2, 1e-2)
equal(I1, I2, 1e-3)
w1, I1 = findpeak(w_w, -(1. / df1LFCz).imag)
w2, I2 = findpeak(w_w, -(1. / df2LFCz).imag)
equal(w1, w2, 1e-2)
equal(I1, I2, 1e-3)
"ASE",
"GPAW"
] | d584b5b3293879b127ddff394b3093de1afd43c8a8833143b67bc44b7be92b4c |
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tinctest
from tinctest.lib import run_shell_command
from mpp.models import SQLPerformanceTestCase
from mpp.models.sql_tc import _SQLTestCaseResult
from mpp.lib.PSQL import PSQL
import unittest2 as unittest
import fnmatch
import hashlib
import os
import socket
import shutil
import sys
from xml.dom import minidom
_DEFAULT_LOOKUP_FILE = os.path.join(os.environ["TINCHOME"], 'function_owners.csv')
@tinctest.skipLoading("Test model. No tests loaded.")
class OptimizerSQLPerformanceTestCase(SQLPerformanceTestCase):
    """
    Inherits from SQLPerformanceTestCase and runs a performance test with
    additional optimizer (ORCA) gucs enabled.
    """
    def __init__(self, methodName, baseline_result = None, sql_file = None, db_name = None):
        super(OptimizerSQLPerformanceTestCase, self).__init__(methodName, baseline_result, sql_file, db_name)
        # Every test query runs with the ORCA optimizer switched on and logged.
        self.gucs.add('optimizer=on')
        self.gucs.add('optimizer_log=on')

    def _add_gucs_to_sql_file(self, sql_file, gucs_sql_file=None, optimizer=None):
        """
        Form the test sql file by adding the configured gucs to the sql file.

        The generated file keeps the leading '--' comment header, then an
        ignored section (SET statements for the gucs, an EXPLAIN of the query
        to pre-load the optimizer libraries / md cache - QAINF-418 - and
        '\\timing on'), followed by the original query text.

        @param sql_file: Path to the test sql file
        @param gucs_sql_file: Optional target path; defaults to a file of the
                              same base name in the test output directory.
        @param optimizer: Unused; kept for interface compatibility.
        @returns: Path to the modified sql file
        """
        if not gucs_sql_file:
            gucs_sql_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file))
        # setup/teardown files are copied verbatim - no gucs are injected
        if 'setup.sql' in gucs_sql_file or 'teardown.sql' in gucs_sql_file:
            shutil.copyfile(sql_file, gucs_sql_file)
            return gucs_sql_file
        with open(gucs_sql_file, 'w') as o:
            with open(sql_file, 'r') as f:
                # We make a dirty assumption that there is only one valid query block
                # in the sql file as in many other places. We have to re-visit this
                # when we tackle handling of multiple queries in a SQL file
                query_string = ''
                for line in f:
                    if (line.find('--') != 0):
                        query_string += line
                f.seek(0)
                # Copy the leading '--' comment header verbatim.  The first
                # non-comment line is remembered so it can be written back out
                # after the ignored section - previously it was silently
                # dropped, truncating the first line of the generated query.
                first_query_line = None
                for line in f:
                    if (line.find('--') != 0):
                        first_query_line = line
                        break
                    else:
                        o.write(line)
                o.write('\n\n-- start_ignore\n')
                # Add gucs and then add the line
                for guc_string in self.gucs:
                    o.write("SET %s;\n" %guc_string)
                for orca_guc_string in self.orcagucs:
                    o.write("%s;\n"%orca_guc_string)
                # Add explain of query to load optimizer libraries and md cache- QAINF-418
                # Note - Assuming just one valid query block
                o.write('--Add explain of query to load optimizer libraries\n')
                o.write('EXPLAIN \n %s\n\n' %query_string.strip())
                o.write('\\timing on\n')
                o.write('-- end_ignore\n\n')
                # Re-emit the line consumed by the header loop, then the rest.
                if first_query_line is not None:
                    o.write(first_query_line)
                for line in f:
                    o.write(line)
        self.test_artifacts.append(gucs_sql_file)
        return gucs_sql_file
class OptStacktrace(object):
    """
    Given a minidump or a text containing stack trace, this parses the stacktrace element
    from the mini-dump / text and builds the stack trace into an object.
    """
    def __init__(self):
        """
        Initialize an empty stack trace (binary defaults to 'postgres').
        """
        self.binary = 'postgres'
        self.threads = []  # OptStacktraceThread objects
        self.text = ''     # concatenated raw stack trace text

    @classmethod
    def parse(cls, type, dxl = None, text = None):
        """
        Parse stack trace from a minidump or a text and return a
        OptStacktrace object
        @param type - 'dxl' or 'text' specifying where to look for a stack trace
        @param dxl - location of the dxl file
        @param text - string containing a stack trace
        """
        if type != 'dxl' and type != 'text':
            tinctest.logger.warning("Unknown source type %s. Returning no stack." %(type))
            return None
        if type == 'dxl':
            return cls._parse_dxl_for_stack_trace(dxl)
        if type == 'text':
            return cls._parse_text_for_stack_trace(text)

    @classmethod
    def _parse_text_for_stack_trace(cls, text):
        """
        Parse a raw stack trace string into an OptStacktrace holding a single
        thread numbered 0.

        Note: parse() has always dispatched to this method for type='text',
        but it was never defined, so that path raised AttributeError.
        """
        if text is None or text == '':
            tinctest.logger.warning("No text specified. Returning no stack.")
            return None
        opt_stack = OptStacktrace()
        opt_stack.text = text
        opt_stack.threads.append(OptStacktraceThread.parse(text, 0))
        return opt_stack

    @classmethod
    def _parse_dxl_for_stack_trace(cls, dxl_file):
        """
        Parse the dxl:Thread / dxl:Stacktrace elements of a minidump file and
        build an OptStacktrace with one thread per dxl:Thread element.
        Returns None when the file is missing or not specified.
        """
        if dxl_file == None or dxl_file == '':
            tinctest.logger.warning("No dxl specified. Returning no stack.")
            return None
        # Check if dxl exists
        if not os.path.exists(dxl_file):
            tinctest.logger.warning("Dxl file %s not found. Returning no stack" %(dxl_file))
            return None
        # parse dxl to find string between <dxl:Stacktrace> and </dxl:StackTrace>
        dxl_dom = minidom.parse(dxl_file)
        thread_elements = dxl_dom.getElementsByTagName("dxl:Thread")
        if len(thread_elements) == 0:
            tinctest.logger.warning("No threads found. Returning no stack")
        opt_stack = OptStacktrace()
        # find thread id
        for thread in thread_elements:
            # Look for an attribute "Id"
            thread_number = int(thread.getAttribute("Id"))
            thread_stack = ''
            for stack_node in thread.childNodes:
                if stack_node.nodeName == 'dxl:Stacktrace':
                    thread_stack = stack_node.firstChild.data
            opt_stack.text += thread_stack
            opt_stack.threads.append(OptStacktraceThread.parse(thread_stack, thread_number))
        return opt_stack

    def get_thread(self, number):
        """
        Given a thread number, returns the corresponding OptStacktraceThread
        object, or None when no thread with that number exists.
        """
        for thread in self.threads:
            if thread.number == number:
                return thread
        return None

    def __str__(self):
        return self.text
class OptStacktraceThread(object):
    """
    Class representing one thread of a stack trace. Contains a list of OptStackFrame objects.
    """
    def __init__(self):
        self.frames = []  # OptStackFrame objects, top of stack first
        self.number = 0   # thread id
        self.text = ''    # raw stack trace text for this thread

    @classmethod
    def parse(cls, text, number):
        """
        Parse a single thread's stack trace text and returns an OptStacktraceThread object
        @param text: raw stack trace, one frame per line
        @param number: thread id
        """
        thread = OptStacktraceThread()
        thread.number = number
        thread.text = text
        # Get every line in the text and form a frame object
        for line in text.splitlines():
            thread.frames.append(OptStackFrame.parse(line))
        return thread

    def get_first_relevant_frame(self):
        """
        Returns the first relevant frame in the stack trace. Relevance
        here means the first non-gpos stack frame after the gpos exception
        machinery. For the following stack:
        1 0x000000000132f465 gpos::CException::Raise + 165
        2 0x0000000001b9f148 gpdxl::CDXLUtils::PphdxlParseDXLFile + 888
        3 0x000000000035450d COptTasks::PdrgPssLoad + 61,
        this should return the second frame.

        Returns None when no such frame exists. (Previously this returned
        the frame immediately following the first gpos frame even when that
        frame was itself in gpos, contradicting the contract above.)
        """
        seen_gpos_frame = False
        for frame in self.frames:
            if 'gpos' in frame.function:
                seen_gpos_frame = True
            elif seen_gpos_frame:
                return frame
        # No non-gpos frame follows the gpos exception machinery
        return None

    def hash(self, number_of_frames):
        """
        Return a hash of the top 'number_of_frames' of the stack trace.
        """
        if len(self.frames) < number_of_frames:
            number_of_frames = len(self.frames)
        m = hashlib.md5()
        for i in xrange(number_of_frames):
            m.update(self.frames[i].text)
        return m.hexdigest()

    def __str__(self):
        return self.text
class OptStackFrame(object):
    """
    Single stack frame element representing a single function call in a stack.
    Each frame is assumed to be of the following format:
    1 0x000000000132f465 gpos::CException::Raise + 165
    """
    def __init__(self):
        # Parsed fields; populated by OptStackFrame.parse().
        self.function = None    # fully qualified function name
        self.number = -1        # frame position within the thread
        self.address = None     # instruction address as a hex string
        self.file = None        # source file (not present in this format)
        self.line = -1          # offset following the '+' in the frame text
        self.text = None        # raw frame line
        self._owner_cache = {}  # function name -> owner lookup cache

    @classmethod
    def parse(cls, text):
        """
        Given a single line of stack trace, parses the string and returns an OptStackFrame object
        """
        frame = OptStackFrame()
        frame.text = text
        # Expected token layout:
        # "<number> <address> <function> + <offset>"
        # TODO - Check if we will have other formats
        tokens = text.split()
        frame.number = int(tokens[0])
        frame.address = tokens[1]
        frame.function = tokens[2]
        frame.line = int(tokens[4])
        return frame

    def __str__(self):
        return self.text

    def get_owner(self, lookup_file = _DEFAULT_LOOKUP_FILE):
        """
        By default, find the owner from a lookup file at function_owners.csv in $GPHOME/bin
        """
        if self.function in self._owner_cache:
            return self._owner_cache[self.function]
        if not os.path.exists(lookup_file):
            tinctest.logger.warning("Lookup file does not exist - " + lookup_file)
            return ''
        with open(lookup_file, 'r') as f:
            for row in f:
                cols = row.split(',')
                owner_name = cols[2].strip()
                func_name = cols[1].strip()
                # complexity.csv omits the namespace for functions defined in
                # .cpp files, so the default namespaces are also tried here.
                candidates = (func_name,
                              'gpopt::' + func_name,
                              'gpdxl::' + func_name)
                if self.function in candidates:
                    self._owner_cache[self.function] = owner_name
                    return owner_name.strip()
        tinctest.logger.warning("Did not find function %s in the lookup file %s " %(self.function, lookup_file))
        return ''
class OptimizerTestResult(_SQLTestCaseResult):
    """
    A listener for OptimizerSQLTestCase that will collect mini dumps when a test case fails
    """
    def addFailure(self, test, err):
        """
        Collect mini dumps for test queries during a failure
        """
        # Side effect only: the mini-dump is written by the test object itself.
        dxl_file = test._collect_mini_dump()
        super(OptimizerTestResult, self).addFailure(test, err)

    def _get_stack_info(self, dxl_file, stack_frames):
        """
        Extract (trace, hash, owner) for thread 0 of the stack found in the
        given minidump.

        @param dxl_file: path to the minidump
        @param stack_frames: how many top frames to fold into the hash
        @return: (stack_trace, stack_hash, stack_owner) tuple; each element is
                 '' when the corresponding piece of information is unavailable.
        """
        stack = OptStacktrace.parse(type = 'dxl', dxl = dxl_file)
        stack_hash = ''
        stack_trace = ''
        stack_owner = ''
        if stack is not None:
            thread = stack.get_thread(0)
            # Guard: get_thread() returns None when thread 0 is missing;
            # previously that (and a missing relevant frame) raised
            # AttributeError instead of degrading to empty strings.
            if thread is not None:
                stack_hash = thread.hash(stack_frames)
                stack_trace = thread.text
                frame = thread.get_first_relevant_frame()
                if frame is not None:
                    stack_owner = frame.get_owner()
        return (stack_trace, stack_hash, stack_owner)
| CraigHarris/gpdb | src/test/tinc/tincrepo/mpp/models/optimizer_sql_performance_tc.py | Python | apache-2.0 | 11,760 | [
"VisIt"
] | 4f9386acb50268fab12fe802cda875d52472916e45566a52548cdefa29ea5743 |
from abc import ABCMeta, abstractmethod
from math import fsum
import numpy as np
from scipy.stats import norm as norm_dist
from collections import namedtuple
from pyx import _hmmc
from hmm.multivariatenormal import MultivariateNormal, MixtureModel
__author__ = 'eranroz'
class HMMModel(object):
    """
    Base model for HMM: holds the state transition matrix and the emission
    distribution, and implements decoding (viterbi), evaluation
    (forward-backward) and the generic parts of Baum-Welch training.
    """
    # NOTE(review): __metaclass__ is the Python 2 idiom; under Python 3 it is
    # ignored, so the @abstractmethod hooks below are not actually enforced.
    # Left unchanged deliberately - switching to metaclass=ABCMeta would break
    # instantiation of subclasses that do not implement every hook.
    __metaclass__ = ABCMeta

    def __init__(self, state_transition, emission, min_alpha=None):
        """
        Initializes a new HMM model.
        @param state_transition: state transition matrix.
                                 (A & Pi in Rabiner's paper)
                                 with rows - source state, cols - target state.
                                 0 state assumed to be the begin state
                                 (according to Durbin's book)
        @param emission: observation symbol probability distribution
                         (B in Rabiner's paper)
                         rows - states, cols - output
                         The begin state should have emission too (doesn't matter what)
        @param min_alpha: prior on the sequence length (minimum probability for
                          a state's transition to itself)
        """
        self.state_transition = state_transition
        self.emission = emission
        self.min_alpha = min_alpha

    def num_states(self):
        """
        Get number of states in the model (including the begin state)
        """
        return self.state_transition.shape[0]

    def num_alphabet(self):
        """
        Get number of symbols in the alphabet
        """
        return self.emission.shape[1]

    def get_state_transition(self):
        """
        State transition matrix: rows - source state, cols - target state
        """
        return self.state_transition

    def get_emission(self):
        """
        Emission matrix: rows - states, cols - symbols
        """
        return self.emission

    @abstractmethod
    def _maximize_emission(self, seq, gammas):
        """
        part of the maximization step of the EM algorithm (Baum-Welch)
        should update the emission probabilities according to the forward-backward results
        @param seq: symbol sequence
        @param gammas: posterior state probabilities from backward-forward
        """
        pass

    def collect_stats(self, seq, bf_output):
        """
        Collect statistics based on the expectations, for later use in the maximization step
        @param seq: observed sequence
        @param bf_output: backward forward output
        @return: statistics on transitions, statistics on emission
        """
        transition_stats = self._collect_transition_stats(seq, bf_output)
        emission_stats = self._collect_emission_stats(seq, bf_output.state_p)
        return transition_stats, emission_stats

    def maximize_using_stats(self, transition_stats, emission_stats, prob_w=None):
        """
        Use collected statistics to maximize the model.
        @param transition_stats: transition statistics stacked along axis 0
                                 (one entry per observed sequence)
        @param emission_stats: emission statistics stacked the same way
        @param prob_w: optional per-sequence weights. Previously the required
                       prob_w argument of the maximization hooks was not
                       supplied at all, so this method always raised TypeError.
        """
        if prob_w is None:
            prob_w = np.ones(np.shape(transition_stats)[0])
        self._maximize_transition_stats(transition_stats, prob_w)
        self._maximize_emission_stats(emission_stats, prob_w)

    @abstractmethod
    def _collect_emission_stats(self, seq, gammas):
        """
        collects statistics from backward forward iteration (without normalization) about emission
        @param seq: observation sequence
        @param gammas: matrix of states to probabilities
        @return: un-normalized emission statistics
        """
        pass

    def _maximize_transition_stats(self, transition_stats, prob_w):
        """
        M-step for the state transition matrix from stacked per-sequence
        statistics (axis 0 enumerates sequences).
        @param transition_stats: stacked transition statistics
        @param prob_w: per-sequence weights; deliberately ignored - every
                       sequence is weighted equally (kept for interface
                       compatibility)
        """
        # == maximize transition ==
        prob_w = np.ones(prob_w.shape[0])  # replace prob_w with ones - all are equal (it is not normalized)
        transition_stats = np.average(transition_stats, 0, prob_w)
        transition_stats[1:, 1:] /= np.sum(transition_stats[1:, 1:], 1)[:, None]  # normalize
        if self.min_alpha is not None:
            n_states = transition_stats.shape[0] - 1  # minus begin state
            diagonal_selector = np.eye(n_states, dtype='bool')
            self_transitions = transition_stats[1:, 1:][diagonal_selector]
            n_self_transitions = np.maximum(self.min_alpha, self_transitions)
            # reduce the diff from the rest of transitions equally
            # (np.repeat maps each row's correction to its n_states-1
            # off-diagonal cells; the previous direct subtraction broadcast
            # correctly only for two real states)
            transition_stats[1:, 1:][~diagonal_selector] -= \
                np.repeat((n_self_transitions - self_transitions) / (n_states - 1), n_states - 1)
            transition_stats[1:, 1:][diagonal_selector] = n_self_transitions
        # start transition
        transition_stats[0, 1:] /= np.sum(transition_stats[0, 1:])
        # end transition
        transition_stats[1:, 0] /= np.sum(transition_stats[1:, 0])
        # update transition matrix
        self.state_transition = transition_stats

    @abstractmethod
    def _maximize_emission_stats(self, emission_stats, prob_w):
        """
        M-step for the emission parameters from stacked per-sequence statistics.
        @param emission_stats: statistics for emission of each state
        @param prob_w: per-sequence weights
        """
        pass

    def _collect_transition_stats(self, seq, bf_output):
        """
        collects statistics from backward forward iteration (without normalization) about transitions
        @param seq: observation sequence
        @param bf_output: output of backward forward iteration
        @return: expected transition counts in the layout of the state
                 transition matrix (begin row / end column are normalized,
                 since each must already form a distribution)
        """
        # collect statistics for the transition matrix
        new_state_transition = self.state_transition.copy()
        emission = self.get_emission()
        back_emission_seq = emission[1:, seq].T
        back_emission_seq *= bf_output.backward / bf_output.scales[:, None]
        new_state_transition[1:, 1:] *= np.dot(bf_output.forward[:-1, :].transpose(), back_emission_seq[1:, :])
        # we avoid normalization in the collection phase
        # start transition
        new_state_transition[0, 1:] *= back_emission_seq[0, :]
        new_state_transition[0, 1:] /= np.sum(new_state_transition[0, 1:])
        # end transition
        new_state_transition[1:, 0] = bf_output.forward[-1, :] * back_emission_seq[-1, :] / bf_output.scales[-1]
        # we normalize it although it is collect stats because there must be one
        new_state_transition[1:, 0] /= np.sum(new_state_transition[1:, 0])
        return new_state_transition

    def _maximize_transition(self, seq, bf_output):
        """
        part of the maximization step of the EM algorithm (Baum-Welch):
        updates the state transition probabilities according to the
        forward-backward results.
        @param seq: observation sequence
        @param bf_output: output of the forward-backward algorithm
        """
        new_state_transition = self.state_transition.copy()
        emission = self.get_emission()
        back_emission_seq = emission[1:, seq].T
        back_emission_seq *= bf_output.backward / bf_output.scales[:, None]
        new_state_transition[1:, 1:] *= np.dot(bf_output.forward[:-1, :].transpose(), back_emission_seq[1:, :])
        new_state_transition[1:, 1:] /= np.sum(new_state_transition[1:, 1:], 1)[:, None]  # normalize
        if self.min_alpha is not None:
            n_states = new_state_transition.shape[0] - 1  # minus begin state
            diagonal_selector = np.eye(n_states, dtype='bool')
            self_transitions = new_state_transition[1:, 1:][diagonal_selector]
            n_self_transitions = np.maximum(self.min_alpha, self_transitions)
            # reduce the diff from the rest of transitions equally
            # (np.repeat generalizes the correction beyond two real states)
            new_state_transition[1:, 1:][~diagonal_selector] -= \
                np.repeat((n_self_transitions - self_transitions) / (n_states - 1), n_states - 1)
            new_state_transition[1:, 1:][diagonal_selector] = n_self_transitions
        # start transition
        new_state_transition[0, 1:] = bf_output.state_p[0, :]
        new_state_transition[0, 1:] /= np.sum(new_state_transition[0, 1:])
        # end transition
        new_state_transition[1:, 0] = bf_output.forward[-1, :] * back_emission_seq[-1, :] / bf_output.scales[-1]
        new_state_transition[1:, 0] /= np.sum(new_state_transition[1:, 0])
        # update transition matrix
        self.state_transition = new_state_transition

    def maximize(self, seq, bw_output):
        """
        Maximization step for in Baum-Welch algorithm (EM)
        @param seq: symbol sequence
        @param bw_output: results of backward forward (scaling version)
        """
        self._maximize_transition(seq, bw_output)
        self._maximize_emission(seq, bw_output.state_p)

    def __str__(self):
        return '\n'.join(
            ['Model parameters:', 'Emission:', str(self.emission), 'State transition:',
             str(self.state_transition)])

    def viterbi(self, symbol_seq):
        """
        Find the most probable path through the model
        Dynamic programming algorithm for decoding the states.
        Implementation according to Durbin, Biological sequence analysis [p. 57]
        @param symbol_seq: observed sequence (array). Should be numerical (same size as defined in model)
        """
        n_states = self.num_states()
        unique_values = set(symbol_seq)
        emission_seq = np.zeros((len(symbol_seq), n_states - 1))
        for v in unique_values:
            emission_seq[symbol_seq == v, :] = np.log(self.get_emission()[1:, v])
        # decoding itself is delegated to the compiled Cython implementation
        return _hmmc.viterbi(emission_seq, self.state_transition)

    def viterbi_old(self, symbol_seq):
        """
        Find the most probable path through the model
        Same as above, but not optimized. just for simplification or if you got to trouble with compilation
        @param symbol_seq: observed sequence (array). Should be numerical (same size as defined in model)
        """
        n_states = self.num_states()
        unique_values = set(symbol_seq)
        emission_seq = np.zeros((len(symbol_seq), n_states - 1))
        for v in unique_values:
            emission_seq[symbol_seq == v, :] = np.log(self.get_emission()[1:, v])
        ptr_mat = np.zeros((len(symbol_seq), n_states - 1))
        l_state_trans_mat_T = self.get_state_transition().T
        emission_iterator = iter(emission_seq)
        ptr_iterator = iter(ptr_mat)
        # initial condition is begin state (the +1 constant is added to every
        # state equally, so the argmax traceback is unaffected by it)
        prev = next(emission_iterator) + 1 + np.log(l_state_trans_mat_T[1:, 0])
        next(ptr_iterator)[...] = np.argmax(prev)
        end_state = 0  # termination step
        end_transition = np.log(l_state_trans_mat_T[end_state, 1:])
        l_state_trans_mat_T = np.log(l_state_trans_mat_T[1:, 1:])
        # recursion step
        for emission_symbol in emission_iterator:
            p_state_transition = prev + l_state_trans_mat_T
            max_k = np.max(p_state_transition, 1)
            next(ptr_iterator)[...] = np.argmax(p_state_transition, 1)
            prev = emission_symbol + max_k
        p_state_transition = prev + end_transition
        # traceback step and without begin state
        most_probable_path = np.zeros(len(symbol_seq), int)
        most_probable_path[-1] = np.argmax(p_state_transition)
        for i in np.arange(len(symbol_seq) - 1, 0, -1):
            most_probable_path[i - 1] = ptr_mat[i, most_probable_path[i]]
        return most_probable_path

    def forward_backward(self, symbol_seq, model_end_state=False, num_stable=False):
        """
        Calculates the probability for the model and each step in it.
        @param symbol_seq: observed sequence (array). Should be numerical (same size as defined in model)
        @param model_end_state: whether to consider end state or not
        @param num_stable: whether to handle numerical stability by zeroing
                           nan/inf values in the backward recursion
        @return: named tuple (model_p, state_p, forward, backward, scales):
                 the log likelihood, the posterior state probabilities
                 (gammas), the scaled alpha/beta tables and the per-step
                 scaling factors.

        Remarks:
        this implementation uses the scaling variant to overcome floating
        point underflow (Rabiner p. 16, Durbin p. 79).

        NOTE(review): the previous nditer-based backward loop never fed the
        beta vector back into the next recursion step, so the returned
        backward/state_p tables were not the true posteriors; the recursion
        is now written explicitly (and verified against brute-force
        enumeration).
        """
        n_states = self.num_states()
        seq_len = len(symbol_seq)
        state_trans_mat = self.get_state_transition()
        emission_seq = self.get_emission()[1:, symbol_seq].T  # (seq_len, n_states - 1)
        transitions = state_trans_mat[1:, 1:]  # transitions between real states
        # ----- forward (alpha) recursion with per-step scaling -----
        forward = np.empty((seq_len, n_states - 1))
        s_j = np.empty(seq_len)
        # initial step leaves the begin state; clamped away from a hard zero
        try:
            alpha = np.maximum(state_trans_mat[0, 1:] * emission_seq[0], 1e-100)
        except FloatingPointError:
            alpha = np.exp(np.maximum(np.log(state_trans_mat[0, 1:]) + np.log(emission_seq[0]), -100))
        s_j[0] = fsum(alpha)  # fsum is more numerically stable
        forward[0] = alpha = alpha / s_j[0]
        for t in range(1, seq_len):
            alpha = emission_seq[t] * np.dot(transitions.T, alpha)
            s_j[t] = fsum(alpha)
            forward[t] = alpha = alpha / s_j[t]
        # log P(sequence) is the sum of the log scaling factors
        log_p_model = np.sum(np.log(s_j))
        if model_end_state:  # Durbin - with end state
            end_transition = forward[-1] * state_trans_mat[1:, 0]
            log_p_model += np.log(sum(end_transition))
        # ----- backward (beta) recursion, scaled by the forward factors -----
        backward = np.empty((seq_len, n_states - 1))
        if model_end_state:
            beta = state_trans_mat[1:, 0]  # Durbin p.60
        else:
            beta = np.ones(n_states - 1)  # Rabiner p.7 (24)
        backward[-1] = beta
        scaled_emission = emission_seq / s_j[:, None]
        for t in range(seq_len - 2, -1, -1):
            # beta_t(i) = sum_j A[i, j] * e_j(o_{t+1}) / s_{t+1} * beta_{t+1}(j)
            beta = np.dot(transitions, beta * scaled_emission[t + 1])
            if num_stable:
                beta = np.nan_to_num(beta)
            backward[t] = beta
        bf_result = namedtuple('BFResult', 'model_p state_p forward backward scales')
        return bf_result(log_p_model, backward * forward, forward, backward, s_j)

    def forward_backward_old(self, symbol_seq, model_end_state=False):
        """
        Calculates the probability for the model and each step in it
        Same as forward_backward but with fewer optimizations - kept for easier reading
        @param model_end_state: whether to model the end state
        @param symbol_seq: sequence of symbols/observations
        """
        n_states = self.num_states()
        emission = self.get_emission()
        state_trans_mat = self.get_state_transition()
        s_j = np.ones(len(symbol_seq))
        forward = np.zeros((len(symbol_seq), n_states - 1), order='F')  # minus the begin state
        backward = np.zeros((len(symbol_seq), n_states - 1), order='F')
        # forward algorithm
        # initial condition is begin state (in Durbin there is another forward - the begin = 1)
        forward[0, :] = state_trans_mat[0, 1:] * emission[1:, symbol_seq[0]]
        s_j[0] = sum(forward[0, :])
        forward[0, :] /= s_j[0]
        prev_forward = forward[0, :]
        # recursion step
        # transform to emission array
        unique_values = set(symbol_seq)
        emission_seq = np.zeros((len(symbol_seq), n_states - 1))
        for v in unique_values:
            emission_seq[symbol_seq == v, :] = emission[1:, v]
        real_transitions = state_trans_mat[1:, 1:]
        t_real_transitions = real_transitions.transpose()
        p_state_transition = np.zeros(n_states - 1)
        summing_arr = np.array([1] * (n_states - 1))
        emission_iterator = iter(emission_seq)
        next(emission_iterator)  # skip the first (instead of condition in for loop)
        s_j_iterator = np.nditer(s_j, op_flags=['writeonly'])
        forward_iterator = np.nditer(forward, flags=['external_loop'], op_flags=['writeonly'], order='C')
        next(s_j_iterator)
        next(forward_iterator)
        for sym_emission in emission_iterator:
            np.dot(t_real_transitions, prev_forward, p_state_transition)
            prev_forward = sym_emission * p_state_transition
            # scaling - see Rabiner p. 16, or Durbin p. 79
            scaling = np.dot(summing_arr, prev_forward)  # dot is actually faster then np.sum(prev_forward)
            next(s_j_iterator)[...] = scaling
            next(forward_iterator)[...] = prev_forward = prev_forward / scaling
        # end transition
        log_p_model = np.sum(np.log(s_j))
        if model_end_state:  # Durbin - with end state
            end_state = 0  # termination step
            end_transition = forward[len(symbol_seq) - 1, :] * state_trans_mat[1:, end_state]
            log_p_model += np.log(sum(end_transition))
        # backward algorithm
        # initial condition is end state
        if model_end_state:
            prev_back = backward[len(symbol_seq) - 1, :] = (state_trans_mat[1:, 0])  # Durbin p.60
        else:
            # previously hard-coded as [1, 1], which only worked for exactly
            # two real states
            prev_back = backward[len(symbol_seq) - 1, :] = np.ones(n_states - 1)  # Rabiner p.7 (24)
        backward_iterator = np.nditer(backward[::-1], flags=['external_loop'], op_flags=['writeonly'], order='C')
        next(backward_iterator)
        s_j_iterator = iter(s_j[::-1])
        # recursion step
        for sym_emission in emission_seq[:0:-1]:
            np.dot(prev_back * sym_emission, t_real_transitions, p_state_transition)
            next(backward_iterator)[...] = prev_back = p_state_transition / next(s_j_iterator)
        bf_result = namedtuple('BFResult', 'model_p state_p forward backward scales')
        return bf_result(log_p_model, backward * forward, forward, backward, s_j)

    def forward_backward_log(self, symbol_seq, model_end_state=False):
        """
        Calculates the probability for the model and each step in it
        @param symbol_seq: observed sequence (array). Should be numerical (same size as defined in model)
        @param model_end_state: whether to consider end state or not
        Remarks:
        this implementation uses log variant to overcome floating points errors instead of scaling.
        """
        interpolation_res = 0.01
        interpolation_res_i = 1.0 / interpolation_res
        interpol_tbl = np.log(1 + np.exp(-np.arange(0, 35, interpolation_res)))
        last_interpol = len(interpol_tbl) - 1

        def interpolate(prev):
            """
            Uses interpolation table to calculate log r=log(p+log(1+exp(q-p))) which is approx equals to
            log r = log (max)+(exp(1+log(x)) [x=min-max from prev].
            see Durbin p. 79
            NOTE(review): `sum - 2 * max` equals `min - max` only for two
            values per row, so this approximation assumes exactly two real
            states - confirm before using it with larger models.
            @param prev: previous probabilities plus the transition
            @return: result of interpolation of log r=log(p+log(1+exp(q-p)))
            """
            maxes = np.max(prev, 1)
            interpolation_i = np.minimum(np.round(-interpolation_res_i * (np.sum(prev, 1) - 2 * maxes)),
                                         last_interpol).astype(int)
            return maxes + interpol_tbl[interpolation_i]

        l_emission = np.log(self.get_emission()[1:, :])
        forward = np.zeros((len(symbol_seq), self.num_states() - 1))
        backward = np.zeros((len(symbol_seq), self.num_states() - 1))
        l_state_transition = np.log(self.get_state_transition()[1:, 1:])
        l_t_state_transition = l_state_transition.transpose()
        # forward algorithm
        # initial condition is begin state (in Durbin there is another forward - the begin = 1)
        prev_forward = forward[0, :] = np.log(self.get_state_transition()[0, 1:]) + l_emission[:, symbol_seq[0]]
        # recursion step
        emission_seq = list(enumerate([l_emission[:, s] for s in symbol_seq]))
        for i, sym_emission in emission_seq:
            if i == 0:
                continue
            from_prev = prev_forward + l_t_state_transition  # each row is different state, each col - CHECKED
            # now the sum approximation
            from_prev = interpolate(from_prev)
            forward[i, :] = prev_forward = sym_emission + from_prev
        # termination step
        if model_end_state:
            end_state = 0
            log_p_model = forward[len(symbol_seq) - 1, :] + np.log(self.get_state_transition()[1:, end_state])
            log_p_model = interpolate(np.array([log_p_model]))
        else:
            log_p_model = interpolate([forward[len(symbol_seq) - 1, :]])
        # backward algorithm
        last_index = len(symbol_seq) - 1
        if model_end_state:
            prev_back = backward[last_index, :] = np.log(self.get_state_transition()[1:, 0])
        else:
            # previously hard-coded as [0, 0], which only worked for exactly
            # two real states
            prev_back = backward[last_index, :] = np.zeros(self.num_states() - 1)  # Rabiner p.7 (24)
        for i, sym_emission in reversed(emission_seq):
            if i == 0:
                continue
            p_state_transition = interpolate(l_state_transition + (prev_back + sym_emission))
            prev_back = backward[i - 1, :] = p_state_transition
        # posterior probability 3.14 (P.60 Durbin)
        l_posterior = forward + backward - log_p_model
        bf_result = namedtuple('BFResult', 'model_p state_p forward backward')
        return bf_result(log_p_model, l_posterior, forward, backward)

    def html_state_transition(self):
        """
        nice html representation (as table) of state transition matrix
        """
        # imported lazily so the model itself does not depend on matplotlib
        import matplotlib as mpl
        import matplotlib.cm as cm
        backgrounds = cm.ScalarMappable(norm=mpl.colors.Normalize(vmin=np.min(self.state_transition), vmax=np.max(self.state_transition)), cmap=cm.Blues)
        color_mapper = lambda x: 'rgb(%i, %i, %i)' % backgrounds.to_rgba(x, bytes=True)[:3]
        cells_ths = ''.join(['<th>%i</th>' % i for i in np.arange(1, self.state_transition.shape[0])])
        states_trs = []
        for state_i, state_trans in enumerate(self.state_transition[1:, 1:]):
            state_description = "<td style=\"font-weight:bold;\">%i</td>" % (state_i+1)
            state_description += ''.join(['<td style="color:#fff;background:%s">%.2f</td>' % (color_mapper(val), val)
                                          for val in state_trans])
            states_trs.append('<tr>%s</tr>' % state_description)
        template = """
        <table style="font-size:85%;text-align:center;border-collapse:collapse;border:1px solid #aaa;" cellpadding="5" border="1">
        <tr style="font-size:larger; font-weight: bold;">
        <th>State/Cell</th>
        {cells_ths}
        </tr>
        {states_trs}
        </table>
        """
        return template.format(**({'cells_ths': cells_ths,
                                   'states_trs': '\n'.join(states_trs)}))
class DiscreteHMM(HMMModel):
    """
    Hidden Markov model over a discrete observation alphabet.

    Observations are integer symbols in ``range(num_alphabet())``. The
    emission matrix holds, for every non-begin state, the probability of
    emitting each symbol.
    """

    def _collect_emission_stats(self, seq, gammas):
        """
        E-step statistics: expected emission counts per (state, symbol).

        @param seq: observed symbol sequence (integer array)
        @param gammas: posterior state probabilities, shape (len(seq), states)
        @return: matrix of shape (num_states - 1, num_alphabet) with the
                 summed posterior mass for each symbol in each state
        """
        n_symbols = self.num_alphabet()
        stats = np.zeros((self.num_states() - 1, n_symbols))
        for symbol in np.arange(n_symbols):
            # sum the posterior state mass over all positions emitting `symbol`
            stats[:, symbol] = gammas[seq == symbol, :].sum(axis=0)
        return stats

    def _maximize_emission_stats(self, emission_stats, prob_w):
        """
        M-step from statistics collected over several sequences.

        @param emission_stats: stacked per-sequence statistics (first axis
                               indexes sequences)
        @param prob_w: per-sequence weights (deliberately ignored - all
                       sequences are weighted equally)
        """
        uniform_w = np.ones(prob_w.shape[0])  # replace to 1 - all equals (non normalized)
        averaged = np.average(emission_stats, 0, uniform_w)
        # normalize each state's row into a probability distribution
        averaged /= np.sum(averaged, 1)[:, None]
        self.emission[1:, :] = averaged

    def _maximize_emission(self, seq, gammas):
        """M-step for a single sequence: normalized expected counts."""
        counts = self._collect_emission_stats(seq, gammas)
        counts /= counts.sum(axis=1)[:, None]
        self.emission[1:, :] = counts
class ContinuousHMM(HMMModel):
    """
    Continuous HMM for observations of real values.
    The states are gaussian (or gaussian mixtures).

    @param state_transition: state transition matrix
    @param mean_vars: array of mean, var tuple (or array of such for mixtures)
    @param emission_density: log-concave or elliptically symmetric density
    @param mixture_coef: mixture coefficients
    """
    def __init__(self, state_transition, mean_vars, emission_density=norm_dist, mixture_coef=None,
                 min_alpha=None):
        emission = _ContinuousEmission(mean_vars, emission_density, mixture_coef)
        super().__init__(state_transition, emission, min_alpha=min_alpha)

    def _maximize_emission(self, seq, gammas):
        """
        M-step: re-estimate per-state mean and std from the posterior
        state probabilities (gammas), then rebuild the emission object.

        @param seq: observed real-valued sequence (1-D array)
        @param gammas: posterior state probabilities, shape (len(seq), states)
        """
        mean_vars = np.zeros((self.num_states(), 2))
        # floor on the std so a state never collapses to zero variance
        min_std = 0.5
        if self.emission.mixtures is None:
            # plain (non-mixture) gaussian states: closed-form weighted
            # mean/std per state, weighted by the gammas
            state_norm = np.sum(gammas, 0)
            mu = np.sum(gammas * seq[:, None], 0) / state_norm
            # variance is computed around the PREVIOUS means (old_mu)
            old_mu = self.emission.mean_vars[1:, 0]
            sym_min_mu = np.power(seq[:, None] - old_mu, 2)
            std = np.sqrt(np.sum(gammas * sym_min_mu, 0) / state_norm)
            std = np.maximum(std, min_std)  # it must not get to zero
            mean_vars[1:, :] = np.column_stack([mu, std])
            self.emission = _ContinuousEmission(mean_vars, self.emission.dist_func)
        else:  # TODO: not yet fully tested
            # mixture case: split each state's gamma mass across mixture
            # components in proportion to the component pdfs
            mean_vars = [(0, 0)]  # placeholder entry for the begin state
            mixture_coeff = [1]
            for state in np.arange(0, self.num_states() - 1):
                has_coeff = True
                try:
                    if len(self.emission.mixtures[state + 1]) > 1:
                        coeff_pdfs = [self.emission.dist_func(mean, var).pdf for mean, var in
                                      self.emission.mean_vars[state + 1]]
                        coeff_obs = np.array([[p(s) for p in coeff_pdfs] for s in seq])
                        coeff_obs /= np.sum(coeff_obs, 1)[:, None]
                        gamma_coeff = coeff_obs * gammas[:, state][:, None]
                        seq_man = seq[:, None]
                    else:
                        # NOTE(review): this branch leaves seq_man unassigned
                        # before it is used below - looks like a latent bug;
                        # confirm before relying on single-component mixtures
                        gamma_coeff = gammas[:, state]
                except TypeError:
                    # mixtures[state + 1] is a scalar -> not a real mixture
                    gamma_coeff = gammas[:, state]
                    seq_man = seq
                    has_coeff = False
                sum_gamma = np.sum(gamma_coeff, 0)
                mu = np.sum(gamma_coeff * seq_man, 0) / sum_gamma
                mu *= self.emission.mixtures[state + 1]
                sym_min_mu = np.power(seq_man - mu, 2)
                std = np.sqrt(np.sum(gamma_coeff * sym_min_mu, 0) / sum_gamma)
                std = np.maximum(std, min_std)  # it must not get to zero
                if has_coeff:
                    mean_vars.append(list(zip(mu, std)))
                else:
                    mean_vars.append((mu, std))
                mixture_coeff.append(sum_gamma / np.sum(sum_gamma))
            self.emission = _ContinuousEmission(mean_vars, self.emission.dist_func, mixture_coeff)
class _ContinuousEmission():
"""
Emission for continuous HMM.
"""
def __init__(self, mean_vars, dist=norm_dist, mixture_coef=None):
"""
Initializes a new continuous distribution states.
@param mean_vars: np array of mean and variance for each state
@param dist: distribution function
@return: a new instance of ContinuousDistStates
"""
self.dist_func = dist
self.mean_vars = mean_vars
self.mixtures = mixture_coef
self.cache = dict()
self.states = self._set_states()
self._set_states()
def _set_states(self):
from functools import partial
if self.mixtures is None:
states = ([self.dist_func(mean, var).pdf for mean, var in self.mean_vars])
else:
states = []
for mean_var, mixture in zip(self.mean_vars, self.mixtures):
try:
mix_pdf = [self.dist_func(mean, var).pdf for mean, var in mean_var]
mix = partial(_ContinuousEmission.mixture_pdf, mix_pdf, mixture)
if np.abs(np.sum(mixture) - 1) > 1e-6:
raise Exception("Bad mixture - mixture for be summed to 1")
except TypeError:
mix = self.dist_func(mean_var[0], mean_var[1]).pdf
states.append(mix)
return states
@staticmethod
def mixture_pdf(pdfs, mixtures, val):
"""
Mixture distrbution
@param pdfs:
@param mixtures:
@param val:
@return:
"""
return np.dot([p(val) for p in pdfs], mixtures)
def __getitem__(self, x):
"""
Get emission for state
@param x: first index is state (or slice for all states), second is value or array of values
@return: p according to pdf
"""
min_p = np.finfo(float).eps # 1e-100
if isinstance(x[0], slice):
if isinstance(x[1], np.ndarray): # this new case improves performance if you give emission array of values
pdfs = np.array([dist(x[1]) for dist in self.states[x[0]]])
pdfs = np.maximum(pdfs, min_p)
return pdfs
else:
try:
return self.cache[x[1]]
except KeyError:
pdfs = np.array([dist(x[1]) for dist in self.states[x[0]]])
pdfs = np.maximum(pdfs, min_p)
self.cache[x[1]] = pdfs
return self.cache[x[1]]
else:
return self.states[x[0]].pdf(x[1])
def __getstate__(self):
return {
'mean_vars': self.mean_vars,
'mixture_coef': self.mixtures,
'dist_func': self.dist_func
}
def __setstate__(self, state):
self.mean_vars = state['mean_vars']
self.mixtures = state['mixture_coef']
self.dist_func = state['dist_func']
self.cache = dict()
self.states = self._set_states()
def __str__(self):
return '\n'.join([str(self.dist_func.name) + ' distribution', 'Mean\t Var', str(self.mean_vars)])
class GaussianHMM(HMMModel):
    """
    Gaussian HMM for multidimensional gaussian mixtures. Extension for Continuous HMM above.
    The states are gaussian mixtures.

    @param state_transition: state transition matrix
    @param mean_vars: array of mean, var tuple (or array of such for mixtures)
    @param mixture_coef: mixture coefficients
    """
    def __init__(self, state_transition, mean_vars, mixture_coef=None, min_alpha=None):
        if len(mean_vars) == len(state_transition):
            mean_vars = mean_vars[1:]  # trim the begin emission
        # if not mixture (only tuple) wrap it with another list
        mean_vars = [[mean_cov] if isinstance(mean_cov, tuple) else mean_cov for mean_cov in mean_vars]
        # same type: all mean vars should be lists
        emission = _GaussianEmission(mean_vars, mixture_coef)
        super().__init__(state_transition, emission, min_alpha=min_alpha)

    def _collect_emission_stats(self, seq, gammas):
        """
        E-step statistics for one sequence: un-normalized weighted means and
        covariances per state (and per mixture component), plus the state
        posterior mass needed to normalize them later.

        @param seq: observation matrix (dimensions x positions - see np.dot below)
        @param gammas: posterior state probabilities per position
        @return: (mean_vars, mixture_coeff, state_norm) tuple
        """
        state_norm = np.sum(gammas, 0)
        mean_vars = []
        mixture_coeff = []
        for state in np.arange(0, self.num_states() - 1):
            is_mixture = len(self.emission.mixtures[state]) > 1
            if is_mixture:
                # split the state's gamma mass across mixture components
                emissions = self.emission.components_emission(state, seq)
                sum_emissions = np.sum(emissions, 0)
                emissions /= sum_emissions[:, None]  # normalize
                gamma_state = emissions * gammas[:, state][:, None]
                del emissions
                mixture_coeff.append(sum_emissions/np.sum(sum_emissions))
            else:
                gamma_state = gammas[:, state][:, None]
                mixture_coeff.append([1])
            covars_mixture = []
            new_means = np.dot(seq, gamma_state).T  # avoid normalization by state_norm[state]).T
            for mixture_i, mixture in enumerate(self.emission.mixtures[state]):
                gamma_c = gamma_state[:, mixture_i]
                # covariance computed around the PREVIOUS component mean
                old_mean = self.emission.mean_vars[state][mixture_i][0]
                seq_min_mean = seq - old_mean.T
                new_cov = np.dot((seq_min_mean * gamma_c), seq_min_mean.T)  # avoid normalization by state_norm[state])
                covars_mixture.append(new_cov)
            mean_vars.append((new_means, covars_mixture))
        return mean_vars, mixture_coeff, state_norm

    def _maximize_emission_stats(self, emissions_stats, prob_w):
        """
        M-step from statistics collected over several sequences (as produced
        by _collect_emission_stats). Normalizes by the accumulated state
        posterior mass and rebuilds the emission object.

        @param emissions_stats: list of per-sequence statistic tuples
        @param prob_w: per-sequence weights (deliberately ignored here)
        """
        prob_w = np.ones_like(prob_w)  # we use non normalized terms(normalizinig by state_norms)
        # extract means and covariances
        mean_vars, mixture_coeff, state_norms = zip(*emissions_stats)
        state_norms = np.sum(state_norms, 0)
        # extract mixture coefficents
        mixture_coeff = np.array(mixture_coeff)
        new_mixcoeff = []
        min_std = 0.5  # np.finfo(float).eps #1e-5 #
        new_mean_vars = []
        for state in np.arange(0, self.num_states() - 1):
            new_mixcoeff.append(np.average(mixture_coeff[:, state], 0, prob_w))
            mix_mean_covar = [mean_var_i[state] for mean_var_i in mean_vars]
            mean_state_i, covar_state_i = zip(*mix_mean_covar)
            mean_state_i = np.average(mean_state_i, 0, prob_w)[0]/state_norms[state]
            covar_state_i = np.average(covar_state_i, 0, prob_w)[0]/state_norms[state]
            # the diagonal must be large enough
            np.fill_diagonal(covar_state_i, np.maximum(covar_state_i.diagonal().copy(), min_std))
            new_mean_vars.append([(mean_state_i, covar_state_i)])
        self.emission = _GaussianEmission(new_mean_vars, new_mixcoeff)

    def _maximize_emission(self, seq, gammas):
        """
        Single-sequence M-step: re-estimate means/covariances per state
        (splitting gamma mass across mixture components when needed) and
        rebuild the emission object.
        """
        min_std = 0.5  # np.finfo(float).eps #1e-5 #
        state_norm = np.sum(gammas, 0)
        mean_vars = []
        mixture_coeff = []
        for state in np.arange(0, self.num_states() - 1):
            is_mixture = len(self.emission.mixtures[state]) > 1
            if is_mixture:
                emissions = self.emission.components_emission(state, seq)
                sum_emissions = np.sum(emissions, 0)
                emissions /= sum_emissions[:, None]  # normalize
                gamma_state = emissions * gammas[:, state][:, None]
                del emissions
                mixture_coeff.append(sum_emissions/np.sum(sum_emissions))
            else:
                gamma_state = gammas[:, state][:, None]
                mixture_coeff.append([1])
            covars_mixture = []
            new_means = (np.dot(seq, gamma_state) / state_norm[state]).T
            for mixture_i, mixture in enumerate(self.emission.mixtures[state]):
                gamma_c = gamma_state[:, mixture_i]
                old_mean = self.emission.mean_vars[state][mixture_i][0]
                seq_min_mean = seq - old_mean.T
                new_cov = np.dot((seq_min_mean * gamma_c), seq_min_mean.T) / state_norm[state]
                # the diagonal must be large enough
                np.fill_diagonal(new_cov, np.maximum(new_cov.diagonal().copy(), min_std))
                covars_mixture.append(new_cov)
            mean_vars.append(list(zip(new_means, covars_mixture)))
        self.emission = _GaussianEmission(mean_vars, mixture_coeff)

    def viterbi(self, symbol_seq):
        """
        Find the most probable path through the model.
        Dynamic programming algorithm for decoding the states.
        Implementation according to Durbin, Biological sequence analysis [p. 57]

        @param symbol_seq: observed sequence (array). Should be numerical (same size as defined in model)
        """
        emission_seq = np.log(self.get_emission()[1:, symbol_seq]).T
        return _hmmc.viterbi(emission_seq, self.state_transition)

    def __str__(self):
        # handelding mixtures isn't handled currently
        # (only the first component of each state is shown)
        means = np.array([x[0][0] for x in self.emission.mean_vars])
        covars = np.array([x[0][1].diagonal() for x in self.emission.mean_vars])
        str_rep = 'GMM. Means:\n'
        str_rep += np.array_str(means, precision=2, suppress_small=True, max_line_width=250).replace('\n\n', '\n')
        str_rep += '\nDiagonals for covariance matrices:\n'
        str_rep += np.array_str(covars, precision=2, suppress_small=True, max_line_width=250).replace('\n\n', '\n')
        str_rep += '\nStates transitions% (begin not shown):\n'
        str_rep += np.array_str(100*(self.state_transition[1:, 1:]), precision=1, suppress_small=True,
                                max_line_width=250)
        return str_rep
class _GaussianEmission():
"""
Emission for continuous HMM.
"""
def __init__(self, mean_vars, mixture_coef=None):
"""
Initializes a new continuous distribution states.
@param mean_vars: np array of mean and variance for each state
@return: a new instance of ContinuousDistStates
"""
# if no mixture defined set to 1
if mixture_coef is None:
mixture_coef = np.ones((len(mean_vars), 1))
self.mean_vars = _GaussianEmission._normalize_mean_vars(mean_vars)
self.mixtures = mixture_coef
self.states, self.pseudo_states = self._set_states()
@staticmethod
def _normalize_mean_vars(mean_vars):
# use numpy arrays
n_mean_vars = []
for state in mean_vars:
state_mean_cov = []
for mean, cov in state:
mean = np.array(mean)
if mean.ndim == 0:
mean = mean[None, None]
if mean.ndim == 1:
mean = mean[None]
cov = np.array(cov)
if cov.ndim == 0:
cov = cov[None, None]
if mean.ndim == 1:
cov = cov[None]
state_mean_cov.append((mean, cov))
n_mean_vars.append(state_mean_cov)
return n_mean_vars
def _set_states(self):
states = []
pseudo_states = []
for mean_var, mixture in zip(self.mean_vars, self.mixtures):
if mixture == 1:
emission = MultivariateNormal(mean_var[0][0], mean_var[0][1])
pseudo_states.append(emission.pdf)
else:
emission = MixtureModel([MultivariateNormal(mean, cov) for mean, cov in mean_var], mixture)
pseudo_states.append(emission.components_pdf)
states.append(emission.pdf)
return states, pseudo_states
def components_emission(self, states, observations, use_log=False):
min_p = np.finfo(float).eps if use_log else -np.inf
if isinstance(states, int):
return np.maximum(self.pseudo_states[states].log_pdf(observations), min_p, use_log=use_log)
pdfs = np.array([dist(observations, use_log=True) for dist in self.pseudo_states[states]]).T
return np.maximum(pdfs, min_p)
def __getitem__(self, x):
"""
Get emission for state
@param x: first index is state (or slice for all states), second is value or array of values
@return: p according to pdf
"""
min_p = np.finfo(float).eps
if isinstance(x[0], int):
return np.maximum(self.states[x[0]].pdf(x[1]), min_p)
pdfs = np.array([dist(x[1]) for dist in self.states])
return np.maximum(pdfs, min_p)
def __getstate__(self):
return {
'mean_vars': self.mean_vars,
'mixture_coef': self.mixtures
}
def __setstate__(self, state):
self.mean_vars = state['mean_vars']
self.mixtures = state['mixture_coef']
self.states, self.pseudo_states = self._set_states()
def __str__(self):
return 'Mean\t Var\n %s' % str(self.mean_vars)
class MultinomialHMM(HMMModel):
    """
    A multinomial HMM supporting multiple features.

    @param state_transition: transition matrix between states
    @param emission_matrix: n X m X k matrix, n - state, m - feature , k - emission for char k
    @param min_alpha:
    """
    def __init__(self, state_transition, emission_matrix, min_alpha=None):
        emission = MultinomialEmission(emission_matrix)
        self.num_features = emission_matrix.shape[1]
        super().__init__(state_transition, emission, min_alpha=min_alpha)

    def _maximize_emission(self, seq, gammas):
        """
        Single-sequence M-step: collect expected emission counts and
        normalize them into the new emission matrix.

        @param seq: observation matrix (positions x features, integer symbols)
        @param gammas: posterior state probabilities per position
        """
        emission_stats = self._collect_emission_stats(seq, gammas)
        # BUG FIX: the original called _maximize_emission_stats(emission_stats)
        # without the required prob_w argument (TypeError). Wrap the single
        # sequence's statistics as a batch of one with unit weight, matching
        # the multi-sequence code path.
        self._maximize_emission_stats(emission_stats[np.newaxis, ...], np.ones(1))

    def _collect_emission_stats(self, seq, gammas):
        """
        E-step statistics: expected emission counts per
        (state, feature, symbol).

        @return: array of shape (num_states, num_features, alphabet_size)
                 (row 0, the begin state, stays zero)
        """
        num_features = seq.shape[1]
        alphabet_size = self.emission.states_prob.shape[2]
        new_emission_matrix = np.zeros((self.num_states(), num_features, alphabet_size))
        state_p = gammas
        for feature in range(seq.shape[1]):
            for sym in range(0, alphabet_size):
                where_sym = (seq[:, feature] == sym)
                new_emission_matrix[1:, feature, sym] = np.sum(state_p[where_sym, :], 0)
        return new_emission_matrix

    def _maximize_emission_stats(self, emission_stats, prob_w):
        """
        M-step from statistics stacked over sequences (first axis).

        @param emission_stats: stacked per-sequence count arrays
        @param prob_w: per-sequence weights (deliberately ignored - all
                       sequences are weighted equally)
        """
        prob_w = np.ones(prob_w.shape[0])  # replace to 1 - all equals (non normalized)
        emission_stats = np.average(emission_stats, 0, prob_w)
        # normalize every (state, feature) row into a distribution over symbols
        emission_stats[1:, :, :] /= np.sum(emission_stats[1:, :, :], -1)[:, :, np.newaxis]
        self.emission = MultinomialEmission(emission_stats)

    def num_alphabet(self):
        """
        Number of symbols in alphabet based on the defined emission (in all the features).

        @return: num_features * alphabet_size
        """
        return self.emission.states_prob.shape[1]*self.emission.states_prob.shape[2]

    def viterbi(self, symbol_seq):
        """
        Find the most probable path through the model.
        Dynamic programming algorithm for decoding the states.
        Implementation according to Durbin, Biological sequence analysis [p. 57]

        @param symbol_seq: observed sequence (array). Should be numerical (same size as defined in model)
        """
        emission_seq = np.log(self.get_emission()[1:, symbol_seq]).T
        return _hmmc.viterbi(emission_seq, self.state_transition)
class MultinomialEmission():
    """
    Per-state emission probabilities for independent categorical features.

    ``states_prob[s, f, k]`` is the probability that feature ``f`` emits
    symbol ``k`` in state ``s``. Features are treated as independent, so
    the emission probability of an observation is the product over features.
    """

    def __init__(self, states_prob):
        """
        Initializes a new instance of MultinomialEmission.

        @param states_prob: a matrix of nxmxk where n - number of states and
                            m - feature index, k - emission for char k
        """
        self.states_prob = states_prob

    def __getitem__(self, item):
        """
        Get emission for state.

        @param item: first index is state (or slice for all states),
                     second is value or matrix of values [observations x features]
        @return: p according to pdf, floored at machine epsilon
        """
        floor = np.finfo(float).eps
        state_idx, observed = item
        if isinstance(state_idx, int):
            # single state: pick the per-feature probabilities and multiply
            per_feature = self.states_prob[state_idx, :, observed]
            joint = np.prod(per_feature, -1)
        else:
            # state slice with an [observations x features] symbol matrix:
            # gather each feature's probabilities, then multiply across features
            per_feature = [self.states_prob[state_idx, f, observed[:, f]]
                           for f in range(self.states_prob.shape[1])]
            joint = np.prod(per_feature, 0)
        return np.maximum(joint, floor)
| eranroz/dnase | src/hmm/HMMModel.py | Python | mit | 45,994 | [
"Gaussian"
] | 0221aac9d3ff8ed71ffe405f71c88a196a4a3565f16913d240da827cbd5d749b |
from gi.repository import Gtk, Gdk, GObject
import logging
import os
import xapian
from gettext import gettext as _
from softwarecenter.ui.gtk3.session.appmanager import get_appmanager
from cellrenderers import (CellRendererAppView,
CellButtonRenderer,
CellButtonIDs)
from softwarecenter.ui.gtk3.em import em, StockEms
from softwarecenter.enums import (AppActions, NonAppVisibility, Icons)
from softwarecenter.utils import ExecutionTime
from softwarecenter.backend import get_install_backend
from softwarecenter.netstatus import (get_network_watcher,
network_state_is_connected)
from softwarecenter.ui.gtk3.models.appstore2 import (
AppGenericStore, CategoryRowReference)
class AppTreeView(Gtk.TreeView):
"""Treeview based view component that takes a AppStore and displays it"""
VARIANT_INFO = 0
VARIANT_REMOVE = 1
VARIANT_INSTALL = 2
VARIANT_PURCHASE = 3
ACTION_BTNS = (VARIANT_REMOVE, VARIANT_INSTALL, VARIANT_PURCHASE)
def __init__(self, app_view, db, icons, show_ratings, store=None):
Gtk.TreeView.__init__(self)
self._logger = logging.getLogger("softwarecenter.view.appview")
self.app_view = app_view
self.db = db
self.pressed = False
self.focal_btn = None
self._action_block_list = []
self._needs_collapse = []
self.expanded_path = None
#~ # if this hacked mode is available everything will be fast
#~ # and we can set fixed_height mode and still have growing rows
#~ # (see upstream gnome #607447)
try:
self.set_property("ubuntu-almost-fixed-height-mode", True)
self.set_fixed_height_mode(True)
except:
self._logger.warn("ubuntu-almost-fixed-height-mode extension not available")
self.set_headers_visible(False)
# a11y: this is a cell renderer that only displays a icon, but still
# has a markup property for orca and friends
# we use it so that orca and other a11y tools get proper text to read
# it needs to be the first one, because that is what the tools look
# at by default
tr = CellRendererAppView(icons,
self.create_pango_layout(''),
show_ratings,
Icons.INSTALLED_OVERLAY)
tr.set_pixbuf_width(32)
tr.set_button_spacing(em(0.3))
# create buttons and set initial strings
info = CellButtonRenderer(self,
name=CellButtonIDs.INFO)
info.set_markup_variants(
{self.VARIANT_INFO: _('More Info')})
action = CellButtonRenderer(self,
name=CellButtonIDs.ACTION)
action.set_markup_variants(
{self.VARIANT_INSTALL: _('Install'),
self.VARIANT_REMOVE: _('Remove'),
self.VARIANT_PURCHASE: _(u'Buy\u2026')})
tr.button_pack_start(info)
tr.button_pack_end(action)
column = Gtk.TreeViewColumn("Applications", tr,
application=AppGenericStore.COL_ROW_DATA)
column.set_cell_data_func(tr, self._cell_data_func_cb)
column.set_fixed_width(200)
column.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
self.append_column(column)
# network status watcher
watcher = get_network_watcher()
watcher.connect("changed", self._on_net_state_changed, tr)
# custom cursor
self._cursor_hand = Gdk.Cursor.new(Gdk.CursorType.HAND2)
self.connect("style-updated", self._on_style_updated, tr)
# button and motion are "special"
self.connect("button-press-event", self._on_button_press_event, tr)
self.connect("button-release-event", self._on_button_release_event, tr)
self.connect("key-press-event", self._on_key_press_event, tr)
self.connect("key-release-event", self._on_key_release_event, tr)
self.connect("motion-notify-event", self._on_motion, tr)
self.connect("cursor-changed", self._on_cursor_changed, tr)
# our own "activate" handler
self.connect("row-activated", self._on_row_activated, tr)
self.backend = get_install_backend()
self._transactions_connected = False
self.connect('realize', self._on_realize, tr)
@property
def appmodel(self):
model = self.get_model()
if isinstance(model, Gtk.TreeModelFilter):
return model.get_model()
return model
def clear_model(self):
vadjustment = self.get_scrolled_window_vadjustment()
if vadjustment:
vadjustment.set_value(0)
self.expanded_path = None
self._needs_collapse = []
if self.appmodel:
self.appmodel.clear()
def expand_path(self, path):
if path is not None and not isinstance(path, Gtk.TreePath):
raise TypeError, ("Expects Gtk.TreePath or None, got %s" %
type(path))
model = self.get_model()
old = self.expanded_path
self.expanded_path = path
if old is not None:
ok = self.get_visible_range()
if ok:
start, end = ok
if (ok and start.compare(old) != -1 or
end.compare(old) != 1):
self._needs_collapse.append(old)
else:
try: # try... a lazy solution to Bug #846204
model.row_changed(old, model.get_iter(old))
except:
msg = ("apptreeview.expand_path: Supplied 'old' "
"path is an invalid tree path: '%s'" % old)
logging.debug(msg)
if path == None: return
model.row_changed(path, model.get_iter(path))
return
def get_scrolled_window_vadjustment(self):
ancestor = self.get_ancestor(Gtk.ScrolledWindow)
if ancestor:
return ancestor.get_vadjustment()
return None
def get_rowref(self, model, path):
if path == None: return None
return model[path][AppGenericStore.COL_ROW_DATA]
def rowref_is_category(self, rowref):
return isinstance(rowref, CategoryRowReference)
def _on_realize(self, widget, tr):
# connect to backend events once self is realized so handlers
# have access to the TreeView's initialised Gdk.Window
if self._transactions_connected: return
self.backend.connect("transaction-started", self._on_transaction_started, tr)
self.backend.connect("transaction-finished", self._on_transaction_finished, tr)
self.backend.connect("transaction-stopped", self._on_transaction_stopped, tr)
self._transactions_connected = True
return
def _calc_row_heights(self, tr):
ypad = StockEms.SMALL
tr.set_property('xpad', StockEms.MEDIUM)
tr.set_property('ypad', ypad)
for btn in tr.get_buttons():
# recalc button geometry and cache
btn.configure_geometry(self.create_pango_layout(""))
btn_h = btn.height
tr.normal_height = max(32 + 2*ypad, em(2.5) + ypad)
tr.selected_height = tr.normal_height + btn_h + StockEms.MEDIUM
return
def _on_style_updated(self, widget, tr):
self._calc_row_heights(tr)
return
def _on_motion(self, tree, event, tr):
window = self.get_window()
x, y = int(event.x), int(event.y)
if not self._xy_is_over_focal_row(x, y):
window.set_cursor(None)
return
path = tree.get_path_at_pos(x, y)
if not path:
window.set_cursor(None)
return
rowref = self.get_rowref(tree.get_model(), path[0])
if not rowref: return
if self.rowref_is_category(rowref):
window.set_cursor(None)
return
model = tree.get_model()
app = model[path[0]][AppGenericStore.COL_ROW_DATA]
if (not network_state_is_connected() and
not self.appmodel.is_installed(app)):
for btn_id in self.ACTION_BTNS:
btn_id = tr.get_button_by_name(CellButtonIDs.ACTION)
btn_id.set_sensitive(False)
use_hand = False
for btn in tr.get_buttons():
if btn.state == Gtk.StateFlags.INSENSITIVE:
continue
if btn.point_in(x, y):
use_hand = True
if self.focal_btn is btn:
btn.set_state(Gtk.StateFlags.ACTIVE)
elif not self.pressed:
btn.set_state(Gtk.StateFlags.PRELIGHT)
else:
if btn.state != Gtk.StateFlags.NORMAL:
btn.set_state(Gtk.StateFlags.NORMAL)
if use_hand:
window.set_cursor(self._cursor_hand)
else:
window.set_cursor(None)
return
def _on_cursor_changed(self, view, tr):
model = view.get_model()
sel = view.get_selection()
path = view.get_cursor()[0]
rowref = self.get_rowref(model, path)
if not rowref: return
if self.has_focus(): self.grab_focus()
if self.rowref_is_category(rowref):
self.expand_path(None)
return
sel.select_path(path)
self._update_selected_row(view, tr, path)
return
def _update_selected_row(self, view, tr, path=None):
sel = view.get_selection()
if not sel:
return False
model, rows = sel.get_selected_rows()
if not rows:
return False
row = rows[0]
if self.rowref_is_category(row):
return False
# update active app, use row-ref as argument
self.expand_path(row)
app = model[row][AppGenericStore.COL_ROW_DATA]
# make sure this is not a category (LP: #848085)
if self.rowref_is_category(app):
return False
action_btn = tr.get_button_by_name(
CellButtonIDs.ACTION)
#if not action_btn: return False
if self.appmodel.is_installed(app):
action_btn.set_variant(self.VARIANT_REMOVE)
action_btn.set_sensitive(True)
action_btn.show()
elif self.appmodel.is_available(app):
if self.appmodel.is_purchasable(app):
action_btn.set_variant(self.VARIANT_PURCHASE)
else:
action_btn.set_variant(self.VARIANT_INSTALL)
action_btn.set_sensitive(True)
action_btn.show()
if not network_state_is_connected():
action_btn.set_sensitive(False)
self.app_view.emit("application-selected",
self.appmodel.get_application(app))
return
else:
action_btn.set_sensitive(False)
action_btn.hide()
self.app_view.emit("application-selected",
self.appmodel.get_application(app))
return
if self.appmodel.get_transaction_progress(app) > 0:
action_btn.set_sensitive(False)
elif self.pressed and self.focal_btn == action_btn:
action_btn.set_state(Gtk.StateFlags.ACTIVE)
else:
action_btn.set_state(Gtk.StateFlags.NORMAL)
self.app_view.emit(
"application-selected", self.appmodel.get_application(app))
return False
def _on_row_activated(self, view, path, column, tr):
rowref = self.get_rowref(view.get_model(), path)
if not rowref:
return
elif self.rowref_is_category(rowref):
return
x, y = self.get_pointer()
for btn in tr.get_buttons():
if btn.point_in(x, y):
return
app = self.appmodel.get_application(rowref)
if app:
self.app_view.emit("application-activated", app)
return
def _on_button_event_get_path(self, view, event):
if event.button != 1: return False
res = view.get_path_at_pos(int(event.x), int(event.y))
if not res: return False
# check the path is valid and is not a category row
path = res[0]
is_cat = self.rowref_is_category(self.get_rowref(view.get_model(), path))
if path is None or is_cat: return False
# only act when the selection is already there
selection = view.get_selection()
if not selection.path_is_selected(path): return False
return path
def _on_button_press_event(self, view, event, tr):
if not self._on_button_event_get_path(view, event): return
self.pressed = True
x, y = int(event.x), int(event.y)
for btn in tr.get_buttons():
if btn.point_in(x, y) and (btn.state != Gtk.StateFlags.INSENSITIVE):
self.focal_btn = btn
btn.set_state(Gtk.StateFlags.ACTIVE)
view.queue_draw()
return
self.focal_btn = None
return
def _on_button_release_event(self, view, event, tr):
path = self._on_button_event_get_path(view, event)
if not path: return
self.pressed = False
x, y = int(event.x), int(event.y)
for btn in tr.get_buttons():
if btn.point_in(x, y) and (btn.state != Gtk.StateFlags.INSENSITIVE):
btn.set_state(Gtk.StateFlags.NORMAL)
self.get_window().set_cursor(self._cursor_hand)
if self.focal_btn is not btn:
break
self._init_activated(btn, view.get_model(), path)
view.queue_draw()
break
self.focal_btn = None
return
def _on_key_press_event(self, widget, event, tr):
kv = event.keyval
#print kv
r = False
if kv == Gdk.KEY_Right: # right-key
btn = tr.get_button_by_name(CellButtonIDs.ACTION)
if btn is None: return # Bug #846779
if btn.state != Gtk.StateFlags.INSENSITIVE:
btn.has_focus = True
btn = tr.get_button_by_name(CellButtonIDs.INFO)
btn.has_focus = False
elif kv == Gdk.KEY_Left: # left-key
btn = tr.get_button_by_name(CellButtonIDs.ACTION)
if btn is None: return # Bug #846779
btn.has_focus = False
btn = tr.get_button_by_name(CellButtonIDs.INFO)
btn.has_focus = True
elif kv == Gdk.KEY_space: # spacebar
for btn in tr.get_buttons():
if (btn is not None and btn.has_focus and
btn.state != Gtk.StateFlags.INSENSITIVE):
btn.set_state(Gtk.StateFlags.ACTIVE)
sel = self.get_selection()
model, it = sel.get_selected()
path = model.get_path(it)
if path:
#self._init_activated(btn, self.get_model(), path)
r = True
break
self.queue_draw()
return r
def _on_key_release_event(self, widget, event, tr):
kv = event.keyval
r = False
if kv == 32: # spacebar
for btn in tr.get_buttons():
if btn.has_focus and btn.state != Gtk.StateFlags.INSENSITIVE:
btn.set_state(Gtk.StateFlags.NORMAL)
sel = self.get_selection()
model, it = sel.get_selected()
path = model.get_path(it)
if path:
self._init_activated(btn, self.get_model(), path)
btn.has_focus = False
r = True
break
self.queue_draw()
return r
def _init_activated(self, btn, model, path):
app = model[path][AppGenericStore.COL_ROW_DATA]
s = Gtk.Settings.get_default()
GObject.timeout_add(s.get_property("gtk-timeout-initial"),
self._app_activated_cb,
btn,
btn.name,
app,
model,
path)
return
def _cell_data_func_cb(self, col, cell, model, it, user_data):
path = model.get_path(it)
if model[path][0] is None:
indices = path.get_indices()
model.load_range(indices, 5)
if path in self._needs_collapse:
# collapse rows that were outside the visible range and
# thus not immediately collapsed when expand_path was called
cell.set_property('isactive', False)
i = self._needs_collapse.index(path)
del self._needs_collapse[i]
model.row_changed(path, it)
return
cell.set_property('isactive', path == self.expanded_path)
return
def _app_activated_cb(self, btn, btn_id, app, store, path):
if self.rowref_is_category(app):
return
# FIXME: would be nice if that would be more elegant
# because we use a treefilter we need to get the "real"
# model first
if type(store) is Gtk.TreeModelFilter:
store = store.get_model()
pkgname = self.appmodel.get_pkgname(app)
if btn_id == CellButtonIDs.INFO:
self.app_view.emit("application-activated",
self.appmodel.get_application(app))
elif btn_id == CellButtonIDs.ACTION:
btn.set_sensitive(False)
store.row_changed(path, store.get_iter(path))
app_manager = get_appmanager()
# be sure we dont request an action for a pkg with
# pre-existing actions
if pkgname in self._action_block_list:
logging.debug("Action already in progress for package:"
" '%s'" % pkgname)
return False
self._action_block_list.append(pkgname)
if self.appmodel.is_installed(app):
action = AppActions.REMOVE
elif self.appmodel.is_purchasable(app):
app_manager.buy_app(self.appmodel.get_application(app))
store.notify_action_request(app, path)
return
else:
action = AppActions.INSTALL
store.notify_action_request(app, path)
app_manager.request_action(
self.appmodel.get_application(app), [], [],
action)
return False
def _set_cursor(self, btn, cursor):
# make sure we have a window instance (LP: #617004)
window = self.get_window()
if isinstance(window, Gdk.Window):
x, y = self.get_pointer()
if btn.point_in(x, y):
window.set_cursor(cursor)
def _on_transaction_started(self, backend, pkgname, appname, trans_id, trans_type, tr):
""" callback when an application install/remove transaction has started """
action_btn = tr.get_button_by_name(CellButtonIDs.ACTION)
if action_btn:
action_btn.set_sensitive(False)
self._set_cursor(action_btn, None)
def _on_transaction_finished(self, backend, result, tr):
""" callback when an application install/remove transaction has finished """
# need to send a cursor-changed so the row button is properly updated
self.emit("cursor-changed")
# remove pkg from the block list
self._check_remove_pkg_from_blocklist(result.pkgname)
action_btn = tr.get_button_by_name(CellButtonIDs.ACTION)
if action_btn:
action_btn.set_sensitive(True)
self._set_cursor(action_btn, self._cursor_hand)
def _on_transaction_stopped(self, backend, result, tr):
    """ callback when an application install/remove transaction has stopped """
    # remove pkg from the block list
    self._check_remove_pkg_from_blocklist(result.pkgname)
    action_btn = tr.get_button_by_name(CellButtonIDs.ACTION)
    if action_btn:
        # this should be a function that decides action button state label...
        # a cancelled install leaves the package uninstalled, so flip the
        # button label back from "Install" to "Remove" variant handling
        if action_btn.current_variant == self.VARIANT_INSTALL:
            action_btn.set_markup(self.VARIANT_REMOVE)
        action_btn.set_sensitive(True)
        self._set_cursor(action_btn, self._cursor_hand)
def _on_net_state_changed(self, watcher, state, tr):
    """Callback for network availability changes: refresh the selected row."""
    # NOTE(review): self is passed explicitly to the *bound* method here, so
    # _update_selected_row receives (self, self, tr) -- verify its signature
    # expects (view, tr) as extra arguments.
    self._update_selected_row(self, tr)
    # queue a draw just to be sure the view is looking right
    self.queue_draw()
    return
def _check_remove_pkg_from_blocklist(self, pkgname):
if pkgname in self._action_block_list:
i = self._action_block_list.index(pkgname)
del self._action_block_list[i]
def _xy_is_over_focal_row(self, x, y):
res = self.get_path_at_pos(x, y)
#cur = self.get_cursor()
if not res:
return False
return self.get_path_at_pos(x, y)[0] == self.get_cursor()[0]
def get_query_from_search_entry(search_term):
    """Build a xapian query from free-text search input.

    An empty or None term yields the empty query, which matches nothing.
    """
    if not search_term:
        return xapian.Query("")
    return xapian.QueryParser().parse_query(search_term)
def on_entry_changed(widget, data):
    """Debounced search-entry handler: re-run the query and refill the view.

    data is a (view, enquirer) tuple; widget.stamp holds the pending
    GObject timeout id used for debouncing.
    """
    def _work():
        new_text = widget.get_text()
        (view, enquirer) = data
        with ExecutionTime("total time"):
            with ExecutionTime("enquire.set_query()"):
                enquirer.set_query(get_query_from_search_entry(new_text),
                                   limit=100*1000,
                                   nonapps_visible=NonAppVisibility.ALWAYS_VISIBLE)
            store = view.tree_view.get_model()
            with ExecutionTime("store.clear()"):
                store.clear()
            with ExecutionTime("store.set_documents()"):
                store.set_from_matches(enquirer.matches)
            # drain pending GTK events so the timing below reflects a
            # fully settled model
            with ExecutionTime("model settle (size=%s)" % len(store)):
                while Gtk.events_pending():
                    Gtk.main_iteration()
        return
    # debounce: cancel any pending query and schedule a fresh one 250ms out
    if widget.stamp: GObject.source_remove(widget.stamp)
    widget.stamp = GObject.timeout_add(250, _work)
def get_test_window():
    """Build a standalone test window: search entry plus scrollable app list.

    Wires up the package cache, xapian store database, icon theme, an
    installed-only AppFilter and an AppView, then seeds the search with
    "gtk3".  Returns the shown Gtk.Window.
    """
    import softwarecenter.log
    softwarecenter.log.root.setLevel(level=logging.DEBUG)
    softwarecenter.log.add_filters_from_string("performance")
    fmt = logging.Formatter("%(name)s - %(message)s", None)
    softwarecenter.log.handler.setFormatter(fmt)
    from softwarecenter.paths import XAPIAN_BASE_PATH
    xapian_base_path = XAPIAN_BASE_PATH
    pathname = os.path.join(xapian_base_path, "xapian")
    # the store
    from softwarecenter.db.pkginfo import get_pkg_info
    cache = get_pkg_info()
    cache.open()
    # the db
    from softwarecenter.db.database import StoreDatabase
    db = StoreDatabase(pathname, cache)
    db.open()
    # additional icons come from app-install-data
    icons = Gtk.IconTheme.get_default()
    icons.prepend_search_path("/usr/share/app-install/icons/")
    icons.prepend_search_path("/usr/share/software-center/icons/")
    # create a filter
    from softwarecenter.db.appfilter import AppFilter
    filter = AppFilter(db, cache)
    filter.set_supported_only(False)
    filter.set_installed_only(True)
    # appview
    from softwarecenter.ui.gtk3.models.appstore2 import AppListStore
    from softwarecenter.db.enquire import AppEnquire
    enquirer = AppEnquire(cache, db)
    store = AppListStore(db, cache, icons)
    from softwarecenter.ui.gtk3.views.appview import AppView
    view = AppView(db, cache, icons, show_ratings=True)
    view.set_model(store)
    entry = Gtk.Entry()
    entry.stamp = 0
    entry.connect("changed", on_entry_changed, (view, enquirer))
    entry.set_text("gtk3")
    scroll = Gtk.ScrolledWindow()
    box = Gtk.VBox()
    box.pack_start(entry, False, True, 0)
    box.pack_start(scroll, True, True, 0)
    win = Gtk.Window()
    win.connect("destroy", lambda x: Gtk.main_quit())
    scroll.add(view)
    win.add(box)
    win.set_size_request(600, 400)
    win.show_all()
    return win
# manual test entry point: show the searchable app list window
if __name__ == "__main__":
    win = get_test_window()
    Gtk.main()
| armikhael/software-center | softwarecenter/ui/gtk3/widgets/apptreeview.py | Python | gpl-3.0 | 24,448 | [
"ORCA"
] | de2aacb8485364508d6639ca9bf3fcb553e9751a12d80c3f487450afc3b80628 |
"""
Example of running optical and contact cluster stuff on gromacs file
"""
from __future__ import absolute_import, division, print_function
import os.path as op
import numpy as np
import numpy.testing as npt
import pdb
import gsd.hoomd
import sys
import clustering as cl
import random
import scipy
import time
#from context import clustering as cl
#from context import smoluchowski as smol
from cdistances import conOptDistanceCython,alignDistancesCython
#import imp
#cl = imp.load_source('cl','/home/rachael/Analysis_and_run_code/analysis/cluster_analysis/clustering/clustering.py')
data_path ='/home/rachael/coarsegraining/CG/active_learning/martini-assembly/dfmi/4_production' #folder where trajectory is
#trajectory should not have any water
#this can be done as follows:
#gmx trjconv -f after_eq.gro -o after_eq_whole.gro -pbc whole -s md.tpr
#choose protein
#gmx trjconv -f md.xtc -o md_whole.xtc -pbc whole -s md.tpr
#choose protein
#grompp -f md_dummy.mdp -c after_eq_whole.gro -p CG_dfmi_prot.top -o md_dummy.tpr
#where md_dummy is the same as the mdp file except with water removed, same
#for the topology file
def test_trajectory():
    """
    Try running on an xtc trajectory (from a pull simulation)

    Builds a SnapSystem over the water-stripped Gromacs trajectory,
    computes contact and optical clusters for every frame, and writes
    cluster IDs, cluster sizes and nearest-neighbor angle data to files.
    """
    trj = op.join(data_path,'md_whole.xtc')
    tpr = op.join(data_path,'md_dummy.tpr')
    molno = 100  # number of molecules in the system
    ats = 33     # beads per molecule
    tstart = 0
    ttotal = 4000  # number of frames to analyze
    # NOTE(review): COMS, CAINDS and OAINDS are not defined anywhere in this
    # script -- running it as-is raises NameError.  They presumably hold the
    # center-of-mass bead pairs and the contact/optical angle index lists;
    # define or import them before use.
    comIDs = COMS
    cainds = CAINDS
    oainds = OAINDS
    # squared distance cutoffs: 0.5 nm (contact) and 0.7 nm (optical)
    cldict = {'contact':0.5*0.5,'optical':0.7*0.7}
    start = time.time()
    syst = cl.SnapSystem(trj,ats,molno,cldict,compairs=comIDs,
                         ttotal=ttotal,tstart=tstart,tpr=tpr)
    end = time.time()
    print("Time to setup: "+str(end-start)+"\n")
    start = time.time()
    # box side length 24.25935 nm in each dimension
    syst.get_clusters_serial('contact',np.array([[24.25935],[24.25935],
                             [24.25935]]))
    end = time.time()
    print("Time to get contact: "+str(end-start)+"\n")
    start = time.time()
    syst.get_clusters_serial('optical',np.array([[24.25935],[24.25935],
                             [24.25935]]))
    end = time.time()
    print("Time to get optical: "+str(end-start)+"\n")
    syst.writeCIDs('contact','contact-CIDs.dat')
    syst.writeCIDs('optical','optical-CIDs.dat')
    syst.writeSizes('contact','contact-sizes.dat')
    syst.writeSizes('optical','optical-sizes.dat')
    start = time.time()
    syst.writeNNAngSpread('contact','nn-angspread-contact.dat',cainds)
    syst.writeNNAngSpread('optical','nn-angspread-optical.dat',oainds)
    end = time.time()
    print("Time to get/write NN angle data: "+str(end-start)+"\n")
if __name__ == "__main__":
test_trajectory() | ramansbach/cluster_analysis | clustering/scripts/analyze_clusters_martini.py | Python | mit | 2,680 | [
"Gromacs"
] | 7d7e73a96429a90d63c9282e08e4a0c4416403e5559c4bf363a28833c0e413ed |
"""Perform validation of final calls against known reference materials.
Automates the process of checking pipeline results against known valid calls
to identify discordant variants. This provides a baseline for ensuring the
validity of pipeline updates and algorithm changes.
"""
import collections
import contextlib
import csv
import os
import shutil
import subprocess
import time
from pysam import VariantFile
import toolz as tz
import yaml
from bcbio import broad, utils
from bcbio.bam import callable
from bcbio.distributed.transaction import file_transaction
from bcbio.heterogeneity import bubbletree
from bcbio.pipeline import config_utils, shared
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import bedutils, validateplot, vcfutils, multi, naming
# ## Individual sample comparisons
def _get_validate(data):
"""Retrieve items to validate, from single samples or from combined joint calls.
"""
if data.get("vrn_file") and "validate" in data["config"]["algorithm"]:
return data
elif "group_orig" in data:
for sub in multi.get_orig_items(data):
if "validate" in sub["config"]["algorithm"]:
sub_val = utils.deepish_copy(sub)
sub_val["vrn_file"] = data["vrn_file"]
return sub_val
return None
def normalize_input_path(x, data):
    """Normalize path for input files, handling relative paths.

    Absolute paths are normalized directly; relative paths are resolved
    against the sample's fastq and work directories, first hit wins.
    Raises IOError when no candidate exists.
    """
    if x is None:
        return None
    if os.path.isabs(x):
        return os.path.normpath(x)
    for base_dir in (data["dirs"].get("fastq"), data["dirs"].get("work")):
        if not base_dir:
            continue
        candidate = os.path.normpath(os.path.join(base_dir, x))
        if os.path.exists(candidate):
            return candidate
    raise IOError("Could not find validation file %s" % x)
def _gunzip(f, data):
if f is None:
return None
elif f.endswith(".gz"):
out_file = f.replace(".gz", "")
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = "gunzip -c {f} > {tx_out_file}"
do.run(cmd.format(**locals()), "gunzip input file")
return out_file
else:
return f
def _get_caller(data):
    """Return the active caller name for this sample.

    Priority: configured joint caller, then variant caller, then the
    literal "precalled" as a guaranteed fallback.
    """
    callers = [tz.get_in(["config", "algorithm", "jointcaller"], data),
               tz.get_in(["config", "algorithm", "variantcaller"], data),
               "precalled"]
    # first non-empty entry wins
    return [c for c in callers if c][0]
def _get_caller_supplement(caller, data):
"""Some callers like MuTect incorporate a second caller for indels.
"""
if caller == "mutect":
icaller = tz.get_in(["config", "algorithm", "indelcaller"], data)
if icaller:
caller = "%s/%s" % (caller, icaller)
return caller
def _normalize_cwl_inputs(items):
    """Extract variation and validation data from CWL input list of batched samples.

    Collapses a batch into the single sample that carries a validate
    target, attaching the shared call file; with no validation configured
    the first item is returned unchanged.
    """
    with_validate = []
    vrn_files = []
    for data in items:
        if tz.get_in(["config", "algorithm", "validate"], data):
            with_validate.append(data)
        if data.get("vrn_file"):
            vrn_files.append(data["vrn_file"])
    if len(with_validate) == 0:
        return items[0]
    else:
        # all batched samples must agree on one truth set and one call file
        assert len(set([tz.get_in(["config", "algorithm", "validate"], data) for data in with_validate])) == 1
        assert len(set(vrn_files)) == 1
        data = with_validate[0]
        data["vrn_file"] = vrn_files[0]
        return data
def compare_to_rm(data):
    """Compare final variant calls against reference materials of known calls.

    Resolves the sample to validate, prepares the truth VCF and region BED
    (decompression, name synonym handling, cleaning), then dispatches to
    the configured validation method ("rtg" vcfeval by default, or the
    legacy "bcbio.variation").  Results are stored under data["validate"].
    Returns the standard nested [[data]] pipeline structure.
    """
    if isinstance(data, (list, tuple)):
        data = _normalize_cwl_inputs(data)
    toval_data = _get_validate(data)
    if toval_data:
        caller = _get_caller(toval_data)
        sample = dd.get_sample_name(toval_data)
        base_dir = utils.safe_makedir(os.path.join(toval_data["dirs"]["work"], "validate", sample, caller))
        if isinstance(toval_data["vrn_file"], (list, tuple)):
            raise NotImplementedError("Multiple input files for validation: %s" % toval_data["vrn_file"])
        else:
            vrn_file = os.path.abspath(toval_data["vrn_file"])
        # truth set and the regions it is valid over, from configuration
        rm_file = normalize_input_path(toval_data["config"]["algorithm"]["validate"], toval_data)
        rm_interval_file = _gunzip(normalize_input_path(toval_data["config"]["algorithm"].get("validate_regions"),
                                                        toval_data),
                                   toval_data)
        rm_interval_file = bedutils.clean_file(rm_interval_file, toval_data,
                                               bedprep_dir=utils.safe_makedir(os.path.join(base_dir, "bedprep")))
        # reconcile chromosome naming (chr1 vs 1) with the reference build
        rm_file = naming.handle_synonyms(rm_file, dd.get_ref_file(data), data["genome_build"], base_dir, data)
        rm_interval_file = (naming.handle_synonyms(rm_interval_file, dd.get_ref_file(data),
                                                   data["genome_build"], base_dir, data)
                            if rm_interval_file else None)
        vmethod = tz.get_in(["config", "algorithm", "validate_method"], data, "rtg")
        if not vcfutils.vcf_has_variants(vrn_file):
            # RTG can fail on totally empty files. Skip these since we have nothing.
            pass
        elif vmethod == "rtg":
            eval_files = _run_rtg_eval(vrn_file, rm_file, rm_interval_file, base_dir, toval_data)
            data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data)
        elif vmethod == "bcbio.variation":
            data["validate"] = _run_bcbio_variation(vrn_file, rm_file, rm_interval_file, base_dir,
                                                    sample, caller, toval_data)
    return [[data]]
# ## Real Time Genomics vcfeval
def _get_sample_and_caller(data):
    """Return [sample name, caller label] for summary CSV rows.

    A metadata validate_sample entry overrides the standard sample name.
    """
    return [tz.get_in(["metadata", "validate_sample"], data) or dd.get_sample_name(data),
            _get_caller_supplement(_get_caller(data), data)]
def _rtg_add_summary_file(eval_files, base_dir, data):
    """Parse output TP FP and FN files to generate metrics for plotting.

    Counts SNPs and indels in each tp/fp/fn VCF with bcftools and writes
    them to validate-summary.csv; the path is stored back under the
    "summary" key of eval_files.
    """
    out_file = os.path.join(base_dir, "validate-summary.csv")
    if not utils.file_uptodate(out_file, eval_files["tp"]):
        with file_transaction(data, out_file) as tx_out_file:
            with open(tx_out_file, "w") as out_handle:
                writer = csv.writer(out_handle)
                writer.writerow(["sample", "caller", "vtype", "metric", "value"])
                base = _get_sample_and_caller(data)
                for metric in ["tp", "fp", "fn"]:
                    for vtype, bcftools_types in [("SNPs", "--types snps"),
                                                  ("Indels", "--exclude-types snps")]:
                        in_file = eval_files[metric]
                        # count non-header records of this variant class
                        cmd = ("bcftools view {bcftools_types} {in_file} | grep -v ^# | wc -l")
                        count = int(subprocess.check_output(cmd.format(**locals()), shell=True))
                        writer.writerow(base + [vtype, metric, count])
    eval_files["summary"] = out_file
    return eval_files
def _run_rtg_eval(vrn_file, rm_file, rm_interval_file, base_dir, data):
    """Run evaluation of a caller against the truth set using rtg vcfeval.

    Prepares bgzipped/indexed inputs, restricts to the merged evaluation
    intervals and returns a dict of tp/fp/fn VCF paths from the rtg
    output directory.
    """
    out_dir = os.path.join(base_dir, "rtg")
    # "done" sentinel marks a previously completed evaluation
    if not utils.file_exists(os.path.join(out_dir, "done")):
        if os.path.exists(out_dir):
            shutil.rmtree(out_dir)
        if not rm_file.endswith(".vcf.gz") or not os.path.exists(rm_file + ".tbi"):
            rm_file = vcfutils.bgzip_and_index(rm_file, data["config"], out_dir=base_dir)
        # multi-sample inputs: extract just the sample under validation
        if len(vcfutils.get_samples(vrn_file)) > 1:
            base, ext = utils.splitext_plus(os.path.basename(vrn_file))
            sample_file = os.path.join(base_dir, "%s-%s%s" % (base, dd.get_sample_name(data), ext))
            vrn_file = vcfutils.select_sample(vrn_file, dd.get_sample_name(data), sample_file, data["config"])
        if not vrn_file.endswith(".vcf.gz") or not os.path.exists(vrn_file + ".tbi"):
            vrn_file = vcfutils.bgzip_and_index(vrn_file, data["config"], out_dir=base_dir)
        interval_bed = _get_merged_intervals(rm_interval_file, base_dir, data)
        rtg_ref = tz.get_in(["reference", "rtg"], data)
        assert rtg_ref and os.path.exists(rtg_ref), ("Did not find rtg indexed reference file for validation:\n%s\n"
                                                     "Run bcbio_nextgen.py upgrade --data --aligners rtg" % rtg_ref)
        cmd = ["rtg", "vcfeval", "--threads", "6",
               "-b", rm_file, "--bed-regions", interval_bed,
               "-c", vrn_file, "-t", rtg_ref, "-o", out_dir]
        cmd += ["--vcf-score-field='%s'" % (_pick_best_quality_score(vrn_file))]
        cmd = "export RTG_JAVA_OPTS='-Xms1g' && export RTG_MEM=5g && " + " ".join(cmd)
        do.run(cmd, "Validate calls using rtg vcfeval", data)
    out = {"fp": os.path.join(out_dir, "fp.vcf.gz"),
           "fn": os.path.join(out_dir, "fn.vcf.gz")}
    # newer rtg versions split true positives into call- and baseline-space files
    tp_calls = os.path.join(out_dir, "tp.vcf.gz")
    tp_baseline = os.path.join(out_dir, "tp-baseline.vcf.gz")
    if os.path.exists(tp_baseline):
        out["tp"] = tp_baseline
        out["tp-calls"] = tp_calls
    else:
        out["tp"] = tp_calls
    return out
def _pick_best_quality_score(vrn_file):
    """Flexible quality score selection, picking the best available.
    Implementation based on discussion:
    https://github.com/chapmanb/bcbio-nextgen/commit/a538cecd86c0000d17d3f9d4f8ac9d2da04f9884#commitcomment-14539249
    (RTG=AVR/GATK=VQSLOD/MuTect=t_lod_fstar, otherwise GQ, otherwise QUAL, otherwise DP.)
    For MuTect, it's not clear how to get t_lod_fstar, the right quality score, into VCF cleanly.
    MuTect2 has TLOD in the INFO field.
    """
    # pysam fails on checking reference contigs if input is empty
    if not vcfutils.vcf_has_variants(vrn_file):
        return "DP"
    # only sample the first few records; scores are assumed uniform per file
    to_check = 25
    scores = collections.defaultdict(int)
    try:
        in_handle = VariantFile(vrn_file)
    except ValueError:
        raise ValueError("Failed to parse input file in preparation for validation: %s" % vrn_file)
    with contextlib.closing(in_handle) as val_in:
        for i, rec in enumerate(val_in):
            if i > to_check:
                break
            if rec.info.get("VQSLOD") is not None:
                scores["INFO=VQSLOD"] += 1
            if rec.info.get("TLOD") is not None:
                scores["INFO=TLOD"] += 1
            for skey in ["AVR", "GQ", "DP"]:
                if rec.samples[0].get(skey) is not None:
                    scores[skey] += 1
            if rec.qual:
                scores["QUAL"] += 1
    # return the highest-priority score seen in any sampled record
    for key in ["AVR", "INFO=VQSLOD", "INFO=TLOD", "GQ", "QUAL", "DP"]:
        if scores[key] > 0:
            return key
    raise ValueError("Did not find quality score for validation from %s" % vrn_file)
def _get_merged_intervals(rm_interval_file, base_dir, data):
    """Retrieve intervals to run validation on, merging reference and callable BED files.

    When analysis intervals exist, intersects them (LCR-filtered) with the
    reference-material regions via bedtools; otherwise falls back to the
    reference regions alone.
    """
    a_intervals = get_analysis_intervals(data)
    if a_intervals:
        final_intervals = shared.remove_lcr_regions(a_intervals, [data])
        if rm_interval_file:
            caller = _get_caller(data)
            sample = dd.get_sample_name(data)
            combo_intervals = os.path.join(base_dir, "%s-%s-%s-wrm.bed" %
                                           (utils.splitext_plus(os.path.basename(final_intervals))[0],
                                            sample, caller))
            if not utils.file_uptodate(combo_intervals, final_intervals):
                with file_transaction(data, combo_intervals) as tx_out_file:
                    with utils.chdir(os.path.dirname(tx_out_file)):
                        # Copy files locally to avoid issues on shared filesystems
                        # where BEDtools has trouble accessing the same base
                        # files from multiple locations
                        a = os.path.basename(final_intervals)
                        b = os.path.basename(rm_interval_file)
                        # retry once after a pause for flaky shared storage
                        try:
                            shutil.copyfile(final_intervals, a)
                        except IOError:
                            time.sleep(60)
                            shutil.copyfile(final_intervals, a)
                        try:
                            shutil.copyfile(rm_interval_file, b)
                        except IOError:
                            time.sleep(60)
                            shutil.copyfile(rm_interval_file, b)
                        cmd = ("bedtools intersect -nonamecheck -a {a} -b {b} > {tx_out_file}")
                        do.run(cmd.format(**locals()), "Intersect callable intervals for rtg vcfeval")
            final_intervals = combo_intervals
    else:
        assert rm_interval_file, "No intervals to subset analysis with"
        final_intervals = shared.remove_lcr_regions(rm_interval_file, [data])
    return final_intervals
def get_analysis_intervals(data):
    """Retrieve analysis regions for the current variant calling pipeline.

    Checks sources in priority order: ensemble BED, pre-computed callable
    regions, callable regions derived from an alignment BAM, then
    configured callable/variant region BED files.  Returns None implicitly
    when no source is available.
    """
    if data.get("ensemble_bed"):
        return data["ensemble_bed"]
    elif dd.get_callable_regions(data):
        return dd.get_callable_regions(data)
    elif data.get("align_bam"):
        return callable.sample_callable_bed(data["align_bam"], dd.get_ref_file(data), data)
    elif data.get("work_bam"):
        return callable.sample_callable_bed(data["work_bam"], dd.get_ref_file(data), data)
    elif data.get("work_bam_callable"):
        return callable.sample_callable_bed(data["work_bam_callable"], dd.get_ref_file(data), data)
    elif tz.get_in(["config", "algorithm", "callable_regions"], data):
        return tz.get_in(["config", "algorithm", "callable_regions"], data)
    elif tz.get_in(["config", "algorithm", "variant_regions"], data):
        return tz.get_in(["config", "algorithm", "variant_regions"], data)
# ## bcbio.variation comparison -- deprecated approach
def _run_bcbio_variation(vrn_file, rm_file, rm_interval_file, base_dir, sample, caller, data):
    """Run validation of a caller against the truth set using bcbio.variation.

    Writes the comparison configuration, runs the comparison if outputs are
    missing, and returns a dict with summary, grading, discordant and
    concordant output file paths.
    """
    val_config_file = _create_validate_config_file(vrn_file, rm_file, rm_interval_file,
                                                   base_dir, data)
    work_dir = os.path.join(base_dir, "work")
    out = {"summary": os.path.join(work_dir, "validate-summary.csv"),
           "grading": os.path.join(work_dir, "validate-grading.yaml"),
           "discordant": os.path.join(work_dir, "%s-eval-ref-discordance-annotate.vcf" % sample)}
    if not utils.file_exists(out["discordant"]) or not utils.file_exists(out["grading"]):
        bcbio_variation_comparison(val_config_file, base_dir, data)
    # list comprehension instead of filter(...)[0]: filter() returns a lazy
    # iterator on Python 3, which does not support indexing.
    concordant_files = [f for f in [os.path.join(work_dir, "%s-%s-concordance.vcf" % (sample, x))
                                    for x in ["eval-ref", "ref-eval"]]
                        if os.path.exists(f)]
    out["concordant"] = concordant_files[0]
    return out
def bcbio_variation_comparison(config_file, base_dir, data):
    """Run a variant comparison using the bcbio.variation toolkit, given an input configuration."""
    tmp_dir = utils.safe_makedir(os.path.join(base_dir, "tmp"))
    resources = config_utils.get_resources("bcbio_variation", data["config"])
    # modest default JVM heap; overridable through resource configuration
    jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx2g"])
    cmd = ["bcbio-variation"] + jvm_opts + broad.get_default_jvm_opts(tmp_dir) + \
        ["variant-compare", config_file]
    do.run(cmd, "Comparing variant calls using bcbio.variation", data)
def _create_validate_config_file(vrn_file, rm_file, rm_interval_file,
                                 base_dir, data):
    """Write the bcbio.variation comparison configuration as YAML.

    Regenerates only when the config is older than the call file.
    Returns the path to validate.yaml.
    """
    config_dir = utils.safe_makedir(os.path.join(base_dir, "config"))
    config_file = os.path.join(config_dir, "validate.yaml")
    if not utils.file_uptodate(config_file, vrn_file):
        with file_transaction(data, config_file) as tx_config_file:
            with open(tx_config_file, "w") as out_handle:
                out = _create_validate_config(vrn_file, rm_file, rm_interval_file,
                                              base_dir, data)
                yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
    return config_file
def _create_validate_config(vrn_file, rm_file, rm_interval_file, base_dir, data):
    """Create a bcbio.variation configuration input for validation.

    Builds the grading experiment dict: the reference truth set ("ref"),
    the calls under evaluation ("eval"), optional analysis intervals and
    the alignment BAM when available.
    """
    ref_call = {"file": str(rm_file), "name": "ref", "type": "grading-ref",
                "fix-sample-header": True, "remove-refcalls": True}
    a_intervals = get_analysis_intervals(data)
    if a_intervals:
        a_intervals = shared.remove_lcr_regions(a_intervals, [data])
    if rm_interval_file:
        ref_call["intervals"] = rm_interval_file
    eval_call = {"file": vrn_file, "name": "eval", "remove-refcalls": True}
    exp = {"sample": data["name"][-1],
           "ref": dd.get_ref_file(data),
           "approach": "grade",
           "calls": [ref_call, eval_call]}
    if a_intervals:
        exp["intervals"] = os.path.abspath(a_intervals)
    if data.get("align_bam"):
        exp["align"] = data["align_bam"]
    elif data.get("work_bam"):
        exp["align"] = data["work_bam"]
    return {"dir": {"base": base_dir, "out": "work", "prep": "work/prep"},
            "experiments": [exp]}
# ## Summarize comparisons
def _flatten_grading(stats):
vtypes = ["snp", "indel"]
cat = "concordant"
for vtype in vtypes:
yield vtype, cat, stats[cat][cat].get(vtype, 0)
for vtype in vtypes:
for vclass, vitems in sorted(stats["discordant"].get(vtype, {}).iteritems()):
for vreason, val in sorted(vitems.iteritems()):
yield vtype, "discordant-%s-%s" % (vclass, vreason), val
yield vtype, "discordant-%s-total" % vclass, sum(vitems.itervalues())
def _has_grading_info(samples):
for data in (x[0] for x in samples):
for variant in data.get("variants", []):
if variant.get("validate"):
return True
return False
def _group_validate_samples(samples):
    """Split samples into validated batches and pass-through extras.

    Returns (validated, extras): validated maps a batch name -- taken from
    metadata validate_batch, then metadata batch, then description -- to
    the samples carrying validation output; extras keeps the rest in
    pipeline [[data]] form.
    """
    extras = []
    validated = collections.defaultdict(list)
    for data in (x[0] for x in samples):
        is_v = False
        for variant in data.get("variants", []):
            if variant.get("validate"):
                is_v = True
        if is_v:
            # first non-empty key source wins
            for batch_key in (["metadata", "validate_batch"], ["metadata", "batch"],
                              ["description"]):
                vname = tz.get_in(batch_key, data)
                if vname:
                    break
            # batch entries can be lists; use the first name
            if isinstance(vname, (list, tuple)):
                vname = vname[0]
            validated[vname].append(data)
        else:
            extras.append([data])
    return validated, extras
def summarize_grading(samples):
    """Provide summaries of grading results across all samples.

    Writes one grading-summary CSV per validation batch, generates plots
    from either per-sample plot files or YAML-derived rows, and attaches
    the summary/plot paths back onto each validated variant.
    """
    if not _has_grading_info(samples):
        return samples
    validate_dir = utils.safe_makedir(os.path.join(samples[0][0]["dirs"]["work"], "validate"))
    header = ["sample", "caller", "variant.type", "category", "value"]
    validated, out = _group_validate_samples(samples)
    # .items() instead of Python 2-only iteritems() so this also runs on Python 3
    for vname, vitems in validated.items():
        out_csv = os.path.join(validate_dir, "grading-summary-%s.csv" % vname)
        with open(out_csv, "w") as out_handle:
            writer = csv.writer(out_handle)
            writer.writerow(header)
            plot_data = []
            plot_files = []
            for data in sorted(vitems, key=lambda x: x.get("lane", dd.get_sample_name(x))):
                for variant in data.get("variants", []):
                    if variant.get("validate"):
                        variant["validate"]["grading_summary"] = out_csv
                        if tz.get_in(["validate", "grading"], variant):
                            # old-style grading YAML: flatten into CSV rows
                            for row in _get_validate_plotdata_yaml(variant, data):
                                writer.writerow(row)
                                plot_data.append(row)
                        else:
                            plot_files.append(variant["validate"]["summary"])
        if plot_files:
            plots = validateplot.classifyplot_from_plotfiles(plot_files, out_csv)
        elif plot_data:
            plots = validateplot.create(plot_data, header, 0, data["config"],
                                        os.path.splitext(out_csv)[0])
        else:
            plots = None
        for data in vitems:
            for variant in data.get("variants", []):
                if variant.get("validate"):
                    variant["validate"]["grading_plots"] = plots
            out.append([data])
    return out
def _get_validate_plotdata_yaml(variant, data):
    """Retrieve validation plot data from grading YAML file (old style).

    Yields [sample, caller, variant type, category, value] rows suitable
    for the grading summary CSV.
    """
    with open(variant["validate"]["grading"]) as in_handle:
        # safe_load: grading files are plain data, and yaml.load without an
        # explicit Loader is deprecated and unsafe in modern PyYAML.
        grade_stats = yaml.safe_load(in_handle)
    for sample_stats in grade_stats:
        sample = sample_stats["sample"]
        for vtype, cat, val in _flatten_grading(sample_stats):
            yield [sample, variant.get("variantcaller", ""),
                   vtype, cat, val]
# ## Summarize by frequency
def freq_summary(val_file, call_file, truth_file, target_name):
    """Summarize true and false positive calls by variant type and frequency.
    Resolve differences in true/false calls based on output from hap.py:
    https://github.com/sequencing/hap.py

    Writes a <val_file>-freqs.csv with vtype/valclass/freq columns and
    returns its path.  Frequencies come from the truth set when available,
    falling back to the called sample, then 0.0.
    """
    out_file = "%s-freqs.csv" % utils.splitext_plus(val_file)[0]
    truth_freqs = _read_truth_freqs(truth_file)
    call_freqs = _read_call_freqs(call_file, target_name)
    with VariantFile(val_file) as val_in:
        with open(out_file, "w") as out_handle:
            writer = csv.writer(out_handle)
            writer.writerow(["vtype", "valclass", "freq"])
            for rec in val_in:
                call_type = _classify_rec(rec)
                val_type = _get_validation_status(rec)
                key = _get_key(rec)
                freq = truth_freqs.get(key, call_freqs.get(key, 0.0))
                writer.writerow([call_type, val_type, freq])
    return out_file
def _get_key(rec):
return (rec.contig, rec.pos, rec.ref, rec.alts[0])
def _classify_rec(rec):
"""Determine class of variant in the record.
"""
if max([len(x) for x in rec.alleles]) == 1:
return "snp"
else:
return "indel"
def _get_validation_status(rec):
"""Retrieve the status of the validation, supporting hap.py output
"""
return rec.info["type"]
def _read_call_freqs(in_file, sample_name):
    """Identify frequencies for calls in the input file.

    Maps variant keys to alt-allele frequency (alt depth / total depth)
    for PASS records of the target sample.
    """
    out = {}
    with VariantFile(in_file) as call_in:
        for rec in call_in:
            # only consider records whose sole filter is PASS
            if rec.filter.keys() == ["PASS"]:
                for name, sample in rec.samples.items():
                    if name == sample_name:
                        alt, depth = bubbletree.sample_alt_and_depth(sample)
                        if depth > 0:
                            out[_get_key(rec)] = float(alt) / float(depth)
    return out
def _read_truth_freqs(in_file):
    """Read frequency of calls from truth VCF.
    Currently handles DREAM data, needs generalization for other datasets.
    """
    out = {}
    with VariantFile(in_file) as bcf_in:
        for rec in bcf_in:
            # VAF INFO tag holds the variant allele frequency; assume 1.0
            # (clonal) when it is absent
            freq = float(rec.info.get("VAF", 1.0))
            out[_get_key(rec)] = freq
    return out
| Cyberbio-Lab/bcbio-nextgen | bcbio/variation/validate.py | Python | mit | 23,539 | [
"pysam"
] | fec0d957113c4f35c5aae48b70094c0b059ed570dbb78d323c5c405baa29db05 |
"""
Defines Configuration class for representation of molecular
structure at one particular point in time.
Part of raman package.
Copyright Sean McGrath 2015. Issued under the MIT License.
"""
from .spectrum import Spectrum
from .matrix import DistanceMatrix
class Configuration:
    """
    A single molecular Configuration - associates calculated spectra
    with a DistanceMatrix at a single point in time and temperature.
    """

    def __init__(self, matrix, raman_spectrum=None, ir_spectrum=None, time=None, temperature=None):
        """
        Constructor.
        :param matrix: a raman.DistanceMatrix
        :param raman_spectrum: a raman.Spectrum
        :param ir_spectrum: a raman.Spectrum
        :param time: a string describing the time the Configuration was captured.
        :param temperature: the temperature of the Configuration.
        """
        self.matrix = matrix
        self.raman_spectrum = raman_spectrum
        self.ir_spectrum = ir_spectrum
        self.time = time
        self.temperature = temperature

    def __len__(self):
        """Number of atoms, delegated to the distance matrix."""
        return len(self.matrix)

    def __str__(self):
        """Human-readable summary, e.g. 'Configuration with 5 atoms at 300K at t0'."""
        description = 'Configuration with {} atoms'.format(len(self))
        # temperature is reported before time, matching the original format
        for detail in (self.temperature, self.time):
            if detail:
                description += ' at {}'.format(detail)
        return description

    @staticmethod
    def from_log_file(filename, time=None, temp=None):
        """
        Create a Configuration from the information in a Gaussian .log file.
        :param filename: path to a .log file generated by Gaussian.
        :param time: optional capture time label.
        :param temp: optional temperature label.
        """
        raman_spec = Spectrum.from_log_file(filename, type='raman')
        ir_spec = Spectrum.from_log_file(filename, type='ir')
        dist_matrix = DistanceMatrix.from_log_file(filename)
        return Configuration(dist_matrix, raman_spec, ir_spec, time, temp)
| SeanMcGrath/raman | gparse/configuration.py | Python | mit | 1,862 | [
"Gaussian"
] | 8772674828caa39dc1f230c49f5ca0c36fd966917c00bb7c686a93ac2e10a6b4 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import time
import warnings
from . import basex
from . import dasch
from . import direct
from . import hansenlaw
from . import linbasex
from . import onion_bordas
from . import tools
class Transform(object):
    """Abel transform of a whole image.

    Forward or inverse Abel transform of a 2D image, together with
    pre-processing (centering, quadrant symmetrizing) and
    post-processing (angular integration).

    Attributes
    ----------
    IM : numpy 2D array
        the input image, re-centered (optional) with an odd-size width.
    transform : numpy 2D array
        the 2D forward/reverse Abel transform.
    method : str
        transform method, as specified by the input option.
    direction : str
        transform direction, as specified by the input option.
    angular_integration : tuple
        (radial-grid, radial-intensity): radial coordinates and radial
        intensity (speed) distribution, evaluated using
        :func:`abel.tools.vmi.angular_integration`; only set when the
        ``angular_integration`` option is enabled.
    Beta : numpy 2D array
        (``linbasex`` with ``transform_options=dict(return_Beta=True)``)
        coefficients of the Newton-sphere spherical harmonics:
        Beta[0] radial intensity, Beta[1] anisotropy parameter,
        higher orders up to `legendre_orders`.
    radial : numpy 1D array
        (``linbasex``) radial grid for the Beta array.
    projection : numpy 2D array
        (``linbasex``) radial projection profiles at angles `proj_angles`.
    """

    def __init__(self, IM,
                 direction='inverse', method='three_point', center='none',
                 symmetry_axis=None, use_quadrants=(True, True, True, True),
                 symmetrize_method='average', angular_integration=False,
                 transform_options=None, center_options=None,
                 angular_integration_options=None,
                 recast_as_float64=True, verbose=False):
        """The one-stop transform function.

        Parameters
        ----------
        IM : NxM numpy array
            the image to be transformed.
        direction : str
            'forward' (2D slice -> projection) or 'inverse'
            (projection -> 2D slice) Abel transform. Default 'inverse'.
        method : str
            numerical approximation to use: 'basex', 'direct',
            'hansenlaw', 'linbasex', 'onion_bordas', 'onion_peeling',
            'two_point' or 'three_point'. See the "Transform Methods"
            section of the documentation for details and references.
        center : tuple or str
            image center as (row, column) -- either element may be None
            to skip that dimension -- or the name of an automatic
            centering method: 'image_center', 'convolution', 'slice',
            'com', 'gaussian', or 'none' (default; then an image with
            an odd number of columns must be provided).
        symmetry_axis : None, int or tuple
            symmetrize the image about numpy axis 0 (vertical),
            1 (horizontal), or (0, 1) (both axes).
        use_quadrants : tuple of 4 booleans
            quadrants (Q0, Q1, Q2, Q3) to include in the analysis,
            numbered counter-clockwise from the upper right.
        symmetrize_method : str
            'average' (average the quadrants, per `symmetry_axis`) or
            'fourier' (remove imaginary Fourier components).
        angular_integration : boolean
            integrate the transformed image over angle, giving the
            radial (speed) intensity distribution.
        transform_options : dict
            additional arguments passed to the transform function.
        center_options : dict
            additional arguments passed to the centering function.
        angular_integration_options : dict
            additional arguments passed to
            :func:`abel.tools.vmi.angular_integration`.
        recast_as_float64 : boolean
            recast the input image to float64 (recommended: integer
            images such as uint8/uint16 do not always play well with
            the transform algorithms).
        verbose : boolean
            print non-critical timing/diagnostic output.
        """
        # replace the old shared dict() defaults (mutable-default
        # pitfall) while preserving the same semantics
        if transform_options is None:
            transform_options = {}
        if center_options is None:
            center_options = {}
        if angular_integration_options is None:
            angular_integration_options = {}

        # public attributes
        self.IM = IM  # (optionally) centered, odd-width image
        self.method = method
        self.direction = direction

        # private internal variables
        self._symmetry_axis = symmetry_axis
        self._symmetrize_method = symmetrize_method
        self._use_quadrants = use_quadrants
        self._recast_as_float64 = recast_as_float64
        # bug fix: 'verbose' was previously assigned to a local
        # variable, while _verboseprint was built once at class
        # definition time from the class-level _verbose = False, so
        # verbose output could never be enabled; keep both the flag
        # and the conditional printer on the instance instead
        self._verbose = verbose
        self._verboseprint = print if verbose else lambda *a, **k: None

        # image processing pipeline
        self._verify_some_inputs()
        self._center_image(center, **center_options)
        self._abel_transform_image(**transform_options)
        self._integration(angular_integration, transform_options,
                          **angular_integration_options)

    def _verify_some_inputs(self):
        """Sanity-check the image/options; normalize _symmetry_axis to a list."""
        if self.IM.ndim == 1 or np.shape(self.IM)[0] <= 2:
            raise ValueError('Data must be 2-dimensional. '
                             'To transform a single row, use the '
                             'individual transform function.')

        if not np.any(self._use_quadrants):
            raise ValueError('No image quadrants selected to use')

        if not isinstance(self._symmetry_axis, (list, tuple)):
            # if the user supplies an int, make it into a 1-element list
            self._symmetry_axis = [self._symmetry_axis]

        if self._recast_as_float64:
            self.IM = self.IM.astype('float64')

    def _center_image(self, center, **center_options):
        """Re-center self.IM unless centering is disabled."""
        if center != "none":
            self.IM = tools.center.center_image(self.IM, center,
                                                **center_options)

    def _abel_transform_image(self, **transform_options):
        """Dispatch to whole-image or quadrant-wise transformation."""
        # NOTE(review): after _verify_some_inputs, _symmetry_axis is
        # always a list (never None), so this condition holds for every
        # linbasex call -- behavior preserved as in the original
        if self.method == "linbasex" and self._symmetry_axis is not None:
            self._abel_transform_image_full(**transform_options)
        else:
            self._abel_transform_image_by_quadrant(**transform_options)

    def _abel_transform_image_full(self, **transform_options):
        """Transform the whole image at once (currently linbasex only)."""
        abel_transform = {
            # "basex": basex.basex_transform,
            "linbasex": linbasex.linbasex_transform_full
        }

        self._verboseprint('Calculating {0} Abel transform using {1} method -'
                           .format(self.direction, self.method),
                           '\n image size: {:d}x{:d}'.format(*self.IM.shape))

        t0 = time.time()
        self.transform, radial, Beta, QLz = \
            abel_transform[self.method](self.IM, **transform_options)
        self._verboseprint("{:.2f} seconds".format(time.time() - t0))

        # linbasex-specific results
        self.radial = radial
        self.Beta = Beta
        self.projection = QLz

    def _abel_transform_image_by_quadrant(self, **transform_options):
        """Split the image into quadrants, transform those required by
        the symmetry options, and reassemble the full image."""
        abel_transform = {
            "basex": basex.basex_transform,
            "direct": direct.direct_transform,
            "hansenlaw": hansenlaw.hansenlaw_transform,
            "linbasex": linbasex.linbasex_transform,
            "onion_bordas": onion_bordas.onion_bordas_transform,
            "onion_peeling": dasch.onion_peeling_transform,
            "two_point": dasch.two_point_transform,
            "three_point": dasch.three_point_transform,
        }

        self._verboseprint('Calculating {0} Abel transform using {1} method -'
                           .format(self.direction, self.method),
                           '\n image size: {:d}x{:d}'.format(*self.IM.shape))

        t0 = time.time()

        # split image into quadrants
        Q0, Q1, Q2, Q3 = tools.symmetry.get_image_quadrants(
            self.IM, reorient=True,
            symmetry_axis=self._symmetry_axis,
            symmetrize_method=self._symmetrize_method)

        def selected_transform(Z):
            return abel_transform[self.method](Z, direction=self.direction,
                                               **transform_options)

        AQ0 = AQ1 = AQ2 = AQ3 = None

        # quadrant Q1 is always transformed; which of the remaining
        # quadrants are needed depends on the requested symmetry
        AQ1 = selected_transform(Q1)

        if 0 in self._symmetry_axis:
            AQ2 = selected_transform(Q2)

        if 1 in self._symmetry_axis:
            AQ0 = selected_transform(Q0)

        if None in self._symmetry_axis:
            AQ0 = selected_transform(Q0)
            AQ2 = selected_transform(Q2)
            AQ3 = selected_transform(Q3)

        if self.method == "linbasex" and "return_Beta" in transform_options:
            # linbasex evaluates speed and anisotropy parameters;
            # each AQi is the tuple (AIM, R, Beta, QLz)
            Beta0 = AQ0[2]
            Beta1 = AQ1[2]
            Beta2 = AQ2[2]
            Beta3 = AQ3[2]
            # reconstructed image of each quadrant
            AQ0 = AQ0[0]
            AQ1 = AQ1[0]
            AQ2 = AQ2[0]
            AQ3 = AQ3[0]
            # bug fix: the original wrote self.Beta[0](...)/4 and
            # self.Beta[1](...)/4, attempting to call an attribute
            # that is never set on this code path; the intent is the
            # plain quadrant average
            # speed distribution
            self.linbasex_angular_integration = \
                (Beta0[0] + Beta1[0] + Beta2[0] + Beta3[0]) / 4
            # anisotropy parameter
            self.linbasex_anisotropy_parameter = \
                (Beta0[1] + Beta1[1] + Beta2[1] + Beta3[1]) / 4

        # reassemble image
        self.transform = tools.symmetry.put_image_quadrants(
            (AQ0, AQ1, AQ2, AQ3),
            original_image_shape=self.IM.shape,
            symmetry_axis=self._symmetry_axis)

        self._verboseprint("{:.2f} seconds".format(time.time() - t0))

    def _integration(self, angular_integration, transform_options,
                     **angular_integration_options):
        """Optionally angular-integrate the transformed image."""
        if angular_integration:
            if 'dr' in transform_options and \
               'dr' not in angular_integration_options:
                # assume user forgot to pass grid size
                angular_integration_options['dr'] = transform_options['dr']

            self.angular_integration = tools.vmi.angular_integration(
                self.transform,
                **angular_integration_options)
| stggh/PyAbel | abel/transform.py | Python | mit | 20,437 | [
"Gaussian"
] | da364e3c23abe211664a64eae2e653e1a3b86eafa3c6b250064b47269530a8ca |
from aiida import load_dbenv
try:
load_dbenv()
from aiida.orm import load_node, load_workflow
from aiida.orm import Code, DataFactory
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
ArrayData = DataFactory('array')
KpointsData = DataFactory('array.kpoints')
except:
pass
import numpy as np
def plot_data(bs):
    """Plot the phonon band structure, density of states and thermal
    properties of a completed phonon workflow.

    Parameters:
        bs: ArrayData with arrays 'q_path' and 'frequencies', and
            optionally 'labels' (high-symmetry point label pairs).

    NOTE(review): the DOS and thermal-property sections read the
    module-level workflow object ``wf`` -- confirm ``wf`` is defined
    before this function is called.
    """
    import matplotlib.pyplot as plt

    # band structure: one red curve per phonon branch/segment
    for i, freq in enumerate(bs.get_array('frequencies')):
        plt.plot(bs.get_array('q_path')[i], freq, color='r')

    plt.figure(1)
    plt.axes().get_xaxis().set_ticks([])
    plt.ylabel('Frequency [THz]')
    plt.xlabel('Wave vector')
    plt.xlim([0, bs.get_array('q_path')[-1][-1]])
    plt.axhline(y=0, color='k', ls='dashed')
    plt.suptitle('Phonon band structure')

    if 'labels' in bs.get_arraynames():
        # build LaTeX tick labels at the high-symmetry points; where
        # consecutive path segments do not share an endpoint, both
        # labels are shown separated by '/'
        plt.rcParams.update({'mathtext.default': 'regular'})

        labels = bs.get_array('labels')

        labels_e = []
        x_labels = []
        for i, freq in enumerate(bs.get_array('q_path')):
            if labels[i][0] == labels[i - 1][1]:
                labels_e.append('$' + labels[i][0].replace('GAMMA', '\Gamma') + '$')
            else:
                labels_e.append('$' + labels[i - 1][1].replace('GAMMA', '\Gamma') + '/' + labels[i][0].replace('GAMMA',
                                                                                                              '\Gamma') + '$')
            x_labels.append(bs.get_array('q_path')[i][0])
        x_labels.append(bs.get_array('q_path')[-1][-1])
        labels_e.append('$' + labels[-1][1].replace('GAMMA', '\Gamma') + '$')
        labels_e[0] = '$' + labels[0][0].replace('GAMMA', '\Gamma') + '$'

        plt.xticks(x_labels, labels_e, rotation='horizontal')
    # plt.show()

    # Phonon density of states
    dos = wf.get_result('dos')

    frequency = dos.get_array('frequency')
    total_dos = dos.get_array('total_dos')
    partial_dos = dos.get_array('partial_dos')
    partial_symbols = dos.get_array('partial_symbols')

    # Check atom equivalencies: merge partial-DOS curves belonging to
    # symmetry-equivalent atoms of the same chemical species
    delete_list = []
    for i, dos_i in enumerate(partial_dos):
        for j, dos_j in enumerate(partial_dos):
            if i < j:
                if np.allclose(dos_i, dos_j) and partial_symbols[i] == partial_symbols[j]:
                    dos_i += dos_j
                    delete_list.append(j)

    partial_dos = np.delete(partial_dos, delete_list, 0)
    partial_symbols = np.delete(partial_symbols, delete_list)

    # print partial_dos
    # print partial_symbols

    plt.figure(2)
    plt.suptitle('Phonon density of states')
    plt.ylabel('Density')
    plt.xlabel('Frequency [THz]')
    plt.ylim([0, np.max(total_dos) * 1.1])

    plt.plot(frequency, total_dos, label='Total DOS')
    for i, dos in enumerate(partial_dos):
        plt.plot(frequency, dos, label='{}'.format(partial_symbols[i]))
    plt.legend()
    # plt.show()

    # Thermal properties
    thermal = wf.get_result('thermal_properties')

    free_energy = thermal.get_array('free_energy')
    entropy = thermal.get_array('entropy')
    temperature = thermal.get_array('temperature')
    cv = thermal.get_array('cv')

    plt.figure(3)
    plt.xlabel('Temperature [K]')
    plt.suptitle('Thermal properties')
    plt.plot(temperature, free_energy, label='Free energy (KJ/mol)')
    plt.plot(temperature, entropy, label='entropy (KJ/mol)')
    plt.plot(temperature, cv, label='Cv (J/mol)')
    plt.legend()
    plt.show()
def get_path_using_seekpath(structure, band_resolution=30):
    """Build a q-point path through the high-symmetry points of a
    structure using seekpath.

    Parameters:
        structure: AiiDA StructureData (provides .cell and .sites).
        band_resolution: number of segments per path branch; each
            branch contains band_resolution + 1 evenly spaced q-points.

    Returns:
        dict with
            'ranges': list (one entry per branch) of lists of
                fractional q-point coordinates (numpy arrays),
            'labels': seekpath's list of (start, end) label pairs.
    """
    import seekpath

    cell = structure.cell
    positions = [site.position for site in structure.sites]
    scaled_positions = np.dot(positions, np.linalg.inv(cell))
    numbers = np.unique([site.kind_name for site in structure.sites],
                        return_inverse=True)[1]

    path_data = seekpath.get_path((cell, scaled_positions, numbers))

    labels = path_data['point_coords']

    # 'set' shadowed the builtin in the original loop; renamed
    band_ranges = [[labels[segment[0]], labels[segment[1]]]
                   for segment in path_data['path']]

    bands = []
    for q_start, q_end in band_ranges:
        # hoist the array conversions out of the interpolation loop
        q_start = np.array(q_start)
        q_end = np.array(q_end)
        band = [q_start + (q_end - q_start) / band_resolution * i
                for i in range(band_resolution + 1)]
        bands.append(band)

    return {'ranges': bands,
            'labels': path_data['path']}
def phonopy_calculation_inline(**kwargs):
    """Compute phonon band structure, DOS and thermal properties
    from force constants, using phonopy.

    Expected kwargs:
        structure: StructureData of the unit cell.
        phonopy_input: ParameterData with 'supercell', 'primitive',
            'distance' and 'mesh' settings.
        force_constants: ArrayData holding a 'force_constants' array.

    Returns a dict of ArrayData nodes:
        'thermal_properties', 'dos' and 'band_structure'.
    """
    from phonopy.structure.atoms import Atoms as PhonopyAtoms
    from phonopy import Phonopy

    structure = kwargs.pop('structure')
    phonopy_input = kwargs.pop('phonopy_input').get_dict()
    force_constants = kwargs.pop('force_constants').get_array('force_constants')

    # q-point path through the high-symmetry points of the cell
    bands = get_path_using_seekpath(structure)

    # Generate phonopy phonon object
    bulk = PhonopyAtoms(symbols=[site.kind_name for site in structure.sites],
                        positions=[site.position for site in structure.sites],
                        cell=structure.cell)

    phonon = Phonopy(bulk,
                     phonopy_input['supercell'],
                     primitive_matrix=phonopy_input['primitive'],
                     distance=phonopy_input['distance'])

    phonon.set_force_constants(force_constants)

    # Normalization factor primitive to unit cell
    # NOTE(review): integer division under Python 2 (no __future__
    # division in this file) -- confirm this is intended
    normalization_factor = phonon.unitcell.get_number_of_atoms() / phonon.primitive.get_number_of_atoms()

    phonon.set_band_structure(bands['ranges'])
    phonon.set_mesh(phonopy_input['mesh'], is_eigenvectors=True, is_mesh_symmetry=False)
    phonon.set_total_DOS(tetrahedron_method=True)
    phonon.set_partial_DOS(tetrahedron_method=True)

    # get band structure
    band_structure_phonopy = phonon.get_band_structure()
    q_points = np.array(band_structure_phonopy[0])
    q_path = np.array(band_structure_phonopy[1])
    frequencies = np.array(band_structure_phonopy[2])
    band_labels = np.array(bands['labels'])

    # stores band structure
    band_structure = ArrayData()
    band_structure.set_array('q_points', q_points)
    band_structure.set_array('q_path', q_path)
    band_structure.set_array('frequencies', frequencies)
    band_structure.set_array('labels', band_labels)

    # get DOS (normalized to unit cell)
    total_dos = phonon.get_total_DOS() * normalization_factor
    partial_dos = phonon.get_partial_DOS() * normalization_factor

    # Stores DOS data in DB as a workflow result
    dos = ArrayData()
    dos.set_array('frequency', total_dos[0])
    dos.set_array('total_dos', total_dos[1])
    dos.set_array('partial_dos', partial_dos[1])
    dos.set_array('partial_symbols', np.array(phonon.primitive.symbols))

    # THERMAL PROPERTIES (per primtive cell)
    phonon.set_thermal_properties()
    t, free_energy, entropy, cv = phonon.get_thermal_properties()

    # Stores thermal properties (per unit cell) data in DB as a workflow result
    thermal_properties = ArrayData()
    thermal_properties.set_array('temperature', t)
    thermal_properties.set_array('free_energy', free_energy * normalization_factor)
    thermal_properties.set_array('entropy', entropy * normalization_factor)
    thermal_properties.set_array('cv', cv * normalization_factor)

    return {'thermal_properties': thermal_properties, 'dos': dos, 'band_structure': band_structure}
def phonopy_commensurate_inline(**kwargs):
    """Compute phonon frequencies and eigenvectors at the
    commensurate q-points of the supercell.

    Expected kwargs:
        structure: StructureData of the unit cell.
        phonopy_input: ParameterData with 'supercell', 'primitive'
            and 'distance' settings.
        force_constants: ArrayData holding a 'force_constants' array.

    Returns {'commensurate': ArrayData} with arrays 'qpoints',
    'frequencies' and 'eigenvectors'.
    """
    from phonopy.structure.atoms import Atoms as PhonopyAtoms
    from phonopy import Phonopy
    from phonopy.units import VaspToTHz
    from phonopy.harmonic.dynmat_to_fc import get_commensurate_points, DynmatToForceConstants

    structure = kwargs.pop('structure')
    phonopy_input = kwargs.pop('phonopy_input').get_dict()
    force_constants = kwargs.pop('force_constants').get_array('force_constants')

    # Generate phonopy phonon object
    bulk = PhonopyAtoms(symbols=[site.kind_name for site in structure.sites],
                        positions=[site.position for site in structure.sites],
                        cell=structure.cell)

    phonon = Phonopy(bulk,
                     phonopy_input['supercell'],
                     primitive_matrix=phonopy_input['primitive'],
                     distance=phonopy_input['distance'])

    phonon.set_force_constants(force_constants)

    primitive = phonon.get_primitive()
    supercell = phonon.get_supercell()

    # commensurate q-points of the supercell
    dynmat2fc = DynmatToForceConstants(primitive, supercell)
    com_points = dynmat2fc.get_commensurate_points()

    phonon.set_qpoints_phonon(com_points,
                              is_eigenvectors=True)
    frequencies, eigenvectors = phonon.get_qpoints_phonon()

    # Stores commensurate-point data in DB as a workflow result
    commensurate = ArrayData()
    commensurate.set_array('qpoints', com_points)
    commensurate.set_array('frequencies', frequencies)
    commensurate.set_array('eigenvectors', eigenvectors)

    return {'commensurate': commensurate}
def phonopy_commensurate_shifts_inline(**kwargs):
    """Compute frequency shifts (renormalized minus harmonic) at the
    commensurate q-points of the supercell.

    Expected kwargs:
        structure: StructureData of the unit cell.
        phonopy_input: ParameterData with 'supercell', 'primitive'
            and 'distance' settings.
        force_constants: ArrayData with the harmonic 'force_constants'.
        r_force_constants: ArrayData with the renormalized
            'force_constants'.

    Returns {'commensurate': ArrayData} with arrays 'qpoints' and
    'shifts'.
    """
    from phonopy.structure.atoms import Atoms as PhonopyAtoms
    from phonopy import Phonopy
    from phonopy.harmonic.dynmat_to_fc import get_commensurate_points, DynmatToForceConstants

    structure = kwargs.pop('structure')
    phonopy_input = kwargs.pop('phonopy_input').get_dict()
    force_constants = kwargs.pop('force_constants').get_array('force_constants')
    r_force_constants = kwargs.pop('r_force_constants').get_array('force_constants')

    # Generate phonopy phonon object
    bulk = PhonopyAtoms(symbols=[site.kind_name for site in structure.sites],
                        positions=[site.position for site in structure.sites],
                        cell=structure.cell)

    phonon = Phonopy(bulk,
                     phonopy_input['supercell'],
                     primitive_matrix=phonopy_input['primitive'],
                     distance=phonopy_input['distance'])

    primitive = phonon.get_primitive()
    supercell = phonon.get_supercell()

    # frequencies with the harmonic force constants
    phonon.set_force_constants(force_constants)

    dynmat2fc = DynmatToForceConstants(primitive, supercell)
    com_points = dynmat2fc.get_commensurate_points()

    phonon.set_qpoints_phonon(com_points,
                              is_eigenvectors=True)
    frequencies_h = phonon.get_qpoints_phonon()[0]

    # frequencies with the renormalized force constants
    phonon.set_force_constants(r_force_constants)
    phonon.set_qpoints_phonon(com_points,
                              is_eigenvectors=True)
    frequencies_r = phonon.get_qpoints_phonon()[0]

    shifts = frequencies_r - frequencies_h

    # Stores the frequency shifts in DB as a workflow result
    commensurate = ArrayData()
    commensurate.set_array('qpoints', com_points)
    commensurate.set_array('shifts', shifts)

    return {'commensurate': commensurate}
def phonopy_merge(**kwargs):
    """Add renormalization shifts to harmonic frequencies and convert
    back to force constants via the commensurate-point dynamical
    matrices.

    Expected kwargs:
        structure: StructureData of the unit cell.
        phonopy_input: ParameterData with 'supercell', 'primitive'
            and 'distance' settings.
        harmonic: ArrayData with 'frequencies' and 'eigenvectors'
            (output of phonopy_commensurate_inline).
        renormalized: ArrayData with 'shifts'
            (output of phonopy_commensurate_shifts_inline).

    Returns {'final_results': ArrayData} holding the merged
    'force_constants' array.
    """
    from phonopy.structure.atoms import Atoms as PhonopyAtoms
    from phonopy import Phonopy
    from phonopy.units import VaspToTHz
    from phonopy.harmonic.dynmat_to_fc import get_commensurate_points, DynmatToForceConstants

    structure = kwargs.pop('structure')
    phonopy_input = kwargs.pop('phonopy_input').get_dict()
    harmonic = kwargs.pop('harmonic')
    renormalized = kwargs.pop('renormalized')

    eigenvectors = harmonic.get_array('eigenvectors')
    frequencies = harmonic.get_array('frequencies')
    shifts = renormalized.get_array('shifts')

    # Generate phonopy phonon object
    bulk = PhonopyAtoms(symbols=[site.kind_name for site in structure.sites],
                        positions=[site.position for site in structure.sites],
                        cell=structure.cell)

    phonon = Phonopy(bulk,
                     phonopy_input['supercell'],
                     primitive_matrix=phonopy_input['primitive'],
                     distance=phonopy_input['distance'])

    primitive = phonon.get_primitive()
    supercell = phonon.get_supercell()

    # apply the anharmonic shifts to the harmonic frequencies
    total_frequencies = frequencies + shifts

    # rebuild force constants from the shifted frequencies and the
    # harmonic eigenvectors (frequencies converted to phonopy units)
    dynmat2fc = DynmatToForceConstants(primitive, supercell)
    dynmat2fc.set_dynamical_matrices(total_frequencies / VaspToTHz, eigenvectors)
    dynmat2fc.run()

    total_force_constants = dynmat2fc.get_force_constants()

    # Stores the merged force constants in DB as a workflow result
    total_data = ArrayData()
    total_data.set_array('force_constants', total_force_constants)

    return {'final_results': total_data}
# Start script here
# Workflow phonon (at given volume)
wf = load_workflow(431)
parameters = wf.get_parameters()
results = wf.get_results()
inline_params = {'structure': results['final_structure'],
'phonopy_input': parameters['phonopy_input'],
'force_constants': results['force_constants']}
harmonic = phonopy_commensurate_inline(**inline_params)
# At reference volume (at T = 0)
wf = load_workflow(432)
parameters = wf.get_parameters()
results_r = wf.get_results()
results_h = wf.get_results()
inline_params = {'structure': results_h['final_structure'],
'phonopy_input': parameters['phonopy_input'],
'force_constants': results_h['force_constants'],
'r_force_constants': results_r['r_force_constants']}
renormalized = phonopy_commensurate_shifts_inline(**inline_params)
inline_params = {'structure': results_h['final_structure'],
'phonopy_input': parameters['phonopy_input'],
'harmonic': harmonic,
'renormalized': renormalized}
total = phonopy_merge(**inline_params)
print total
inline_params = {'structure': results_h['final_structure'],
'phonopy_input': parameters['phonopy_input'],
'force_constants': total['force_constants']}
results = phonopy_calculation_inline(**inline_params)[1]
band = results['band_structure']
# Phonon Band structure plot
plot_data(results['band_structure'])
| abelcarreras/aiida_extensions | workflows/tools/commensurate_phonon.py | Python | mit | 13,854 | [
"phonopy"
] | 8bcc973d41f0036587192e3c1dff930369200848b01b7a56a663443056bb3347 |
import numpy as np
import time
def array_basic():
    """Demonstrate numpy array creation and basic array attributes."""
    # create 2x3 matrix
    print np.array([(1,2,3), (4,5,6)])

    # empty array (uninitialized memory, not zeros)
    print np.empty(5) #1x5
    print np.empty((5, 4)) # 5x4

    # array of 1s
    print np.ones((3,3)) # by default is float
    print np.ones((3,3), dtype=np.int) # int

    # array of 0s
    print np.zeros((1,2))

    # random 3x4
    print np.random.rand(3, 4)

    # random 2x3 from gaussian (normal) distribution (mean = 0, standard deviation = 1)
    print np.random.normal(size=(2,3))

    # change mean and standard deviation
    print np.random.normal(loc=50, scale=10, size=(3,3))

    # random int range
    print np.random.randint(0, 10, size=(2,2))

    # matrix attributes
    a = np.random.random((3, 5))
    print a
    print len(a.shape) # dimension of array
    print a.size # total number of items in the matrix (row * col for 2d array)
    print a.dtype # item type
    print a.shape[0] # number of rows
    print a.shape[1] # number of columns
def array_operation():
    """Demonstrate numpy reduction operations (sum/min/max/mean) and
    argmin/argmax, along whole arrays and along axes."""
    np.random.seed(693)  # fixed seed for reproducible output
    a = np.random.randint(0, 10, size=(5, 4))
    print "array:\n", a

    # sum of all items
    print "sum: ", a.sum()

    # sum along axis
    print "sum of each column:\n", a.sum(axis=0) # axis = 0 because we iterate over the row to compute the sum
    print "sum of each row:\n", a.sum(axis=1) # axis = 1 because we iterate over the column to compute the sum

    # min, max, mean
    print "min of each column:\n", a.min(axis=0)
    print "max of each column:\n", a.max(axis=0)
    print "min of each row:\n", a.min(axis=1)
    print "max of each row:\n", a.max(axis=1)
    print "mean of everything: ", a.mean()

    # index of min/max value of 1d array
    a1 = np.random.randint(0, 10, size=(1,10))
    print a1
    print "index of min value: ", a1.argmin()
    print "index of max value: ", a1.argmax()
    # print a.argmax()
def array_elements():
    """Demonstrate numpy element access: scalar indexing, slicing,
    assignment, fancy (integer-array) indexing and boolean masking."""
    a = np.random.randint(0, 10, size=(5,4))
    print "array:\n", a

    # access single element
    print "element[3,2] = ", a[3,2]

    # access range
    print "element[0:2, 1:3] =\n", a[0:2, 1:3]

    # advance slicing:
    # n:m:t
    # get element from n <= m, with steps t
    # if u dont provide n, 0 will be used
    # if u dont provide m, end_index will be used
    # if u dont provide t, 1 will be used
    print a[0::2, :]

    # assigment
    a[0,0] = 1234
    print a
    a[0,:] = 999
    print a

    # assign matrix, as long as dimension is equal, it will work
    a[2:4, 1:3] = [(-123, -123), (-123, -123)]
    print a

    # index with another array of indeces
    a = np.random.randint(0, 10, size=(5))
    indices = np.array([1, 1, 2, 3])
    print a
    print a[indices]

    # boolean index (masking)
    a = np.random.randint(0, 20, size=(2, 10))
    print a

    # print all elements lower than the mean
    mean = a.mean()
    index = a < mean
    print mean
    print index
    print a[a < mean]

    # we can also use the index to modify the array
    a[index] = -1234
    print a
def array_arithmetic():
    """Demonstrate element-wise arithmetic between arrays and scalars,
    and between two arrays of equal shape."""
    a = np.array([(1,2,3,4,5), (10,20,30,40,50)])
    print "array A:\n", a
    b = np.array([(100,200,300,400,500), (1,2,3,4,5)])
    print "array B:\n", b

    # everything is element wise
    print a * 2
    print a / 2.0
    print a + 2
    print a - 1

    # including matrix x matrix
    print a + b
    print a - b
    print a * b
    print a / b

    # this is the actual matrix multiplication (dot product)
    # a.dot()
if __name__ == "__main__":
    # toggle which demo runs by (un)commenting the calls below
    #array_basic()
    # array_operation()
    # array_elements()
array_arithmetic() | gietal/Stocker | sandbox/udacity/1-3.py | Python | mit | 3,724 | [
"Gaussian"
] | 68e739f43abcba6e3c5749f149202e52baa4f4922590ab33ef6f39e118581900 |
# Copyright (c) Charl P. Botha, TU Delft
# All rights reserved.
# See COPYRIGHT for details.
class ITKtoVTK:
    # DeVIDE module metadata: kit dependencies, menu categories, help text
    kits = ['itk_kit']
    cats = ['Insight']
    help = """Use this module to convert from any ITK image type to
the corresponding VTK type.
"""
class VTKtoITK:
    # DeVIDE module metadata: kit dependencies, menu categories, help text
    kits = ['itk_kit']
    cats = ['Insight']
    help = """Convert from a VTK image to an ITK image.
By default (AutoType active), the output ITK image has the same pixel
type as the input VTK image.  However, if AutoType has been unchecked
in the configuration, the output ITK image has 'Data type' as its type.
"""
class cannyEdgeDetection:
    # DeVIDE module metadata: kit dependencies, menu categories, help text
    kits = ['itk_kit']
    cats = ['Insight']
    help = """Performs 3D Canny edge detection on input image.
A rule of thumb for the thersholds: lower threshold == 0.5 * upper
threshold.
NOTE: Due to a bug in ITK [1], the Canny filter gives invalid
results when run more than once with different sets of parameters.
To work around this, DeVIDE re-instantiates the canny filter at
every execution.  This means that only parameters that you see in
the GUI are transferred to the new instance.
[1] http://www.itk.org/pipermail/insight-users/2009-August/032018.html
"""
class confidenceSeedConnect:
    # DeVIDE module metadata: kit dependencies, categories, search keywords, help text
    kits = ['itk_kit']
    cats = ['Insight']
    keywords = ['region growing', 'confidence', 'seed']
    help = """Confidence-based 3D region growing.
This module will perform a 3D region growing starting from the
user-supplied points.  The mean and standard deviation are calculated in a
small initial region around the seed points.  New contiguous points have
to have intensities on the range [mean - f*stdDev, mean + f*stdDev] to be
included.  f is user-definable.
After this initial growing iteration, if the user has specified a larger
than 0 number of iterations, the mean and standard deviation are
recalculated over all the currently selected points and the process is
restarted.  This process is repeated for the user-defined number of
iterations, or until now new pixels are added.
Due to weirdness in the underlying ITK filter, deleting all points
won't quite work.  In other words, the output of this module can
only be trusted if there's at least a single seed point.
"""
class curvatureAnisotropicDiffusion:
    # DeVIDE module metadata: kit dependencies and menu categories
    # (no help text defined for this module)
    kits = ['itk_kit']
    cats = ['Insight']
class curvatureFlowDenoising:
    # DeVIDE module metadata: kit dependencies, menu categories, help text
    kits = ['itk_kit']
    cats = ['Insight', 'Level Sets']
    help = """Curvature-driven image denoising.
This uses curvature-based level set techniques to smooth
homogeneous regions whilst retaining boundary information.
"""
class DanielssonDistance:
    # DeVIDE module metadata: kit dependencies, menu categories, help text
    kits = ['itk_kit']
    cats = ['Insight']
    help = """Calculates distance image of input image.
The input image can either contain marked objects or binary objects.
"""
class demonsRegistration:
    # DeVIDE module metadata: kit dependencies, menu categories, help text
    kits = ['itk_kit']
    cats = ['Insight', 'Registration', 'Optic Flow']
    help = """Performs demons registration on fixed and moving input images,
returns deformation field.
The intensity difference threshold is absolute, so check the values in
your datasets and adjust it accordingly.  For example, if you find that
two regions should match but you see intensity differences of 50 (e.g.
in a CT dataset), the threshold should be approximately 60.
NOTE: remember to update help w.r.t. inverse direction of vectors in
deformation field.
Also read this thread:
http://public.kitware.com/pipermail/insight-users/2004-November/011002.html
"""
class discreteLaplacian:
kits = ['itk_kit']
cats = ['Insight']
help = """Calculates Laplacian of input image.
This makes use of a discrete implementation. Due to this, the input
image should probably be pre-smoothed with e.g. a Gaussian as the
Laplacian is very sensitive to noise.
Note: One could also calculate the Laplacian by convolving with the
second derivative of a Gaussian.
Laplacian == secondPartialDerivative(f,x0) + ... +
secondPartialDerivative(f,xn)
"""
# had to disable this one due to stupid itkLevelSetNode non-wrapping
# in ITK-2-4-1
class fastMarching:
kits = ['itk_kit']
cats = ['Insight', 'Level Sets']
help = """Given a set of seed points and a speed image, this module will
propagate a moving front out from those points using the fast marching
level set formulation.
"""
class gaussianConvolve:
kits = ['itk_kit']
cats = ['Insight']
help = """Convolves input with Gaussian, or its first or second
derivative.
Only a single dimension is convolved (i.e. the filter is separated).
Select which dimension in the View/Config window.
The convolution is implemented as an IIR filter.
$Revision: 1.4 $
"""
class geodesicActiveContour:
kits = ['itk_kit']
cats = ['Insight', 'Level Sets']
keywords = ['level set']
help = """Module for performing Geodesic Active Contour-based segmentation
on 3D data.
The input feature image is an edge potential map with values close to 0 in
regions close to the edges and values close to 1 otherwise. The level set
speed function is based on this. For example: smooth an input image,
determine the gradient magnitude and then pass it through a sigmoid
transformation to create an edge potential map.
The initial level set is a volume with the initial surface embedded as the
0 level set, i.e. the 0-value iso-contour (more or less).
Also see figure 9.18 in the ITK Software Guide.
"""
class gradientAnisotropicDiffusion:
kits = ['itk_kit']
cats = ['Insight']
help = """Performs a gradient-based anisotropic diffusion.
This will smooth homogeneous areas whilst preserving features
(e.g. edges).
"""
class gradientMagnitudeGaussian:
kits = ['itk_kit']
cats = ['Insight']
help = """Calculates gradient magnitude of an image by convolving with the
derivative of a Gaussian.
The ITK class that this is based on uses a recursive gaussian filter
implementation.
"""
# isn't wrapped anymore, no idea why.
#class gvfgac:
# kits = ['itk_kit']
# cats = ['Insight']
# will fix when I rework the registration modules
#class imageStackRDR:
# kits = ['itk_kit']
# cats = ['Insight']
class isolatedConnect:
    # Metadata-only record for the isolatedConnect DeVIDE module
    # (never instantiated; introspected by the module manager).
    kits = ['itk_kit']
    cats = ['Insight']
    keywords = ['segment']
    # NOTE: the stray, invalid "</il>" closing tag that used to precede
    # "</ul>" has been removed; each <li> item already closes itself.
    help = """Voxels connected to the first group of seeds and NOT connected
    to the second group of seeds are segmented by optimising an upper or
    lower threshold.
    For example, to separate two non-touching light objects, you would do the
    following:
    <ul>
    <li>Select point(s) in the first object with slice3dVWR 1</li>
    <li>Select point(s) in the second object with slice3dVWR 2</li>
    <li>Connect up the three inputs of isolatedConnect as follows: input
    image, point(s) of object 1, point(s) of object 2</li>
    <li>isolatedConnect will now calculate a threshold so that when this
    threshold is applied to the image and a region growing is performed using
    the first set of points, only object 1 will be separated.</li>
    </ul>
    """
# --- Module-index entries (readers/writers, registration, segmentation) -----
# Same convention as the rest of this file: each class is a metadata-only
# record (kits / cats / keywords / help) describing one DeVIDE module.
class ITKReader:
    kits = ['itk_kit']
    cats = ['Insight', 'Readers']
    help = """Reads all the 3D formats supported by ITK. In its default
    configuration, this module will derive file type, data type and
    dimensionality from the file itself. You can manually set the data type
    and dimensionality, in which case ITK will attempt to cast the data.
    Keep in mind that DeVIDE mostly uses the float versions of ITK components.
    At least the following file formats are available (a choice is made based
    on the filename extension that you choose):<br>
    <ul>
    <li>.mha: MetaImage all-in-one file</li>
    <li>.mhd: MetaImage .mhd header file and .raw data file</li>
    <li>.hdr or .img: Analyze .hdr header and .img data</li>
    </ul>
    """
class ITKWriter:
    kits = ['itk_kit']
    cats = ['Insight', 'Writers']
    help = """Writes any of the image formats supported by ITK.
    At least the following file formats are available (a choice is made based
    on the filename extension that you choose):<br>
    <ul>
    <li>.mha: MetaImage all-in-one file</li>
    <li>.mhd: MetaImage .mhd header file and .raw data file</li>
    <li>.hdr or .img: Analyze .hdr header and .img data</li>
    </ul>
    """
# not wrapped by ITK-2-4-1 default wrappings
class levelSetMotionRegistration:
    kits = ['itk_kit']
    cats = ['Insight', 'Registration', 'Level Sets']
    keywords = ['level set', 'registration', 'deformable', 'non-rigid']
    help = """Performs deformable registration between two input volumes using
    level set motion.
    """
# not wrapped by WrapITK 20060710
# class nbCurvesLevelSet:
#     kits = ['itk_kit']
#     cats = ['Insight', 'Level Set']
#     keywords = ['level set']
#     help = """Narrow band level set implementation.
#     The input feature image is an edge potential map with values close to 0 in
#     regions close to the edges and values close to 1 otherwise. The level set
#     speed function is based on this. For example: smooth an input image,
#     determine the gradient magnitude and then pass it through a sigmoid
#     transformation to create an edge potential map.
#     The initial level set is a volume with the initial surface embedded as the
#     0 level set, i.e. the 0-value iso-contour (more or less).
#     """
class nbhSeedConnect:
    kits = ['itk_kit']
    cats = ['Insight']
    help = """Neighbourhood-based 3D region growing.
    This module will perform a 3D region growing starting from the
    user-supplied points. Only pixels with intensities between the
    user-configurable thresholds and with complete neighbourhoods where all
    pixels have intensities between the thresholds are considered valid
    candidates. The size of the neighbourhood can be set as well.
    """
# reactivate when I rework the registration modules
#class register2D:
#    kits = ['itk_kit']
#    cats = ['Insight']
class sigmoid:
    kits = ['itk_kit']
    cats = ['Insight']
    help = """Perform sigmoid transformation on all input voxels.
    f(x) = (max - min) frac{1}{1 + exp(- frac{x - beta}{alpha})} + min
    """
class symmetricDemonsRegistration:
    kits = ['itk_kit']
    cats = ['Insight', 'Registration', 'Optic Flow']
    help = """Performs symmetric forces demons registration on fixed and
    moving input images, returns deformation field.
    """
class tpgac:
    kits = ['itk_kit']
    cats = ['Insight', 'Level Sets']
    keywords = ['segment', 'level set']
    help = """Module for performing topology-preserving Geodesic Active
    Contour-based segmentation on 3D data.
    This module requires a DeVIDE-specific ITK class.
    The input feature image is an edge potential map with values close to 0 in
    regions close to the edges and values close to 1 otherwise. The level set
    speed function is based on this. For example: smooth an input image,
    determine the gradient magnitude and then pass it through a sigmoid
    transformation to create an edge potential map.
    The initial level set is a volume with the initial surface embedded as the
    0 level set, i.e. the 0-value iso-contour (more or less).
    Also see figure 9.18 in the ITK Software Guide.
    """
# will work on this when I rework the 2D registration
#class transform2D:
#    kits = ['itk_kit']
#    cats = ['Insight']
#class transformStackRDR:
#    kits = ['itk_kit']
#    cats = ['Insight']
#class transformStackWRT:
#    kits = ['itk_kit']
#    cats = ['Insight']
class watershed:
    kits = ['itk_kit']
    cats = ['Insight']
    help = """Perform watershed segmentation on input.
    Typically, the input will be the gradient magnitude image. Often, data
    is smoothed with one of the anisotropic diffusion filters and then the
    gradient magnitude image is calculated. This serves as input to the
    watershed module.
    """
| nagyistoce/devide | modules/insight/module_index.py | Python | bsd-3-clause | 12,137 | [
"Gaussian",
"VTK"
] | a14e7b947f136bf66b7fd7ebf86beeecbd98bfb5aee2f1c20fe01da082bf6f42 |
""" This tests only need the TaskQueueDB, and connects directly to it
"""
import unittest
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC import gLogger
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB
class TQDBTestCase( unittest.TestCase ):
  """ Common fixture for the TaskQueueDB integration tests: opens a direct
      connection to the TaskQueueDB (no DIRAC service in between) before
      every test method.
  """
  def setUp( self ):
    # Verbose logging so the DB interactions show up in the test output
    gLogger.setLevel( 'DEBUG' )
    self.tqDB = TaskQueueDB()
  def tearDown( self ):
    # Nothing to clean up: each test removes what it created
    pass
class TQChain( TQDBTestCase ):
  """ Insert / lookup / delete round-trip on the TaskQueueDB
  """
  def test_basicChain( self ):
    """ insert a job, resolve it to its task queue, then remove both
    """
    tqDefDict = {'OwnerDN': '/my/DN', 'OwnerGroup':'myGroup', 'Setup':'aSetup', 'CPUTime':50000}
    # Inserting job 123 (priority 10) implicitly creates its task queue
    res = self.tqDB.insertJob( 123, tqDefDict, 10 )
    self.assertTrue( res['OK'] )
    # The job must now be resolvable to the task queue it landed in
    res = self.tqDB.getTaskQueueForJobs( [123] )
    self.assertTrue( res['OK'] )
    self.assertIn( 123, res['Value'] )
    tqID = res['Value'][123]
    # Clean up: first the job, then the (now empty) task queue
    res = self.tqDB.deleteJob( 123 )
    self.assertTrue( res['OK'] )
    res = self.tqDB.deleteTaskQueue( tqID )
    self.assertTrue( res['OK'] )
class TQTests( TQDBTestCase ):
"""
"""
def test_TQ( self ):
""" test of various functions
"""
tqDefDict = {'OwnerDN': '/my/DN', 'OwnerGroup':'myGroup', 'Setup':'aSetup', 'CPUTime':50000}
self.tqDB.insertJob( 123, tqDefDict, 10 )
result = self.tqDB.getNumTaskQueues()
self.assertTrue(result['OK'])
self.assertEqual( result['Value'], 1 )
result = self.tqDB.retrieveTaskQueues()
self.assertTrue(result['OK'])
self.assertEqual( result['Value'].values()[0],
{'OwnerDN': '/my/DN', 'Jobs': 1L, 'OwnerGroup': 'myGroup',
'Setup': 'aSetup', 'CPUTime': 86400L, 'Priority': 1.0} )
result = self.tqDB.findOrphanJobs()
self.assertTrue(result['OK'])
result = self.tqDB.recalculateTQSharesForAll()
self.assertTrue(result['OK'])
# this will also remove the job
result = self.tqDB.matchAndGetJob( {'Setup': 'aSetup', 'CPUTime': 300000} )
self.assertTrue(result['OK'])
self.assertTrue( result['Value']['matchFound'] )
self.assertEqual( result['Value']['jobId'], 123L )
tq = result['Value']['taskQueueId']
result = self.tqDB.deleteTaskQueue( tq )
self.assertTrue(result['OK'])
if __name__ == '__main__':
  # Assemble one suite from the fixture base class plus the two test cases
  loader = unittest.defaultTestLoader
  suite = loader.loadTestsFromTestCase( TQDBTestCase )
  for caseClass in ( TQChain, TQTests ):
    suite.addTest( loader.loadTestsFromTestCase( caseClass ) )
  testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
| Andrew-McNab-UK/DIRAC | tests/Integration/WorkloadManagementSystem/Test_TaskQueueDB.py | Python | gpl-3.0 | 2,614 | [
"DIRAC"
] | 13ebf741985b3891f6824d92043d2f63dc3f9943682e228890b4f12c0110064c |
# Izhikevich.py ---
#
# Filename: Izhikevich.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Fri May 28 14:42:33 2010 (+0530)
# Version:
# Last-Updated: Tue Sep 11 14:27:18 2012 (+0530)
# By: subha
# Update #: 1212
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
# threhold variablity to be checked.
# Bistability not working.
# DAP working with increased parameter value 'a'
# inhibition induced spiking kind of working but not matching with the paper figure
# inhibition induced bursting kind of working but not matching with the paper figure
# Accommodation cannot work with the current implementation: because the equation for u is not what is mentioned in the paper
# it is: u = u + tau*a*(b*(V+65)); [It is nowhere in the paper and you face it only if you look at the matlab code for figure 1].
# It is not possible to tune a, b, c, d in any way to produce this from: u = u + tau*a*(b*V - u)
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import time
from numpy import *
import os
import sys
import moose
class IzhikevichDemo:
"""Class to setup and simulate the various kind of neuronal behaviour using Izhikevich model.
Fields:
"""
# Paramteres for different kinds of behaviour described by Izhikevich
# (1. IEEE TRANSACTIONS ON NEURAL NETWORKS, VOL. 14, NO. 6, NOVEMBER 2003
# and 2. IEEE TRANSACTIONS ON NEURAL NETWORKS, VOL. 15, NO. 5, SEPTEMBER
# 2004)
# Modified and enhanced using: http://www.izhikevich.org/publications/figure1.m
# The entries in the tuple are as follows:
# fig. no. in paper (2), parameter a, parameter b, parameter c (reset value of v in mV), parameter d (after-spike reset value of u), injection current I (uA), initial value of Vm, duration of simulation (ms)
#
# They are all in whatever unit they were in the paper. Just before use we convert them to SI.
parameters = {
"tonic_spiking": ['A', 0.02 , 0.2 , -65.0, 6.0 , 14.0, -70.0, 100.0], # Fig. 1.A
"phasic_spiking": ['B', 0.02 , 0.25 , -65.0, 6.0 , 0.5, -64.0, 200.0], # Fig. 1.B
"tonic_bursting": ['C', 0.02 , 0.2 , -50.0, 2.0 , 15.0, -70.0, 220.0], # Fig. 1.C
"phasic_bursting": ['D', 0.02 , 0.25 , -55.0, 0.05 , 0.6, -64.0, 200.0], # Fig. 1.D
"mixed_mode": ['E', 0.02 , 0.2 , -55.0, 4.0 , 10.0, -70.0, 160.0], # Fig. 1.E
"spike_freq_adapt": ['F', 0.01 , 0.2 , -65.0, 8.0 , 30.0, -70.0, 85.0 ], # Fig. 1.F # spike frequency adaptation
"Class_1": ['G', 0.02 , -0.1 , -55.0, 6.0 , 0, -60.0, 300.0], # Fig. 1.G # Spikining Frequency increases with input strength
"Class_2": ['H', 0.2 , 0.26 , -65.0, 0.0 , 0, -64.0, 300.0], # Fig. 1.H # Produces high frequency spikes
"spike_latency": ['I', 0.02 , 0.2 , -65.0, 6.0 , 7.0, -70.0, 100.0], # Fig. 1.I
"subthresh_osc": ['J', 0.05 , 0.26 , -60.0, 0.0 , 0, -62.0, 200.0], # Fig. 1.J # subthreshold oscillations
"resonator": ['K', 0.1 , 0.26 , -60.0, -1.0 , 0, -62.0, 400.0], # Fig. 1.K
"integrator": ['L', 0.02 , -0.1 , -55.0, 6.0 , 0, -60.0, 100.0], # Fig. 1.L
"rebound_spike": ['M', 0.03 , 0.25 , -60.0, 4.0 , -15, -64.0, 200.0], # Fig. 1.M
"rebound_burst": ['N', 0.03 , 0.25 , -52.0, 0.0 , -15, -64.0, 200.0], # Fig. 1.N
"thresh_var": ['O', 0.03 , 0.25 , -60.0, 4.0 , 0, -64.0, 100.0], # Fig. 1.O # threshold variability
"bistable": ['P', 0.1 , 0.26 , -60.0, 0.0 , 1.24, -61.0, 300.0], # Fig. 1.P
"DAP": ['Q', 1.15 , 0.2 , -60.0, -21.0 , 20, -70.0, 50.0], # Fig. 1.Q # Depolarizing after-potential - a had to be increased in order to reproduce the figure
"accommodation": ['R', 0.02 , 1.0 , -55.0, 4.0 , 0, -65.0, 400.0], # Fig. 1.R
"iispike": ['S', -0.02 , -1.0 , -60.0, 8.0 , 75.0, -63.8, 350.0], # Fig. 1.S # inhibition-induced spiking
"iiburst": ['T', -0.026, -1.0 , -45.0, 0.0 , 75.0, -63.8, 350.0] # Fig. 1.T # inhibition-induced bursting
}
documentation = {
"tonic_spiking": """
Neuron is normally silent but spikes when stimulated with a current injection.""",
"phasic_spiking": """
Neuron fires a single spike only at the start of a current pulse.""",
"tonic_bursting": """
Neuron is normally silent but produces bursts of spikes when
stimulated with current injection.""",
"phasic_bursting": """
Neuron is normally silent but produces a burst of spikes at the
beginning of an input current pulse.""",
"mixed_mode": """
Neuron fires a burst at the beginning of input current pulse, but then
switches to tonic spiking.""",
"spike_freq_adapt": """
Neuron fires spikes when a current injection is applied, but at a
gradually reducing rate.""",
"Class_1": """
Neuron fires low frequency spikes with weak input current injection.""",
"Class_2": """
Neuron fires high frequency (40-200 Hz) spikes when stimulated with
current injection.""",
"spike_latency": """
The spike starts after a delay from the onset of current
injection. The delay is dependent on strength of input.""",
"subthresh_osc": """
Even at subthreshold inputs a neuron exhibits oscillatory membrane potential.""",
"resonator": """
Neuron fires spike only when an input pulsetrain of a frequency
similar to that of the neuron's subthreshold oscillatory frequency is
applied.""",
"integrator": """
The chances of the neuron firing increases with increase in the frequency
of input pulse train.""",
"rebound_spike": """
When the neuron is released from an inhibitory input, it fires a spike.""",
"rebound_burst": """
When the neuron is released from an inhibitory input, it fires a burst
of action potentials.""",
"thresh_var": """
Depending on the previous input, the firing threshold of a neuron may
change. In this example, the first input pulse does not produce
spike, but when the same input is applied after an inhibitory input,
it fires.""",
"bistable": """
These neurons switch between two stable modes (resting and tonic spiking).
The switch happens via an excitatory or inhibitory input.""",
"DAP": """
After firing a spike, the membrane potential shows a prolonged depolarized
after-potential.""",
"accommodation": """
These neurons do not respond to slowly rising input, but a sharp increase
in input may cause firing.""",
"iispike": """
These neurons fire in response to inhibitory input.""",
"iiburst": """
These neurons show bursting in response to inhibitory input."""
}
    def __init__(self):
        """Initialize the object."""
        # Containers under which all model / recording elements are created
        self.model_container = moose.Neutral('/model')
        self.data_container = moose.Neutral('/data')
        # Caches keyed by behaviour name (keys of IzhikevichDemo.parameters)
        self.neurons = {}
        self.Vm_tables = {}
        self.u_tables = {}
        self.inject_tables = {}
        self.inputs = {}
        # Default simulation length and integration step, in seconds
        # (run() overrides simtime per behaviour)
        self.simtime = 100e-3
        self.dt = 0.25e-3
        self.steps = int(self.simtime/self.dt)
        moose.setClock(0, self.dt)
        moose.setClock(1, self.dt)
        moose.setClock(2, self.dt)
        self.scheduled = {} # this is to bypass multiple clock issue
        self.neuron = None  # most recently configured neuron (set in _get_neuron)
    def setup(self, key):
        """Create the neuron and its current source for behaviour `key`."""
        neuron = self._get_neuron(key)
        pulsegen = self._make_pulse_input(key)
        # NOTE(review): _make_pulse_input never returns None (it raises for
        # unsupported keys), so this looks like a defensive leftover -- confirm
        # before removing.
        if pulsegen is None:
            print((key, 'Not implemented.'))
    def simulate(self, key):
        """Set up behaviour `key` and run it; returns (t, Vm_table, Im_table)."""
        self.setup(key)
        return self.run(key)
    def run(self, key):
        """Run the simulation for behaviour `key`.

        Creates (or reuses) recording tables for Vm, u and the injected
        current, schedules any not-yet-scheduled elements, runs moose for
        the duration given in IzhikevichDemo.parameters[key][7] (ms), and
        returns a tuple (t, Vm_table, Im_table) where t is in ms.
        """
        # Lazily create the Vm and u recording tables for this behaviour
        try:
            Vm = self.Vm_tables[key]
            u = self.u_tables[key]
        except KeyError as e:
            Vm = moose.Table(self.data_container.path + '/' + key + '_Vm')
            nrn = self.neurons[key]
            moose.connect(Vm, 'requestOut', nrn, 'getVm')
            utable = moose.Table(self.data_container.path + '/' + key + '_u')
            utable.connect('requestOut', self.neurons[key], 'getU')
            self.Vm_tables[key] = Vm
            self.u_tables[key] = utable
        # Lazily create the injected-current recording table
        try:
            Im = self.inject_tables[key]
        except KeyError as e:
            Im = moose.Table(self.data_container.path + '/' + key + '_inject') # May be different for non-pulsegen sources.
            Im.connect('requestOut', self._get_neuron(key), 'getIm')
            self.inject_tables[key] = Im
        self.simtime = IzhikevichDemo.parameters[key][7] * 1e-3
        # Schedule every element exactly once; self.scheduled remembers what
        # has already been assigned to a clock (see note in __init__)
        for obj in moose.wildcardFind('%s/##' % (self.model_container.path)):
            if obj not in self.scheduled:
                moose.useClock(0, obj.path, 'process')
                self.scheduled[obj] = True
        for obj in moose.wildcardFind('%s/##' % (self.data_container.path)):
            if obj not in self.scheduled:
                moose.useClock(2, obj.path, 'process')
                self.scheduled[obj] = True
        moose.reinit()
        moose.start(self.simtime)
        # Wait for an asynchronous moose run to finish
        while moose.isRunning():
            time.sleep(100)
        # Time axis in ms, one point per recorded sample
        t = linspace(0, IzhikevichDemo.parameters[key][7], len(Vm.vector))
        # DEBUG
        nrn = self._get_neuron(key)
        print(('a = %g, b = %g, c = %g, d = %g, initVm = %g, initU = %g' % (nrn.a,nrn.b, nrn.c, nrn.d, nrn.initVm, nrn.initU)))
        #! DEBUG
        return (t, Vm, Im)
    def _get_neuron(self, key):
        """Return the (cached) IzhikevichNrn for behaviour `key`.

        On first use the neuron is created and its a/b/c/d parameters are
        converted from the paper's units (ms, mV) to SI (s, V).
        Raises KeyError for an unknown behaviour name.
        """
        try:
            params = IzhikevichDemo.parameters[key]
        except KeyError as e:
            print((' %s : Invalid neuron type. The valid types are:' % (key)))
            for key in IzhikevichDemo.parameters:
                print(key)
            raise e
        try:
            # Cache hit: the neuron has been configured before
            neuron = self.neurons[key]
            return neuron
        except KeyError as e:
            neuron = moose.IzhikevichNrn(self.model_container.path + '/' + key)
            if key == 'integrator' or key == 'Class_1': # Integrator has different constants
                neuron.beta = 4.1e3
                neuron.gamma = 108.0
            if key == 'accommodation':
                # Accommodation uses the alternative u-dynamics (see module header)
                neuron.accommodating = True
                neuron.u0 = -0.065
            self.neuron = neuron
            neuron.a = params[1] * 1e3 # ms^-1 -> s^-1
            neuron.b = params[2] * 1e3 # ms^-1 -> s^-1
            neuron.c = params[3] * 1e-3 # mV -> V
            neuron.d = params[4]  # d is in mV/ms = V/s
            neuron.initVm = params[6] * 1e-3 # mV -> V
            neuron.Vmax = 0.03 # mV -> V
            if key != 'accommodation':
                neuron.initU = neuron.initVm * neuron.b
            else:
                neuron.initU = -16.0 # u is in mV/ms = V/s
            moose.showfield(neuron)
            self.neurons[key] = neuron
            return neuron
def _make_pulse_input(self, key):
"""This is for creating a pulse generator for use as a current
source for all cases except Class_1, Class_2, resonator,
integrator, thresh_var and accommodation."""
try:
return self.inputs[key]
except KeyError:
pass # continue to the reset of the function
baseLevel = 0.0
firstWidth = 1e6
firstDelay = 0.0
firstLevel = IzhikevichDemo.parameters[key][5] * 1e-6
secondDelay = 1e6
secondWidth = 0.0
secondLevel = 0.0
if key == 'tonic_spiking':
firstDelay = 10e-3
elif key == 'phasic_spiking':
firstDelay = 20e-3
elif key == 'tonic_bursting':
firstDelay = 22e-3
elif key == 'phasic_bursting':
firstDelay = 20e-3
elif key == 'mixed_mode':
firstDelay = 16e-3
elif key == 'spike_freq_adapt':
firstDelay = 8.5e-3
elif key == 'spike_latency':
firstDelay = 10e-3
firstWidth = 3e-3
elif key == 'subthresh_osc':
firstDelay = 20e-3
firstWidth = 5e-3
firstLevel = 2e-9
elif key == 'rebound_spike':
firstDelay = 20e-3
firstWidth = 5e-3
elif key == 'rebound_burst':
firstDelay = 20e-3
firstWidth = 5e-3
elif key == 'bistable':
input_table = self._make_bistable_input()
self.inputs[key] = input_table
return input_table
elif key == 'DAP':
firstDelay = 9e-3
firstWidth = 2e-3
elif (key == 'iispike') or (key == 'iiburst'):
baseLevel = 80e-9
firstDelay = 50e-3
firstWidth = 200e-3
fisrtLevel = 75e-9
elif key == 'Class_1':
input_table = self._make_Class_1_input()
self.inputs[key] = input_table
return input_table
elif key == 'Class_2':
input_table = self._make_Class_2_input()
self.inputs[key] = input_table
return input_table
elif key == 'resonator':
input_table = self._make_resonator_input()
self.inputs[key] = input_table
return input_table
elif key == 'integrator':
input_table = self._make_integrator_input()
self.inputs[key] = input_table
return input_table
elif key == 'accommodation':
input_table = self._make_accommodation_input()
self.inputs[key] = input_table
return input_table
elif key == 'thresh_var':
input_table = self._make_thresh_var_input()
self.inputs[key] = input_table
return input_table
else:
raise RuntimeError( key + ': Stimulus is not based on pulse generator.')
pulsegen = self._make_pulsegen(key,
firstLevel,
firstDelay,
firstWidth,
secondLevel,
secondDelay,
secondWidth, baseLevel)
self.inputs[key] = pulsegen
return pulsegen
    def _make_pulsegen(self, key, firstLevel, firstDelay, firstWidth=1e6, secondLevel=0, secondDelay=1e6, secondWidth=0, baseLevel=0):
        """Create a moose PulseGen with the given pulse parameters (levels in
        amperes, times in seconds) and wire its output into the neuron for
        behaviour `key` as injected current."""
        pulsegen = moose.PulseGen(self.model_container.path + '/' + key + '_input')
        pulsegen.firstLevel = firstLevel
        pulsegen.firstDelay = firstDelay
        pulsegen.firstWidth = firstWidth
        pulsegen.secondLevel = secondLevel
        pulsegen.secondDelay = secondDelay
        pulsegen.secondWidth = secondWidth
        pulsegen.baseLevel = baseLevel
        nrn = self._get_neuron(key)
        moose.connect(pulsegen, 'output', nrn, 'injectMsg')
        # self.stimulus_table = moose.Table(self.data_container.path + '/stimulus')
        # self.stimulus_table.connect('requestOut', pulsegen, 'getOutputValue')
        return pulsegen
def _make_Class_1_input(self):
input_table = moose.StimulusTable(self.model_container.path + '/' + 'Class_1_input')
input_table.stepSize = self.dt
input_table.startTime = 30e-3 # The ramp starts at 30 ms
input_table.stopTime = IzhikevichDemo.parameters['Class_1'][7] * 1e-3
# matlab code: if (t>T1) I=(0.075*(t-T1)); else I=0;
input_vec = np.arange(0, int(ceil((input_table.stopTime - input_table.startTime) / input_table.stepSize)), 1.0) * 0.075 * self.dt * 1e3 * 1e-9
input_table.vector = input_vec
input_table.connect('output', self._get_neuron('Class_1'), 'injectMsg')
self.stimulus_table = moose.Table(self.data_container.path + '/stimulus')
moose.connect(input_table, 'output', self.stimulus_table, 'input')
return input_table
def _make_Class_2_input(self):
key = 'Class_2'
input_table = moose.StimulusTable(self.model_container.path + '/' + key + '_input')
input_table.stepSize = self.dt
input_table.startTime = 30e-3 # The ramp starts at 30 ms
input_table.stopTime = IzhikevichDemo.parameters[key][7] * 1e-3
# The matlab code is: if (t>T1) I=-0.5+(0.015*(t-T1)); else I=-0.5
# convert dt from s to ms, and convert total current from nA to A.
input_vec = np.arange(0, int(ceil((input_table.stopTime - input_table.startTime) / input_table.stepSize)), 1.0) * 0.015 * self.dt * 1e3 * 1e-9 - 0.05*1e-9
input_table.vector = input_vec
input_table.connect('output', self._get_neuron(key), 'injectMsg')
return input_table
def _make_bistable_input(self):
key = 'bistable'
input_table = moose.StimulusTable(self.model_container.path + '/' + key + '_input')
input_table.stepSize = self.dt
input_table.startTime = 0
input_table.stopTime = IzhikevichDemo.parameters[key][7] * 1e-3
t1 = IzhikevichDemo.parameters[key][7] * 1e-3/8
t2 = 216e-3
t = np.arange(0,
int(ceil((input_table.stopTime - input_table.startTime) / input_table.stepSize))) * self.dt
input_vec = np.where(np.logical_or(np.logical_and(t > t1, t < t1+5e-3),
np.logical_and(t > t2, t < t2+5e-3)),
1.24e-9,
0.24e-9)
input_table.vector = input_vec
input_table.connect('output', self._get_neuron(key), 'injectMsg')
return input_table
def _make_resonator_input(self):
key = 'resonator'
input_table = moose.StimulusTable(self.model_container.path + '/' + key + '_input')
input_table.stepSize = self.dt
input_table.startTime = 0
input_table.stopTime = IzhikevichDemo.parameters[key][7] * 1e-3
t1 = IzhikevichDemo.parameters[key][7] * 1e-3/10
t2 = t1 + 20e-3
t3 = 0.7 * IzhikevichDemo.parameters[key][7] * 1e-3
t4 = t3 + 40e-3
t = np.arange(0, int(ceil((input_table.stopTime - input_table.startTime) / input_table.stepSize)), 1) * self.dt
input_vec = np.zeros(t.shape)
idx = np.nonzero(((t > t1) & (t < t1 + 4e-3)) |
((t > t2) & (t < t2 + 4e-3)) |
((t > t3) & (t < t3 + 4e-3)) |
((t > t4) & (t < t4 + 4e-3)))[0]
input_vec[idx] = 0.65e-9
input_table.vector = input_vec
input_table.connect('output', self._get_neuron(key), 'injectMsg')
return input_table
def _make_integrator_input(self):
key = 'integrator'
input_table = moose.StimulusTable(self.model_container.path + '/' + key + '_input')
input_table.stepSize = self.dt
input_table.startTime = 0
input_table.stopTime = IzhikevichDemo.parameters[key][7] * 1e-3
t1 = IzhikevichDemo.parameters[key][7] * 1e-3/11
t2 = t1 + 5e-3
t3 = 0.7 * IzhikevichDemo.parameters[key][7] * 1e-3
t4 = t3 + 10e-3
t = np.arange(0, int(ceil((input_table.stopTime - input_table.startTime) / input_table.stepSize))) * self.dt
input_vec = np.where(((t > t1) & (t < t1 + 2e-3)) |
((t > t2) & (t < t2 + 2e-3)) |
((t > t3) & (t < t3 + 2e-3)) |
((t > t4) & (t < t4 + 2e-3)),
9e-9,
0.0)
input_table.vector = input_vec
input_table.connect('output', self._get_neuron(key), 'injectMsg')
return input_table
def _make_accommodation_input(self):
key = 'accommodation'
input_table = moose.StimulusTable(self.model_container.path + '/' + key + '_input')
input_table.stepSize = self.dt
input_table.startTime = 0
input_table.stopTime = IzhikevichDemo.parameters[key][7] * 1e-3
input_vec = np.zeros(int(ceil((input_table.stopTime - input_table.startTime) / input_table.stepSize)))
t = 0.0
for ii in range(len(input_vec)):
if t < 200e-3:
input_vec[ii] = t * 1e-6/25
elif t < 300e-3:
input_vec[ii] = 0.0
elif t < 312.5e-3:
input_vec[ii] = 4e-6 * (t-300e-3)/12.5
else:
input_vec[ii] = 0.0
t = t + self.dt
input_table.vector = input_vec
input_table.connect('output', self._get_neuron(key), 'injectMsg')
return input_table
def _make_thresh_var_input(self):
key = 'thresh_var'
input_table = moose.StimulusTable(self.model_container.path + '/' + key + '_input')
input_table.stepSize = self.dt
input_table.startTime = 0
input_table.stopTime = IzhikevichDemo.parameters[key][7] * 1e-3
t = np.arange(0, int(ceil((input_table.stopTime - input_table.startTime) / input_table.stepSize)), 1) * self.dt
input_vec = np.zeros(t.shape)
input_vec[((t > 10e-3) & (t < 15e-3)) | ((t > 80e-3) & (t < 85e-3))] = 1e-9
input_vec[(t > 70e-3) & (t < 75e-3)] = -6e-9
input_table.vector = input_vec
nrn = self._get_neuron(key)
input_table.connect('output', nrn, 'injectMsg')
return input_table
def getEquation(self, key):
params = IzhikevichDemo.parameters[key]
if key != 'accommodation':
equationText = "<i>v' = 0.04v^2 + 5v + 140 - u + I</i><br><i>u' = a(bv - u)</i><p>If <i>v >= 30 mV, v = c</i> and <i>u = u + d</i><br>where <i>a = %g</i>, <i>b = %g</i>, <i>c = %g</i> and <i>d = %g</i>." % (params[1], params[2], params[3], params[4])
else:
equationText = "<i>v' = 0.04v^2 + 5v + 140 - u + I</i><br><i>u' = ab(v + 65)</i><p>If <i>v >= 30 mV, v = c</i> and <i>u = u + d</i><br>where <i>a = %g</i>, <i>b = %g</i>, <i>c = %g</i> and <i>d = %g</i>." % (params[1], params[2], params[3], params[4])
return equationText
import sys
try:
    # pylab provides title/subplot/plot/show used below; if matplotlib is
    # missing we simply report it and skip the demo.
    from pylab import *
    if __name__ == '__main__':
        # Behaviour to demonstrate: first CLI argument, default 'thresh_var'
        key = 'thresh_var'
        if len(sys.argv) > 1:
            key = sys.argv[1]
        demo = IzhikevichDemo()
        (t, Vm, Im) = demo.simulate(key)
        title(IzhikevichDemo.parameters[key][0] + '. ' + key)
        # Membrane potential on top, injected current below
        subplot(3,1,1)
        plot(t, Vm.vector)
        subplot(3,1,2)
        plot(t, Im.vector)
        subplot(3,1,3)
        show()
        print('Finished simulation.')
except ImportError:
    print('Matplotlib not installed.')
#
# Izhikevich.py ends here
| BhallaLab/moose | moose-examples/izhikevich/Izhikevich.py | Python | gpl-3.0 | 23,767 | [
"MOOSE",
"NEURON"
] | ad15d0ed6e996f24a8691f71752f7e61c84f92eba63e830be485b26dde3cc01a |
import xml.dom.minidom
import numpy
import string
def CreateLib():
    """Create an empty <source_library> XML document.

    Returns a tuple ``(lib, doc)`` where ``doc`` is the minidom Document and
    ``lib`` is its <source_library> root element.  The root carries a
    ``title`` attribute and a comment recording the generating script and
    the creation time.  (Replaces the old '@todo: document me' placeholder.)
    """
    import sys
    import time
    domimpl = xml.dom.minidom.getDOMImplementation()
    doc = domimpl.createDocument(None, "source_library", None)
    lib = doc.documentElement
    lib.setAttribute("title", "source library")
    # Provenance comment so generated model files are traceable
    lib.appendChild(doc.createComment('Source library created by %s at %s' %
                                      (sys.argv[0], time.asctime())))
    return lib, doc
def MakeScale(flux_value):
    """Return the power of ten used as the 'scale' of a flux value.

    The exponent is chosen by rounding in log10 space,
    e.g. 1.4e-14 ---> 1e-14.
    """
    exponent = numpy.floor(numpy.log10(flux_value) + 0.5)
    return 10 ** exponent
def addParameter(el, name, free, value, scale, min, max):
    """Append a <parameter> child element to *el*.

    ``free`` is written as an integer flag; the numeric fields use '%g'
    formatting.  Note: ``min``/``max`` shadow the builtins, but the names
    are kept so existing keyword callers continue to work.
    """
    doc = el.ownerDocument
    param = doc.createElement('parameter')
    for attr, fmt, val in (('name', '%s', name),
                           ('free', '%d', free),
                           ('scale', '%g', scale),
                           ('value', '%g', value),
                           ('max', '%g', max),
                           ('min', '%g', min)):
        param.setAttribute(attr, fmt % val)
    el.appendChild(param)
def AddPointLike(doc,ra,dec,coord_free=0):
    """Build a SkyDirFunction <spatialModel> element for a point source.

    The RA/DEC position parameters are frozen unless `coord_free` is truthy.
    """
    model = doc.createElement('spatialModel')
    model.setAttribute('type', 'SkyDirFunction')
    addParameter(model, 'RA', int(coord_free), ra, 1.0, -360.0, 360.0)
    addParameter(model, 'DEC', int(coord_free), dec, 1.0, -90.0, 90.0)
    return model
# --- Extended spatial model builders ----------------------------------------
# Each helper returns a <spatialModel> DOM element of the given type,
# populated via addParameter().  `coord_free` controls whether the RA/DEC
# position parameters are left free in the fit (0 = frozen).
def AddDisk(doc,ra,dec,radius,coord_free=0):
    """Build a DiskFunction <spatialModel>: a uniform disk of `radius`."""
    spatial = doc.createElement('spatialModel')
    spatial.setAttribute('type', 'DiskFunction')
    addParameter(spatial, 'RA', int(coord_free), ra, 1.0, -360.0, 360.0)
    addParameter(spatial, 'DEC', int(coord_free), dec, 1.0, -90.0, 90.0)
    addParameter(spatial, 'Radius', 1, radius, 1., 0.01, 10.0)
    return spatial
def AddGauss(doc,ra,dec,radius,coord_free=0,radius_free=1):
    """Build a GaussFunction <spatialModel>; `radius` is the Gaussian Sigma,
    left free in the fit unless `radius_free` is 0."""
    spatial = doc.createElement('spatialModel')
    spatial.setAttribute('type', 'GaussFunction')
    addParameter(spatial, 'RA', int(coord_free), ra, 1.0, -360.0, 360.0)
    addParameter(spatial, 'DEC', int(coord_free), dec, 1.0, -90.0, 90.0)
    addParameter(spatial, 'Sigma', int(radius_free), radius, 1., 0.01, 10.0)
    return spatial
def AddShell(doc,ra,dec,radius,width,coord_free=0):
    """Build a ShellFunction <spatialModel>: a shell of the given inner
    `radius` and `width`."""
    spatial = doc.createElement('spatialModel')
    spatial.setAttribute('type', 'ShellFunction')
    addParameter(spatial, 'RA', int(coord_free), ra, 1.0, -360.0, 360.0)
    addParameter(spatial, 'DEC', int(coord_free), dec, 1.0, -90.0, 90.0)
    addParameter(spatial, 'Radius', 1, radius, 1., 0.01, 10.0)
    addParameter(spatial, 'Width', 1, width, 1., 0.01, 10.0)
    return spatial
def AddEllipticalDisk(doc, ra, dec, PA, MinorRadius, MajorRadius, coord_free=0):
    """Return an EllipticalDisk <spatialModel> (position angle fixed)."""
    free = int(coord_free)
    spatial = doc.createElement('spatialModel')
    spatial.setAttribute('type', 'EllipticalDisk')
    addParameter(spatial, 'RA', free, ra, 1.0, -360.0, 360.0)
    addParameter(spatial, 'DEC', free, dec, 1.0, -90.0, 90.0)
    # Position angle is held fixed; only the two radii are fitted.
    addParameter(spatial, 'PA', 0, PA, 1.0, -360.0, 360.0)
    for pname, pval in (('MinorRadius', MinorRadius),
                        ('MajorRadius', MajorRadius)):
        addParameter(spatial, pname, 1, pval, 1., 0.001, 10.0)
    return spatial
def AddEllipticalGauss(doc, ra, dec, PA, MinorRadius, MajorRadius, coord_free=0):
    """Return an EllipticalGauss <spatialModel> (position angle fitted)."""
    free = int(coord_free)
    spatial = doc.createElement('spatialModel')
    spatial.setAttribute('type', 'EllipticalGauss')
    addParameter(spatial, 'RA', free, ra, 1.0, -360.0, 360.0)
    addParameter(spatial, 'DEC', free, dec, 1.0, -90.0, 90.0)
    # Unlike AddEllipticalDisk, the position angle is free here.
    addParameter(spatial, 'PA', 1, PA, 1.0, -360.0, 360.0)
    for pname, pval in (('MinorRadius', MinorRadius),
                        ('MajorRadius', MajorRadius)):
        addParameter(spatial, pname, 1, pval, 1., 0.001, 10.0)
    return spatial
def AddConstantValue(doc, value):
    """Return a ConstantValue <spatialModel> (isotropic, fitted Value)."""
    spatial = doc.createElement('spatialModel')
    spatial.setAttribute('type', 'ConstantValue')
    addParameter(spatial, 'Value', 1, value, 1.0, 0.001, 100.0)
    return spatial
def AddSpatialMap(doc, value, file="map.fits"):
    """Return a SpatialMap <spatialModel> backed by the FITS map *file*."""
    spatial = doc.createElement('spatialModel')
    spatial.setAttribute('type', 'SpatialMap')
    spatial.setAttribute('file', file)
    addParameter(spatial, 'Prefactor', 1, value, 1.0, 0.001, 100.0)
    return spatial
def AddMapCube(doc, value, file="map.fits"):
    """Return a MapCubeFunction <spatialModel> backed by the cube *file*."""
    spatial = doc.createElement('spatialModel')
    spatial.setAttribute('type', 'MapCubeFunction')
    spatial.setAttribute('file', file)
    addParameter(spatial, 'Normalization', 1, value, 1.0, 0.001, 100.0)
    return spatial
def addCTABackgroundGauss(lib):
    """Return a CTA radial-acceptance background <source> with a Gaussian
    radial model (Sigma free, starting at 3 deg)."""
    doc = lib.ownerDocument
    src = doc.createElement('source')
    for attr, val in (('name', "Background"),
                      ('type', "RadialAcceptance"),
                      ('instrument', "CTA")):
        src.setAttribute(attr, val)
    radmod = doc.createElement('radialModel')
    radmod.setAttribute('type', 'Gaussian')
    addParameter(radmod, 'Sigma', 1, 3.0, 1, 0.01, 10)
    src.appendChild(radmod)
    return src
def addCTABackgroundProfile(lib, width=1.5, core=3, tail=5.):
    """Return a CTA radial-acceptance background <source> with a Profile
    radial model; Width/Core/Tail are all fitted."""
    doc = lib.ownerDocument
    src = doc.createElement('source')
    for attr, val in (('name', "Background"),
                      ('type', "RadialAcceptance"),
                      ('instrument', "CTA")):
        src.setAttribute(attr, val)
    radmod = doc.createElement('radialModel')
    radmod.setAttribute('type', 'Profile')
    for pname, pval in (('Width', width), ('Core', core), ('Tail', tail)):
        addParameter(radmod, pname, 1, pval, 1, 0.1, 1000)
    src.appendChild(radmod)
    return src
def addCTABackgroundPolynom(lib, Coeff, Coeff_free):
    """Return a CTA radial-acceptance background <source> with a Polynom
    radial model.

    Parameters
    ----------
    lib : DOM element
        Parent element; the new <source> is created from its owner document.
    Coeff : sequence of float
        Polynomial coefficients; one 'Coeff<i>' parameter is added per entry.
    Coeff_free : sequence of int or None
        Free/fixed flag per coefficient.  Missing entries (shorter sequence,
        or None instead of a sequence) default to free (1).
    """
    doc = lib.ownerDocument
    src = doc.createElement('source')
    src.setAttribute('name', "Background")
    src.setAttribute('type', "RadialAcceptance")
    src.setAttribute('instrument', "CTA")
    radmod = doc.createElement('radialModel')
    radmod.setAttribute('type', 'Polynom')
    # range() instead of Py2-only xrange(); narrow except so a failure inside
    # addParameter can no longer cause the parameter to be appended twice.
    for i, coeff in enumerate(Coeff):
        try:
            free = Coeff_free[i]
        except (IndexError, TypeError):
            free = 1  # no flag supplied for this coefficient -> leave it free
        addParameter(radmod, 'Coeff' + str(i), free, coeff, 1, -10, 10)
    src.appendChild(radmod)
    return src
def addCTAIrfBackground(lib):
    """Return a CTAIrfBackground <source> with a free PowerLaw spectrum
    (Prefactor and Index fitted, pivot fixed at 1 TeV)."""
    doc = lib.ownerDocument
    src = doc.createElement('source')
    for attr, val in (('name', "CTABackgroundModel"),
                      ('type', "CTAIrfBackground"),
                      ('instrument', "CTA")):
        src.setAttribute(attr, val)
    spec = doc.createElement('spectrum')
    spec.setAttribute('type', 'PowerLaw')
    addParameter(spec, 'Prefactor', 1, 1, 1, 1e-3, 1e3)
    addParameter(spec, 'Index', 1, 0, 1.0, -5, 5)
    addParameter(spec, 'PivotEnergy', 0, 1., 1.e6, 0.01, 1e3)
    src.appendChild(spec)
    return src
def addCTACubeBackground(lib):
    """Return a CTACubeBackground <source> with a free PowerLaw spectrum
    (same parameterisation as addCTAIrfBackground)."""
    doc = lib.ownerDocument
    src = doc.createElement('source')
    for attr, val in (('name', "CTABackgroundModel"),
                      ('type', "CTACubeBackground"),
                      ('instrument', "CTA")):
        src.setAttribute(attr, val)
    spec = doc.createElement('spectrum')
    spec.setAttribute('type', 'PowerLaw')
    addParameter(spec, 'Prefactor', 1, 1, 1, 1e-3, 1e3)
    addParameter(spec, 'Index', 1, 0, 1.0, -5, 5)
    addParameter(spec, 'PivotEnergy', 0, 1., 1.e6, 0.01, 1e3)
    src.appendChild(spec)
    return src
def addPowerLaw1(lib, name, type="PointSource", eflux=1e5,
                 flux_free=1, flux_value=1e-18, flux_scale=0,
                 flux_max=1000.0, flux_min=1e-5,
                 index_free=1, index_value=-2.0,
                 index_min=-5.0, index_max=5.0):
    """Return a <source> element with a PowerLaw spectrum.

    When flux_scale is 0 it is derived from flux_value with MakeScale and
    the value is normalised accordingly, keeping the XML value near unity.
    """
    elim_min, elim_max = 30, 3e7  # bounds for the fixed pivot energy
    if flux_scale == 0:
        flux_scale = MakeScale(flux_value)
    flux_value /= flux_scale
    doc = lib.ownerDocument
    src = doc.createElement('source')
    for attr, val in (('name', name), ('type', type), ('tscalc', '1')):
        src.setAttribute(attr, val)
    spec = doc.createElement('spectrum')
    spec.setAttribute('type', 'PowerLaw')
    addParameter(spec, 'Prefactor', flux_free, flux_value, flux_scale,
                 flux_min, flux_max)
    addParameter(spec, 'Index', index_free, index_value, 1.0,
                 index_min, index_max)
    addParameter(spec, 'PivotEnergy', 0, eflux, 1.0, elim_min, elim_max)
    src.appendChild(spec)
    return src
def addFileFunction(lib, name, type="PointSource", filefun="out/X_File.txt",
                    flux_free=1, flux_value=1., flux_scale=1.,
                    flux_max=100000000.0, flux_min=0.0):
    """Return a <source> whose spectrum is a FileFunction read from *filefun*."""
    if flux_scale == 0:
        flux_scale = MakeScale(flux_value)
    flux_value /= flux_scale
    doc = lib.ownerDocument
    src = doc.createElement('source')
    for attr, val in (('name', name), ('type', type), ('tscalc', '1')):
        src.setAttribute(attr, val)
    spec = doc.createElement('spectrum')
    spec.setAttribute('type', 'FileFunction')
    spec.setAttribute('file', filefun)
    addParameter(spec, 'Normalization', flux_free, flux_value, flux_scale,
                 flux_min, flux_max)
    src.appendChild(spec)
    return src
def addPowerLaw2(lib, name, type="PointSource", emin=30, emax=3e7,
                 flux_free=1, flux_value=1.6e-8, flux_scale=0,
                 flux_max=1000.0, flux_min=1e-5,
                 index_free=1, index_value=-2.0,
                 index_min=-5.0, index_max=-0.5):
    """Return a <source> with a PowerLaw2 (integral-flux) spectrum between
    *emin* and *emax* MeV."""
    # Widen the parameter bounds if the requested range exceeds the defaults.
    elim_min = min(30, emin)
    elim_max = max(300000, emax)
    if flux_scale == 0:
        flux_scale = MakeScale(flux_value)
    flux_value /= flux_scale
    doc = lib.ownerDocument
    src = doc.createElement('source')
    for attr, val in (('name', name), ('type', type), ('tscalc', '1')):
        src.setAttribute(attr, val)
    spec = doc.createElement('spectrum')
    spec.setAttribute('type', 'PowerLaw2')
    addParameter(spec, 'Integral',
                 flux_free, flux_value, flux_scale, flux_min, flux_max)
    addParameter(spec, 'Index', index_free, index_value, 1.0,
                 index_min, index_max)
    addParameter(spec, 'LowerLimit', 0, emin, 1.0, elim_min, elim_max)
    addParameter(spec, 'UpperLimit', 0, emax, 1.0, elim_min, elim_max)
    src.appendChild(spec)
    return src
def addLogparabola(lib, name, type="PointSource", enorm=300,
                   norm_free=1, norm_value=1e-9, norm_scale=0,
                   norm_max=1000.0, norm_min=1e-5,
                   alpha_free=1, alpha_value=1.0,
                   alpha_min=.1, alpha_max=5.,
                   beta_free=1, beta_value=1.0,
                   beta_min=-5.0, beta_max=5.0):
    """Return a <source> with a LogParabola spectrum (norm/alpha/Eb/beta).

    The normalisation is translated from its 100 MeV reference to *enorm*
    using the alpha slope, then split into value/scale for the XML parameter.
    """
    elim_min = 30
    elim_max = 3e7
    if enorm == 0:
        enorm = 2e5  # fallback pivot energy when none is supplied
    # Rescale the norm from the 100 MeV reference energy to enorm.
    norm_value *= (enorm / 100.0) ** alpha_value
    if norm_scale == 0:
        norm_scale = MakeScale(norm_value)
    norm_value /= norm_scale
    doc = lib.ownerDocument
    src = doc.createElement('source')
    src.setAttribute('name', name)
    src.setAttribute('type', type)
    src.setAttribute('tscalc', '1')
    spec = doc.createElement('spectrum')
    spec.setAttribute('type', 'LogParabola')
    addParameter(spec, 'norm',
                 norm_free, norm_value, norm_scale, norm_min, norm_max)
    addParameter(spec, 'alpha', alpha_free, alpha_value, 1.0,
                 alpha_min, alpha_max)
    addParameter(spec, 'Eb', 0, enorm, 1.0, elim_min, elim_max)
    addParameter(spec, 'beta', beta_free, beta_value, 1.0, beta_min, beta_max)
    src.appendChild(spec)
    return src
def addExponotialCutOffPL(lib, name, type="PointSource", eflux=0,
                          flux_free=1, flux_value=1e-9, flux_scale=0,
                          flux_max=1000.0, flux_min=1e-5,
                          index_free=1, index_value=-2.0,
                          index_min=-5.0, index_max=-0.5,
                          cutoff_free=1, cutoff_value=1e6, cutoff_scale=0,
                          cutoff_min=0.01, cutoff_max=1000):
    """Return a <source> with an exponentially cut-off power-law spectrum
    (ExpCutoff: Prefactor, Index, fixed Scale, fitted Cutoff)."""
    # Bounds for the (fixed) reference-energy parameter.
    elim_min = 30
    elim_max = 3e7
    if flux_scale == 0:
        flux_scale = MakeScale(flux_value)
    flux_value /= flux_scale
    if cutoff_scale == 0:
        cutoff_scale = MakeScale(cutoff_value)
    cutoff_value /= cutoff_scale
    doc = lib.ownerDocument
    src = doc.createElement('source')
    src.setAttribute('name', name)
    src.setAttribute('type', type)
    src.setAttribute('tscalc', '1')
    spec = doc.createElement('spectrum')
    spec.setAttribute('type', 'ExpCutoff')
    addParameter(spec, 'Prefactor',
                 flux_free, flux_value, flux_scale, flux_min, flux_max)
    addParameter(spec, 'Index', index_free, index_value, 1.0,
                 index_min, index_max)
    addParameter(spec, 'Scale', 0, eflux, 1.0, elim_min, elim_max)
    addParameter(spec, 'Cutoff', cutoff_free, cutoff_value, cutoff_scale, cutoff_min, cutoff_max)
    src.appendChild(spec)
    return src
def addSuperExponotialCutOffPL(lib, name, type="PointSource", eflux=0,
                               flux_free=1, flux_value=1e-9, flux_scale=0,
                               flux_max=1000.0, flux_min=1e-5,
                               index1_free=1, index1_value=-2.0,
                               index1_min=-5.0, index1_max=-0.5,
                               cutoff_free=1, cutoff_value=1e6,
                               cutoff_min=0.01, cutoff_max=1000,
                               index2_free=1, index2_value=-2.0,
                               index2_min=0.01, index2_max=5.0):
    """Return a <source> with a super-exponentially cut-off power law
    (PLSuperExpCutoff: Prefactor, Index1, fixed Scale, Cutoff, Index2)."""
    # Bounds for the (fixed) reference-energy parameter.
    elim_min = 30
    elim_max = 3e7
    if flux_scale == 0:
        flux_scale = MakeScale(flux_value)
    flux_value /= flux_scale
    doc = lib.ownerDocument
    src = doc.createElement('source')
    src.setAttribute('name', name)
    src.setAttribute('type', type)
    src.setAttribute('tscalc', '1')
    spec = doc.createElement('spectrum')
    spec.setAttribute('type', 'PLSuperExpCutoff')
    addParameter(spec, 'Prefactor',
                 flux_free, flux_value, flux_scale, flux_min, flux_max)
    addParameter(spec, 'Index1', index1_free, index1_value, 1.0,
                 index1_min, index1_max)
    addParameter(spec, 'Scale', 0, eflux, 1.0, elim_min, elim_max)
    addParameter(spec, 'Cutoff', cutoff_free, cutoff_value, 1.0, cutoff_min, cutoff_max)
    addParameter(spec, 'Index2', index2_free, index2_value, 1.0,
                 index2_min, index2_max)
    src.appendChild(spec)
    return src
def addGaussian(lib, name, type="PointSource", norm_scale=0,
                norm_free=1, norm_value=1e-10,
                norm_max=1000.0, norm_min=1e-5,
                mean_free=1, mean_value=5.0, mean_scale=0,
                mean_min=0.01, mean_max=100,
                sigma_free=1, sigma_value=1.0, sigma_scale=0,
                sigma_min=0.01, sigma_max=100):
    """Return a <source> with a Gaussian spectral model
    (Normalization, Mean, Sigma).

    Each parameter is split into value/scale (via MakeScale when the caller
    passes scale=0) so the XML value stays of order unity.
    Note: unlike the power-law builders, no 'tscalc' attribute is set.
    """
    # (Removed two dead locals elim_min/elim_max that were never used.)
    if norm_scale == 0:
        norm_scale = MakeScale(norm_value)
    norm_value /= norm_scale
    if mean_scale == 0:
        mean_scale = MakeScale(mean_value)
    mean_value /= mean_scale
    if sigma_scale == 0:
        sigma_scale = MakeScale(sigma_value)
    sigma_value /= sigma_scale
    doc = lib.ownerDocument
    src = doc.createElement('source')
    src.setAttribute('name', name)
    src.setAttribute('type', type)
    spec = doc.createElement('spectrum')
    spec.setAttribute('type', 'Gaussian')
    addParameter(spec, 'Normalization', norm_free, norm_value, norm_scale, norm_min, norm_max)
    addParameter(spec, 'Mean', mean_free, mean_value, mean_scale, mean_min, mean_max)
    addParameter(spec, 'Sigma', sigma_free, sigma_value, sigma_scale, sigma_min, sigma_max)
    src.appendChild(spec)
    return src
def PowerLawEBL(lib, source, filefun, eflux=1e5,
                flux_free=1, flux_value=1e-14, flux_scale=0,
                flux_max=1000.0, flux_min=1e-5,
                index_free=1, index_value=-2.0,
                index_min=-5.0, index_max=5.0, ebl_free=0):
    """Return a point <source> whose spectrum is a Multiplicative model:
    intrinsic PowerLaw times an EBL-absorption FileFunction (*filefun*)."""
    doc = lib.ownerDocument  # (duplicate re-assignment below removed)
    src = doc.createElement('source')
    src.setAttribute('name', source)
    src.setAttribute('type', "PointSource")
    src.setAttribute('tscalc', '1')
    specMulti = doc.createElement('spectrum')
    specMulti.setAttribute('type', 'Multiplicative')
    elim_min = 30
    elim_max = 3e7
    if flux_scale == 0:
        flux_scale = MakeScale(flux_value)
    flux_value /= flux_scale
    # Intrinsic power-law component.
    specPL = doc.createElement('spectrum')
    specPL.setAttribute('type', 'PowerLaw')
    specPL.setAttribute('component', 'PowerLawComponent')
    addParameter(specPL, 'Prefactor',
                 flux_free, flux_value, flux_scale, flux_min, flux_max)
    addParameter(specPL, 'Index', index_free, index_value, 1.0,
                 index_min, index_max)
    addParameter(specPL, 'PivotEnergy', 0, eflux, 1.0, elim_min, elim_max)
    specMulti.appendChild(specPL)
    # EBL absorption component (normalisation fixed by default).
    specfile = doc.createElement('spectrum')
    specfile.setAttribute('type', 'FileFunction')
    specfile.setAttribute('file', filefun)
    specfile.setAttribute('component', "EBLComponent")
    addParameter(specfile, 'Normalization', ebl_free, 1, 1, 1e-5, 1e5)
    specMulti.appendChild(specfile)
    src.appendChild(specMulti)
    return src
def LogParabolaEBL(lib, source, filefun, enorm=300,
                   norm_free=1, norm_value=1e-9, norm_scale=0,
                   norm_max=1000.0, norm_min=1e-5,
                   alpha_free=1, alpha_value=1.0,
                   alpha_min=.1, alpha_max=5.,
                   beta_free=1, beta_value=1.0,
                   beta_min=-5.0, beta_max=5.0, ebl_free=0):
    """Return a point <source> whose spectrum is a Multiplicative model:
    intrinsic LogParabola times an EBL-absorption FileFunction (*filefun*)."""
    doc = lib.ownerDocument  # (duplicate re-assignment below removed)
    src = doc.createElement('source')
    src.setAttribute('name', source)
    src.setAttribute('type', "PointSource")
    src.setAttribute('tscalc', '1')
    specMulti = doc.createElement('spectrum')
    specMulti.setAttribute('type', 'Multiplicative')
    elim_min = 30
    elim_max = 3e7
    if enorm == 0:
        enorm = 2e5  # fallback pivot energy when none is supplied
    # Rescale the norm from the 100 MeV reference energy to enorm.
    norm_value *= (enorm / 100.0) ** alpha_value
    if norm_scale == 0:
        norm_scale = MakeScale(norm_value)
    norm_value /= norm_scale
    specLP = doc.createElement('spectrum')
    # BUG FIX: the intrinsic component carries LogParabola parameters
    # (norm/alpha/Eb/beta) but was declared with type 'PowerLaw'; it must be
    # 'LogParabola', matching addLogparabola above.
    specLP.setAttribute('type', 'LogParabola')
    specLP.setAttribute('component', 'LogParabolaComponent')
    addParameter(specLP, 'norm',
                 norm_free, norm_value, norm_scale, norm_min, norm_max)
    addParameter(specLP, 'alpha', alpha_free, alpha_value, 1.0,
                 alpha_min, alpha_max)
    addParameter(specLP, 'Eb', 0, enorm, 1.0, elim_min, elim_max)
    addParameter(specLP, 'beta', beta_free, beta_value, 1.0, beta_min, beta_max)
    specMulti.appendChild(specLP)
    # EBL absorption component (normalisation fixed by default).
    specfile = doc.createElement('spectrum')
    specfile.setAttribute('type', 'FileFunction')
    specfile.setAttribute('file', filefun)
    specfile.setAttribute('component', "EBLComponent")
    addParameter(specfile, 'Normalization', ebl_free, 1, 1, 1e-5, 1e5)
    specMulti.appendChild(specfile)
    src.appendChild(specMulti)
    return src
def PowerLawExpCutoffEBL(lib, source, filefun, eflux=1e5,
                         flux_free=1, flux_value=1e-9, flux_scale=0,
                         flux_max=1000.0, flux_min=1e-5,
                         index_free=1, index_value=1.0,
                         index_min=.1, index_max=5.,
                         ecut_free=1, ecut_value=1.e6,
                         ecut_min=100, ecut_max=100.e6, ebl_free=0):
    """Return a point <source> whose spectrum is a Multiplicative model:
    intrinsic ExponentialCutoffPowerLaw times an EBL-absorption
    FileFunction (*filefun*)."""
    doc = lib.ownerDocument  # (duplicate re-assignment below removed)
    src = doc.createElement('source')
    src.setAttribute('name', source)
    src.setAttribute('type', "PointSource")
    src.setAttribute('tscalc', '1')
    specMulti = doc.createElement('spectrum')
    specMulti.setAttribute('type', 'Multiplicative')
    elim_min = 30
    elim_max = 3e7
    if flux_scale == 0:
        flux_scale = MakeScale(flux_value)
    flux_value /= flux_scale
    # Intrinsic cut-off power-law component.
    specPLEC = doc.createElement('spectrum')
    specPLEC.setAttribute('type', 'ExponentialCutoffPowerLaw')
    specPLEC.setAttribute('component', 'ExponentialCutoffPowerLawComponent')
    addParameter(specPLEC, 'Prefactor',
                 flux_free, flux_value, flux_scale, flux_min, flux_max)
    addParameter(specPLEC, 'Index', index_free, index_value, 1.0,
                 index_min, index_max)
    addParameter(specPLEC, 'CutoffEnergy', ecut_free, ecut_value, 1.0,
                 ecut_min, ecut_max)
    addParameter(specPLEC, 'PivotEnergy', 0, eflux, 1.0, elim_min, elim_max)
    specMulti.appendChild(specPLEC)
    # EBL absorption component (normalisation fixed by default).
    specfile = doc.createElement('spectrum')
    specfile.setAttribute('type', 'FileFunction')
    specfile.setAttribute('file', filefun)
    specfile.setAttribute('component', "EBLComponent")
    addParameter(specfile, 'Normalization', ebl_free, 1, 1, 1e-5, 1e5)
    specMulti.appendChild(specfile)
    src.appendChild(specMulti)
    return src
| davidsanchez/CTAtools | ctoolsAnalysis/xml_generator.py | Python | gpl-3.0 | 21,335 | [
"Gaussian"
] | 056f56079be553949a8a2dbaf6ea4357178dd959a2269029168943e81fab3d3b |
# Copyright 2008 Brian Boyer, Ryan Mark, Angela Nitzke, Joshua Pollock,
# Stuart Tiffen, Kayla Webley and the Medill School of Journalism, Northwestern
# University.
#
# This file is part of Crunchberry Pie.
#
# Crunchberry Pie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Crunchberry Pie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with Crunchberry Pie. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import *
# URL routes for the profiles app (old-style Django string-path views).
urlpatterns = patterns('',
    # Logged-in user's own profile page.
    (r'^$', 'profiles.views.personal'),
    # Public profile page for the given profile id (word characters only).
    (r'^(?P<profile_id>\w*)/$', 'profiles.views.detail'),
    # Start following the given profile.
    (r'^(?P<profile_id>\w*)/follow$', 'profiles.views.follow'),
    # Stop following the given profile.
    (r'^(?P<profile_id>\w*)/unfollow$', 'profiles.views.unfollow'),
)
| brianboyer/newsmixer | pie/profiles/urls.py | Python | gpl-3.0 | 1,157 | [
"Brian"
] | bd85120ea8f4114458e835983a71de5b9b8281392c26f639889c29b94e082590 |
# -*- coding: utf-8 -*-
""" Tests for student account views. """
import re
from nose.plugins.attrib import attr
from unittest import skipUnless
from urllib import urlencode
import mock
import ddt
from django.conf import settings
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.messages.middleware import MessageMiddleware
from django.test import TestCase
from django.test.utils import override_settings
from django.http import HttpRequest
from edx_rest_api_client import exceptions
from course_modes.models import CourseMode
from commerce.models import CommerceConfiguration
from commerce.tests import TEST_API_URL, TEST_API_SIGNING_KEY, factories
from commerce.tests.mocks import mock_get_orders
from openedx.core.djangoapps.programs.tests.mixins import ProgramsApiConfigMixin
from openedx.core.djangoapps.user_api.accounts.api import activate_account, create_account
from openedx.core.djangoapps.user_api.accounts import EMAIL_MAX_LENGTH
from openedx.core.djangolib.js_utils import dump_js_escaped_json
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from student.tests.factories import UserFactory
from student_account.views import account_settings_context, get_user_orders
from third_party_auth.tests.testutil import simulate_running_pipeline, ThirdPartyAuthTestMixin
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from openedx.core.djangoapps.theming.test_util import with_edx_domain_context
@ddt.ddt
class StudentAccountUpdateTest(CacheIsolationTestCase, UrlResetMixin):
    """ Tests for the student account views that update the user's account information. """
    # Credentials for the primary account created in setUp.
    USERNAME = u"heisenberg"
    ALTERNATE_USERNAME = u"walt"
    # Non-ASCII passwords exercise unicode handling in the auth flow.
    OLD_PASSWORD = u"ḅḷüëṡḳÿ"
    NEW_PASSWORD = u"🄱🄸🄶🄱🄻🅄🄴"
    OLD_EMAIL = u"walter@graymattertech.com"
    NEW_EMAIL = u"walt@savewalterwhite.com"
    # Number of consecutive bad requests used to trip the rate limiter.
    INVALID_ATTEMPTS = 100
    INVALID_EMAILS = [
        None,
        u"",
        u"a",
        "no_domain",
        "no+domain",
        "@",
        "@domain.com",
        "test@no_extension",
        # Long email -- subtract the length of the @domain
        # except for one character (so we exceed the max length limit)
        u"{user}@example.com".format(
            user=(u'e' * (EMAIL_MAX_LENGTH - 11))
        )
    ]
    INVALID_KEY = u"123abc"
    URLCONF_MODULES = ['student_accounts.urls']
    ENABLED_CACHES = ['default']
    def setUp(self):
        super(StudentAccountUpdateTest, self).setUp()
        # Create/activate a new account
        activation_key = create_account(self.USERNAME, self.OLD_PASSWORD, self.OLD_EMAIL)
        activate_account(activation_key)
        # Login
        result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
        self.assertTrue(result)
    @skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
    def test_password_change(self):
        # Request a password change while logged in, simulating
        # use of the password reset link from the account page
        response = self._change_password()
        self.assertEqual(response.status_code, 200)
        # Check that an email was sent
        self.assertEqual(len(mail.outbox), 1)
        # Retrieve the activation link from the email body
        email_body = mail.outbox[0].body
        result = re.search('(?P<url>https?://[^\s]+)', email_body)
        self.assertIsNot(result, None)
        activation_link = result.group('url')
        # Visit the activation link
        response = self.client.get(activation_link)
        self.assertEqual(response.status_code, 200)
        # Submit a new password and follow the redirect to the success page
        response = self.client.post(
            activation_link,
            # These keys are from the form on the current password reset confirmation page.
            {'new_password1': self.NEW_PASSWORD, 'new_password2': self.NEW_PASSWORD},
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Your password has been reset.")
        # Log the user out to clear session data
        self.client.logout()
        # Verify that the new password can be used to log in
        result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD)
        self.assertTrue(result)
        # Try reusing the activation link to change the password again
        # (one-time links must be rejected on a second use)
        response = self.client.post(
            activation_link,
            {'new_password1': self.OLD_PASSWORD, 'new_password2': self.OLD_PASSWORD},
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "This password reset link is invalid. It may have been used already.")
        self.client.logout()
        # Verify that the old password cannot be used to log in
        result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
        self.assertFalse(result)
        # Verify that the new password continues to be valid
        result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD)
        self.assertTrue(result)
    @ddt.data(True, False)
    def test_password_change_logged_out(self, send_email):
        # Log the user out
        self.client.logout()
        # Request a password change while logged out, simulating
        # use of the password reset link from the login page
        if send_email:
            response = self._change_password(email=self.OLD_EMAIL)
            self.assertEqual(response.status_code, 200)
        else:
            # Don't send an email in the POST data, simulating
            # its (potentially accidental) omission in the POST
            # data sent from the login page
            response = self._change_password()
            self.assertEqual(response.status_code, 400)
    def test_password_change_inactive_user(self):
        # Log out the user created during test setup
        self.client.logout()
        # Create a second user, but do not activate it
        create_account(self.ALTERNATE_USERNAME, self.OLD_PASSWORD, self.NEW_EMAIL)
        # Send the view the email address tied to the inactive user
        response = self._change_password(email=self.NEW_EMAIL)
        # Expect that the activation email is still sent,
        # since the user may have lost the original activation email.
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(mail.outbox), 1)
    def test_password_change_no_user(self):
        # Log out the user created during test setup
        self.client.logout()
        # Send the view an email address not tied to any user
        response = self._change_password(email=self.NEW_EMAIL)
        self.assertEqual(response.status_code, 400)
    def test_password_change_rate_limited(self):
        # Log out the user created during test setup, to prevent the view from
        # selecting the logged-in user's email address over the email provided
        # in the POST data
        self.client.logout()
        # Make many consecutive bad requests in an attempt to trigger the rate limiter
        for attempt in xrange(self.INVALID_ATTEMPTS):
            self._change_password(email=self.NEW_EMAIL)
        # The next request should be rejected with 403 Forbidden.
        response = self._change_password(email=self.NEW_EMAIL)
        self.assertEqual(response.status_code, 403)
    @ddt.data(
        ('post', 'password_change_request', []),
    )
    @ddt.unpack
    def test_require_http_method(self, correct_method, url_name, args):
        # Every HTTP verb other than the correct one must return 405.
        wrong_methods = {'get', 'put', 'post', 'head', 'options', 'delete'} - {correct_method}
        url = reverse(url_name, args=args)
        for method in wrong_methods:
            response = getattr(self.client, method)(url)
            self.assertEqual(response.status_code, 405)
    def _change_password(self, email=None):
        """Request to change the user's password. """
        # The email key is omitted entirely when not provided, matching the
        # login-page form's behaviour.
        data = {}
        if email:
            data['email'] = email
        return self.client.post(path=reverse('password_change_request'), data=data)
@attr('shard_3')
@ddt.ddt
class StudentAccountLoginAndRegistrationTest(ThirdPartyAuthTestMixin, UrlResetMixin, ModuleStoreTestCase):
    """ Tests for the student account views that update the user's account information. """
    # Credentials used by tests that need an authenticated session.
    USERNAME = "bob"
    EMAIL = "bob@example.com"
    PASSWORD = "password"
    URLCONF_MODULES = ['embargo']
    @mock.patch.dict(settings.FEATURES, {'EMBARGO': True})
    def setUp(self):
        super(StudentAccountLoginAndRegistrationTest, self).setUp()
        # For these tests, three third party auth providers are enabled by default:
        self.configure_google_provider(enabled=True)
        self.configure_facebook_provider(enabled=True)
        self.configure_dummy_provider(
            enabled=True,
            icon_class='',
            icon_image=SimpleUploadedFile('icon.svg', '<svg><rect width="50" height="100"/></svg>'),
        )
    @ddt.data(
        ("signin_user", "login"),
        ("register_user", "register"),
    )
    @ddt.unpack
    def test_login_and_registration_form(self, url_name, initial_mode):
        # The page embeds the initial form mode as JSON data.
        response = self.client.get(reverse(url_name))
        expected_data = '"initial_mode": "{mode}"'.format(mode=initial_mode)
        self.assertContains(response, expected_data)
    @ddt.data("signin_user", "register_user")
    def test_login_and_registration_form_already_authenticated(self, url_name):
        # Create/activate a new account and log in
        activation_key = create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        activate_account(activation_key)
        result = self.client.login(username=self.USERNAME, password=self.PASSWORD)
        self.assertTrue(result)
        # Verify that we're redirected to the dashboard
        response = self.client.get(reverse(url_name))
        self.assertRedirects(response, reverse("dashboard"))
    @ddt.data(
        (False, "signin_user"),
        (False, "register_user"),
        (True, "signin_user"),
        (True, "register_user"),
    )
    @ddt.unpack
    def test_login_and_registration_form_signin_preserves_params(self, is_edx_domain, url_name):
        params = [
            ('course_id', 'edX/DemoX/Demo_Course'),
            ('enrollment_action', 'enroll'),
        ]
        # The response should have a "Sign In" button with the URL
        # that preserves the querystring params
        with with_edx_domain_context(is_edx_domain):
            response = self.client.get(reverse(url_name), params)
        expected_url = '/login?{}'.format(self._finish_auth_url_param(params + [('next', '/dashboard')]))
        self.assertContains(response, expected_url)
        # Add additional parameters:
        params = [
            ('course_id', 'edX/DemoX/Demo_Course'),
            ('enrollment_action', 'enroll'),
            ('course_mode', CourseMode.DEFAULT_MODE_SLUG),
            ('email_opt_in', 'true'),
            ('next', '/custom/final/destination')
        ]
        # Verify that this parameter is also preserved
        with with_edx_domain_context(is_edx_domain):
            response = self.client.get(reverse(url_name), params)
        expected_url = '/login?{}'.format(self._finish_auth_url_param(params))
        self.assertContains(response, expected_url)
    @mock.patch.dict(settings.FEATURES, {"ENABLE_THIRD_PARTY_AUTH": False})
    @ddt.data("signin_user", "register_user")
    def test_third_party_auth_disabled(self, url_name):
        # With the feature flag off, no providers should be rendered.
        response = self.client.get(reverse(url_name))
        self._assert_third_party_auth_data(response, None, None, [])
    @ddt.data(
        ("signin_user", None, None),
        ("register_user", None, None),
        ("signin_user", "google-oauth2", "Google"),
        ("register_user", "google-oauth2", "Google"),
        ("signin_user", "facebook", "Facebook"),
        ("register_user", "facebook", "Facebook"),
        ("signin_user", "dummy", "Dummy"),
        ("register_user", "dummy", "Dummy"),
    )
    @ddt.unpack
    def test_third_party_auth(self, url_name, current_backend, current_provider):
        params = [
            ('course_id', 'course-v1:Org+Course+Run'),
            ('enrollment_action', 'enroll'),
            ('course_mode', CourseMode.DEFAULT_MODE_SLUG),
            ('email_opt_in', 'true'),
            ('next', '/custom/final/destination'),
        ]
        # Simulate a running pipeline
        if current_backend is not None:
            pipeline_target = "student_account.views.third_party_auth.pipeline"
            with simulate_running_pipeline(pipeline_target, current_backend):
                response = self.client.get(reverse(url_name), params)
        # Do NOT simulate a running pipeline
        else:
            response = self.client.get(reverse(url_name), params)
        # This relies on the THIRD_PARTY_AUTH configuration in the test settings
        expected_providers = [
            {
                "id": "oa2-dummy",
                "name": "Dummy",
                "iconClass": None,
                "iconImage": settings.MEDIA_URL + "icon.svg",
                "loginUrl": self._third_party_login_url("dummy", "login", params),
                "registerUrl": self._third_party_login_url("dummy", "register", params)
            },
            {
                "id": "oa2-facebook",
                "name": "Facebook",
                "iconClass": "fa-facebook",
                "iconImage": None,
                "loginUrl": self._third_party_login_url("facebook", "login", params),
                "registerUrl": self._third_party_login_url("facebook", "register", params)
            },
            {
                "id": "oa2-google-oauth2",
                "name": "Google",
                "iconClass": "fa-google-plus",
                "iconImage": None,
                "loginUrl": self._third_party_login_url("google-oauth2", "login", params),
                "registerUrl": self._third_party_login_url("google-oauth2", "register", params)
            },
        ]
        self._assert_third_party_auth_data(response, current_backend, current_provider, expected_providers)
    def test_hinted_login(self):
        # A tpa_hint in the next URL should surface as third_party_auth_hint.
        params = [("next", "/courses/something/?tpa_hint=oa2-google-oauth2")]
        response = self.client.get(reverse('signin_user'), params)
        self.assertContains(response, '"third_party_auth_hint": "oa2-google-oauth2"')
    @override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME)
    def test_microsite_uses_old_login_page(self):
        # Retrieve the login page from a microsite domain
        # and verify that we're served the old page.
        resp = self.client.get(
            reverse("signin_user"),
            HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
        )
        self.assertContains(resp, "Log into your Test Microsite Account")
        self.assertContains(resp, "login-form")
    def test_microsite_uses_old_register_page(self):
        # Retrieve the register page from a microsite domain
        # and verify that we're served the old page.
        resp = self.client.get(
            reverse("register_user"),
            HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
        )
        self.assertContains(resp, "Register for Test Microsite")
        self.assertContains(resp, "register-form")
    def test_login_registration_xframe_protected(self):
        # By default framing is denied ...
        resp = self.client.get(
            reverse("register_user"),
            {},
            HTTP_REFERER="http://localhost/iframe"
        )
        self.assertEqual(resp['X-Frame-Options'], 'DENY')
        # ... but an enabled LTI provider matching the referer hostname
        # relaxes the X-Frame-Options header.
        self.configure_lti_provider(name='Test', lti_hostname='localhost', lti_consumer_key='test_key', enabled=True)
        resp = self.client.get(
            reverse("register_user"),
            HTTP_REFERER="http://localhost/iframe"
        )
        self.assertEqual(resp['X-Frame-Options'], 'ALLOW')
    def _assert_third_party_auth_data(self, response, current_backend, current_provider, providers):
        """Verify that third party auth info is rendered correctly in a DOM data attribute. """
        finish_auth_url = None
        if current_backend:
            finish_auth_url = reverse("social:complete", kwargs={"backend": current_backend}) + "?"
        auth_info = {
            "currentProvider": current_provider,
            "providers": providers,
            "secondaryProviders": [],
            "finishAuthUrl": finish_auth_url,
            "errorMessage": None,
        }
        auth_info = dump_js_escaped_json(auth_info)
        expected_data = '"third_party_auth": {auth_info}'.format(
            auth_info=auth_info
        )
        self.assertContains(response, expected_data)
    def _third_party_login_url(self, backend_name, auth_entry, login_params):
        """Construct the login URL to start third party authentication. """
        return u"{url}?auth_entry={auth_entry}&{param_str}".format(
            url=reverse("social:begin", kwargs={"backend": backend_name}),
            auth_entry=auth_entry,
            param_str=self._finish_auth_url_param(login_params),
        )
    def _finish_auth_url_param(self, params):
        """
        Make the next=... URL parameter that indicates where the user should go next.
        >>> _finish_auth_url_param([('next', '/dashboard')])
        '/account/finish_auth?next=%2Fdashboard'
        """
        return urlencode({
            'next': '/account/finish_auth?{}'.format(urlencode(params))
        })
@override_settings(ECOMMERCE_API_URL=TEST_API_URL, ECOMMERCE_API_SIGNING_KEY=TEST_API_SIGNING_KEY)
class AccountSettingsViewTest(ThirdPartyAuthTestMixin, TestCase, ProgramsApiConfigMixin):
    """ Tests for the account settings view. """
    USERNAME = 'student'
    PASSWORD = 'password'
    # Editable account fields that must appear both in the view context and
    # in the rendered page.
    FIELDS = [
        'country',
        'gender',
        'language',
        'level_of_education',
        'password',
        'year_of_birth',
        'preferred_language',
    ]
    @mock.patch("django.conf.settings.MESSAGE_STORAGE", 'django.contrib.messages.storage.cookie.CookieStorage')
    def setUp(self):
        """Create a logged-in user, enable commerce config and two auth providers."""
        super(AccountSettingsViewTest, self).setUp()
        self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
        CommerceConfiguration.objects.create(cache_ttl=10, enabled=True)
        self.client.login(username=self.USERNAME, password=self.PASSWORD)
        self.request = HttpRequest()
        self.request.user = self.user
        # For these tests, two third party auth providers are enabled by default:
        self.configure_google_provider(enabled=True)
        self.configure_facebook_provider(enabled=True)
        # Python-social saves auth failure notifcations in Django messages.
        # See pipeline.get_duplicate_provider() for details.
        self.request.COOKIES = {}
        MessageMiddleware().process_request(self.request)
        messages.error(self.request, 'Facebook is already in use.', extra_tags='Auth facebook')
    def test_context(self):
        """The context exposes API URLs, editable fields and auth provider state."""
        context = account_settings_context(self.request)
        user_accounts_api_url = reverse("accounts_api", kwargs={'username': self.user.username})
        self.assertEqual(context['user_accounts_api_url'], user_accounts_api_url)
        user_preferences_api_url = reverse('preferences_api', kwargs={'username': self.user.username})
        self.assertEqual(context['user_preferences_api_url'], user_preferences_api_url)
        for attribute in self.FIELDS:
            self.assertIn(attribute, context['fields'])
        self.assertEqual(
            context['user_accounts_api_url'], reverse("accounts_api", kwargs={'username': self.user.username})
        )
        self.assertEqual(
            context['user_preferences_api_url'], reverse('preferences_api', kwargs={'username': self.user.username})
        )
        # The duplicate-provider error seeded in setUp surfaces here, and
        # providers are listed alphabetically.
        self.assertEqual(context['duplicate_provider'], 'facebook')
        self.assertEqual(context['auth']['providers'][0]['name'], 'Facebook')
        self.assertEqual(context['auth']['providers'][1]['name'], 'Google')
    def test_view(self):
        """All editable fields are present in the rendered account settings page."""
        view_path = reverse('account_settings')
        response = self.client.get(path=view_path)
        for attribute in self.FIELDS:
            self.assertIn(attribute, response.content)
    def test_header_with_programs_listing_enabled(self):
        """
        Verify that tabs header will be shown while program listing is enabled.
        """
        self.create_programs_config(program_listing_enabled=True)
        view_path = reverse('account_settings')
        response = self.client.get(path=view_path)
        self.assertContains(response, '<li class="tab-nav-item">')
    def test_header_with_programs_listing_disabled(self):
        """
        Verify that nav header will be shown while program listing is disabled.
        """
        self.create_programs_config(program_listing_enabled=False)
        view_path = reverse('account_settings')
        response = self.client.get(path=view_path)
        self.assertContains(response, '<li class="item nav-global-01">')
    def test_commerce_order_detail(self):
        """A completed order from the mocked commerce API maps to the expected summary dict."""
        with mock_get_orders():
            order_detail = get_user_orders(self.user)
        user_order = mock_get_orders.default_response['results'][0]
        expected = [
            {
                'number': user_order['number'],
                'price': user_order['total_excl_tax'],
                'title': user_order['lines'][0]['title'],
                'order_date': 'Jan 01, 2016',
                'receipt_url': '/commerce/checkout/receipt/?orderNum=' + user_order['number']
            }
        ]
        self.assertEqual(order_detail, expected)
    def test_commerce_order_detail_exception(self):
        """A 404 from the commerce API yields an empty order list rather than an error."""
        with mock_get_orders(exception=exceptions.HttpNotFoundError):
            order_detail = get_user_orders(self.user)
        self.assertEqual(order_detail, [])
    def test_incomplete_order_detail(self):
        """Orders that are not complete are filtered out of the summary."""
        response = {
            'results': [
                factories.OrderFactory(
                    status='Incomplete',
                    lines=[
                        factories.OrderLineFactory(
                            product=factories.ProductFactory(attribute_values=[factories.ProductAttributeFactory()])
                        )
                    ]
                )
            ]
        }
        with mock_get_orders(response=response):
            order_detail = get_user_orders(self.user)
        self.assertEqual(order_detail, [])
    def test_honor_course_order_detail(self):
        """Orders for honor certificates are excluded from the summary."""
        response = {
            'results': [
                factories.OrderFactory(
                    lines=[
                        factories.OrderLineFactory(
                            product=factories.ProductFactory(attribute_values=[factories.ProductAttributeFactory(
                                name='certificate_type',
                                value='honor'
                            )])
                        )
                    ]
                )
            ]
        }
        with mock_get_orders(response=response):
            order_detail = get_user_orders(self.user)
        self.assertEqual(order_detail, [])
@override_settings(SITE_NAME=settings.MICROSITE_LOGISTRATION_HOSTNAME)
class MicrositeLogistrationTests(TestCase):
    """
    Test to validate that microsites can display the logistration page
    """
    def test_login_page(self):
        """
        Make sure that we get the expected logistration page on our specialized
        microsite
        """
        resp = self.client.get(
            reverse('signin_user'),
            HTTP_HOST=settings.MICROSITE_LOGISTRATION_HOSTNAME
        )
        self.assertEqual(resp.status_code, 200)
        # The combined login/registration container marks the new-style page.
        self.assertIn('<div id="login-and-registration-container"', resp.content)
    def test_registration_page(self):
        """
        Make sure that we get the expected logistration page on our specialized
        microsite
        """
        resp = self.client.get(
            reverse('register_user'),
            HTTP_HOST=settings.MICROSITE_LOGISTRATION_HOSTNAME
        )
        self.assertEqual(resp.status_code, 200)
        self.assertIn('<div id="login-and-registration-container"', resp.content)
    @override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME)
    def test_no_override(self):
        """
        Make sure we get the old style login/registration if we don't override
        """
        # A microsite without the logistration override serves the legacy pages.
        resp = self.client.get(
            reverse('signin_user'),
            HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
        )
        self.assertEqual(resp.status_code, 200)
        self.assertNotIn('<div id="login-and-registration-container"', resp.content)
        resp = self.client.get(
            reverse('register_user'),
            HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
        )
        self.assertEqual(resp.status_code, 200)
        self.assertNotIn('<div id="login-and-registration-container"', resp.content)
| waheedahmed/edx-platform | lms/djangoapps/student_account/test/test_views.py | Python | agpl-3.0 | 25,209 | [
"VisIt"
] | ad9786ad003f77fc483da6a7132f97f8dce235a134802448ad5f26efa4725867 |
#!/usr/bin/env python
# Time-average UVEL/VVEL/THETA over the 'mean*.nc' MITgcm MNC output of one
# (or several) runs and write the result to a single NetCDF file.
import numpy as np
import matplotlib.pyplot as plt
import MITgcmutils as mit
import scipy.io.netcdf as netcdf
import sys
plt.ion()
# Experiment suffixes to average over; paths are '../run10_p<exp>/mnc_test_*/'.
iexp = ['6']
dir0 = '../run10_p'
dir1 = dir0 + iexp[0] + '/mnc_test_*/'
#file_s = 'state*.nc'
file_m = 'mean*.nc'
file_g = 'grid*.nc'
#f1 = netcdf.netcdf_file(dir1 + file_m,'r')
# --- grid geometry (cell fractions, axes, depths) from the grid file ---
fg = mit.mnc_files(dir1 + file_g)
hfacc = fg.variables['HFacC'][:,:,:]
hfacw = fg.variables['HFacW'][:,:,:]
hfacs = fg.variables['HFacS'][:,:,:]
xx = fg.variables['X'][:]
yy = fg.variables['Y'][:]
xp1 = fg.variables['Xp1'][:]
yp1 = fg.variables['Yp1'][:]
# Z/Zl are negative depths in the file; flip sign so zz, zl are positive.
zz = -fg.variables['Z'][:]
zl = -fg.variables['Zl'][:]
fg.close()
xg,yg = np.meshgrid(xx,yy)
Lx = xp1[-1]
Ly = yp1[-1]
Lz = 2*zz[-1]-zl[-1]
si_x = len(xx)
si_y = len(yy)
si_z = len(zz)
dx = xx[1] - xx[0]
dy = yy[1] - yy[0]
# Running sums for the time mean, accumulated over all experiments/records.
uvel_me = np.zeros((si_z,si_y,si_x))
vvel_me = np.zeros((si_z,si_y,si_x))
theta_me = np.zeros((si_z,si_y,si_x))
nme = 0
for ie in range(0,len(iexp)):
  dir1 = dir0 + iexp[ie] + '/mnc_test_*/'
  f1 = mit.mnc_files(dir1 + file_m)
  # NOTE(review): 'it' is first the iteration-number array, then reused as
  # the loop index below — shadowing; presumably intentional but confusing.
  it = f1.variables['iter'][:]
  si_t = len(it)
  it0 = 0
  it1 = si_t-1
  for it in range(it0,it1):
    uvel  = f1.variables['UVEL' ][it,:,:,:]
    vvel  = f1.variables['VVEL' ][it,:,:,:]
    theta = f1.variables['THETA'][it,:,:,:]
    uvel_me  += uvel[:si_z,:si_y,:si_x]
    vvel_me  += vvel[:si_z,:si_y,:si_x]
    theta_me += theta[:si_z,:si_y,:si_x]
    nme += 1
  f1.close()
uvel_me  /= nme
vvel_me  /= nme
theta_me /= nme
# --- write the averages to a new NetCDF file ---
# create netcdf file
fileout = dir0 + iexp[0] + '/average.nc'
f = netcdf.netcdf_file(fileout,'w')
f.createDimension('iter',1)
f.createDimension('Z',si_z)
f.createDimension('Y',si_y)
f.createDimension('X',si_x)
ito = f.createVariable('iter', 'f', ('iter',))
zpo = f.createVariable('Z', 'f', ('Z',))
ypo = f.createVariable('Y', 'f', ('Y',))
xpo = f.createVariable('X', 'f', ('X',))
uo  = f.createVariable('U' , 'f', ('iter','Z','Y','X',))
vo  = f.createVariable('V' , 'f', ('iter','Z','Y','X',))
to  = f.createVariable('Temp' , 'f', ('iter','Z','Y','X',))
ito[0] = 1
zpo[:] = zz
ypo[:] = yy
xpo[:] = xx
uo[:,:,:] = uvel_me[:,:,:]
vo[:,:,:] = vvel_me[:,:,:]
to[:,:,:] = theta_me[:,:,:]
f.close()
print ("nb points in file: {0}".format(nme))
| bderembl/mitgcm_configs | natl_square/script/compute_mean.py | Python | mit | 2,255 | [
"NetCDF"
] | 9b57fcdbdda27f4baeb676c564ff13c14b8b22726d5848065eaf2d65ff728b2c |
#!/usr/bin/env python
"""
Remove the outputs produced by a transformation
Usage:
dirac-transformation-remove-output transID [transID] [transID]
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import sys
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
@DIRACScript()
def main():
    """Remove the outputs produced by each transformation ID given on the command line."""
    Script.parseCommandLine()
    args = Script.getPositionalArgs()
    if not args:
        Script.showHelp()
    transIDs = [int(arg) for arg in args]
    from DIRAC.TransformationSystem.Agent.TransformationCleaningAgent import TransformationCleaningAgent
    from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
    # The cleaning agent does the actual output removal; it is configured to
    # run under the standard TransformationCleaningAgent section.
    agent = TransformationCleaningAgent('Transformation/TransformationCleaningAgent',
                                        'Transformation/TransformationCleaningAgent',
                                        'dirac-transformation-remove-output')
    agent.initialize()
    # NOTE(review): 'client' is never used below — presumably dead code, but
    # left in place in case instantiation has side effects; verify and remove.
    client = TransformationClient()
    for transID in transIDs:
        agent.removeTransformationOutput(transID)
if __name__ == "__main__":
    main()
| yujikato/DIRAC | src/DIRAC/TransformationSystem/scripts/dirac_transformation_remove_output.py | Python | gpl-3.0 | 1,144 | [
"DIRAC"
] | ea2dc47f11e812c0d30296311c35b1476b50ffe0aa47a40e02757a25b7238652 |
import sys
from engine.resources.scene import Point3D
from resources.datatables import GalaxyStatus
from services.sui.SUIService import MessageBoxType
from services.sui.SUIWindow import Trigger
from java.util import Vector
import main.NGECore
def setup():
    """Script setup hook; this command needs no one-time initialisation."""
    return None
def run(core, actor, target, commandString):
    """Entry point for the /server chat command: delegate to serverCommand below."""
    core.scriptService.callScript("scripts/commands/", "server", "serverCommand", core, actor);
    return
def serverCommand (core, owner):
    """Open a SUI list box offering the available server admin commands to *owner*."""
    actor = owner.getSlottedObject('ghost')
    window = core.suiService.createSUIWindow('Script.listBox', owner, owner, 0)
    window.setProperty('bg.caption.lblTitle:Text', 'ProjectSWG')
    window.setProperty('Prompt.lblPrompt:Text', 'Server Commands')
    window.setProperty('btnOk:visible', 'True')
    window.setProperty('btnCancel:visible', 'True')
    window.setProperty('btnOk:Text', '@ok')
    window.setProperty('btnCancel:Text', '@cancel')
    # The selected row index is returned to serverCommandCallback on OK.
    returnList = Vector()
    returnList.add('List.lstList:SelectedRow')
    window.addHandler(0, '', Trigger.TRIGGER_OK, returnList, serverCommandCallback)
    # Menu item ids (0-4) map to the handlers dispatched in serverCommandCallback.
    window.addListBoxMenuItem('Lock Server', 0)
    window.addListBoxMenuItem('Unlock Server', 1)
    window.addListBoxMenuItem('Shutdown Server (15 Minute Countdown)', 2)
    window.addListBoxMenuItem('Stop Server (10 Seconds)', 3)
    window.addListBoxMenuItem('Active Connections', 4)
    core.suiService.openSUIWindow(window)
def serverCommandCallback(owner, window, eventType, returnList):
    """Dispatch the selected server-command row to its handler."""
    if returnList.size() == 0:
        owner.sendSystemMessage('NULL', 0)
        return
    # Row index (as a string) -> handler function.
    dispatch = {
        '0': lockedHandler,
        '1': onlineHandler,
        '2': shutdownHandler,
        '3': stopHandler,
        '4': connectionHandler,
    }
    handler = dispatch.get(returnList.get(0))
    if handler is not None:
        handler(owner)
    return
def lockedHandler(owner):
    """Put the galaxy into Locked status and confirm to the invoking GM."""
    core = main.NGECore.getInstance()
    core.setGalaxyStatus(GalaxyStatus.Locked)
    owner.sendSystemMessage(' \\#FE2EF7 [GM] \\#FFFFFF Server lockServer: Command completed successfully. Server is now in Locked Status.', 0)
    return
def onlineHandler(owner):
    """Return the galaxy to Online status and confirm to the invoking GM."""
    core = main.NGECore.getInstance()
    core.setGalaxyStatus(GalaxyStatus.Online)
    owner.sendSystemMessage(' \\#FE2EF7 [GM] \\#FFFFFF Server unlockServer: Command completed successfully. Server is now in Online Status.', 0)
    return
def shutdownHandler(owner):
    """Start the graceful (countdown) server shutdown and confirm to the GM."""
    core = main.NGECore.getInstance()
    core.initiateShutdown()
    owner.sendSystemMessage(' \\#FE2EF7 [GM] \\#FFFFFF Server shutdown: Command completed successfully. Server shutdown initiated.', 0)
    return
def stopHandler(owner):
    """Start the emergency (short-notice) server stop and confirm to the GM."""
    core = main.NGECore.getInstance()
    core.initiateStop()
    owner.sendSystemMessage(' \\#FE2EF7 [GM] \\#FFFFFF Server stop: Command completed successfully. Emergency server shutdown initiated.', 0)
    return
def connectionHandler(owner):
    """Report the current number of connected zone clients to the GM."""
    core = main.NGECore.getInstance()
    clients = str(core.getActiveZoneClients())
    owner.sendSystemMessage(' \\#FE2EF7 [GM] \\#FFFFFF Active Connections: Command completed successfully.', 0)
    owner.sendSystemMessage('There are currently ' + clients + ' Characters connected to the Galaxy.', 0)
    return
"Galaxy"
] | e6d7b78427a7fa9751973ef716a423b19c78d81805e62be39dfb80edc789f4c2 |
# -*- coding: utf-8 -*-
#
# Moonstone is platform for processing of medical images (DICOM).
# Copyright (C) 2009-2011 by Neppo Tecnologia da Informação LTDA
# and Aevum Softwares LTDA
#
# This file is part of Moonstone.
#
# Moonstone is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import vtk
from ..base import PluginBase
from gui.qt.resliceaction import ResliceAction
class ReslicePlugin(PluginBase):
    """ILSA plugin wrapper exposing the reslice tool through a ResliceAction."""
    def __init__(self, ilsa):
        logging.debug("In ReslicePlugin::__init__()")
        self._name = None
        self._action = ResliceAction(ilsa)
        # Register this plugin with the ILSA coordinator.
        ilsa.add(self)
        self._ilsa = ilsa
    @property
    def ilsa(self):
        logging.debug("In ReslicePlugin::ilsa()")
        return self._ilsa
    @property
    def action(self):
        logging.debug("In ReslicePlugin::action()")
        return self._action
    @property
    def name(self):
        logging.debug("In ReslicePlugin::name()")
        return self._name
    @name.setter
    def name(self, name):
        logging.debug("In ReslicePlugin::name.setter()")
        self._name = name
    def notify(self, vtkInteractorStyle=None):
        # Interaction notification hook; this plugin has nothing to update.
        logging.debug("In ReslicePlugin::notify()")
    def save(self):
        # Persistence hook; no state to save.
        logging.debug("In ReslicePlugin::save()")
    def restore(self):
        # Persistence hook; no state to restore.
        logging.debug("In ReslicePlugin::restore()")
    @property
    def description(self):
        logging.debug("In ReslicePlugin::description()")
        return "..."
    @property
    def separator(self):
        # Whether a separator should be drawn before this plugin in menus.
        logging.debug("In ReslicePlugin::separator()")
        return False
    @property
    def status(self):
        # Plugin is always enabled.
        logging.debug("In ReslicePlugin::status()")
        return True
| aevum/moonstone | src/moonstone/ilsa/plugins/reslice/reslice.py | Python | lgpl-3.0 | 2,331 | [
"VTK"
] | 36ca6457aca25bfbd9fedfdec5961635f915710eb039567fc1645f5258795fd6 |
# -*- coding: utf-8 -*-
#
# Moonstone is platform for processing of medical images (DICOM).
# Copyright (C) 2009-2011 by Neppo Tecnologia da Informação LTDA
# and Aevum Softwares LTDA
#
# This file is part of Moonstone.
#
# Moonstone is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
import yaml
import time
import shutil
from multiprocessing import Process, Value, Pipe, Lock, Queue
import traceback
import sys
import vtk
import gdcm
import vtkgdcm
from PySide import QtGui
try:
from moonstone.bloodstone.importer.database import dbutils
from moonstone.bloodstone.importer.database.patient import Patient
from moonstone.bloodstone.importer.database.serie import Serie
from moonstone.bloodstone.importer.database.study import Study
from moonstone.utils.strUtils import hashStr
from moonstone.utils import constant
except:
from database import dbutils
from database.patient import Patient
from database.serie import Serie
from database.study import Study
from utils.strUtils import hashStr
from utils import constant
from importitemdelegate import ImportItemDelegate
# Open the importer database connection as soon as this module is imported.
dbutils.DBUtils().createConnection()
class Importer(object):
    """Drives DICOM directory scanning and series import in worker processes.

    Scanning and importing run in separate multiprocessing.Process workers;
    progress flows back through self.queue and stop/cancel requests flow to
    the worker through self.stopCancelQueue.
    """
    def __init__(self, ):
        logging.debug("In Importer::__init__()")
        self._directories = []
        # serieUID -> serie dict, updated from worker progress messages.
        self.series = {}
        self.changed = 0
        self.queue = Queue()
        self.stopCancelQueue = Queue()
        self._parentConn = None
        # 0 = running, 1 = scan finished, 2 = import finished.
        self.finished = 0
    def clearData(self):
        logging.debug("In Importer::clearData()")
    def loadDirectory(self, directory, recursive):
        """Start a background scan of *directory* for DICOM series."""
        logging.debug("In Importer::loadDirectory()")
        self.finished = 0
        prov = []
        # Drain any stale stop/cancel requests before starting a new worker.
        while not self.stopCancelQueue.empty():
            self.stopCancelQueue.get()
        self.process = Process( target=scanDirectory,
                       args = (directory, recursive,
                               self.series,
                               self.queue, self.stopCancelQueue))
        self.process.start()
        if not directory in self._directories:
            prov.append(directory)
        self._directories = self._directories+prov
    def stop(self):
        # Ask the worker to stop the current item only.
        self.stopCancelQueue.put("stop")
    def cancel(self):
        # Ask the worker to abort and roll back everything.
        self.stopCancelQueue.put("cancel")
    def updateSeries(self):
        """Poll one progress message from the worker and merge it into self.series."""
        logging.debug("In Importer::updateSeries()")
        if not self.queue.empty():
            key, value = self.queue.get()
            if key == "finished-1":
                self.finished = 1
            elif key == "finished-2":
                self.finished = 2
            else:
                self.series.update(value)
    def makeImport(self, indexes):
        """Start a background import of the series identified by *indexes*."""
        logging.debug("In Importer::makeImport()")
        self.finished = 0
        while not self.queue.empty():
            self.queue.get()
        while not self.stopCancelQueue.empty():
            self.stopCancelQueue.get()
        self.process = Process(target=processImport,
                       args = (indexes,
                               self.series,
                               self.queue, self.stopCancelQueue))
        self.process.start()
def processImport(indexes, series, queue, stopCancelQueue):
    """Worker entry point: import each selected series, reporting progress.

    Runs in a child process. After every pipeline stage the serie's
    "progress" is bumped and the whole series dict is pushed on *queue*;
    a pending stop/cancel request aborts between stages.
    """
    for index in indexes:
        try:
            serie = series[index]
            serie["progress"] = 5
            queue.put(["series", series])
            if not stopCancelQueue.empty():
                break
            sortSerie(serie)
            serie["progress"] = 25
            queue.put(["series", series])
            if not stopCancelQueue.empty():
                break
            copyFiles(serie)
            serie["progress"] = 50
            queue.put(["series", series])
            if not stopCancelQueue.empty():
                break
            createVTI(serie)
            serie["progress"] = 90
            queue.put(["series", series])
            if not stopCancelQueue.empty():
                break
            createYAMLFile(serie)
            serie["progress"] = 95
            queue.put(["series", series])
            if not stopCancelQueue.empty():
                break
            updateDatabase(serie)
            serie["progress"] = 100
            queue.put(["series", series])
            if not stopCancelQueue.empty():
                break
            serie["error"] = False
        except:
            # Any stage failure marks the serie and undoes its partial import.
            serie["error"] = True
            rollback(serie)
    # A "stop" rolls back only the interrupted serie; "cancel" rolls back all.
    if not stopCancelQueue.empty():
        msg = stopCancelQueue.get()
        if msg == "stop":
            rollback(series[index])
            series[index]["error"] = True
            series[index]["progress"] = 0
            queue.put(["series", series])
        elif msg == "cancel":
            for index in indexes:
                rollback(series[index])
                series[index]["error"] = True
                series[index]["progress"] = 0
                queue.put(["series", series])
    queue.put(["finished-2", True])
def rollback(serie):
    """Undo a (partial) import: delete the serie's files and orphaned DB rows.

    Removes the serie directory, then its database row; if that leaves the
    image directory, study or patient without any series, those are removed
    as well (delete() returns filesystem paths to clean up).
    """
    if os.path.exists(serie["path"]):
        shutil.rmtree(serie["path"])
    seriesDB = list(Serie.selectBy(uid=serie["uid"], description=serie["serieDescription"]))
    if seriesDB:
        serieDB = list(seriesDB)[0]
        study = serieDB.study
        patient = study.patient
        toRemove = serieDB.delete()
        # No series left with this UID -> the shared image directory can go.
        serieList = list(Serie.selectBy(uid = serieDB.uid))
        if not serieList:
            imagePath = os.path.join(patient.directory, serieDB.uid)
            if os.path.exists(imagePath):
                shutil.rmtree(imagePath)
        # Cascade: remove the study, then the patient, once they are empty.
        serieList = list(Serie.selectBy(study=study))
        if not serieList:
            toRemove = study.delete()
            for remove in toRemove:
                if os.path.exists(remove):
                    shutil.rmtree(remove)
            studyList = list(Study.selectBy(patient=patient))
            if not studyList:
                toRemove =patient.delete()
                for remove in toRemove:
                    if os.path.exists(remove):
                        shutil.rmtree(remove)
def createYAMLFile(serie):
    """Write the serie's session YAML (VTI path + screen list) into its directory.

    Also records the YAML's patient-relative path in serie["yaml"] for the
    database row created later in updateDatabase().
    """
    logging.debug("In Importer::createYAMLFile()")
    outputFile = os.path.join(serie["path"], "{0}{1}".format(hashStr(serie["uid"]), ".yaml"))
    serie["yaml"] = os.path.join(
        "{0}{1}".format(hashStr(serie["uid"]), hashStr(serie["serieDescription"])),
        "{0}{1}".format(hashStr(serie["uid"]),".yaml"))
    vtiPath = "{0}{1}/main/main.vti".format(hashStr(serie["uid"]),hashStr(serie["serieDescription"]))
    # (Removed an unused 4x4 identity 'matrix' local that was never referenced.)
    mScreens = []
    save = {"vti": vtiPath,
            "mScreens" : mScreens}
    mScreens.append({"name": QtGui.QApplication.translate("Importer",
                                                          "Main",
                                                          None,
                                                          QtGui.QApplication.UnicodeUTF8)})
    # Use a context manager so the file is closed even if yaml.dump raises.
    with open(outputFile, "w") as f:
        yaml.dump(save, f)
def sortSerie(serie):
    """Sort the serie's DICOM files by Image Position (Patient) and derive z-spacing.

    If GDCM's IPP sorter cannot compute a spacing, fall back to the z
    difference of the first two slices' position tags; on any error the
    spacing stays at its current value (1.0 when sorting failed).
    """
    logging.debug("In Importer::sortSerie()")
    sorter = gdcm.IPPSorter()
    sorter.SetComputeZSpacing(True)
    sorter.SetZSpacingTolerance(1e-3)
    result = sorter.Sort(serie["files"])
    serie["zspacing"] = sorter.GetZSpacing() if result else 1.0
    if sorter.GetFilenames():
        serie["files"] = sorter.GetFilenames()
    try:
        if not serie["zspacing"]:
            # Fallback: z component (index 2) of tag (0020,0032) of slice 0 and 1.
            reader = gdcm.ImageReader()
            reader.SetFileName(serie["files"][0])
            if reader.Read():
                dataset = reader.GetFile().GetDataSet()
                z1 = float(retrieveDicomTag(0x0020, 0x0032, dataset).split("\\")[2])
                reader = gdcm.ImageReader()
                reader.SetFileName(serie["files"][1])
                if reader.Read():
                    dataset = reader.GetFile().GetDataSet()
                    z2 = float(retrieveDicomTag(0x0020, 0x0032, dataset).split("\\")[2])
                    serie["zspacing"] = abs(z2-z1)
    except:
        return
def copyFiles(serie):
    """Copy the serie's DICOM files into the moonstone data tree.

    Layout: <INSTALL_DIR>/data/<hash(patientUID)>/<hash(uid)>/images/<n>.dcm,
    with serie["path"] / serie["patientPath"] recorded for later stages.
    """
    logging.debug("In Importer::copyFiles()")
    basePath = os.path.join(constant.INSTALL_DIR, "data")
    patientPath = os.path.join(basePath, hashStr(serie["patientUID"]))
    seriePath = os.path.join(patientPath,"{0}{1}".format(hashStr(serie["uid"]), hashStr(serie["serieDescription"])))
    imagePath = os.path.join(patientPath,hashStr(serie["uid"]), "images/")
    serie["path"] = seriePath
    serie["patientPath"] = patientPath
    if not os.path.exists(imagePath):
        os.makedirs(imagePath)
    # Files are renamed to their (sorted) sequence index.
    for i, dicomPath in enumerate(serie["files"]):
        filePath = os.path.join(imagePath, "{0}.dcm".format(i))
        shutil.copy(dicomPath, filePath)
def createVTI(serie):
    """Convert the serie's DICOM slices into VTK XML image files (main<i>.vti).

    Slices are read in chunks of 100 to bound memory use; each chunk gets
    the computed z-spacing applied, is resliced through a y-flip matrix
    centered on the volume, and is written to <serie path>/main/.
    """
    logging.debug("In Importer::createVTI()")
    size = 100
    numberOfParts = int(len(serie["files"])/size) +1
    reader = vtkgdcm.vtkGDCMImageReader()
    reader.ReleaseDataFlagOn()
    reslice = vtk.vtkImageReslice()
    change = vtk.vtkImageChangeInformation()
    change.ReleaseDataFlagOn()
    reslice.ReleaseDataFlagOn()
    writer = vtk.vtkXMLImageDataWriter()
    writer.ReleaseDataFlagOn()
    for i in range(numberOfParts):
        filenames = vtk.vtkStringArray()
        limit = (i+1)*size
        if limit > len(serie["files"]):
            limit = len(serie["files"])
        for filename in serie["files"][i*size:limit]:
            filenames.InsertNextValue(filename)
        reader.SetFileNames(filenames)
        reader.Update()
        extent = reader.GetOutput().GetExtent()
        #serie["zspacing"] = len(serie["files"])/(extent[5]-extent[4])
        spacing = reader.GetOutput().GetSpacing()
        change.SetInputConnection(reader.GetOutputPort())
        #change.SetOutputOrigin(reader.GetOutput().GetSpacing())
        # Keep in-plane spacing but override z with the IPP-derived spacing.
        change.SetOutputSpacing(spacing[0], spacing[1], serie["zspacing"])
        change.Update()
        imagedata = change.GetOutput()
        change.GetInput().ReleaseData()
        path = os.path.join(serie["path"],"main")
        if not os.path.exists(path):
            os.makedirs(path)
        extent = imagedata.GetExtent()
        spacing = imagedata.GetSpacing()
        origin = imagedata.GetOrigin()
        center = (
            origin[0] + spacing[0] * 0.5 * (extent[0] + extent[1]),
            origin[1] + spacing[1] * 0.5 * (extent[2] + extent[3]),
            origin[2] + spacing[2] * 0.5 * (extent[4] + extent[5])
        )
        resliceAxes = vtk.vtkMatrix4x4()
        # Axes matrix flips the y axis (DICOM vs VTK row order), pivoting
        # about the volume center set below.
        vtkMatrix = (
            1, 0, 0, 0,
            0, -1, 0, 0,
            0, 0, 1, 0,
            0, 0, 0, 1
        )
        resliceAxes.DeepCopy(vtkMatrix)
        resliceAxes.SetElement(0, 3, center[0])
        resliceAxes.SetElement(1, 3, center[1])
        resliceAxes.SetElement(2, 3, center[2])
        reslice.SetInput(imagedata)
        reslice.SetInformationInput(imagedata)
        reslice.SetResliceAxes(resliceAxes)
        reslice.SetOutputDimensionality(3)
        reslice.Update()
        filename = os.path.join(path, "main{0}.vti".format(i))
        imagedata = reslice.GetOutput()
        reslice.GetInput().ReleaseData()
        writer.SetInput(imagedata)
        writer.SetFileName(filename)
        writer.Write()
def updateDatabase(serie):
    """Create or update the Patient/Study/Serie rows for an imported serie.

    Patient and Study are looked up by UID and created when missing; the
    Serie row is updated in place if a row with the same uid+description
    already exists, otherwise inserted.
    """
    logging.debug("In Importer::updateDatabase()")
    patient = list(Patient.selectBy(uid=serie["patientUID"]))
    if not patient:
        patient = Patient(uid=serie["patientUID"],
                          name=serie["patientName"],
                          birthdate=serie["patientBirthdate"],
                          sex=serie["patientSex"],
                          tmp = False,
                          directory=serie["patientPath"])
    else:
        patient = patient[0]
    study = list(Study.selectBy(uid=serie["studyUID"]))
    if not study:
        study = Study(uid = serie["studyUID"],
                      modality=serie["studyModality"],
                      description=serie["studyDescription"],
                      institution=serie["studyInstitution"],
                      tmp=False,
                      patient=patient)
    else:
        study = study[0]
    serieDB = list(Serie.selectBy(uid=serie["uid"], description=serie["serieDescription"]))
    if serieDB:
        serieDB = serieDB[0]
        serieDB.file = serie["yaml"]
        serieDB.description = serie["serieDescription"]
        serieDB.thickness = serie["serieThickness"]
        serieDB.size = serie["serieSize"]
        serieDB.zSpacing = serie["zspacing"]
        serieDB.dicomImages = len(serie["files"])
    else:
        Serie(uid=serie["uid"],
              file=serie["yaml"],
              description=serie["serieDescription"],
              thickness=serie["serieThickness"],
              size=serie["serieSize"],
              zSpacing=serie["zspacing"],
              tmp = False,
              dicomImages = len(serie["files"]),
              study=study)
def scanDirectory(directory, recursive, series, queue, stopCancelQueue):
    """Worker entry point: scan *directory* for DICOM series and report them.

    In recursive mode each folder's findings are pushed as a fresh dict;
    otherwise the shared *series* dict is filled and pushed once. A "stop"
    request aborts the recursive walk.
    """
    logging.debug("In Importer::scanDirectory()")
    while not queue.empty():
        queue.get()
    if recursive:
        for path, folders, files in os.walk(directory):
            # NOTE(review): only "stop" breaks here — a "cancel" message is
            # consumed and scanning continues; confirm that is intended.
            if not stopCancelQueue.empty() :
                msg = stopCancelQueue.get()
                if msg == "stop":
                    break
            if files:
                series = {}
                scanFiles(files, path, series)
                queue.put(["series", series])
    else:
        files = os.listdir(directory)
        scanFiles(files, directory, series)
        queue.put(["series", series])
    queue.put(["finished-1", True])
def scanFiles(files, path, series):
    """Probe each file in *files* (under *path*) as DICOM and group by series UID.

    For each readable DICOM file, either a new serie dict (patient/study/serie
    metadata pulled from its tags) is added to *series*, or the file is
    appended to the already-known serie. Unreadable files are logged and
    skipped.
    """
    logging.debug("In Importer::scanFiles()")
    for filepath in files:
        try:
            reader = gdcm.ImageReader()
            inputFilepath = normalizeString(os.path.abspath(os.path.join(path, filepath)))
            reader.SetFileName(inputFilepath)
            if reader.Read():
                dataset = reader.GetFile().GetDataSet()
                serieUID = retrieveDicomTag(0x0020, 0x000E, dataset)
                if not series.has_key(serieUID):
                    serieDict = {}
                    serieDict["serieDescription"] = retrieveDicomTag(0x0008, 0x103E, dataset)
                    serieDict["serieThickness"] = retrieveDicomTag(0x0018, 0x0050, dataset)
                    if not serieDict["serieThickness"]:
                        serieDict["serieThickness"] = "0.0"
                    serieDict["serieSize"] = retrieveDicomTag(0x0070, 0x0020, dataset)
                    serieDict["studyUID"] = retrieveDicomTag(0x0020, 0x000D, dataset)
                    serieDict["studyModality"] = retrieveDicomTag(0x0040, 0x0060, dataset)
                    serieDict["studyDescription"] = retrieveDicomTag(0x0008, 0x1030, dataset)
                    serieDict["studyInstitution"] = retrieveDicomTag(0x0008, 0x0080, dataset)
                    serieDict["patientSex"] = retrieveDicomTag(0x0010, 0x0040, dataset)
                    serieDict["patientName"] = retrieveDicomTag(0x0010, 0x0010, dataset)
                    serieDict["patientUID"] = retrieveDicomTag(0x0010, 0x0020, dataset)
                    serieDict["progress"] = 0
                    # Normalize the birthdate tag to ISO "YYYY-MM-DD" (or None).
                    serieDict["patientBirthdate"] = retrieveDicomTag(0x0010, 0x0030, dataset)
                    serieDict["patientBirthdate"] = serieDict["patientBirthdate"].replace(" ", "")
                    serieDict["patientBirthdate"] = serieDict["patientBirthdate"].replace("/", "")
                    serieDict["patientBirthdate"] = serieDict["patientBirthdate"].replace("-", "")
                    serieDict["error"] = False
                    serieDict["path"] = ""
                    if serieDict["patientBirthdate"]:
                        serieDict["patientBirthdate"] = "{0}-{1}-{2}".format(
                            serieDict["patientBirthdate"][:4],
                            serieDict["patientBirthdate"][4:6],
                            serieDict["patientBirthdate"][6:8])
                    else:
                        serieDict["patientBirthdate"] = None
                    serieDict["files"] = [inputFilepath]
                    serieDict["uid"] = serieUID
                    serieDict["exists"] = serieExists(serieUID, serieDict["serieDescription"])
                    # Already-imported series start unchecked in the import list.
                    if serieDict["exists"]:
                        serieDict["checked"] = False
                    else:
                        serieDict["checked"] = True
                    series[serieUID] = serieDict
                else:
                    # Bug fix: append to the serie this file belongs to, not to
                    # whatever serieDict was left over from a previous iteration
                    # (which mis-filed interleaved series and raised NameError —
                    # silently swallowed below — when the serie pre-existed).
                    series[serieUID]["files"] = series[serieUID]["files"] + [inputFilepath]
            else:
                logging.debug("Could not file as DICOM2: {0}".format(filepath))
        except:
            traceback.print_exc(file=sys.stdout)
            logging.debug("Error loading file: {0}".format(filepath))
def serieExists(serieID, description):
    """Return True if a serie with this DICOM UID and description is already imported."""
    # Use parameterized selectBy (as rollback/updateDatabase do) instead of a
    # string-formatted WHERE clause, which broke on descriptions containing
    # quotes and was an SQL injection vector.
    return bool(list(Serie.selectBy(uid=serieID, description=description)))
def retrieveDicomTag(a, b, dataset):
    """Return the value of DICOM tag (a, b) from *dataset* as a string, or '' if absent/empty."""
    logging.debug("In Importer::retrieveDicomTag()")
    tag = gdcm.Tag(a, b)
    if dataset.FindDataElement(tag):
        data = dataset.GetDataElement(tag).GetValue()
        if data:
            return str(data)
    return ''
def normalizeString(inputPath):
    """Return *inputPath* coerced to a byte string suitable for GDCM file APIs.

    On Windows, unicode paths are encoded as latin-1 and other values are
    passed through str(). On every other platform the path is returned
    unchanged. (Previously the function fell off the end and returned None
    on all non-win32 platforms, breaking every caller there.)
    """
    if (sys.platform == 'win32'):
        if type(inputPath) == unicode:
            return inputPath.encode('latin-1')
        else:
            return str(inputPath)
    return inputPath
| zoulianmp/moonstone-pyqt | moonstone/bloodstone/importer/importer.py | Python | lgpl-3.0 | 18,547 | [
"VTK"
] | 6ba70c37b2df58ee03fba682979b8f43f02f0a13522b849ef057ac5efc734400 |
#!/usr/bin/env python
"""Try out the N-queens problem for an arbitrary number of queens.
This program uses Genetic Algorithms to try to solve the N queens
problem, in which you place N different queens on an N by N chess
board such that no two queens will be attacking each other.
We represent queens on the board as a tuple like (1, 2, 3, 4, 5)
which would be 5 queens diaganol across the board.
Usage:
python test_GAQueens.py <Number of Queens to place>
where <Number of Queens to place> is just a number specifying how many
queens you want to try to calculate this for.
When called as part of the Biopython unit test suite, 5 queens are used.
"""
# standard library
from __future__ import print_function
import sys
import random
import time
import unittest
# Biopython
from Bio import Alphabet
# Genetic Algorithm stuff
from Bio.GA.Evolver import GenerationEvolver
from Bio.GA import Organism
from Bio.GA.Mutation.Simple import ConversionMutation
from Bio.GA.Crossover.Point import SinglePointCrossover
from Bio.GA.Selection.RouletteWheel import RouletteWheelSelection
from Bio.GA.Selection.Tournament import TournamentSelection
VERBOSE = 0
def main(num_queens):
    """Evolve a random population until an N-queens solution appears, then print it.

    Arguments:
    o num_queens -- board size / number of queens to place (1-9).
    """
    print("Calculating for %s queens..." % num_queens)
    num_orgs = 1000
    print("Generating an initial population of %s organisms..." % num_orgs)
    queen_alphabet = QueensAlphabet(num_queens)
    start_population = Organism.random_population(queen_alphabet, num_queens,
                                                  num_orgs, queens_fitness)
    print("Evolving the population and searching for a solution...")
    mutator = QueensMutation(mutation_rate=0.05)
    crossover = QueensCrossover(queens_fitness, crossover_prob=.2,
                                max_crossover_size=4)
    repair = QueensRepair()
    # rw_selector = RouletteWheelSelection(mutator, crossover, repair)
    t_selector = TournamentSelection(mutator, crossover, repair, 5)
    start_time = time.ctime(time.time())
    evolver = GenerationEvolver(start_population, t_selector)
    evolved_pop = evolver.evolve(queens_solved)
    end_time = time.ctime(time.time())
    # Collect every distinct organism that solved the board.
    unique_solutions = []
    for org in evolved_pop:
        if org.fitness == num_queens:
            if org not in unique_solutions:
                unique_solutions.append(org)
    if VERBOSE:
        print("Search started at %s and ended at %s" % (start_time, end_time))
    # Bug fix: the loop used a fresh variable 'orgm' but printed/displayed the
    # stale 'org' left over from the collection loop above, so every line and
    # board showed the same (last-seen) organism.
    for org in unique_solutions:
        print("We did it! %s" % org)
        display_board(org.genome)
def display_board(genome):
    """Print an ASCII rendering of *genome* as a chess board.

    Each genome position is a column and its value is the row occupied by
    that column's queen. (Layout inspired by the queens.py demo script.)
    """
    border = '+-' + '--' * len(genome) + '+'
    print(border)
    for row in range(len(genome)):
        cells = ''.join('Q' if item == row else '.' for item in genome)
        print('|' + cells + '|')
    print(border)
def queens_solved(organisms):
    """Return 1 if some organism in the population solves the board, else 0.

    A solved board has fitness equal to its genome length, i.e. one
    unattacked queen per column.
    """
    return int(any(org.fitness == len(org.genome) for org in organisms))
def queens_fitness(genome):
    """Return the number of unattacked queens on the board.

    Arguments:
    o genome -- An indexable sequence of row digits, one per column.

    A queen is attacked if another queen shares its row or lies on one of
    its diagonals (equal row and column distance).
    """
    fitness = 0
    size = len(genome)
    for col in range(size):
        row = int(genome[col])
        attacked = False
        # Compare against every other queen on the board.
        for other_col in range(size):
            if other_col == col:
                continue
            other_row = int(genome[other_col])
            if (other_row == row or
                    abs(row - other_row) == abs(col - other_col)):
                attacked = True
                break
        if not attacked:
            fitness += 1
    return fitness
class QueensAlphabet(Alphabet.Alphabet):
    def __init__(self, num_queens):
        """Set up the genome alphabet for a board of num_queens queens.

        Each letter is a single row digit, so only boards of up to nine
        queens can be represented.
        """
        assert 0 < num_queens <= 9
        # Letters are the row digits '0' .. str(num_queens - 1).
        self.letters = "0123456789"[:num_queens]
# --- Problem specific crossover, mutation and repair operations
class QueensRepair:
    """A repair function to help create correct N-Queens solutions.
    This attempts to help generate correct solutions by offering some
    amount of repair to remove queens that are located in the same rows.
    After repair, a sequence should have no queens in the same row.
    So, if you start with something infeasible like (1, 2, 2, 3, 3, 4),
    after running it through repair you'll get a feasible individual
    like (1, 2, 5, 3, 6, 4). This should greatly reduce the number of
    individuals that need to be searched through in a population.
    """
    def __init__(self, repair_prob=1):
        """Initialize the repairer.
        Arguments:
        o repair_prob -- The probability that we'll repair a genome.
        By default, we always repair.
        """
        self._repair_prob = repair_prob
    def _get_duplicates(self, genome):
        """Return all of the letters in the genome that are duplicated.
        This checks every letter in the genome (which are the rows of
        the chessboard, in this case), and adds them to a list of duplicated
        items if there is more than one of them, and then returns this list.
        """
        duplicates = []
        for item in genome.alphabet.letters:
            if genome.count(str(item)) > 1:
                duplicates.append(item)
        return duplicates
    def _get_unused(self, genome):
        """Return all of the letters in the genome which are unused.
        This checks the letters in the genome (which are the rows on the
        chessboard) and returns all items which are not used.
        """
        unused = []
        for item in genome.alphabet.letters:
            if genome.count(str(item)) == 0:
                unused.append(item)
        return unused
    def repair(self, organism):
        """Repair the specified genome to make it feasible.
        Arguments:
        o organism -- The Organism object we are going to perform the
        repair on.
        """
        # check if we should repair or not
        repair_chance = random.random()
        if repair_chance <= self._repair_prob:
            # Repeatedly move the first duplicated queen to a free row
            # until every row holds at most one queen. Since the board is
            # square, a duplicated row always implies a free row exists.
            while True:
                # get the duplicated items we need to work on
                duplicated_items = self._get_duplicates(organism.genome)
                if len(duplicated_items) == 0:
                    break
                # take the first duplicated element, and convert it to
                # a row that is not already taken
                duplicated_pos = organism.genome.index(duplicated_items[0])
                free_rows = self._get_unused(organism.genome)
                assert len(free_rows) > 0, "Unexpected lack of empty rows"
                new_item = random.choice(free_rows)
                organism.genome[duplicated_pos] = new_item
        return organism
class QueensCrossover:
    """Crossover operation to help in solving the N-Queens problem.
    This tries to perform smarter crossovers by picking out regions of
    the genome that have high fitness.
    It scans through both genomes in the crossover with a window half the
    size of the genome, and finds the region with the highest fitness in
    both genomes. It then recombines these high fitness windows to form
    the new genome that is returned.
    """
    def __init__(self, fitness_func, crossover_prob=.1,
                 max_crossover_size=4):
        """Initialize to do N-Queens optimized crossover.
        Arguments:
        o fitness_func -- A function that can calculate the fitness of
        a genome.
        o crossover_prob -- The probability of having a crossover
        between two passed in organisms.
        o max_crossover_size -- The maximum crossover size of the 'best' region
        to search for.
        """
        self._crossover_prob = crossover_prob
        self._fitness_calc = fitness_func
        self._max_crossover_size = max_crossover_size
    def do_crossover(self, org_1, org_2):
        """Perform a crossover between two organisms.
        Returns two new organisms. With probability crossover_prob their
        genomes are recombined from the best-fitness regions of both
        parents; otherwise they are plain copies of the parents.
        """
        new_org_1 = org_1.copy()
        new_org_2 = org_2.copy()
        # determine if we have a crossover
        crossover_chance = random.random()
        if crossover_chance <= self._crossover_prob:
            # find the region of highest probability in both orgs
            best_1, rest_1 = self._find_best_region(new_org_1.genome,
                                                    make_best_larger=1)
            best_2, rest_2 = self._find_best_region(new_org_2.genome,
                                                    make_best_larger=0)
            assert len(best_1) + len(best_2) == len(rest_1) + len(rest_2), \
                "Did not preserve genome length!"
            new_org_1.genome = best_1 + best_2
            new_org_2.genome = rest_1 + rest_2
        return new_org_1, new_org_2
    def _find_best_region(self, genome, make_best_larger=1):
        """Find the best region in the given genome.
        Arguments:
        o genome -- A MutableSeq object specifying the genome of an organism
        o make_best_larger -- A flag to determine whether the best region
        we should search for should be the larger region of the split
        caused by crossover or the smaller region. This makes it easy
        to split two genomes, recombine them, and get a solution that
        makes sense.
        Returns:
        o Two MutableSeq objects. They are both half of the size of the passed
        genome. The first is the highest fitness region of the genome and the
        second is the rest of the genome.
        """
        # BUG FIX: use floor division. Under Python 3 the old
        # 'len(genome) / 2' produced a float, which made region_size a
        # float and crashed the range() call below with a TypeError.
        # Floor division matches the old Python 2 integer behaviour.
        first_region = max(len(genome) // 2, self._max_crossover_size)
        second_region = len(genome) - first_region
        if make_best_larger:
            region_size = max(first_region, second_region)
        else:
            region_size = min(first_region, second_region)
        # loop through all of the segments and find the best fitness segment
        # represent best_fitness as a three tuple with the coordinates of
        # the start and end as the first two elements, and the fitness of
        # the region as the last element. Start with a value that
        # will be overridden right away
        best_fitness = [0, 0, -1]
        for start_index in range(len(genome) - region_size):
            region_fitness = \
                self._fitness_calc(genome[start_index: start_index + region_size])
            if region_fitness > best_fitness[2]:
                best_fitness = [start_index, start_index + region_size,
                                region_fitness]
        # get the two regions and return 'em
        best_region = genome[best_fitness[0]:best_fitness[1]]
        rest_region = genome[0:best_fitness[0]] + genome[best_fitness[1]:]
        return best_region, rest_region
class QueensMutation:
    """Mutation operation to help in the N-Queens problem.
    This performs mutation, but instead of randomly mutating a single
    item to any other, it tries to mutate it to a row that is not already
    taken at some other position in the genome. This thus tries to
    generate more 'correct' mutations that will help achieve the solution.
    """
    def __init__(self, mutation_rate=0.001):
        """Initialize a mutator.
        Arguments:
        o mutation_rate -- The chance of a mutation happening at any
        position in the genome.
        """
        self._mutation_rate = mutation_rate
    def mutate(self, organism):
        """Mutate the genome trying to put in 'helpful' mutations.
        Returns a mutated copy; the passed organism is not modified.
        """
        new_org = organism.copy()
        # NOTE(review): this assignment is redundant -- gene_choices is
        # rebuilt inside the loop before it is ever read.
        gene_choices = list(new_org.genome.alphabet.letters)
        # potentially mutate any gene in the genome
        for gene_index in range(len(new_org.genome)):
            mutation_chance = random.random()
            # if we have a mutation
            if mutation_chance <= self._mutation_rate:
                # find only choices that are not already taken elsewhere
                # in the genome
                gene_choices = list(new_org.genome.alphabet.letters)
                for gene in new_org.genome:
                    if gene in gene_choices:
                        gene_choices.remove(gene)
                # if there are no choices left, we are stuck going for random
                if len(gene_choices) == 0:
                    gene_choices = list(new_org.genome.alphabet.letters)
                # get a new letter with the left-over choices
                new_letter = random.choice(gene_choices)
                new_org.genome[gene_index] = new_letter
        return new_org
# Default board size; may be overridden by a command-line argument below.
num_queens = 5
# Class defined for use via run_tests.py
class QueensTest(unittest.TestCase):
    def test_queens(self):
        """Place five queens with a GA"""
        main(num_queens)
if __name__ == "__main__":
    if len(sys.argv) == 1:
        # Run with defaults, for use as a unit test
        main(num_queens)
    elif len(sys.argv) == 2:
        # Board size supplied on the command line (range 1-9 supported,
        # enforced by the assert in QueensAlphabet).
        num_queens = int(sys.argv[1])
        main(num_queens)
    else:
        print("Usage:")
        print("python test_GAQueens.py <Number of Queens to place>\n")
        print("where <Number of Queens to place> is an optional parameter")
        print("specifying how many queens you want to try to calculate")
        print("this for. The default number of queens to place is 5.")
        print("Range 1 to 9 is supported.")
        sys.exit(1)
| updownlife/multipleK | dependencies/biopython-1.65/Tests/test_GAQueens.py | Python | gpl-2.0 | 14,662 | [
"Biopython"
] | 78dde304966058ad840d114e8d5556a8f3b15acd4aaf8f2127278c7b2faa05e7 |
from math import ceil
from numpy import apply_along_axis, full, nan
from numpy.random import choice, get_state, seed, set_state
from ._match_target_and_data import _match_target_and_data
from .compute_nd_array_margin_of_error import compute_nd_array_margin_of_error
def _match_randomly_sampled_target_and_data_to_compute_margin_of_errors(
    target,
    data,
    random_seed,
    n_sampling,
    match_function,
    n_required_for_match_function,
    raise_for_n_less_than_required,
):
    # Estimate a per-row margin of error by bootstrap: repeatedly sample
    # a subset of the columns with replacement, re-run the match, and
    # summarize the spread of the per-row scores across the samplings.
    print("Computing MoE with {} sampling ...".format(n_sampling))
    seed(random_seed)
    # index_x_sampling[row, i] holds the match score of `data` row `row`
    # for the i-th bootstrap sampling.
    index_x_sampling = full((data.shape[0], n_sampling), nan)
    # 0.632 is the classic bootstrap expected fraction of unique samples.
    n_sample = ceil(0.632 * target.size)
    for i in range(n_sampling):
        random_indices = choice(target.size, size=n_sample, replace=True)
        sampled_target = target[random_indices]
        sampled_data = data[:, random_indices]
        # Save/restore the global RNG state around the match so any
        # randomness inside match_function does not perturb the
        # sampling sequence of later iterations.
        random_state = get_state()
        index_x_sampling[:, i] = _match_target_and_data(
            sampled_target,
            sampled_data,
            match_function,
            n_required_for_match_function,
            raise_for_n_less_than_required,
        )
        set_state(random_state)
    # One margin of error per row of `data`.
    return apply_along_axis(
        compute_nd_array_margin_of_error, 1, index_x_sampling, raise_for_bad=False
    )
| UCSD-CCAL/ccal | ccal/_match_randomly_sampled_target_and_data_to_compute_margin_of_errors.py | Python | mit | 1,318 | [
"MOE"
] | fef0d89fdbacd3dd7f9f245d32303ab41d8b67a56a14f129a0cfc2d7e533a696 |
#! /usr/bin/python2
# Benchmark parameter script for SWEET REXI scalability runs: builds the
# default simulation option string and the list of test configurations.
# NOTE(review): Python 2 only -- it mixes print statements and print()
# calls below.
import subprocess
import sys
import os
import time
from subprocess import PIPE
import socket
default_params = ''
output_file_prefix = 'output'
if len(sys.argv) > 1:
	output_file_prefix = sys.argv[1]
#
# SCENARIO
#
# 0: radial dam break
# 1: gaussian
# 2: balanced steady state u
# 3: balanced steady state v
# 4: diamond initial condition
# 5: waves
#default_params += ' -s 5'
default_params += ' --initial-freq-x-mul=2.0'
default_params += ' --initial-freq-y-mul=1.0'
scenario_name = "SinCos Waves"
curdir_name = os.getcwd()
print ("Current working directory: "+curdir_name)
#
# 1 hour timeout
#
timeout = "02:00:00"
#
# run for 1 seconds
#
max_time = 50
#
# time step size for coarse time steps
#
dt = 5.0
#
# order of time step for RK
# Use order 4 to make time errors very small to make the spatial error dominate
#
timestep_order = 4
cfl=0.3
print "Max simulation time: "+str(max_time)
print "Time step size for REXI time step: "+str(dt)
print "Time step order: "+str(timestep_order)
#
# default params
#
default_params += ' -f 1 -g 1 -H 1 -X 1 -Y 1 --compute-error 1 -t '+str(max_time)
# Use higher-order time stepping?
default_params += ' -R '+str(timestep_order)
###########################
# threads
###########################
# 24 threads per node on Hartree center
#T_list = [24]
# Use only single-threaded test (Parallelization-in-time only)
#thread_list = [1, 2, 4, 8, 12, 24]
thread_list = [1, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28]
###########################
# MPI RANKS
###########################
#mpi_ranks = [1, 2, 4, 8, 16, 32, 64, 128, 256]
#mpi_ranks = [1, 2, 4, 8, 16, 32, 64, 128, 170, 256, 512, 768, 1024, 1536, 2048, 4096]
mpi_ranks = [1]
# Maximum of total threads (MPI ranks x threads)
max_total_threads = 4*1024
###########################
# resolutions
###########################
res_list = [32, 64, 128, 256]
###########################
# M REXI sampling points
###########################
# NOTE(review): this doubling loop's result is discarded -- M_list is
# overwritten by the explicit list right below it.
M_list = []
m = 64
while m < 2000:
	M_list.append(m)
	m *= 2;
M_list = [64, 128, 256, 512, 1024, 2048, 2048*4, 2048*8, 2048*16, 2048*32, 2048*64]
#M_list = [32, 64, 128, 256, 512]
#M_list = [32, 64, 128, 256, 512, 1024, 2048, 2048*4, 2048*8, 2048*16]
#M_list = [32, 64, 128, 256, 512, 1024, 1024*4, 1024*16, 1024*32]
###########################
# MEM ALLOC
###########################
# NOTE(review): the first assignment is immediately overwritten below.
A_list = [0, 1, 2]
A_list = [1]
###########################
# HYPER VISCOSITY
###########################
hyperviscosity = {}
# http://math.boisestate.edu/~wright/research/FlyerEtAl2012.pdf
# not necessary for these short-range runs
for n in res_list:
	hyperviscosity[n] = 4.*pow(float(n), float(-4))
	hyperviscosity[n] = 0
comp_spec='scons --compiler=intel --sweet-mpi=enable --program=swe_rexi --plane-spectral-space=enable --libfft=enable --plane-spectral-dealiasing=disable --mode=release'
comp_cart='scons --compiler=intel --sweet-mpi=enable --program=swe_rexi --plane-spectral-space=disable --libfft=enable --plane-spectral-dealiasing=disable --mode=release'
comp_rexi='scons --compiler=intel --sweet-mpi=enable --program=swe_rexi --plane-spectral-space=disable --libfft=enable --rexi-parallel-sum=disable --plane-spectral-dealiasing=disable --mode=release'
comp_rexi_par='scons --compiler=intel --sweet-mpi=enable --program=swe_rexi --plane-spectral-space=disable --libfft=enable --rexi-parallel-sum=enable --plane-spectral-dealiasing=disable --mode=release'
if False:
	tests = []
else:
	# short description, binary, parameters, title
	tests = [
#		[ 'nr_fd_spec_agrid', ' -S 0 --timestepping-mode 0 --staggering 0 -C '+str(cfl), 'Finite differences in Fourier space, A-grid', comp_spec, 'nr_fd_spec_agrid' ],
#		[ 'nr_fd_cart_agrid', ' -S 0 --timestepping-mode 0 --staggering 0 -C '+str(cfl), 'Finite differences in Cartesian space, A-grid', comp_cart, 'nr_fd_cart_agrid' ],
#		[ 'nr_fd_spec_cgrid', ' -S 0 --timestepping-mode 0 --staggering 1 -C '+str(cfl), 'Finite differences in Fourier space, C-grid', comp_spec, 'nr_fd_spec_cgrid' ],
		[ 'nr_fd_cart_cgrid', ' -S 0 --timestepping-mode 0 --staggering 1 -C '+str(cfl), 'Finite differences in Cartesian space, C-grid', comp_cart, 'nr_fd_cart_cgrid' ],
#		[ 'nr_spec_spec_agrid', ' -S 1 --timestepping-mode 0 --staggering 0 -C '+str(cfl), 'Spectral derivatives in Fourier space, A-grid', comp_spec, 'nr_spec_spec_agrid' ],
	]
# add rexi tests
for m in M_list:
#	tests.append(['rexi_m'+str(m).zfill(6), ' -S 0 --use-specdiff-for-complex-array 1 --rexi-h 0.2 --timestepping-mode 1 --staggering 0 --rexi-m='+str(m)+' -C '+str(-dt), 'REXI M='+str(m), comp_rexi, 'rexi_m'])
#	tests.append(['rexi_par_m'+str(m).zfill(6), ' -S 0 --use-specdiff-for-complex-array 1 --rexi-h 0.2 --timestepping-mode 1 --staggering 0 --rexi-m='+str(m)+' -C '+str(-dt), 'REXI PAR M='+str(m), comp_rexi_par, 'rexi_par_m'])
#	tests.append(['rexi_fd_m'+str(m).zfill(4), ' -S 0 --use-specdiff-for-complex-array 0 --rexi-h 0.8 --timestepping-mode 1 --staggering 0 --rexi-m='+str(m)+' -C '+str(-dt), 'REXI FD M='+str(m), comp_rexi, 'rexi_fd_m'])
#	tests.append(['rexi_fd_par_m'+str(m).zfill(4), ' -S 0 --use-specdiff-for-complex-array 0 --rexi-h 0.8 --timestepping-mode 1 --staggering 0 --rexi-m='+str(m)+' -C '+str(-dt), 'REXI PAR FD M='+str(m), comp_rexi_par, 'rexi_fd_par_m'])
	pass
| schreiberx/sweet | archive/benchmarks_plane/rexi_tests_lrz/2015_12_27_scalability_nr_fd/params.py | Python | mit | 5,305 | [
"Gaussian"
] | 45ac0477c78fa7e91ce236bb2c3f5528aaf00a419950eff879704d0ee284e970 |
from __future__ import annotations
import os
import procrunner
import pytest
from dxtbx.serialize import load
from dials.array_family import flex
def test_slice_sequence_and_compare_with_expected_results(dials_regression, tmpdir):
    """Slice images 1-20 from the i04_weak_data set via the
    dials.slice_sequence command line program and check the sliced
    experiment and reflection table against expected values."""
    # use the i04_weak_data for this test
    data_dir = os.path.join(dials_regression, "refinement_test_data", "i04_weak_data")
    experiments_path = os.path.join(data_dir, "experiments.json")
    pickle_path = os.path.join(data_dir, "indexed_strong.pickle")
    for pth in (experiments_path, pickle_path):
        assert os.path.exists(pth)
    result = procrunner.run(
        ["dials.slice_sequence", experiments_path, pickle_path, "image_range=1 20"],
        working_directory=tmpdir,
    )
    assert not result.returncode and not result.stderr
    # load results
    sliced_exp = load.experiment_list(
        tmpdir.join("experiments_1_20.expt").strpath, check_format=False
    )[0]
    sliced_refs = flex.reflection_table.from_file(tmpdir / "indexed_strong_1_20.refl")
    # simple test of results
    assert sliced_exp.scan.get_image_range() == (1, 20)
    assert len(sliced_refs) == 3670
def test_slice_sequence_with_first_images_missing(dials_regression, tmpdir):
    """Test slicing where scan image range does not start at 1, exercising
    a case that exposed a bug"""
    # use the i04_weak_data for this test
    data_dir = os.path.join(dials_regression, "refinement_test_data", "i04_weak_data")
    experiments_path = os.path.join(data_dir, "experiments.json")
    # first slice
    result = procrunner.run(
        ["dials.slice_sequence", experiments_path, "image_range=5,20"],
        working_directory=tmpdir,
    )
    assert not result.returncode and not result.stderr
    # second slice: re-slice the already-sliced output, whose scan now
    # starts at image 5 rather than 1
    result = procrunner.run(
        ["dials.slice_sequence", "experiments_5_20.expt", "image_range=10,20"],
        working_directory=tmpdir,
    )
    assert not result.returncode and not result.stderr
    sliced_exp = load.experiment_list(
        tmpdir.join("experiments_5_20_10_20.expt").strpath, check_format=False
    )[0]
    # the image range is absolute, so the doubly-sliced scan starts at 10
    assert sliced_exp.scan.get_image_range() == (10, 20)
    assert sliced_exp.scan.get_array_range() == (9, 20)
    assert sliced_exp.scan.get_oscillation()[0] == pytest.approx(83.35)
def test_slice_sequence_to_degree_blocks(dials_data, tmpdir):
    """Slice data into 10 degree blocks i.e. 17 datasets"""
    expt = dials_data("l_cysteine_4_sweeps_scaled") / "scaled_30.expt"
    refl = dials_data("l_cysteine_4_sweeps_scaled") / "scaled_30.refl"
    procrunner.run(
        [
            "dials.slice_sequence",
            "block_size=10",
            "output.experiments=sliced.expt",
            "output.reflections=sliced.refl",
            expt,
            refl,
        ],
        working_directory=tmpdir,
    )
    sliced_expts = load.experiment_list(
        tmpdir.join("sliced.expt").strpath, check_format=False
    )
    assert len(sliced_expts) == 17
    sliced_refl = flex.reflection_table.from_file(tmpdir.join("sliced.refl").strpath)
    # each 10-degree block must get its own experiment identifier
    assert len(set(sliced_refl.experiment_identifiers().values())) == 17
    sliced_refl.assert_experiment_identifiers_are_consistent(sliced_expts)
def test_slice_sequence_with_scan_varying_crystal(dials_data, tmpdir):
    """test slicing keeps a scan-varying crystal"""
    expt = dials_data("l_cysteine_4_sweeps_scaled") / "scaled_30.expt"
    procrunner.run(
        [
            "dials.slice_sequence",
            "image_range=10,20",
            "output.experiments=sliced.expt",
            expt,
        ],
        working_directory=tmpdir,
    )
    orig = load.experiment_list(expt.strpath, check_format=False)[0]
    sliced = load.experiment_list(
        tmpdir.join("sliced.expt").strpath, check_format=False
    )[0]
    # 11 images -> 12 scan-point boundaries for the sliced crystal model
    assert sliced.crystal.num_scan_points == 12
    orig_UB = [
        orig.crystal.get_A_at_scan_point(i) for i in range(orig.crystal.num_scan_points)
    ]
    sliced_UB = [
        sliced.crystal.get_A_at_scan_point(i)
        for i in range(sliced.crystal.num_scan_points)
    ]
    # the sliced scan-varying A matrices must match the original's
    # scan points 9..20 (images 10-20)
    for a, b in zip(orig_UB[9:21], sliced_UB):
        assert a == pytest.approx(b)
| dials/dials | tests/command_line/test_slice_sequence.py | Python | bsd-3-clause | 4,156 | [
"CRYSTAL"
] | e82dfbac62b55b47e6e321b11dfe867c0db996caa474062e8ca2ed60c65599e5 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A command that generates all DevSite and manpage documents."""
import os
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import walker_util
from googlecloudsdk.core.util import pkg_resources
# Static assets copied verbatim into --html-dir alongside the generated
# manpage HTML pages (menu styling/scripts and the index page).
_HELP_HTML_DATA_FILES = [
    'favicon.ico',
    'index.html',
    '_menu_.css',
    '_menu_.js',
    '_title_.html',
]
def WriteHtmlMenu(command, out):
  """Writes the command menu tree HTML on out.

  Args:
    command: dict, The tree (nested dict) of command/group names.
    out: stream, The output stream.
  """

  def ConvertPathToIdentifier(path):
    """Returns the HTML element id for a command path list."""
    return '_'.join(path)

  def WalkCommandTree(command, prefix):
    """Visit each command and group in the CLI command tree.

    Args:
      command: dict, The tree (nested dict) of command/group names.
      prefix: [str], The subcommand arg prefix.
    """
    level = len(prefix)
    # Only the root and its immediate children start out visible; deeper
    # levels are toggled by the _menu_.js select() handler.
    visibility = 'visible' if level <= 1 else 'hidden'
    indent = level * 2 + 2
    name = command.get('_name_')
    args = prefix + [name]
    out.write('{indent}<li class="{visibility}" id="{item}" '
              'onclick="select(event, this.id)">{name}'.format(
                  indent=' ' * indent, visibility=visibility, name=name,
                  item=ConvertPathToIdentifier(args)))
    commands = command.get('commands', []) + command.get('groups', [])
    if commands:
      out.write('<ul>\n')
      for c in sorted(commands, key=lambda x: x['_name_']):
        WalkCommandTree(c, args)
      out.write('{indent}</ul>\n'.format(indent=' ' * (indent + 1)))
      out.write('{indent}</li>\n'.format(indent=' ' * indent))
    else:
      # BUG FIX: this previously called .format(indent=...) on a literal
      # with no replacement fields -- a confusing no-op now removed.
      out.write('</li>\n')

  out.write("""\
<html>
<head>
<meta name="description" content="man page tree navigation">
<meta name="generator" content="gcloud meta generate-help-docs --html-dir=.">
<title> man page tree navigation </title>
<base href="." target="_blank">
<link rel="stylesheet" type="text/css" href="_menu_.css">
<script type="text/javascript" src="_menu_.js"></script>
</head>
<body>
<div class="menu">
<ul>
""")
  WalkCommandTree(command, [])
  out.write("""\
</ul>
</div>
</body>
</html>
""")
class GenerateHelpDocs(base.Command):
  """Generate all DevSite and man page help docs.

  The DevSite docs are generated in the --devsite-dir directory with pathnames
  in the reference directory hierarchy. The manpage docs are generated in the
  --manpage-dir directory with pathnames in the manN/ directory hierarchy.
  """

  @staticmethod
  def Args(parser):
    parser.add_argument(
        '--hidden',
        action='store_true',
        default=None,
        help=('Include documents for hidden commands and groups.'))
    parser.add_argument(
        '--devsite-dir',
        metavar='DIRECTORY',
        help=('The directory where the generated DevSite reference document '
              'subtree will be written. If not specified then DevSite '
              'documents will not be generated.'))
    parser.add_argument(
        '--html-dir',
        metavar='DIRECTORY',
        help=('The directory where the standalone manpage HTML files will be '
              'generated. index.html contains manpage tree navigation in the '
              'left pane. The active command branch and its immediate children '
              'are visible and clickable. Hover to navigate the tree. Run '
              '`python -m SimpleHTTPServer 8888 &` in DIRECTORY and point '
              'your browser at http://localhost:8888 to view the manpage tree. '
              'If not specified then the HTML manpage site will not be '
              'generated.'))
    parser.add_argument(
        '--manpage-dir',
        metavar='DIRECTORY',
        help=('The directory where the generated manpage document subtree will '
              'be written. If not specified then manpage documents will not be '
              'generated.'))
    parser.add_argument(
        'restrict',
        metavar='COMMAND/GROUP',
        nargs='*',
        default=None,
        help=('Restrict document generation to these dotted command paths. '
              'For example: gcloud.alpha gcloud.beta.test'))

  def Run(self, args):
    # Each output flag is independent; any subset may be requested.
    if args.devsite_dir:
      walker_util.DevSiteGenerator(self.cli, args.devsite_dir).Walk(
          args.hidden, args.restrict)
    if args.html_dir:
      walker_util.HtmlGenerator(
          self.cli, args.html_dir).Walk(args.hidden, args.restrict)
      # The HTML site additionally needs the navigation menu generated
      # from the command tree plus the static assets listed above.
      tree = walker_util.CommandTreeGenerator(
          self.cli).Walk(args.hidden, args.restrict)
      with open(os.path.join(args.html_dir, '_menu_.html'), 'w') as out:
        WriteHtmlMenu(tree, out)
      for file_name in _HELP_HTML_DATA_FILES:
        with open(os.path.join(args.html_dir, file_name), 'wb') as out:
          file_contents = pkg_resources.GetResource(
              'googlecloudsdk.api_lib.meta.help_html_data.', file_name)
          out.write(file_contents)
    if args.manpage_dir:
      walker_util.ManPageGenerator(
          self.cli, args.manpage_dir).Walk(args.hidden, args.restrict)
| flgiordano/netcash | +/google-cloud-sdk/lib/surface/meta/generate_help_docs.py | Python | bsd-3-clause | 5,638 | [
"VisIt"
] | 390c6b4a08d8d3bffeaa220005a01c1a60c2657f41c937b57c88a06352a821f5 |
"""
A dialog box for confirming GPG keys.
"""
# Copyright (C) 2009, Thomas Leonard
# -*- coding: utf-8 -*-
# See the README file for details, or visit http://0install.net.
from zeroinstall import _, translation
import gtk
from zeroinstall.injector.model import SafeException
from zeroinstall.injector import gpg, trust
from zeroinstall.support import tasks
from zeroinstall.gtkui import help_box, gtkutils
def frame(page, title, content, expand = False):
	"""Pack a bold-titled, borderless gtk.Frame containing *content* into *page*.

	content may be a widget, or a plain (str/unicode) string in which case
	a selectable left-aligned gtk.Label is created for it."""
	frame = gtk.Frame()
	label = gtk.Label()
	label.set_markup('<b>%s</b>' % title)
	frame.set_label_widget(label)
	frame.set_shadow_type(gtk.SHADOW_NONE)
	if type(content) in (str, unicode):
		content = gtk.Label(content)
		content.set_alignment(0, 0.5)
		content.set_selectable(True)
	frame.add(content)
	# Labels support set_padding; other widgets get a border instead.
	if hasattr(content, 'set_padding'):
		content.set_padding(8, 4)
	else:
		content.set_border_width(8)
	page.pack_start(frame, expand, True, 0)
def pretty_fp(fp):
	"""Return the fingerprint *fp* split into space-separated groups of four.

	e.g. '0123ABCD' -> '0123 ABCD'. Purely cosmetic, for display to the user."""
	return ' '.join(fp[i:i + 4] for i in range(0, len(fp), 4))
def left(text):
	"""Return a left-aligned, selectable gtk.Label showing *text*."""
	label = gtk.Label(text)
	label.set_alignment(0, 0.5)
	label.set_selectable(True)
	return label
def make_hints_area(closed, key_info_fetcher):
	"""Return a gtk.VBox of hints about a key, filled from key_info_fetcher.

	If the fetcher is still running, a status line is shown and hints are
	added asynchronously as they arrive; updating stops when *closed*
	(the dialog's Blocker) is triggered."""
	def text(parent):
		# Concatenate the text of all DOM text-node children of parent.
		text = ""
		for node in parent.childNodes:
			if node.nodeType == node.TEXT_NODE:
				text = text + node.data
		return text
	hints = gtk.VBox(False, 4)
	shown = set()
	def add_hints():
		# Append any hints not already displayed.
		infos = set(key_info_fetcher.info) - shown
		for info in infos:
			hints.add(make_hint(info.getAttribute("vote"), text(info)))
			shown.add(info)
		if not(key_info_fetcher.blocker or shown):
			hints.add(make_hint("bad", _('Warning: Nothing known about this key!')))
	if key_info_fetcher.blocker:
		status = left(key_info_fetcher.status)
		hints.add(status)
		@tasks.async
		def update_when_ready():
			while key_info_fetcher.blocker:
				yield key_info_fetcher.blocker, closed
				if closed.happened:
					# The dialog box was closed. Stop updating.
					return
				add_hints()
			status.destroy()
		update_when_ready()
	else:
		add_hints()
	hints.show()
	return hints
def make_hint(vote, hint_text):
	"""Return an icon + wrapped-text row for a single key-info hint.

	vote == "good" gets a tick icon; anything else gets a warning icon."""
	hint_icon = gtk.Image()
	if vote == "good":
		hint_icon.set_from_stock(gtk.STOCK_YES, gtk.ICON_SIZE_BUTTON)
	else:
		hint_icon.set_from_stock(gtk.STOCK_DIALOG_WARNING, gtk.ICON_SIZE_BUTTON)
	hint = left(hint_text)
	hint.set_line_wrap(True)
	hint_hbox = gtk.HBox(False, 4)
	hint_hbox.pack_start(hint_icon, False, True, 0)
	hint_hbox.pack_start(hint, True, True, 0)
	hint_icon.set_alignment(0, 0)
	hint_hbox.show_all()
	return hint_hbox
class TrustBox(gtk.Dialog):
	"""Display a dialog box asking the user to confirm that one of the
	keys is trusted for this domain.
	"""
	parent = None
	# Blocker triggered when the dialog is destroyed.
	closed = None
	def __init__(self, pending, valid_sigs, parent):
		"""@since: 0.42"""
		assert valid_sigs
		gtk.Dialog.__init__(self)
		self.set_has_separator(False)
		self.set_position(gtk.WIN_POS_CENTER)
		self.set_transient_for(parent)
		self.closed = tasks.Blocker(_("confirming keys with user"))
		domain = trust.domain_from_url(pending.url)
		assert domain
		def destroy(box):
			self.closed.trigger()
		self.connect('destroy', destroy)
		self.set_title(_('Confirm trust'))
		vbox = gtk.VBox(False, 4)
		vbox.set_border_width(4)
		self.vbox.pack_start(vbox, True, True, 0)
		notebook = gtk.Notebook()
		if len(valid_sigs) == 1:
			notebook.set_show_tabs(False)
		label = left(_('Checking: %s') % pending.url)
		label.set_padding(4, 4)
		vbox.pack_start(label, False, True, 0)
		# Show which keys (if any) are already trusted for this domain.
		currently_trusted_keys = trust.trust_db.get_keys_for_domain(domain)
		if currently_trusted_keys:
			keys = [gpg.load_key(fingerprint) for fingerprint in currently_trusted_keys]
			descriptions = [_("%(key_name)s\n(fingerprint: %(key_fingerprint)s)") % {'key_name': key.name, 'key_fingerprint': pretty_fp(key.fingerprint)}
					for key in keys]
		else:
			descriptions = [_('None')]
		frame(vbox, _('Keys already approved for "%s"') % domain, '\n'.join(descriptions))
		label = left(translation.ngettext('This key signed the feed:', 'These keys signed the feed:', len(valid_sigs)))
		label.set_padding(4, 4)
		vbox.pack_start(label, False, True, 0)
		vbox.pack_start(notebook, True, True, 0)
		self.add_button(gtk.STOCK_HELP, gtk.RESPONSE_HELP)
		self.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
		self.add_button(gtk.STOCK_ADD, gtk.RESPONSE_OK)
		self.set_default_response(gtk.RESPONSE_OK)
		trust_checkbox = {} # Sig -> CheckButton
		def ok_sensitive():
			# Add is only clickable while at least one key is ticked.
			trust_any = False
			for toggle in trust_checkbox.values():
				if toggle.get_active():
					trust_any = True
					break
			self.set_response_sensitive(gtk.RESPONSE_OK, trust_any)
		# Build one notebook page per signature, with fingerprint,
		# claimed identity, key-server hints and a trust checkbox.
		first = True
		for sig in valid_sigs:
			if hasattr(sig, 'get_details'):
				name = '<unknown>'
				details = sig.get_details()
				for item in details:
					if item[0] == 'uid' and len(item) > 9:
						name = item[9]
						break
			else:
				name = None
			page = gtk.VBox(False, 4)
			page.set_border_width(8)
			frame(page, _('Fingerprint'), pretty_fp(sig.fingerprint))
			if name is not None:
				frame(page, _('Claimed identity'), name)
			frame(page, _('Unreliable hints database says'), make_hints_area(self.closed, valid_sigs[sig]))
			already_trusted = trust.trust_db.get_trust_domains(sig.fingerprint)
			if already_trusted:
				frame(page, _('You already trust this key for these domains'),
						'\n'.join(already_trusted))
			trust_checkbox[sig] = gtk.CheckButton(_('_Trust this key'))
			page.pack_start(trust_checkbox[sig], False, True, 0)
			trust_checkbox[sig].connect('toggled', lambda t: ok_sensitive())
			notebook.append_page(page, gtk.Label(name or 'Signature'))
			# Pre-tick only the first key's checkbox.
			if first:
				trust_checkbox[sig].set_active(True)
				first = False
		ok_sensitive()
		self.vbox.show_all()
		# With a single signature the (pre-ticked) checkbox is redundant.
		if len(valid_sigs) == 1:
			for box in trust_checkbox.values():
				box.hide()
		def response(box, resp):
			if resp == gtk.RESPONSE_HELP:
				trust_help.display()
				return
			if resp == gtk.RESPONSE_OK:
				to_trust = [sig for sig in trust_checkbox if trust_checkbox[sig].get_active()]
				if not self._confirm_unknown_keys(to_trust, valid_sigs):
					return
				self.trust_keys(to_trust, domain)
			self.destroy()
		self.connect('response', response)
	def trust_keys(self, agreed_sigs, domain):
		"""Mark each agreed signature's key as trusted for *domain*.

		Errors are reported in a message box; unexpected ones re-raise."""
		assert domain
		try:
			for sig in agreed_sigs:
				trust.trust_db.trust_key(sig.fingerprint, domain)
			trust.trust_db.notify()
		except Exception, ex:
			gtkutils.show_message_box(self, str(ex), gtk.MESSAGE_ERROR)
			if not isinstance(ex, SafeException):
				raise
	def _confirm_unknown_keys(self, to_trust, valid_sigs):
		"""Check the key-info server's results for these keys. If we don't know any of them,
		ask for extra confirmation from the user.
		@param to_trust: the signatures the user wants to trust
		@return: True to continue"""
		def is_unknown(sig):
			# A key is "unknown" unless the key server voted it good.
			for note in valid_sigs[sig].info:
				if note.getAttribute("vote") == "good":
					return False
			return True
		unknown = [sig for sig in to_trust if is_unknown(sig)]
		if unknown:
			if len(unknown) == 1:
				msg = _('WARNING: you are confirming a key which was not known to the key server. Are you sure?')
			else:
				msg = _('WARNING: you are confirming keys which were not known to the key server. Are you sure?')
			box = gtk.MessageDialog(self,
					gtk.DIALOG_DESTROY_WITH_PARENT,
					gtk.MESSAGE_QUESTION, gtk.BUTTONS_OK_CANCEL,
					msg)
			box.set_position(gtk.WIN_POS_CENTER)
			response = box.run()
			box.destroy()
			return response == gtk.RESPONSE_OK
		return True
# Static help dialog opened from the trust dialog's Help button; each
# (title, body) pair below becomes one section of the HelpBox.
trust_help = help_box.HelpBox(_("Trust Help"),
    (_('Overview'), '\n' +
    _("""When you run a program, it typically has access to all your files and can generally do \
anything that you're allowed to do (delete files, send emails, etc). So it's important \
to make sure that you don't run anything malicious.""")),
    (_('Digital signatures'), '\n' +
    _("""Each software author creates a 'key-pair'; a 'public key' and a 'private key'. Without going \
into the maths, only something encrypted with the private key will decrypt with the public key.
So, when a programmer releases some software, they encrypt it with their private key (which no-one \
else has). When you download it, the injector checks that it decrypts using their public key, thus \
proving that it came from them and hasn't been tampered with.""")),
    (_('Trust'), '\n' +
    _("""After the injector has checked that the software hasn't been modified since it was signed with \
the private key, you still have the following problems:
1. Does the public key you have really belong to the author?
2. Even if the software really did come from that person, do you trust them?""")),
    (_('Key fingerprints'), '\n' +
    _("""To confirm (1), you should compare the public key you have with the genuine one. To make this \
easier, the injector displays a 'fingerprint' for the key. Look in mailing list postings or some \
other source to check that the fingerprint is right (a different key will have a different \
fingerprint).
You're trying to protect against the situation where an attacker breaks into a web site \
and puts up malicious software, signed with the attacker's private key, and puts up the \
attacker's public key too. If you've downloaded this software before, you \
should be suspicious that you're being asked to confirm another key!""")),
    (_('Reputation'), '\n' +
    _("""In general, most problems seem to come from malicous and otherwise-unknown people \
replacing software with modified versions, or creating new programs intended only to \
cause damage. So, check your programs are signed by a key with a good reputation!""")))
| pombredanne/zero-install | zeroinstall/gtkui/trust_box.py | Python | lgpl-2.1 | 9,588 | [
"VisIt"
] | cd473c6816403779dcc49931b85ce8e8072b01545e93877a5ccc5d51935b4feb |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import collections
# thinking now that QCEssential should have one doi and dictionary of
# citations. that way the doi contains the record of the definition of the
# QCEssential but several publications (each with their own doi-s) can be
# associated with the Essential (e.g., original theoretical definition,
# current implementation, expanded atom range, reparameterization)
# links to GitHub Psi4 files accepted as doi for the present
class Citation(object):
    """A single published scientific work, identified primarily by its DOI."""

    def __init__(self, doi, fullname=None, dsdbid=None, comment=None):
        """Store the (lower-cased) DOI plus optional descriptive fields."""
        self.doi = doi.lower()
        self.fullname = fullname
        self.dsdbid = dsdbid
        self.comment = comment

    def __str__(self):
        # Assemble the report from fixed-order fragments.
        parts = [
            """ ==> Citation <==\n\n""",
            """ DOI: %s\n""" % (self.doi),
            """ PDF database id: %s\n""" % (self.dsdbid),
            """ Formal Name: %s\n""" % (self.fullname),
            """ Comment: %s\n""" % (self.comment),
            """\n""",
        ]
        return ''.join(parts)
class QCEssential(object):
    """Link literature and external representation of some aspect of quantum
    chemistry (basis set, method, etc.) with a shorthand and indexed
    representation of same.
    """

    def __init__(self, name, fullname=None, latex=None, citations=None, doi=None, comment=None):
        """Record identifiers; *latex* falls back to *fullname* when absent."""
        self.name = name.lower()
        self.fullname = fullname
        if latex is None and fullname is not None:
            self.latex = fullname
        else:
            self.latex = latex
        # role (str) -> qcdb.Citation, preserving insertion order
        self.citations = collections.OrderedDict() if citations is None else citations
        self.doi = doi
        self.comment = comment

    def __str__(self):
        pieces = []
        pieces.append(""" ==> %s QCEssential <==\n\n""" % (self.name))
        pieces.append(""" Formal name: %s\n""" % (self.fullname))
        pieces.append(""" LaTeX representation: %s\n""" % (self.latex))
        pieces.append(""" DOI: %s\n""" % (self.doi))
        pieces.append(""" Literature citations:\n""")
        for rol, cit in self.citations.items():
            pieces.append(""" %17s: %s\n""" % (rol, cit.doi))
        pieces.append(""" Comment: %s\n""" % (self.comment))
        pieces.append("""\n""")
        return ''.join(pieces)
class Publication(QCEssential):
    """Specialization of :py:class:`QCEssential` for computational chemistry
    publications, presumably containing many quantum chemistry results.
    """
    def __init__(self, name, fullname=None, latex=None, dsdbid=None, doi=None, comment=None, owner=None):
        """Wrap the paper's own citation as the 'primary' citation.

        :param owner: initials of the responsible person (stored upper-cased),
            or None when unassigned.
        """
        primary = Citation(doi=doi, fullname=fullname, dsdbid=dsdbid)
        cits = collections.OrderedDict()
        cits['primary'] = primary
        QCEssential.__init__(self, name=name, fullname=primary.fullname, latex=latex, citations=cits, doi=primary.doi, comment=comment)
        self.name = name.lower()
        # BUG FIX: the declared default owner=None previously crashed on
        # None.upper(); keep the default usable by guarding for None.
        self.owner = owner.upper() if owner is not None else None

    def __str__(self):
        text = ''
        text += """ ==> %s Publication <==\n\n""" % (self.name)
        text += """ Formal name: %s\n""" % (self.fullname)
        text += """ LaTeX representation: %s\n""" % (self.latex)
        text += """ Owner: %s\n""" % (self.owner)
        text += """ DOI: %s\n""" % (self.doi)
        text += """ Literature citations:\n"""
        for rol, cit in self.citations.items():
            text += """ %-17s %s\n""" % (rol, cit.doi)
        text += """ Comment: %s\n""" % (self.comment)
        text += """\n"""
        return text
class BasisSet(QCEssential):
    """Specialization of :py:class:`QCEssential` for basis sets."""

    def __init__(self, name, fullname=None, latex=None, citations=None, doi=None, comment=None, zeta=None, build=None):
        """As QCEssential, plus cardinality *zeta* and a CBS *build* recipe."""
        QCEssential.__init__(self, name, fullname, latex, citations, doi, comment)
        self.name = name.lower()
        self.zeta = zeta
        # Default recipe: the basis stands alone (single-stage build).
        if build is None:
            self.build = [[self.name]]
        else:
            self.build = build

    def __str__(self):
        header = """ ==> %s BasisSet Treatment <==\n\n""" % (self.name)
        body = """ Formal name: %s\n""" % (self.fullname)
        body += """ LaTeX representation: %s\n""" % (self.latex)
        body += """ Zeta: %s\n""" % (self.zeta)
        body += """ CBS build: %s\n""" % (self.build)
        body += """ DOI: %s\n""" % (self.doi)
        body += """ Literature citations:\n"""
        for rol, cit in self.citations.items():
            body += """ %17s: %s\n""" % (rol, cit.doi)
        body += """ Comment: %s\n""" % (self.comment)
        body += """\n"""
        return header + body
class Method(QCEssential):
    """Specialization of :py:class:`QCEssential` for quantum chemical methods."""

    def __init__(self, name, fullname=None, latex=None, citations=None, doi=None, comment=None):
        """As QCEssential, but method shorthands are stored upper-case."""
        QCEssential.__init__(self, name, fullname, latex, citations, doi, comment)
        self.name = name.upper()

    def __str__(self):
        chunks = [""" ==> %s Method <==\n\n""" % (self.name),
                  """ Formal name: %s\n""" % (self.fullname),
                  """ LaTeX representation: %s\n""" % (self.latex),
                  """ DOI: %s\n""" % (self.doi),
                  """ Literature citations:\n"""]
        for rol, cit in self.citations.items():
            chunks.append(""" %17s: %s\n""" % (rol, cit.doi))
        chunks.append(""" Comment: %s\n""" % (self.comment))
        chunks.append("""\n""")
        return ''.join(chunks)
class Error(QCEssential):
    """Specialization of :py:class:`QCEssential` for measures of error."""

    def __init__(self, name, fullname=None, latex=None, citations=None, doi=None, comment=None):
        """As QCEssential; error-measure shorthands are stored lower-case."""
        QCEssential.__init__(self, name, fullname, latex, citations, doi, comment)
        self.name = name.lower()

    def __str__(self):
        cite_lines = ''.join(""" %17s: %s\n""" % (rol, cit.doi)
                             for rol, cit in self.citations.items())
        return (""" ==> %s Error Measure <==\n\n""" % (self.name)
                + """ Formal name: %s\n""" % (self.fullname)
                + """ LaTeX representation: %s\n""" % (self.latex)
                + """ DOI: %s\n""" % (self.doi)
                + """ Literature citations:\n"""
                + cite_lines
                + """ Comment: %s\n""" % (self.comment)
                + """\n""")
#class Option(QCEssential):
# """Specialization of :py:class:`QCEssential` for computation variation.
#
# """
# def __init__(self, name, fullname=None, latex=None, citations=None, doi=None, comment=None):
# QCEssential.__init__(self, name, fullname, latex, citations, doi, comment)
# self.name = name #.lower()
#
# def __str__(self):
# text = ''
# text += """ ==> %s Computation Mode <==\n\n""" % (self.name)
# text += """ Formal name: %s\n""" % (self.fullname)
# text += """ LaTeX representation: %s\n""" % (self.latex)
# text += """ DOI: %s\n""" % (self.doi)
# text += """ Literature citations:\n"""
# for rol, cit in self.citations.items():
# text += """ %17s: %s\n""" (rol, cit.doi)
# text += """ Comment: %s\n""" % (self.comment)
# text += """\n"""
# return text
# Registry of source publications.  Each Publication bundles its primary
# Citation (DOI + PDF-database id) with the responsible owner's initials.
_tlist = [
    Publication('dhdft', doi='', dsdbid='', owner='CAC',
                fullname=""),
    Publication('dft', doi='10.1063/1.3545971', dsdbid='Burns:2011:084107', owner='LAB',
                fullname="""Density-Functional Approaches to Noncovalent Interactions: A Comparison of Dispersion Corrections (DFT-D), Exchange-Hole Dipole Moment (XDM) Theory, and Specialized Functions. L. A. Burns, A. Vazquez-Mayagoitia, B. G. Sumpter, and C. D. Sherrill, J. Chem. Phys. 134(8), 084107/1-25 (2011)"""),
    Publication('saptone', doi='10.1063/1.4867135', dsdbid='Parker:2014:094106', owner='LAB',
                fullname="""Levels of Symmetry Adapted Perturbation Theory (SAPT). I. Efficiency and Performance for Interaction Energies. T. M. Parker, L. A. Burns, R. M. Parrish, A. G. Ryno, and C. D. Sherrill, J. Chem. Phys. 140(9), 094106/1-16 (2014)"""),
    Publication('pt2', doi='10.1063/1.4903765', dsdbid='Burns:2014:234111', owner='LAB',
                fullname="""Appointing Silver and Bronze Standards for Noncovalent Interactions: A Comparison of Spin-Component-Scaled (SCS), Explicitly Correlated (F12), and Specialized Wavefunction Approaches. L. A. Burns, M. S. Marshall, and C. D. Sherrill, J. Chem. Phys. 141(23), 234111/1-21 (2014)"""),
    Publication('s22b', doi='10.1063/1.3659142', dsdbid='Marshall:2011:194102', owner='LAB',
                fullname="""Basis Set Convergence of the Coupled-Cluster Correction, delta_MP2^CCSD(T): Best Practices for Benchmarking Noncovalent Interactions and the Attendant Revision of the S22, NBC10, HBC6, and HSG Databases. M. S. Marshall, L. A. Burns, and C. D. Sherrill, J. Chem. Phys. 135(19), 194102/1-10 (2011)"""),
    Publication('dilabio', doi='10.1021/ct400149j', dsdbid='Burns:2014:49', owner='LAB',
                fullname="""Comparing Counterpoise-Corrected, Uncorrected, and Averaged Binding Energies for Benchmarking Noncovalent Interactions. L. A. Burns, M. S. Marshall, and C. D. Sherrill, J. Chem. Theory Comput. 10(1), 49-57 (2014)"""),
    Publication('achc', doi='10.1021/acs.jctc.5b00588', dsdbid='', owner='TMP',
                fullname="""Assessment of Empirical Models versus High-Accuracy Ab Initio Methods for Nucleobase Stacking: Evaluating the Importance of Charge Penetration"""),
    Publication('pt2uncp', doi='', dsdbid='', owner='LAB', fullname=''),
    Publication('dfit', doi='', dsdbid='', owner='DGAS', fullname=''),
    Publication('merz3', doi='', dsdbid='', owner='LAB', fullname=''),
    Publication('bfdbmm', doi='', dsdbid='', owner='LAB', fullname=''),
    Publication('saptmisc', doi='', dsdbid='', owner='', fullname=''),
    Publication('bfdbdft', doi='', dsdbid='', owner='', fullname=''),
    Publication('silver', doi='', dsdbid='', owner='', fullname=''),
    Publication('anon', doi='', dsdbid='', owner='', fullname=''),
    Publication('f12dilabio', doi='', dsdbid='', owner='', fullname=''),
]

# Index publications by their (lower-cased) shorthand name.
pubs = {}
for item in _tlist:
    pubs[item.name] = item
# Registry of basis-set treatments.  Entries without a `build` keyword are
# plain single basis sets; `build` lists ingredient names per extrapolation
# stage (presumably None marks a stage with no single-basis form -- confirm
# against the consumers of BasisSet.build).
_tlist = [
    BasisSet('dz', fullname='cc-pVDZ'),
    BasisSet('jadz', fullname='jun-cc-pVDZ'),
    BasisSet('hadz', fullname='heavy-aug-cc-pVDZ'),
    BasisSet('adz', fullname='aug-cc-pVDZ'),
    BasisSet('addz', fullname='aug-cc-pV(D+d)Z'),
    BasisSet('tz', fullname='cc-pVTZ'),
    BasisSet('matz', fullname='may-cc-pVTZ'),
    BasisSet('jatz', fullname='jun-cc-pVTZ'),
    BasisSet('hatz', fullname='heavy-aug-cc-pVTZ'),
    BasisSet('atz', fullname='aug-cc-pVTZ'),
    BasisSet('qz', fullname='cc-pVQZ'),
    BasisSet('aaqz', fullname='apr-cc-pVQZ'),
    BasisSet('maqz', fullname='may-cc-pVQZ'),
    BasisSet('jaqz', fullname='jun-cc-pVQZ'),
    BasisSet('haqz', fullname='heavy-aug-cc-pVQZ'),
    BasisSet('aqz', fullname='aug-cc-pVQZ'),
    BasisSet('a5z', fullname='aug-cc-pV5Z'),
    BasisSet('dtz', fullname='cc-pVDTZ', build=[None, ['tz', 'dtz']]),
    BasisSet('jadtz', fullname='jun-cc-pVDTZ', build=[None, ['jatz', 'jadtz']]),
    BasisSet('hadtz', fullname='heavy-aug-cc-pVDTZ', build=[None, ['hatz', 'hadtz']]),
    BasisSet('adtz', fullname='aug-cc-pVDTZ', build=[['adtz'], ['atz', 'adtz']]),
    BasisSet('tqz', fullname='cc-pVTQZ', build=[None, ['qz', 'tqz']]),
    BasisSet('matqz', fullname='may-cc-pVTQZ', build=[None, ['maqz', 'matqz']]),
    BasisSet('jatqz', fullname='jun-cc-pVTQZ', build=[None, ['jaqz', 'jatqz']]),
    BasisSet('hatqz', fullname='heavy-aug-cc-pVTQZ', build=[None, ['haqz', 'hatqz']]),
    BasisSet('atqz', fullname='aug-cc-pVTQZ', build=[['atqz'], ['aqz', 'atqz']]),
    BasisSet('aq5z', fullname='aug-cc-pVQ5Z', build=[['aq5z'], ['a5z', 'aq5z']]),
    BasisSet('a6z', fullname='aug-cc-pV6Z'),
    BasisSet('a56z', fullname='aug-cc-pV56Z', build=[['a56z'], ['a6z', 'a56z']]),
    BasisSet('atzdz', fullname='[aTZ; D:DZ]', latex=r"""[aTZ; $\delta$:DZ]""",
             build=[None, None, ['atz', 'atz', 'dz']]),
    BasisSet('adtzdz', fullname='[aDTZ; D:DZ]', latex=r"""[aDTZ; $\delta$:DZ]""",
             build=[None, None, ['atz', 'adtz', 'dz']]),
    BasisSet('atqzdz', fullname='[aTQZ; D:DZ]', latex=r"""[aTQZ; $\delta$:DZ]""",
             build=[None, None, ['aqz', 'atqz', 'dz']]),
    BasisSet('atzjadz', fullname='[aTZ; D:jaDZ]', latex=r"""[aTZ; $\delta$:jaDZ]""",
             build=[None, None, ['atz', 'atz', 'jadz']]),
    BasisSet('adtzjadz', fullname='[aDTZ; D:jaDZ]', latex=r"""[aDTZ; $\delta$:jaDZ]""",
             build=[None, None, ['atz', 'adtz', 'jadz']]),
    BasisSet('atqzjadz', fullname='[aTQZ; D:jaDZ]', latex=r"""[aTQZ; $\delta$:jaDZ]""",
             build=[None, None, ['aqz', 'atqz', 'jadz']]),
    BasisSet('atzhadz', fullname='[aTZ; D:haDZ]', latex=r"""[aTZ; $\delta$:haDZ]""",
             build=[None, None, ['atz', 'atz', 'hadz']]),
    BasisSet('adtzhadz', fullname='[aDTZ; D:haDZ]', latex=r"""[aDTZ; $\delta$:haDZ]""",
             build=[None, None, ['atz', 'adtz', 'hadz']]),
    BasisSet('atqzhadz', fullname='[aTQZ; D:haDZ]', latex=r"""[aTQZ; $\delta$:haDZ]""",
             build=[None, None, ['aqz', 'atqz', 'hadz']]),
    BasisSet('atzadz', fullname='[aTZ; D:aDZ]', latex=r"""[aTZ; $\delta$:aDZ]""",
             build=[None, None, ['atz', 'atz', 'adz']]),
    BasisSet('adtzadz', fullname='[aDTZ; D:aDZ]', latex=r"""[aDTZ; $\delta$:aDZ]""",
             build=[None, None, ['atz', 'adtz', 'adz']]),
    BasisSet('atqzadz', fullname='[aTQZ; D:aDZ]', latex=r"""[aTQZ; $\delta$:aDZ]""",
             build=[None, None, ['aqz', 'atqz', 'adz']]),
    BasisSet('aq5zadz', fullname='[aQ5Z; D:aDZ]', latex=r"""[aQ5Z; $\delta$:aDZ]""",
             build=[None, None, ['a5z', 'aq5z', 'adz']]),
    BasisSet('atzdtz', fullname='[aTZ; D:DTZ]', latex=r"""[aTZ; $\delta$:DTZ]""",
             build=[None, None, ['atz', 'atz', 'dtz']]),
    BasisSet('atqzdtz', fullname='[aTQZ; D:DTZ]', latex=r"""[aTQZ; $\delta$:DTZ]""",
             build=[None, None, ['aqz', 'atqz', 'dtz']]),
    BasisSet('atzjadtz', fullname='[aTZ; D:jaDTZ]', latex=r"""[aTZ; $\delta$:jaDTZ]""",
             build=[None, None, ['atz', 'atz', 'jadtz']]),
    BasisSet('atqzjadtz', fullname='[aTQZ; D:jaDTZ]', latex=r"""[aTQZ; $\delta$:jaDTZ]""",
             build=[None, None, ['aqz', 'atqz', 'jadtz']]),
    BasisSet('atzhadtz', fullname='[aTZ; D:haDTZ]', latex=r"""[aTZ; $\delta$:haDTZ]""",
             build=[None, None, ['atz', 'atz', 'hadtz']]),
    BasisSet('atqzhadtz', fullname='[aTQZ; D:haDTZ]', latex=r"""[aTQZ; $\delta$:haDTZ]""",
             build=[None, None, ['aqz', 'atqz', 'hadtz']]),
    BasisSet('atzadtz', fullname='[aTZ; D:aDTZ]', latex=r"""[aTZ; $\delta$:aDTZ]""",
             build=[None, None, ['atz', 'atz', 'adtz']]),
    BasisSet('atqzadtz', fullname='[aTQZ; D:aDTZ]', latex=r"""[aTQZ; $\delta$:aDTZ]""",
             build=[None, None, ['aqz', 'atqz', 'adtz']]),
    BasisSet('aq5zadtz', fullname='[aQ5Z; D:aDTZ]', latex=r"""[aQ5Z; $\delta$:aDTZ]""",
             build=[None, None, ['a5z', 'aq5z', 'adtz']]),
    BasisSet('atqztz', fullname='[aTQZ; D:TZ]', latex=r"""[aTQZ; $\delta$:TZ]""",
             build=[None, None, ['aqz', 'atqz', 'tz']]),
    BasisSet('atqzjatz', fullname='[aTQZ; D:jaTZ]', latex=r"""[aTQZ; $\delta$:jaTZ]""",
             build=[None, None, ['aqz', 'atqz', 'jatz']]),
    BasisSet('atqzmatz', fullname='[aTQZ; D:maTZ]', latex=r"""[aTQZ; $\delta$:maTZ]""",
             build=[None, None, ['aqz', 'atqz', 'matz']]),
    BasisSet('atqzhatz', fullname='[aTQZ; D:haTZ]', latex=r"""[aTQZ; $\delta$:haTZ]""",
             build=[None, None, ['aqz', 'atqz', 'hatz']]),
    BasisSet('atqzatz', fullname='[aTQZ; D:aTZ]', latex=r"""[aTQZ; $\delta$:aTZ]""",
             build=[None, None, ['aqz', 'atqz', 'atz']]),
    BasisSet('aq5zatz', fullname='[aQ5Z; D:aTZ]', latex=r"""[aQ5Z; $\delta$:aTZ]""",
             build=[None, None, ['a5z', 'aq5z', 'atz']]),
    BasisSet('aq5zhatz', fullname='[aQ5Z; D:haTZ]', latex=r"""[aQ5Z; $\delta$:haTZ]""",
             build=[None, None, ['a5z', 'aq5z', 'hatz']]),
    BasisSet('haq5zatz', fullname='[haQ5Z; D:aTZ]', latex=r"""[haQ5Z; $\delta$:aTZ]""",
             build=[None, None, ['ha5z', 'haq5z', 'atz']]),
    BasisSet('aq5zaqz', fullname='[aQ5Z; D:aQZ]', latex=r"""[aQ5Z; $\delta$:aQZ]""",
             build=[None, None, ['a5z', 'aq5z', 'aqz']]),
    BasisSet('tqz631gs025',fullname='[TQZ; D:631G*(0.25)', latex=r"""[TQZ; $\delta$:631gs025]""",
             build=[None, None, ['qz', 'tqz', '631gs025']]),
    BasisSet('dzf12', fullname='cc-pVDZ-F12'),
    BasisSet('tzf12', fullname='cc-pVTZ-F12'),
    BasisSet('qzf12', fullname='cc-pVQZ-F12'),
    BasisSet('5zf12', fullname='cc-pV5Z-F12'),
    BasisSet('dtzf12', fullname='cc-pVDTZ-F12', build=[['dtzf12'], ['tzf12', 'dtzf12']]),
    BasisSet('tqzf12', fullname='cc-pVTQZ-F12', build=[['tqzf12'], ['qzf12', 'tqzf12']]),
    BasisSet('q5zf12', fullname='cc-pVQ5Z-F12', build=[['q5zf12'], ['5zf12', 'q5zf12']]),
    BasisSet('hill1_adtz', build=[['hillcc_adtz'], ['atz', 'hillcc_adtz']]),  # TODO should have None or non-xtpl first element?
    BasisSet('hill1_atqz', build=[['hillcc_atqz'], ['aqz', 'hillcc_atqz']]),
    BasisSet('hill1_aq5z', build=[['hillcc_aq5z'], ['a5z', 'hillcc_aq5z']]),
    BasisSet('hill1_dtzf12', build=[['hillcc_dtzf12'], ['tzf12', 'hillcc_dtzf12']]),
    BasisSet('hill1_tqzf12', build=[['hillcc_tqzf12'], ['qzf12', 'hillcc_tqzf12']]),
    BasisSet('hill2_dtzf12', build=[None, None, ['tzf12', 'hillcc_dtzf12', 'hillt_dtzf12']]),
    BasisSet('hill2_tqzf12', build=[None, None, ['qzf12', 'hillcc_tqzf12', 'hillt_tqzf12']]),
    BasisSet('hill2_adtz', build=[None, None, ['atz', 'hillcc_adtz', 'hillt_adtz']]),
    BasisSet('hill2_atqz', build=[None, None, ['aqz', 'hillcc_atqz', 'hillt_atqz']]),
    BasisSet('hill2_aq5z', build=[None, None, ['a5z', 'hillcc_aq5z', 'hillt_aq5z']]),
    BasisSet('dadz', fullname='double-aug-cc-pVDZ'),
    BasisSet('datz', fullname='double-aug-cc-pVTZ'),
    BasisSet('631pgs', fullname='6-31+G(d)'),
    BasisSet('6311pg_3df_2p_', fullname='6-311+G(3df,2p)'),
    BasisSet('6311ppg_3df_2p_', fullname='6-311++G(3df,2p)'),
    BasisSet('631gs025', fullname='6-31G*(0.25)'),
    BasisSet('def2qzvp', fullname='def2-QZVP'),
    BasisSet('na', fullname='no applicable basis'),
]

# Index basis-set treatments by their (lower-cased) shorthand name.
bases = {}
for item in _tlist:
    bases[item.name] = item
# Key name must be [A-Z], [0-9], and _, being either all upper or all lowercase according to Essential
# fullname can be anything on the keyboard, no ascii codes
# latex can contain escape codes for LaTeX
# Registry of computational methods: SAPT levels, wavefunction methods
# (conventional and F12), DFT functionals with dispersion variants, and
# semiempirical/force-field models.
_tlist = [
    Method('SAPT0', fullname='SAPT0'),
    Method('SAPT0S', fullname='sSAPT0', latex=r"""$\textit{s}$SAPT0"""),  #latex="""\\textit{s}SAPT0"""),
    Method('SAPTSCS', fullname='SCS-SAPT0'),
    Method('SAPTDFT', fullname='DFT-SAPT'),
    Method('SAPT2', fullname='SAPT2'),
    Method('SAPT2P', fullname='SAPT2+'),
    Method('SAPT3', fullname='SAPT2+(3)'),
    Method('SAPT3F', fullname='SAPT2+3'),
    Method('SAPT2PC', fullname='SAPT2+(CCD)'),
    Method('SAPT3C', fullname='SAPT2+(3)(CCD)'),
    Method('SAPT3FC', fullname='SAPT2+3(CCD)'),
    Method('SAPT2PM', fullname='SAPT2+dMP2', latex=r"""SAPT2+$\delta$MP2"""),
    Method('SAPT3M', fullname='SAPT2+(3)dMP2', latex=r"""SAPT2+(3)$\delta$MP2"""),
    Method('SAPT3FM', fullname='SAPT2+3dMP2', latex=r"""SAPT2+3$\delta$MP2"""),
    Method('SAPT2PCM', fullname='SAPT2+(CCD)dMP2', latex=r"""SAPT2+(CCD)$\delta$MP2"""),
    Method('SAPT3CM', fullname='SAPT2+(3)(CCD)dMP2', latex=r"""SAPT2+(3)(CCD)$\delta$MP2"""),
    Method('SAPT3FCM', fullname='SAPT2+3(CCD)dMP2', latex=r"""SAPT2+3(CCD)$\delta$MP2"""),
    Method('SAPT2LCM', fullname='MP2(CCD)', comment="""Identical to SAPT2+(CCD)dMP2"""),
    Method('HF', fullname='HF'),
    Method('MP2', fullname='MP2'),
    Method('SCSMP2', fullname='SCS-MP2'),
    Method('SCSNMP2', fullname='SCS(N)-MP2'),
    Method('SCSMIMP2', fullname='SCS(MI)-MP2'),
    Method('DWMP2', fullname='DW-MP2'),
    Method('MP2C', fullname='MP2C'),
    Method('MP3', fullname='MP3'),
    Method('MP25', fullname='MP2.5'),
    Method('CCSD', fullname='CCSD'),
    Method('SCSCCSD', fullname='SCS-CCSD'),
    Method('SCSMICCSD', fullname='SCS(MI)-CCSD'),
    Method('CCSDT', fullname='CCSD(T)'),
    Method('HFCABS', fullname='HF-CABS'),
    Method('MP2F12', fullname='MP2-F12'),
    Method('SCSMP2F12', fullname='SCS-MP2-F12'),
    Method('SCSNMP2F12', fullname='SCS(N)-MP2-F12'),
    Method('SCSMIMP2F12', fullname='SCS(MI)-MP2-F12'),
    Method('DWMP2F12', fullname='DW-MP2-F12'),
    Method('MP2CF12', fullname='MP2C-F12'),
    Method('CCSDAF12', fullname='CCSD-F12a'),
    Method('CCSDBF12', fullname='CCSD-F12b'),
    Method('CCSDCF12', fullname='CCSD-F12c'),
    Method('SCSCCSDAF12', fullname='SCS-CCSD-F12a'),
    Method('SCSCCSDBF12', fullname='SCS-CCSD-F12b'),
    Method('SCSCCSDCF12', fullname='SCS-CCSD-F12c'),
    Method('SCMICCSDAF12', fullname='SCS(MI)-CCSD-F12a'),
    Method('SCMICCSDBF12', fullname='SCS(MI)-CCSD-F12b'),
    Method('SCMICCSDCF12', fullname='SCS(MI)-CCSD-F12c'),
    Method('CCSDTABAVGF12', fullname='AVG-CCSD(T**)-F12'),
    Method('CCSDTAF12', fullname='CCSD(T**)-F12a'),
    Method('CCSDTBF12', fullname='CCSD(T**)-F12b'),
    Method('CCSDTCF12', fullname='CCSD(T**)-F12c'),
    Method('DWCCSDTF12', fullname='DW-CCSD(T**)-F12'),
#        build=lambda: ['DW-CCSD(T**)-F12 TOTAL ENERGY'],
#            ['HF-CABS TOTAL ENERGY', 'DW-CCSD(T**)-F12 CORRELATION ENERGY'],
#            ['HF-CABS TOTAL ENERGY', 'MP2-F12 CORRELATION ENERGY', 'DW-CCSD(T**)-F12 CC CORRECTION ENERGY'],
#            ['HF-CABS TOTAL ENERGY', 'MP2-F12 CORRELATION ENERGY', 'DW-CCSD-F12 CC CORRECTION ENERGY', 'DW-(T**)-F12 CORRECTION ENERGY'])
    Method('B97', fullname='B97'),
    Method('B97D2', fullname='B97-D2'),
    Method('B97D3', fullname='B97-D3'),
    Method('B97D3BJ', fullname='B97-D3(BJ)'),
    Method('B97D3M', fullname='B97-D3M'),
    Method('B97D3MBJ', fullname='B97-D3M(BJ)'),
    Method('B3LYP', fullname='B3LYP'),
    Method('B3LYPD2', fullname='B3LYP-D2'),
    Method('B3LYPD3', fullname='B3LYP-D3'),
    Method('B3LYPD3BJ', fullname='B3LYP-D3(BJ)'),
    Method('B3LYPXDM', fullname='B3LYP-XDM'),
    Method('B3LYPD3M', fullname='B3LYP-D3M'),
    Method('B3LYPD3MBJ', fullname='B3LYP-D3M(BJ)'),
    Method('B2PLYP', fullname='B2PLYP'),
    Method('B2PLYPD2', fullname='B2PLYP-D2'),
    Method('B2PLYPD3', fullname='B2PLYP-D3'),
    Method('B2PLYPD3BJ', fullname='B2PLYP-D3(BJ)'),
    Method('B2PLYPD3M', fullname='B2PLYP-D3M'),
    Method('B2PLYPD3MBJ', fullname='B2PLYP-D3M(BJ)'),
    Method('M052X', fullname='M05-2X'),
    Method('M052XD3', fullname='M05-2X-D3'),
    Method('M062X', fullname='M06-2X'),
    Method('M062XD3', fullname='M06-2X-D3'),
    Method('M08HX', fullname='M08-HX'),
    Method('M08SO', fullname='M08-SO'),
    Method('M11', fullname='M11'),
    Method('M11L', fullname='M11L'),
    Method('XYG3', fullname='XYG3'),
    Method('DLDFD', fullname='dlDF+D'),
    Method('DSDPBEP86', fullname='DSD-PBEP86'),  # this a real thing?
    Method('DSDPBEP86D2OPT', fullname='DSD-PBEP86-D2opt'),  # email version of DSD
    Method('DSDPBEP86D2', fullname='DSD-PBEP86-D2'),
    Method('DSDPBEP86D3', fullname='DSD-PBEP86-D3'),
    Method('DSDPBEP86D3BJ', fullname='DSD-PBEP86-D3(BJ)'),
    Method('VV10', fullname='VV10'),
    Method('LCVV10', fullname='LC-VV10'),
    Method('WB97XD', fullname='wB97X-D', latex=r"""$\omega$B97X-D"""),
    Method('WB97X2', fullname='wB97X-2', latex=r"""$\omega$B97X-2"""),
    Method('WB97XV', fullname='wB97X-V', latex=r"""$\omega$B97X-V"""),
    Method('PBE', fullname='PBE'),
    Method('PBED2', fullname='PBE-D2'),
    Method('PBED3', fullname='PBE-D3'),
    Method('PBED3BJ', fullname='PBE-D3(BJ)'),
    Method('PBED3M', fullname='PBE-D3M'),
    Method('PBED3MBJ', fullname='PBE-D3M(BJ)'),
    Method('PBE0', fullname='PBE0'),
    Method('PBE0D2', fullname='PBE0-D2'),
    Method('PBE0D3', fullname='PBE0-D3'),
    Method('PBE0D3BJ', fullname='PBE0-D3(BJ)'),
    Method('PBE0D3M', fullname='PBE0-D3M'),
    Method('PBE0D3MBJ', fullname='PBE0-D3M(BJ)'),
    Method('PBE02', fullname='PBE0-2'),
    Method('WPBE', fullname='wPBE', latex=r"""$\omega$PBE"""),
    Method('WPBED3', fullname='wPBE-D3', latex=r"""$\omega$PBE-D3"""),
    Method('WPBED3BJ', fullname='wPBE-D3(BJ)', latex=r"""$\omega$PBE-D3(BJ)"""),
    Method('WPBED3M', fullname='wPBE-D3M', latex=r"""$\omega$PBE-D3M"""),
    Method('WPBED3MBJ', fullname='wPBE-D3M(BJ)', latex=r"""$\omega$PBE-D3M(BJ)"""),
    Method('CCSDTNSAF12', fullname='CCSD(T)-F12a'),
    Method('CCSDTNSBF12', fullname='CCSD(T)-F12b'),
    Method('CCSDTNSCF12', fullname='CCSD(T)-F12c'),
    Method('B970', fullname='B970'),
    Method('B970D2', fullname='B970-D2'),
    Method('BP86', fullname='BP86'),
    Method('BP86D2', fullname='BP86-D2'),
    Method('BP86D3', fullname='BP86-D3'),
    Method('BP86D3BJ', fullname='BP86-D3(BJ)'),
    Method('BP86D3M', fullname='BP86-D3M'),
    Method('BP86D3MBJ', fullname='BP86-D3M(BJ)'),
    Method('BLYP', fullname='BLYP'),
    Method('BLYPD2', fullname='BLYP-D2'),
    Method('BLYPD3', fullname='BLYP-D3'),
    Method('BLYPD3BJ', fullname='BLYP-D3(BJ)'),
    Method('BLYPD3M', fullname='BLYP-D3M'),
    Method('BLYPD3MBJ', fullname='BLYP-D3M(BJ)'),
    Method('CCSDTQ', fullname='CCSDT(Q)'),
    Method('CCSDFULLT', fullname='CCSDT'),
    Method('CCSDTSAF12', fullname='CCSD(T*)-F12a'),
    Method('CCSDTSBF12', fullname='CCSD(T*)-F12b'),
    Method('CCSDTSCF12', fullname='CCSD(T*)-F12c'),
    Method('DWCCSDTNSF12', fullname='DW-CCSD(T)-F12'),
    Method('DWCCSDTSF12', fullname='DW-CCSD(T*)-F12'),
    Method('DELTQ', fullname='d(TQ)', latex=r"""$\delta$(TQ)"""),  # TODO kill this once non-IE impl in reap-DB
    Method('DEL2T', fullname='d(T)', latex=r"""$\delta$(T)"""),  # TODO kill this once non-IE impl in reap-DB
    Method('AM1', fullname='AM1'),
    Method('GAFF', fullname='GAFF'),
    Method('PM6DH2', fullname='PM6-DH2'),
    Method('CHARMM', fullname='CHARMM'),
    Method('PM3', fullname='PM3'),
    Method('PM6', fullname='PM6'),
    Method('PDDG', fullname='PDDG'),
    Method('FF03', fullname='FF03'),
    Method('FF03A', fullname='FF03A'),
    Method('FF99SB', fullname='FF99SB'),
    Method('FF99SBA', fullname='FF99SBA'),
    Method('AM1FS1', fullname='AM1FS1'),
    Method('EFP', fullname='EFP'),
]

# Index methods by their (upper-cased) shorthand name.
methods = {}
for item in _tlist:
    methods[item.name] = item
# Registry of error statistics in three families suffixed E, PE, and PBE
# (plain, percent, and a second percent-style variant; exact definitions
# live with the code that computes them).
_tlist = [
    Error('pexe', fullname='pexE'),
    Error('nexe', fullname='nexE'),
    Error('maxe', fullname='maxE'),
    Error('mine', fullname='minE'),
    Error('me', fullname='ME'),
    Error('mae', fullname='MAE'),
    Error('rmse', fullname='rmsE'),
    Error('stde', fullname='stdE'),
    Error('pexpe', fullname='pexPE'),
    Error('nexpe', fullname='nexPE'),
    Error('maxpe', fullname='maxPE'),
    Error('minpe', fullname='minPE'),
    Error('mpe', fullname='MPE'),
    Error('mape', fullname='MAPE', latex=r"""MA$\%$E"""),  #latex="""MA\%E"""),
    Error('rmspe', fullname='rmsPE'),
    Error('stdpe', fullname='stdPE'),
    Error('pexpbe', fullname='pexPBE'),
    Error('nexpbe', fullname='nexPBE'),
    Error('maxpbe', fullname='maxPBE'),
    Error('minpbe', fullname='minPBE'),
    Error('mpbe', fullname='MPBE'),
    Error('mapbe', fullname='MAPBE', latex=r"""MA$\%$BE"""),  #latex="""MA\%BE"""),
    Error('rmspbe', fullname='rmsPBE'),
    Error('stdpbe', fullname='stdPBE'),
]

# Index error measures by their (lower-cased) shorthand name.
errors = {}
for item in _tlist:
    errors[item.name] = item
#_tlist = [
# Option('CP', fullname='CP'),
# Option('unCP', fullname='unCP'),
#]
#options = {item.name: item for item in _tlist}
| lothian/psi4 | psi4/driver/qcdb/modelchems.py | Python | lgpl-3.0 | 30,569 | [
"CHARMM",
"Psi4"
] | d5692a3546b6fbe805fbb26a643f8ea0cee8bf57e8859ba559b67f455505be38 |
#!/usr/bin/python
import urllib2
import sys
import cv
import numpy
# SRGB-linear conversions using NumPy - see http://en.wikipedia.org/wiki/SRGB
def srgb2lin(x):
    """Elementwise sRGB -> linear intensity for values in [0, 1]."""
    offset = 0.055
    # Both branches are evaluated; numpy.where selects per element.
    low = x * (1.0 / 12.92)
    high = numpy.power((x + offset) * (1.0 / (1 + offset)), 2.4)
    return numpy.where(x <= 0.04045, low, high)
def lin2srgb(x):
a = 0.055
return numpy.where(x <= 0.0031308,
x * 12.92,
(1 + a) * numpy.power(x, 1 / 2.4) - a)
if __name__ == "__main__":
    # Load the image named on the command line, or download the classic
    # "lena" sample when no filename was given.
    if len(sys.argv) > 1:
        img0 = cv.LoadImageM( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
    else:
        url = 'https://code.ros.org/svn/opencv/trunk/opencv/samples/c/lena.jpg'
        filedata = urllib2.urlopen(url).read()
        # Wrap the raw JPEG bytes in a 1xN byte matrix so DecodeImageM can parse them.
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        img0 = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
    cv.NamedWindow("original", 1)
    cv.ShowImage("original", img0)
    # Image was originally bytes in range 0-255. Turn it into an array of floats in range 0.0 - 1.0
    n = numpy.asarray(img0) / 255.0
    # Use NumPy to do some transformations on the image
    # Negate the image by subtracting it from 1.0
    cv.NamedWindow("negative")
    cv.ShowImage("negative", cv.fromarray(1.0 - n))
    # Assume the image was sRGB, and compute the linear version.
    cv.NamedWindow("linear")
    cv.ShowImage("linear", cv.fromarray(srgb2lin(n)))
    # Look at a subwindow
    cv.NamedWindow("subwindow")
    cv.ShowImage("subwindow", cv.fromarray(n[200:300,200:400]))
    # Compute the grayscale image
    cv.NamedWindow("monochrome")
    ln = srgb2lin(n)
    red = ln[:,:,0]
    grn = ln[:,:,1]
    blu = ln[:,:,2]
    # Weighted channel mix (approximately Rec. 601 luma weights), applied in linear space.
    linear_mono = 0.3 * red + 0.59 * grn + 0.11 * blu
    cv.ShowImage("monochrome", cv.fromarray(lin2srgb(linear_mono)))
    # Apply a blur to the NumPy array using OpenCV
    cv.NamedWindow("gaussian")
    cv.Smooth(n, n, cv.CV_GAUSSIAN, 15, 15)  # smooths n in place (src == dst)
    cv.ShowImage("gaussian", cv.fromarray(n))
    # Block until a key is pressed so the windows stay visible.
    cv.WaitKey(0)
| eirTony/INDI1 | to/lang/OpenCV-2.2.0/samples/python/numpy_array.py | Python | mit | 2,086 | [
"Gaussian"
] | badb1035b949fa8a6d1d87a320698c18140534fdb14f77a3342258c955941600 |
# Author: Samuel Lucidi <mansam@csh.rit.edu>
__version__ = "0.1.113"
import requests
import urllib
import ConfigParser
import liveconnect.conf
import liveconnect.exceptions
# Module-level configuration, parsed once at import time from the standard
# liveconnect config file locations.
config = ConfigParser.SafeConfigParser()
config.read(liveconnect.conf.liveconnect_config_locations)
def connect():
    """Build a LiveConnect client from the [liveconnect] config section."""
    get_opt = liveconnect.config.get
    return LiveConnect(get_opt('liveconnect', 'client_id'),
                       get_opt('liveconnect', 'client_secret'))
class LiveConnect(object):
    """Minimal client for the Microsoft Live Connect OAuth 2.0 endpoints."""

    def __init__(self, client_id, client_secret):
        # Application credentials issued when the app was registered.
        self.client_id = client_id
        self.client_secret = client_secret
        # OAuth 2.0 endpoints.
        self.user_auth_url = 'https://login.live.com/oauth20_authorize.srf'
        self.token_auth_url = 'https://login.live.com/oauth20_token.srf'
        # Redirect target for desktop/mobile apps without their own URI.
        self.mobile_redirect = 'https://login.live.com/oauth20_desktop.srf'

    def authorize(self, refresh_token=None, auth_code=None, redirect_uri=None):
        """
        Use a previously received auth code or refresh token to get a new
        access token and refresh token if applicable.

        One of *refresh_token* or *auth_code* must be supplied (the refresh
        token wins when both are given); otherwise AuthorizationError is
        raised.  Returns the decoded JSON response of the token endpoint.
        """
        if not redirect_uri:
            redirect_uri = self.mobile_redirect
        params = {
            "client_id": self.client_id,
            "client_secret": self.client_secret,
            "redirect_uri": redirect_uri
        }
        if refresh_token:
            params["refresh_token"] = refresh_token
            params["grant_type"] = "refresh_token"
        elif auth_code:
            params["code"] = auth_code
            params["grant_type"] = "authorization_code"
        else:
            raise liveconnect.exceptions.AuthorizationError('Must specify an authorization code or a refresh token.')
        return requests.post(self.token_auth_url, params).json()

    def generate_auth_url(self, scopes=None, redirect_uri=None, state=""):
        """
        Generate a link that a user must visit to authorize the app
        to make requests in their name.

        *scopes* defaults to ['wl.basic'].  (Fixed: the former mutable list
        default could be shared and mutated across calls.)
        """
        if scopes is None:
            scopes = ['wl.basic']
        if not redirect_uri:
            redirect_uri = self.mobile_redirect
        params = {
            "client_id": self.client_id,
            "client_secret": self.client_secret,
            "scope": ' '.join(scopes),
            "response_type": "code",
            "redirect_uri": redirect_uri,
            "state": state
        }
        return "%s?%s" % (self.user_auth_url, urllib.urlencode(params))
| mansam/liveconnect | liveconnect/__init__.py | Python | mit | 2,101 | [
"VisIt"
] | 5abe830201ff215c9c84417fd34798c68ea5d88e5009124bfee807c50a6a9aeb |
# Copyright 2010 by Tiago Antao. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
Large file parsing of Genepop files
The standard parser loads the whole file into memory. This parser
provides an iterator over data.
Classes:
LargeRecord Holds GenePop data.
Functions:
read Parses a GenePop record (file) into a Record object.
"""
from copy import deepcopy
def get_indiv(line):
    """Parse one GenePop individual line.

    Returns a tuple ``(indiv_name, allele_list, marker_len)`` where
    ``allele_list`` contains 2-tuples of ints for diploid markers or
    1-tuples for haploid data, and ``marker_len`` is the number of
    digits used per allele (2 or 3).
    """
    indiv_name, marker_line = line.split(',')
    tokens = marker_line.replace('\t', ' ').split(' ')
    markers = [tok for tok in tokens if tok != '']
    # A first token of 2 or 4 characters means two digits per allele;
    # anything else is treated as three digits per allele.
    marker_len = 2 if len(markers[0]) in [2, 4] else 3
    try:
        allele_list = [(int(m[0:marker_len]), int(m[marker_len:]))
                       for m in markers]
    except ValueError:  # no second half to convert: haploid data
        allele_list = [(int(m[0:marker_len]),) for m in markers]
    return indiv_name, allele_list, marker_len
def read(handle):
    """Parses a handle containing a GenePop file.

    handle is a file-like object that contains a GenePop record.
    Returns a partially populated Record; the individuals themselves are
    consumed lazily through Record.data_generator.
    """
    record = Record(handle)
    record.comment_line = str(handle.readline()).rstrip()
    # Loci may appear one per line or all on a single line separated by
    # spaces and/or commas; commas are stripped before splitting.
    first_loci = str(handle.readline()).rstrip().replace(',', '')
    record.loci_list.extend(first_loci.split(' '))
    while True:
        line = handle.readline()
        if line == "":
            break
        line = line.rstrip()
        if line.upper() == "POP":
            record.stack.append("POP")
            break
        record.loci_list.append(line)
    # Peek at the first individual to learn the marker length, then push
    # the line onto the stack so the generator will yield it again.
    next_line = handle.readline().rstrip()
    indiv_name, allele_list, record.marker_len = get_indiv(next_line)
    record.stack.append(next_line)
    return record
class Record(object):
    """Holds information from a GenePop record.

    Members:

    - marker_len      The marker length (2 or 3 digit code per allele).
    - comment_line    Comment line.
    - loci_list       List of loci names.
    - data_generator  Iterates over population data.

    The generator will only work once. If you want to read a handle
    twice you have to re-open it!

    data_generator yields either () - an empty tuple - marking a new
    population, or an individual. An individual is something like
    ('Ind1', [(1,1), (3,None), (200,201)]): the individual is called
    Ind1, has three diploid loci, and for the second locus one of the
    alleles is unknown (None).
    """
    def __init__(self, handle):
        self.handle = handle
        self.marker_len = 0
        self.comment_line = ""
        self.loci_list = []
        self.populations = []
        # Lines read ahead by read() that still have to be yielded before
        # the rest of the handle is consumed.
        self.stack = []
        # BUGFIX: the original also assigned `self.data_generator = None`
        # here, which shadowed the data_generator() method on every
        # instance and made it uncallable; the assignment is removed.

    def data_generator(self):
        """Yield () at each new population and (name, alleles) per individual."""
        for handle in [self.stack, self.handle]:
            for line in handle:
                line = line.rstrip()
                if line.upper() == 'POP':
                    yield ()
                else:
                    indiv_name, allele_list, marker_len = get_indiv(line)
                    clean_list = []
                    for locus in allele_list:
                        mk_real = []
                        for al in locus:
                            if al == 0:
                                # Allele code 0 encodes missing data.
                                mk_real.append(None)
                            else:
                                mk_real.append(al)
                        clean_list.append(tuple(mk_real))
                    yield indiv_name, clean_list
        # BUGFIX: falling off the end terminates the generator. The
        # original `raise StopIteration()` becomes a RuntimeError inside
        # generators under PEP 479 (Python 3.7+).
| bryback/quickseq | genescript/Bio/PopGen/GenePop/LargeFileParser.py | Python | mit | 3,945 | [
"Biopython"
] | 2fee16f0a4658e2ad21077f0efd08f5a46e04fe5aacc33ba2b608284dc24a3c1 |
"""The basic dict based notebook format.
The Python representation of a notebook is a nested structure of
dictionary subclasses that support attribute access
(IPython.utils.ipstruct.Struct). The functions in this module are merely
helpers to build the structs in the right form.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import pprint
import uuid
from IPython.utils.ipstruct import Struct
from IPython.utils.py3compat import cast_unicode
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# Change this when incrementing the nbformat version
nbformat = 3
nbformat_minor = 0
class NotebookNode(Struct):
    """A dict with attribute access (via Struct); every node of the
    notebook tree built by this module is one of these."""
    pass
def from_dict(d):
    """Recursively convert plain dicts (and lists/tuples of them) into
    NotebookNode structures; scalars are returned unchanged."""
    if isinstance(d, dict):
        node = NotebookNode()
        for key, value in d.items():
            node[key] = from_dict(value)
        return node
    if isinstance(d, (tuple, list)):
        return [from_dict(item) for item in d]
    return d
def new_output(output_type=None, output_text=None, output_png=None,
    output_html=None, output_svg=None, output_latex=None, output_json=None,
    output_javascript=None, output_jpeg=None, prompt_number=None,
    ename=None, evalue=None, traceback=None, stream=None, metadata=None):
    """Create a new output node (display data, pyout, pyerr or stream).

    Only the fields whose arguments are not None are attached to the node.
    MIME payload fields are skipped for 'pyerr' outputs; the error fields
    (ename/evalue/traceback) are only attached for 'pyerr'.
    """
    output = NotebookNode()
    if output_type is not None:
        # NOTE: py2 builtin `unicode` — this module targets Python 2.
        output.output_type = unicode(output_type)
    # metadata is always present as a plain dict, defaulting to empty.
    if metadata is None:
        metadata = {}
    if not isinstance(metadata, dict):
        raise TypeError("metadata must be dict")
    output.metadata = metadata
    if output_type != 'pyerr':
        # Attach each provided MIME representation, coerced to unicode.
        if output_text is not None:
            output.text = cast_unicode(output_text)
        if output_png is not None:
            output.png = cast_unicode(output_png)
        if output_jpeg is not None:
            output.jpeg = cast_unicode(output_jpeg)
        if output_html is not None:
            output.html = cast_unicode(output_html)
        if output_svg is not None:
            output.svg = cast_unicode(output_svg)
        if output_latex is not None:
            output.latex = cast_unicode(output_latex)
        if output_json is not None:
            output.json = cast_unicode(output_json)
        if output_javascript is not None:
            output.javascript = cast_unicode(output_javascript)
    if output_type == u'pyout':
        if prompt_number is not None:
            output.prompt_number = int(prompt_number)
    if output_type == u'pyerr':
        if ename is not None:
            output.ename = cast_unicode(ename)
        if evalue is not None:
            output.evalue = cast_unicode(evalue)
        if traceback is not None:
            output.traceback = [cast_unicode(frame) for frame in list(traceback)]
    if output_type == u'stream':
        # Stream outputs default to stdout when no stream name is given.
        output.stream = 'stdout' if stream is None else cast_unicode(stream)
    return output
def new_code_cell(input=None, prompt_number=None, outputs=None,
    language=u'python', collapsed=False, metadata=None):
    """Create a new code cell with input and a list of outputs."""
    cell = NotebookNode()
    cell.cell_type = u'code'
    if language is not None:
        cell.language = cast_unicode(language)
    if input is not None:
        cell.input = cast_unicode(input)
    if prompt_number is not None:
        cell.prompt_number = int(prompt_number)
    # Outputs default to an empty list; the provided list is kept as-is.
    cell.outputs = [] if outputs is None else outputs
    if collapsed is not None:
        cell.collapsed = bool(collapsed)
    cell.metadata = NotebookNode(metadata or {})
    return cell
def new_text_cell(cell_type, source=None, rendered=None, metadata=None):
    """Create a new text cell of the given type."""
    # VERSIONHACK: plaintext -> raw
    # handle never-released plaintext name for raw cells
    if cell_type == 'plaintext':
        cell_type = 'raw'
    cell = NotebookNode()
    for attr, value in (('source', source), ('rendered', rendered)):
        if value is not None:
            setattr(cell, attr, cast_unicode(value))
    cell.metadata = NotebookNode(metadata or {})
    cell.cell_type = cell_type
    return cell
def new_heading_cell(source=None, rendered=None, level=1, metadata=None):
    """Create a new section-heading cell with a given integer level."""
    cell = NotebookNode()
    cell.cell_type = u'heading'
    for attr, value in (('source', source), ('rendered', rendered)):
        if value is not None:
            setattr(cell, attr, cast_unicode(value))
    cell.level = int(level)
    cell.metadata = NotebookNode(metadata or {})
    return cell
def new_worksheet(name=None, cells=None, metadata=None):
    """Create a worksheet, optionally named, holding a list of cells."""
    ws = NotebookNode()
    if name is not None:
        ws.name = cast_unicode(name)
    # Cells default to an empty list; a provided iterable is copied.
    ws.cells = [] if cells is None else list(cells)
    ws.metadata = NotebookNode(metadata or {})
    return ws
def new_notebook(name=None, metadata=None, worksheets=None):
    """Create a notebook, stamped with the current nbformat version."""
    nb = NotebookNode()
    nb.nbformat = nbformat
    nb.nbformat_minor = nbformat_minor
    nb.worksheets = [] if worksheets is None else list(worksheets)
    # Metadata is either freshly created or wrapped, never shared raw.
    nb.metadata = new_metadata() if metadata is None else NotebookNode(metadata)
    if name is not None:
        nb.metadata.name = cast_unicode(name)
    return nb
def new_metadata(name=None, authors=None, license=None, created=None,
    modified=None, gistid=None):
    """Create a new metadata node; only non-None fields are attached."""
    metadata = NotebookNode()
    if name is not None:
        metadata.name = cast_unicode(name)
    if authors is not None:
        metadata.authors = list(authors)
    for attr, value in (('created', created), ('modified', modified),
                        ('license', license), ('gistid', gistid)):
        if value is not None:
            setattr(metadata, attr, cast_unicode(value))
    return metadata
def new_author(name=None, email=None, affiliation=None, url=None):
    """Create a new author node; only non-None fields are attached."""
    author = NotebookNode()
    for attr, value in (('name', name), ('email', email),
                        ('affiliation', affiliation), ('url', url)):
        if value is not None:
            setattr(author, attr, cast_unicode(value))
    return author
| noslenfa/tdjangorest | uw/lib/python2.7/site-packages/IPython/nbformat/v3/nbbase.py | Python | apache-2.0 | 7,046 | [
"Brian"
] | 2f6c65316724eb45f495cb7055afd50f6cef6208762b4601e1f96bd0bb4717a4 |
#!/usr/bin/env python
#pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import vtk
import chigger

# Fixed camera so the three cubes are all visible from one viewpoint;
# presumably pinned so the rendered image is reproducible — the window is
# created with test=True below (TODO confirm what test mode implies).
camera = vtk.vtkCamera()
camera.SetViewUp(-0.2488, 0.8185, -0.5178)
camera.SetPosition(1.8403, 2.7164, 3.4098)
camera.SetFocalPoint(0.0000, 0.0000, 0.0000)

# Three unit cubes at the origin, each rotated 45 degrees about a different
# axis (x, y, z) and colored with a distinct half-intensity channel.
cube0 = chigger.geometric.CubeSource(position=[0,0,0], lengths=[1,1,1], rotation=[45,0,0], color=[0.5,0,0], edges=False)
cube1 = chigger.geometric.CubeSource(position=[0,0,0], lengths=[1,1,1], rotation=[0,45,0], color=[0,0.5,0], edges=False)
cube2 = chigger.geometric.CubeSource(position=[0,0,0], lengths=[1,1,1], rotation=[0,0,45], color=[0,0,0.5], edges=False)
cubes = chigger.base.ChiggerResult(cube0, cube1, cube2, camera=camera)

# Render, save the image used by the test harness, then start the window.
window = chigger.RenderWindow(cubes, size=[300,300], test=True)
window.write('rotation.png')
window.start()
| yipenggao/moose | python/chigger/tests/geometric/base/rotation.py | Python | lgpl-2.1 | 1,643 | [
"MOOSE",
"VTK"
] | 132f28b0470600e07be7cce45a2ce05c9c0f5a2a68e5f8c444958efe327a994a |
"""
An example showing how a custom colormap (or look up table) can be used
for a given object.
Although the end user specifies colormaps by giving the name of a set of
predefined colormaps, Mayavi (and VTK) deal with color internally using
'Look Up Tables' (LUT): a table that associate a scalar value to a
color defined by its RGBA components.
In this example, we show how the LUT of an object can be retrieved and
modified. Specificaly, we start by giving a surf object the 'cool'
colormap, but we modify add to add a transparency effect.
Notice in the resulting image how the surface becomes more transparent
for its lower points.
Note that if you want to use a different number of colors, you can
change the 'number_of_colors' attribute of the lut object and assign a
new array of the right shape to its 'table' attribute.
"""
# Create some data: a 2D sinc-like surface on a 200x200 grid.
import numpy as np
x, y = np.mgrid[-10:10:200j, -10:10:200j]
z = 100 * np.sin(x * y) / (x * y)
# Visualize it with mlab.surf
from mayavi import mlab
#mlab.options.backend = 'envisage'
mlab.figure(bgcolor=(1, 1, 1))
surf = mlab.surf(z, colormap='cool')
# Retrieve the LUT of the surf object.
lut = surf.module_manager.scalar_lut_manager.lut.table.to_array()
# The lut is a 255x4 array, with the columns representing RGBA
# (red, green, blue, alpha) coded with integers going from 0 to 255.
# We modify the alpha channel to add a transparency gradient
# NOTE(review): the two bare `print lut` statements look like leftover
# debug output (and are py2 print-statement syntax) — confirm whether
# they should be kept.
print lut
lut[:, -1] = np.linspace(0, 255, 256)
print lut
# and finally we put this LUT back in the surface object. We could have
# added any 255*4 array rather than modifying an existing LUT.
surf.module_manager.scalar_lut_manager.lut.table = lut
# We need to force update of the figure now that we have changed the LUT.
mlab.draw()
mlab.view(40, 85)
mlab.show()
| HPCGISLab/STDataViz | WorkingVersion/LibTry/OtherTry/custom_colormap.py | Python | bsd-3-clause | 1,777 | [
"Mayavi",
"VTK"
] | f0a2419d1c3606a4e6d629838bf1a05d0589c81d930bfb80ff4a2bba7662db19 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the profiles_datadomainindex table
        and drop the DataDomain.groups foreign-key column."""
        # Adding model 'DataDomainIndex'
        db.create_table(u'profiles_datadomainindex', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('order', self.gf('django.db.models.fields.PositiveIntegerField')(default=1, db_index=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('dataDomain', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.DataDomain'])),
            ('groups', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.Group'])),
        ))
        db.send_create_signal(u'profiles', ['DataDomainIndex'])
        # Deleting field 'DataDomain.groups'
        # (FK columns are stored with an _id suffix, hence 'groups_id'.)
        db.delete_column(u'profiles_datadomain', 'groups_id')
    def backwards(self, orm):
        """Reverse the migration: drop the index table and restore the
        DataDomain.groups column (default=1 so existing rows validate)."""
        # Deleting model 'DataDomainIndex'
        db.delete_table(u'profiles_datadomainindex')
        # Adding field 'DataDomain.groups'
        db.add_column(u'profiles_datadomain', 'groups', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['profiles.Group']), keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 12, 11, 10, 51, 8, 489155)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 12, 11, 10, 51, 8, 488694)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'maps.shapefile': {
'Meta': {'object_name': 'ShapeFile'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'geo_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geo_meta_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'geom_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'shape_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'zoom_threshold': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
u'profiles.customvalue': {
'Meta': {'object_name': 'CustomValue'},
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_value': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'supress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_operator': ('django.db.models.fields.CharField', [], {'max_length': "'255'"})
},
u'profiles.datadomain': {
'Meta': {'ordering': "['weight']", 'object_name': 'DataDomain'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Indicator']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'subdomain_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subdomains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'symmetrical': 'False', 'blank': 'True'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.datadomainindex': {
'Meta': {'ordering': "['order']", 'object_name': 'DataDomainIndex'},
'dataDomain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
'groups': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'profiles.datapoint': {
'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'DataPoint'},
'change_from_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_from'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
'change_to_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_to'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']", 'null': 'True'})
},
u'profiles.datasource': {
'Meta': {'object_name': 'DataSource'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'profiles.denominator': {
'Meta': {'object_name': 'Denominator'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'multiplier': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sort': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.denominatorpart': {
'Meta': {'object_name': 'DenominatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.IndicatorPart']"})
},
u'profiles.flatvalue': {
'Meta': {'object_name': 'FlatValue'},
'display_title': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'f_moe': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_number': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_percent': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'geography': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'geography_name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'geography_slug': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'geometry_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'indicator_slug': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'time_key': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'value_type': ('django.db.models.fields.CharField', [], {'max_length': "'100'"})
},
u'profiles.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataSource']", 'symmetrical': 'False', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'shapefile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maps.ShapeFile']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'}),
'summary_level': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'profiles.georecord': {
'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geo_searchable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']"}),
'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'})
},
u'profiles.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
u'profiles.groupindex': {
'Meta': {'ordering': "['order']", 'object_name': 'GroupIndex'},
'groups': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups'", 'to': u"orm['profiles.Indicator']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'profiles.indicator': {
'Meta': {'object_name': 'Indicator'},
'data_domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_distribution': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_generated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'default': "'U.S. Census Bureau'", 'max_length': '300', 'blank': 'True'}),
'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
},
u'profiles.indicatordomain': {
'Meta': {'object_name': 'IndicatorDomain'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.indicatorpart': {
'Meta': {'object_name': 'IndicatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']"})
},
u'profiles.legendoption': {
'Meta': {'object_name': 'LegendOption'},
'bin_options': ('django.db.models.fields.TextField', [], {'default': "''"}),
'bin_type': ('django.db.models.fields.CharField', [], {'default': "'jenks'", 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.precalculatedvalue': {
'Meta': {'object_name': 'PrecalculatedValue'},
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'geo_record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'table': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'profiles.taskstatus': {
'Meta': {'object_name': 'TaskStatus'},
'error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
't_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'task': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'profiles.time': {
'Meta': {'object_name': 'Time'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
},
u'profiles.value': {
'Meta': {'object_name': 'Value'},
'datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataPoint']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
}
}
complete_apps = ['profiles']
| ProvidencePlan/Profiles | communityprofiles/profiles/oldmigrations/0067_auto__add_datadomainindex__del_field_datadomain_groups.py | Python | mit | 23,543 | [
"MOE"
] | e985de5b9426df9c759d6a71994cdd93fd939a059a55775c118164e804678251 |
#!/usr/bin/env python
# Testing the new parallel scanner class - detection of invalid states
# --jr20101218
#
# Benchmark script (Python 2): runs the same 2000-point parameter scan of
# model 'isola2a' three ways -- serially with pysces.Scanner, in parallel
# with pysces.ParScanner (task-chunked), and in parallel with scatter/gather
# -- then compares throughput and verifies all three produce identical
# steady-state and user-output results, including agreeing on which scan
# points yielded invalid (NaN) states.
import os
# Remember the starting directory; pysces may chdir into its model dir.
backupdir = os.getcwd()
import numpy as np
import pysces
tbox=pysces.PyscesUtils.TimerBox()
import time
# --- Serial reference run -------------------------------------------------
m=pysces.model('isola2a')
ser = pysces.Scanner(m)
print "Serial execution..."
print "Start: ", tbox.normal_timer('SER')
print tbox.SER.next()
t1=time.time()
ser.quietRun = True
# Scan V4 logarithmically over [0.01, 200] in 2000 steps.
ser.addScanParameter('V4',0.01,200,2000,log=True)
ser.addUserOutput('J_R1', 'A')
ser.Run()
print "Done: ", tbox.SER.next()
t2=time.time()
print "Duration: %.2f seconds" % (t2-t1)
# Throughput metric used for the speedup comparisons below.
ser.statespersecond = len(ser.ScanSpace)/(t2-t1)
print "States per second: %.1f" % ser.statespersecond
# --- Parallel run, chunked into batches of 36 scan points -----------------
print "\n\nParallel execution...scans per run =", 36
par = pysces.ParScanner(m)
par.scans_per_run = 36
t3=time.time()
par.addScanParameter('V4',0.01,200,2000,log=True)
par.addUserOutput('J_R1', 'A')
par.Run()
t4=time.time()
print "Duration: %.2f seconds" % (t4-t3)
par.statespersecond = par.Tsteps/(t4-t3)
print "States per second: %.1f" % par.statespersecond
print "\n Speedup: %.2f" % (par.statespersecond/ser.statespersecond)
# --- Parallel run using scatter/gather distribution -----------------------
print "\n\nParallel execution...with scatter and gather"
par2 = pysces.ParScanner(m)
t5=time.time()
par2.addScanParameter('V4',0.01,200,2000,log=True)
par2.addUserOutput('J_R1', 'A')
par2.RunScatter()
t6=time.time()
print "Duration: %.2f seconds" % (t6-t5)
par2.statespersecond = par2.Tsteps/(t6-t5)
print "States per second: %.1f" % par2.statespersecond
print "\n Speedup: %.2f" % (par2.statespersecond/ser.statespersecond)
# --- Cross-check the three result sets ------------------------------------
print "\n===========\nComparing results..."
sss = ser.SteadyStateResults
pss = par.SteadyStateResults
suo = ser.UserOutputResults
puo = par.UserOutputResults
p2ss = par2.SteadyStateResults
p2uo = par2.UserOutputResults
# Invalid states show up as NaN; all runs must flag the same (row, col) cells.
print "\nTesting nan's:"
print "serial and parallel (tc): ", \
    np.alltrue(np.where(np.isnan(sss))[0] == np.where(np.isnan(pss))[0]), \
    np.alltrue(np.where(np.isnan(sss))[1] == np.where(np.isnan(pss))[1]), \
    np.alltrue(np.where(np.isnan(suo))[0] == np.where(np.isnan(puo))[0]), \
    np.alltrue(np.where(np.isnan(suo))[1] == np.where(np.isnan(puo))[1])
print "serial and parallel (scatter): ", \
    np.alltrue(np.where(np.isnan(sss))[0] == np.where(np.isnan(p2ss))[0]), \
    np.alltrue(np.where(np.isnan(sss))[1] == np.where(np.isnan(p2ss))[1]), \
    np.alltrue(np.where(np.isnan(suo))[0] == np.where(np.isnan(p2uo))[0]), \
    np.alltrue(np.where(np.isnan(suo))[1] == np.where(np.isnan(p2uo))[1])
# The valid (finite) entries must match exactly between runs.
print "\nTesting finite values:"
print "serial and parallel (tc): ", \
    np.alltrue(sss[np.where(np.isfinite(sss))]==pss[np.where(np.isfinite(pss))]), \
    np.alltrue(suo[np.where(np.isfinite(suo))]==puo[np.where(np.isfinite(puo))])
print "serial and parallel (scatter): ", \
    np.alltrue(sss[np.where(np.isfinite(sss))]==p2ss[np.where(np.isfinite(p2ss))]), \
    np.alltrue(suo[np.where(np.isfinite(suo))]==p2uo[np.where(np.isfinite(p2uo))])
# Restore the original working directory.
os.chdir(backupdir)
"PySCeS"
] | 61ac1b586042f56f40b2b1e7ce39667a153c17f743f78aefb827315563edac05 |
""" The Machine/Job Features TimeLeft utility interrogates the MJF values
for the current CPU and Wallclock consumed, as well as their limits.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import os
import time
from six.moves.urllib.request import urlopen
from DIRAC import S_OK, S_ERROR
from DIRAC.Resources.Computing.BatchSystems.TimeLeft.ResourceUsage import ResourceUsage
class MJFResourceUsage(ResourceUsage):
  """
  The Machine/Job Features (MJF) plugin of the TimeLeft utility.

  Reads the per-job and per-machine values published under the
  $JOBFEATURES and $MACHINEFEATURES locations (files or URLs, per the
  WLCG Machine/Job Features specification) to determine the wallclock
  consumed so far and the CPU / wallclock limits of the current slot.
  """

  #############################################################################
  def __init__(self):
    """ Standard constructor
    """
    super(MJFResourceUsage, self).__init__('MJF', 'JOB_ID')
    # Batch queue name, if the pilot environment provides one.
    self.queue = os.environ.get('QUEUE')
    self.log.verbose('jobID=%s, queue=%s' % (self.jobID, self.queue))
    # Fallback job start time, used when jobstart_secs is not readable.
    self.startTime = time.time()

  #############################################################################
  def getResourceUsage(self):
    """ Returns S_OK with a dictionary containing the entries CPU, CPULimit,
        WallClock, WallClockLimit, and Unit for current slot.

        Note: CPU consumed is never measured by this plugin, so 'CPU' is
        always None. If any of CPULimit/WallClock/WallClockLimit could not
        be determined, an S_ERROR is returned instead, with the partially
        filled dictionary attached as its 'Value' entry.
    """
    cpuLimit = None
    wallClockLimit = None
    wallClock = None
    jobStartSecs = None

    # Getting info from JOBFEATURES
    # We are not called from TimeLeft.py if this is not set.
    jobFeaturesPath = os.environ.get('JOBFEATURES')
    if jobFeaturesPath is None:
      self.log.warn('$JOBFEATURES is not set')

    if jobFeaturesPath:
      try:
        wallClockLimit = int(urlopen(jobFeaturesPath + '/wall_limit_secs').read())
        self.log.verbose("wallClockLimit from JF = %d" % wallClockLimit)
      except ValueError:
        self.log.warn("/wall_limit_secs is unreadable")
      except IOError as e:
        self.log.exception("Issue with $JOBFEATURES/wall_limit_secs", lException=e)
        self.log.warn("Could not determine cpu limit from $JOBFEATURES/wall_limit_secs")

      try:
        jobStartSecs = int(urlopen(jobFeaturesPath + '/jobstart_secs').read())
        self.log.verbose("jobStartSecs from JF = %d" % jobStartSecs)
      except ValueError:
        self.log.warn("/jobstart_secs is unreadable, setting a default")
        jobStartSecs = self.startTime
      except IOError as e:
        self.log.exception("Issue with $JOBFEATURES/jobstart_secs", lException=e)
        self.log.warn("Can't open jobstart_secs, setting a default")
        jobStartSecs = self.startTime

      try:
        cpuLimit = int(urlopen(jobFeaturesPath + '/cpu_limit_secs').read())
        self.log.verbose("cpuLimit from JF = %d" % cpuLimit)
      except ValueError:
        self.log.warn("/cpu_limit_secs is unreadable")
      except IOError as e:
        self.log.exception("Issue with $JOBFEATURES/cpu_limit_secs", lException=e)
        self.log.warn('Could not determine cpu limit from $JOBFEATURES/cpu_limit_secs')

      wallClock = int(time.time()) - jobStartSecs

    # Getting info from MACHINEFEATURES
    # We are not called from TimeLeft.py if this is not set.
    machineFeaturesPath = os.environ.get('MACHINEFEATURES')
    if machineFeaturesPath is None:
      self.log.warn('$MACHINEFEATURES is not set')

    if machineFeaturesPath and jobStartSecs:
      try:
        shutdownTime = int(urlopen(machineFeaturesPath + '/shutdowntime').read())
        self.log.verbose("shutdownTime from MF = %d" % shutdownTime)
        # Bug fix: wallClockLimit may still be None here (wall_limit_secs
        # missing or unreadable); adding None to an int would raise a
        # TypeError, so only apply the shutdown cap when a limit is known.
        if wallClockLimit is not None and int(time.time()) + wallClockLimit > shutdownTime:
          # reduce wallClockLimit if it would overrun shutdownTime
          wallClockLimit = shutdownTime - jobStartSecs
      except ValueError:
        self.log.warn("/shutdowntime is unreadable")
      except IOError as e:
        self.log.warn("Issue with $MACHINEFEATURES/shutdowntime", repr(e))
        self.log.warn('Could not determine a shutdowntime value from $MACHINEFEATURES/shutdowntime')

    # Reporting
    consumed = {'CPU': None, 'CPULimit': cpuLimit, 'WallClock': wallClock, 'WallClockLimit': wallClockLimit}
    if cpuLimit and wallClock and wallClockLimit:
      self.log.verbose("MJF consumed: %s" % str(consumed))
      return S_OK(consumed)

    self.log.info('Could not determine some parameters')
    retVal = S_ERROR('Could not determine some parameters')
    # Attach whatever could be determined so callers may still make use of it.
    retVal['Value'] = consumed
    return retVal
return retVal
| yujikato/DIRAC | src/DIRAC/Resources/Computing/BatchSystems/TimeLeft/MJFResourceUsage.py | Python | gpl-3.0 | 4,360 | [
"DIRAC"
] | d779b7eed58c8179e16f9443aa3376ed9d3b0e673e2bb3b80cd55f5ffe57a76b |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of TPU helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import math
from typing import List, Optional, Text, Tuple
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu.topology import Topology
from tensorflow.python.util.tf_export import tf_export
SINGLE_CORE_ASSIGNMENT = [[[0, 0, 0, 0]]]
def _compute_task_and_cores_to_replicas(core_assignment, topology):
  """Builds a nested dict mapping (task, logical core) to sorted replica ids.

  Args:
    core_assignment: Rank-3 int array of shape
      `[num_replicas, num_cores_per_replica, topology_rank]` giving physical
      coordinates per (replica, logical core).
    topology: Object translating physical coordinates to task ordinals via
      `task_ordinal_at_coordinates`.

  Returns:
    `{task_id: {logical_core: [replica ids, ascending]}}`.
  """
  replicas_by_task_core = {}
  num_replicas = core_assignment.shape[0]
  num_cores = core_assignment.shape[1]
  for replica_id in range(num_replicas):
    for core in range(num_cores):
      coords = core_assignment[replica_id, core, :]
      task = topology.task_ordinal_at_coordinates(coords)
      # Accumulate as sets first; duplicates are impossible but sets make
      # the grouping logic uniform.
      replicas_by_task_core.setdefault(task, {}).setdefault(core, set()).add(
          replica_id)

  # Freeze each replica set into a sorted list for deterministic lookup.
  return {
      task: {core: sorted(replicas) for core, replicas in cores.items()}
      for task, cores in replicas_by_task_core.items()
  }
@tf_export("tpu.experimental.DeviceAssignment")
class DeviceAssignment(object):
  """Mapping from logical cores in a computation to the physical TPU topology.

  Prefer to use the `DeviceAssignment.build()` helper to construct a
  `DeviceAssignment`; it is easier if less flexible than constructing a
  `DeviceAssignment` directly.
  """

  def __init__(self, topology: Topology, core_assignment: np.ndarray):
    """Constructs a `DeviceAssignment` object.

    Args:
      topology: A `Topology` object that describes the physical TPU topology.
      core_assignment: A logical to physical core mapping, represented as a
        rank 3 numpy array. See the description of the `core_assignment`
        property for more details.

    Raises:
      ValueError: If `topology` is not `Topology` object.
      ValueError: If `core_assignment` is not a rank 3 numpy array.
    """
    if not isinstance(topology, Topology):
      raise ValueError("topology must be a Topology object, got {}".format(
          type(topology)))

    mapping = np.asarray(core_assignment, dtype=np.int32)
    if mapping.ndim != 3:
      raise ValueError("core_assignment must be a rank 3 numpy array, "
                       "got shape {}".format(mapping.shape))
    if mapping.shape[-1] != topology.mesh_rank:
      raise ValueError(
          "minor dimension of core_assignment must have size equal to topology "
          "rank ({}), got shape {}".format(topology.mesh_rank, mapping.shape))

    self._topology = topology
    self._core_assignment = mapping
    self._num_replicas = mapping.shape[0]
    self._num_cores_per_replica = mapping.shape[1]
    # Precompute the reverse index: task -> logical core -> replica ids.
    self._task_and_cores_to_replicas = _compute_task_and_cores_to_replicas(
        mapping, topology)

  @property
  def topology(self) -> Topology:
    """A `Topology` that describes the TPU topology."""
    return self._topology

  @property
  def num_cores_per_replica(self) -> int:
    """The number of cores per replica."""
    return self._num_cores_per_replica

  @property
  def num_replicas(self) -> int:
    """The number of replicas of the computation."""
    return self._num_replicas

  @property
  def core_assignment(self) -> np.ndarray:
    """The logical to physical core mapping.

    Returns:
      An integer numpy array of rank 3, with shape
      `[num_replicas, num_cores_per_replica, topology_rank]`. Maps
      (replica, logical core) pairs to physical topology coordinates.
    """
    return self._core_assignment

  def coordinates(self, replica: int, logical_core: int) -> Tuple:  # pylint:disable=g-bare-generic
    """Returns the physical topology coordinates of a logical core."""
    return tuple(self.core_assignment[replica, logical_core, :])

  def lookup_replicas(self, task_id: int, logical_core: int) -> List[int]:
    """Lookup replica ids by task number and logical core.

    Args:
      task_id: TensorFlow task number.
      logical_core: An integer, identifying a logical core.

    Returns:
      A sorted list of the replicas that are attached to that task and
      logical_core.

    Raises:
      ValueError: If no replica exists in the task which contains the logical
        core.
    """
    try:
      return self._task_and_cores_to_replicas[task_id][logical_core]
    except KeyError:
      raise ValueError(
          "Can not find any replica in task: {} contains logical_core: {} ".
          format(task_id, logical_core))

  def tpu_ordinal(self, replica: int = 0, logical_core: int = 0) -> int:
    """Returns the ordinal of the TPU device assigned to a logical core."""
    coords = self.coordinates(replica, logical_core)
    return self._topology.tpu_device_ordinal_at_coordinates(coords)

  def host_device(self,
                  replica: int = 0,
                  logical_core: int = 0,
                  job: Optional[Text] = None) -> Text:
    """Returns the CPU device attached to a logical core."""
    coords = self.coordinates(replica, logical_core)
    return self._topology.cpu_device_name_at_coordinates(coords, job=job)

  def tpu_device(self,
                 replica: int = 0,
                 logical_core: int = 0,
                 job: Optional[Text] = None) -> Text:
    """Returns the name of the TPU device assigned to a logical core."""
    coords = self.coordinates(replica, logical_core)
    return self._topology.tpu_device_name_at_coordinates(coords, job=job)

  @staticmethod
  def build(topology: Topology,
            computation_shape: Optional[np.ndarray] = None,
            computation_stride: Optional[np.ndarray] = None,
            num_replicas: int = 1) -> "DeviceAssignment":
    # Delegates to the module-level helper; kept as a staticmethod for the
    # public `tf.tpu.experimental.DeviceAssignment.build` API.
    return device_assignment(topology, computation_shape, computation_stride,
                             num_replicas)
def _open_ring_2d(x_size: int, y_size: int,
z_coord: int) -> List[Tuple[int, int, int]]:
"""Ring-order of a X by Y mesh, with a fixed Z coordinate.
For example, in a 4x4 mesh, this returns the following order.
0 -- 1 -- 2 -- 3
| | | |
15-- 6 -- 5 -- 4
| | | |
14-- 7 -- 8 -- 9
| | | |
13-- 12-- 11-- 10
Note that chip 0 is not included in the output.
Args:
x_size: An integer represents the mesh size in the x-dimension. Must be
larger than 1.
y_size: An integer represents the mesh size in the y-dimension. Must be
larger than 1.
z_coord: An integer represents the z-coordinate to use for the chips in the
ring.
Returns:
A list of (x,y,z) triples in ring order.
"""
ret = []
for i in range(y_size // 2):
for j in range(1, x_size):
ret.append((j, 2 * i, z_coord))
for j in range(x_size - 1, 0, -1):
ret.append((j, 2 * i + 1, z_coord))
for i in range(y_size - 1, 0, -1):
ret.append((0, i, z_coord))
return ret
def _ring_3d(x_size: int, y_size: int,
             z_size: int) -> List[Tuple[int, int, int]]:
  """Ring-order of a X by Y by Z mesh.

  Constructs the 3d ring from 2d rings that are stacked in the Z dimension and
  joined in one corner.

  For example, in a 4x4x4 mesh, this returns the following order.

  z == 0:
    0 -- 1 -- 2 -- 3
    |    |    |    |
    15 - 6 -- 5 -- 4
    |    |    |    |
    14 - 7 -- 8 -- 9
    |    |    |    |
    13 - 12 - 11 - 10
  z == 1:
    63 - 30 - 29 - 28
    |    |    |    |
    16 - 25 - 26 - 27
    |    |    |    |
    17 - 24 - 23 - 22
    |    |    |    |
    18 - 19 - 20 - 21
  z == 2:
    62 - 31 - 32 - 33
    |    |    |    |
    45 - 36 - 35 - 34
    |    |    |    |
    44 - 37 - 38 - 39
    |    |    |    |
    43 - 42 - 41 - 40
  z == 3:
    61 - 60 - 59 - 58
    |    |    |    |
    46 - 55 - 56 - 57
    |    |    |    |
    47 - 54 - 53 - 52
    |    |    |    |
    48 - 49 - 50 - 51

  Args:
    x_size: An integer represents the mesh size in the x-dimension. Must be
      larger than 1.
    y_size: An integer represents the mesh size in the y-dimension. Must be
      larger than 1.
    z_size: An integer represents the mesh size in the z-dimension. Must be
      larger than 1.

  Returns:
    A list of (x,y,z) triples in ring order.
  """

  # Handle the case where 2 dimensions are size 1: the mesh degenerates to a
  # line, which is trivially its own "ring" order.
  if x_size == 1 and y_size == 1:
    return [(0, 0, i) for i in range(z_size)]
  if x_size == 1 and z_size == 1:
    return [(0, i, 0) for i in range(y_size)]
  if y_size == 1 and z_size == 1:
    return [(i, 0, 0) for i in range(x_size)]

  # Handle odd mesh dimensions. This never happens in practice, so we don't
  # bother to try building something optimal: fall back to plain row-major
  # enumeration (no ring property).
  if (x_size > 1 and x_size % 2 != 0) or (y_size > 1 and
                                          y_size % 2 != 0) or (z_size > 1 and
                                                               z_size % 2 != 0):
    logging.warning("Odd dimension")
    ret = []
    for z in range(z_size):
      for y in range(y_size):
        ret.extend((x, y, z) for x in range(x_size))
    return ret

  # Always start with chip 0.
  ret = [(0, 0, 0)]
  # Handle the case where one dimension is size 1. We just build a flat, 2d
  # ring. When the flat plane is not the XY plane, the axes are permuted in
  # the generator expression to map the 2d ring back onto the real mesh axes.
  if z_size == 1:
    ret.extend(_open_ring_2d(x_size, y_size, 0))
    return ret
  if y_size == 1:
    ret = [(0, 0, 0)]
    ret.extend((x, y, z) for (x, z, y) in _open_ring_2d(x_size, z_size, 0))
    return ret
  if x_size == 1:
    ret = [(0, 0, 0)]
    ret.extend((x, y, z) for (y, z, x) in _open_ring_2d(y_size, z_size, 0))
    return ret

  # Handle the case where all dimensions have size > 1 and even: stack one
  # open 2d ring per z-layer, alternating traversal direction so consecutive
  # layers join at adjacent chips, then walk back down the (0, 0, *) column.
  ret = [(0, 0, 0)]
  for i in range(0, z_size):
    r = _open_ring_2d(x_size, y_size, i)
    if i % 2 == 0:
      ret.extend(r)
    else:
      ret.extend(reversed(r))
  for i in range(z_size - 1, 0, -1):
    ret.append((0, 0, i))
  return ret
class DeviceOrderMode(enum.IntEnum):
  """The way of determining device orders when computing device assignment."""
  # By default the mode is set to AUTO, the library will choose to form rings
  # when that is possible (see `device_assignment` for the exact conditions).
  AUTO = 0
  # Form rings for replicas and model-parallel cores; raises if the topology
  # does not admit a ring order.
  RING = 1
  # Form meshes for replicas and/or model-parallel cores.
  MESH = 2
def device_assignment(
    topology: Topology,
    computation_shape: Optional[np.ndarray] = None,
    computation_stride: Optional[np.ndarray] = None,
    num_replicas: int = 1,
    device_order_mode: DeviceOrderMode = DeviceOrderMode.AUTO
) -> DeviceAssignment:
  """Computes a device_assignment of a computation across a TPU topology.

  Attempts to choose a compact grid of cores for locality.

  Returns a `DeviceAssignment` that describes the cores in the topology assigned
  to each core of each replica.

  `computation_shape` and `computation_stride` values should be powers of 2 for
  optimal packing.

  Args:
    topology: A `Topology` object that describes the TPU cluster topology. To
      obtain a TPU topology, evaluate the `Tensor` returned by
      `initialize_system` using `Session.run`. Either a serialized
      `TopologyProto` or a `Topology` object may be passed. Note: you must
      evaluate the `Tensor` first; you cannot pass an unevaluated `Tensor`
      here.
    computation_shape: A rank 1 int32 numpy array with size equal to the
      topology rank, describing the shape of the computation's block of cores.
      If None, the `computation_shape` is `[1] * topology_rank`.
    computation_stride: A rank 1 int32 numpy array of size `topology_rank`,
      describing the inter-core spacing of the `computation_shape` cores in the
      TPU topology. If None, the `computation_stride` is `[1] * topology_rank`.
    num_replicas: The number of computation replicas to run. The replicas will
      be packed into the free spaces of the topology.
    device_order_mode: An enum of `DeviceOrderMode` class which indicates
      whether to assign devices to form rings or meshes, or let the library to
      choose.

  Returns:
    A DeviceAssignment object, which describes the mapping between the logical
    cores in each computation replica and the physical cores in the TPU
    topology.

  Raises:
    ValueError: If `topology` is not a valid `Topology` object.
    ValueError: If `computation_shape` or `computation_stride` are not 1D int32
      numpy arrays with shape [3] where all values are positive.
    ValueError: If computation's replicas cannot fit into the TPU topology.
  """
  # Deserialize the Topology proto, if it is a string.
  if isinstance(topology, bytes):
    topology = Topology(serialized=topology)

  if not isinstance(topology, Topology):
    raise ValueError("`topology` is not a Topology object; got {}".format(
        type(topology)))

  topology_rank = len(topology.mesh_shape)
  mesh_shape = topology.mesh_shape
  # Normalize the two optional arrays to int32 vectors of length
  # `topology_rank`, defaulting to all-ones (single core, no spacing).
  if computation_shape is None:
    computation_shape = np.array([1] * topology_rank, dtype=np.int32)
  else:
    computation_shape = np.asarray(computation_shape, dtype=np.int32)

  if computation_stride is None:
    computation_stride = np.array([1] * topology_rank, dtype=np.int32)
  else:
    computation_stride = np.asarray(computation_stride, dtype=np.int32)

  if computation_shape.shape != (topology_rank,):
    raise ValueError("computation_shape must have shape [{}]; got {}".format(
        topology_rank, computation_shape.shape))
  if computation_stride.shape != (topology_rank,):
    raise ValueError("computation_stride must have shape [{}]; got {}".format(
        topology_rank, computation_stride.shape))

  if any(computation_shape < 1):
    raise ValueError(
        "computation_shape must be positive; got computation_shape={}".format(
            computation_shape))
  if any(computation_stride < 1):
    raise ValueError(
        "computation_stride must be positive; got computation_stride={}".format(
            computation_stride))

  # Computes the physical size of one computation instance.
  computation_footprint = computation_shape * computation_stride
  if any(computation_footprint > mesh_shape):
    raise ValueError(
        "computation footprint {} does not fit in TPU topology shape {}".format(
            computation_footprint, mesh_shape))

  # Computes how many copies of the computation footprint fit in the mesh.
  block_counts = mesh_shape // computation_footprint
  replica_counts = block_counts * computation_stride
  max_replicas = np.prod(replica_counts)
  if num_replicas > max_replicas:
    raise ValueError(
        "requested {} replicas but only {} replicas with shape {} and "
        "computation_stride {} fit in a TPU mesh of shape {}".format(
            num_replicas, max_replicas, computation_shape, computation_stride,
            mesh_shape))

  def ceil_of_ratio(n, m):
    # Integer ceiling of n / m.
    return (n + m - 1) // m

  if topology.missing_devices.size == 0:
    replica_shape = [0] * topology_rank
    if num_replicas > 0:
      remaining_replicas = num_replicas
      remaining_dims = topology_rank

      # Choose dimensions as close to an equal cube as possible,
      # in order of increasing dimension size. By visiting dimensions
      # in increasing size, we assign the most constrained dimension
      # first, so we won't make infeasible choices.
      #
      # As a secondary sort order, visit the last dimension (core index) first,
      # then the other dimensions in increasing order. This means we try to use
      # both cores on the same chip in preference to two cores on different
      # chips. We visit the x dimension first, and the z dimension last, so
      # that we prefer to arrange adjacent replicas on the same machine when
      # possible.
      #
      # For example, if num_replicas == 4, we prefer to use a replica_shape of
      # (2,1,1,2) over (1,1,2,2).

      for x, ni in sorted(((x, ((i + 1) % topology_rank))
                           for (i, x) in enumerate(replica_counts))):
        # Undo the +1 rotation applied in the sort key to recover the real
        # dimension index.
        i = (ni + topology_rank - 1) % topology_rank
        target_size = int(math.ceil(remaining_replicas**(1.0 / remaining_dims)))
        replica_shape[i] = min(target_size, x)
        remaining_replicas = ceil_of_ratio(remaining_replicas, replica_shape[i])
        remaining_dims -= 1

      assert remaining_replicas == 1 and remaining_dims == 0

    # Assigns an offset to each replica such that no two replicas overlap.
    replica_offsets = np.full([num_replicas, topology_rank], -1, dtype=np.int32)

    # 3D tiling (ring order) is only possible for a full, unstrided
    # replication of a 4-rank topology where each replica spans all cores of
    # its chips.
    enable_3d_tiling = (
        topology_rank == 4 and
        computation_shape[-1] == mesh_shape[-1]  # Only handle 3D case.
        and np.prod(computation_stride) == 1  # Ensure no stride.
        and num_replicas == max_replicas)  # Full replication.

    if device_order_mode != DeviceOrderMode.AUTO:
      if device_order_mode == DeviceOrderMode.RING and not enable_3d_tiling:
        raise ValueError("cannot assign ring order in the given topology")
      enable_3d_tiling = device_order_mode == DeviceOrderMode.RING

    if enable_3d_tiling:
      assignment = []
      inner_ring = _ring_3d(computation_shape[0], computation_shape[1],
                            computation_shape[2])
      outer_ring = _ring_3d(replica_shape[0], replica_shape[1],
                            replica_shape[2])

      for replica in xrange(num_replicas):
        # Outer ring chooses the replica's block; inner ring orders the cores
        # within that block.
        outer_x, outer_y, outer_z = outer_ring[replica]
        per_replica_assignment = []
        for index in xrange(np.prod(computation_shape)):
          inner_x, inner_y, inner_z = inner_ring[index // mesh_shape[-1]]
          px = outer_x * computation_shape[0] + inner_x
          py = outer_y * computation_shape[1] + inner_y
          pz = outer_z * computation_shape[2] + inner_z
          pi = index % mesh_shape[-1]
          per_replica_assignment.append([px, py, pz, pi])
        assignment.append(per_replica_assignment)
    else:
      for replica in xrange(num_replicas):
        # Chooses a replica number in each axis.
        t = replica
        pos = []
        # Visit the core number first.
        for dim in np.concatenate([[replica_shape[-1]], replica_shape[:-1]]):
          pos.append(t % dim)
          t //= dim
        replica_pos = np.concatenate([pos[1:], [pos[0]]])

        # Determines where that replica starts in each axis.
        outer = replica_pos // computation_stride
        inner = replica_pos % computation_stride
        replica_offsets[replica, :] = outer * computation_footprint + inner

      # Computes a logical core -> physical core mapping for each replica.
      indices = [
          np.arange(0, computation_shape[i] * computation_stride[i],
                    computation_stride[i]) for i in range(topology_rank)
      ]
      indices = np.concatenate(
          [i[..., np.newaxis] for i in np.meshgrid(*indices, indexing="ij")],
          axis=-1)
      indices = indices.reshape((-1, topology_rank))
      assignment = indices + replica_offsets[:, np.newaxis, :]
  else:
    # We have a slice with missing chips. We define a simple assignment by
    # ignoring computation stride. This assignment should enable a consistent
    # and correct device assignment on degraded slices. It is optimal when
    # weights are not sharded. But this device assignment may be sub-optimal
    # for other model parallelism scenarios.
    assert np.prod(computation_stride) == 1
    # Next, we check if we have sufficient devices.
    assert num_replicas * np.prod(
        computation_shape) <= topology.num_tasks * topology.num_tpus_per_task
    # Map replicas to physical devices in task order.
    device_coordinates = topology.device_coordinates
    assignment = []
    devices_per_replica = np.prod(computation_shape)
    for rindex in xrange(num_replicas):
      replica_assignment = []
      for index in xrange(devices_per_replica):
        logical_id = rindex * devices_per_replica + index
        # Pick logical cores in task order
        task = logical_id // topology.num_tpus_per_task
        device = logical_id % topology.num_tpus_per_task
        # Append physical cores to the replica assignment
        replica_assignment.append(device_coordinates[task, device, :])
      assignment.append(replica_assignment)

  return DeviceAssignment(topology, core_assignment=assignment)
| sarvex/tensorflow | tensorflow/python/tpu/device_assignment.py | Python | apache-2.0 | 21,430 | [
"VisIt"
] | 2fec1395eadfe6feb7b7d41621f846bc55f322455e2ebf794f36905bf4083da1 |
#!/usr/bin/env python
"""
Extract alignment statistics from a SAM/BAM file.
Adapted from the Celloline stats script
available at: https://github.com/Teichlab/celloline/blob/master/lib/stats.py
"""
import os
import sys
import re
import argparse
import pysam
import logging
import cPickle as pickle
from collections import Counter, defaultdict, OrderedDict
from intervaltree import IntervalTree
from joblib import Parallel, delayed
#LOAD GTF FILE
def load_gtf(gtf_path):
    """
    Load a GTF annotation and create an index using IntervalTrees.

    Args:
        gtf_path: Path to the GTF file to load.

    Returns:
        Dict mapping chromosome/contig name (GTF column 1) to an
        IntervalTree whose intervals span [start, end) and carry a
        [feature, start, end, strand, gene_id] payload.
    """
    # Bug fix: defaultdict() without a factory behaves like a plain dict,
    # which forced the get-or-create dance below; give it IntervalTree so
    # per-chromosome trees are created on demand.
    gtf_index = defaultdict(IntervalTree)
    with open(gtf_path) as gtf_file:
        for line in gtf_file:
            # Skip header / comment lines.
            if line.startswith("#"):
                continue
            entry = line.split("\t")
            # Column 9 holds attributes; the first one is 'gene_id "<id>"'.
            # Keep the raw second token (still quoted), exactly as before.
            first_attribute = entry[8].split(";")[0].split(" ")
            gene_id = first_attribute[1]
            feature = entry[2]
            # TYPE(Gene, exon etc.), START, END, STRAND, gene_ID
            info = [feature, entry[3], entry[4], entry[6], gene_id]
            # IntervalTree rejects empty intervals, so skip zero-length
            # entries (start == end).
            if feature != "" and entry[3] != entry[4]:
                gtf_index[entry[0]].addi(int(info[1]), int(info[2]), info)
    return gtf_index
def gen_stats(input_file, input_type, sample_name, gtf_dict):
    """
    Generate alignment stats from a SAM/BAM file.

    Loop over alignments in a SAM/BAM file and extract statistics such as the
    numer of reads aligned to introns, exons, intergenic regions etc.

    Args:
        input_file: An open BAM or SAM file.
        input_type: Whether the file is 'bam' or 'sam'.
        sample_name: A name relating to this file.
        gtf_dict: Dictionary containing GTF index.

    Returns:
        Dictionary containing alignment statistics.
    """
    logger = logging.getLogger("stats." + sample_name[0:10])
    # OUTPUT TABLE CONTAING STATS (ordered so get_stats_line emits stable CSV)
    output_table = OrderedDict()
    # Dict indicating to which genes a specific read maps to
    # It is a temporary dict
    # NOTE(review): the str default factory is never exercised -- membership
    # is checked before every access and values are lists, not strings.
    exonic_mappings_temp = defaultdict(str)
    # Dict indicating which read is multi-mapped
    # It is a temporary dict
    exonic_multi_table = defaultdict(str)
    # Sample
    output_table["sample"] = sample_name
    # MAPPABILITY counters
    output_table["total"] = 0
    output_table["mapped"] = 0
    output_table["unmapped"] = 0
    output_table["unique"] = 0
    output_table["multi"] = 0
    # CODING VERSUS NON-CODING REGIONS
    output_table["intergenic"] = 0
    output_table["intragenic"] = 0
    output_table["exonic"] = 0
    output_table["intronic"] = 0
    output_table["ambigious"] = 0
    # CODING REGIONS MAPPABILITY (unique vs multi-mapped exonic reads)
    output_table["exonicU"] = 0
    output_table["exonicM"] = 0
    # ALIGNMENT CODING VS NONCODING (per-alignment, not per-read, counters)
    output_table["alignments"] = 0
    output_table["multi-intergenic"] = 0
    output_table["multi-intragenic"] = 0
    output_table["multi-exonic"] = 0
    output_table["multi-intronic"] = 0
    output_table["multi-ambigious"] = 0
    # ERROR profile buckets derived from the CIGAR string
    output_table["perfect"] = 0
    output_table["partly_perfect"] = 0
    output_table["mapped_no_correct"] = 0
    # Soft-clip histogram: S_0 .. S_9 plus an overflow bucket.
    for i in range(0, 10):
        output_table["S_" + str(i)] = 0
    output_table["S_10+"] = 0
    output_table["I"] = 0
    output_table["D"] = 0
    output_table["INDEL"] = 0
    # Tracks how many times each read (name + mate flag) has been seen.
    reads = Counter()
    if input_type == "bam":
        # pysam stores reference ids; keep the id -> name map before the
        # handle is replaced by the fetch iterator.
        ref_map = input_file.references
        input_file = input_file.fetch(until_eof=True)
    line_count = 0
    for line in input_file:
        line_count += 1
        if input_type == "bam":  # BAM input line
            split = str(line).split("\t")
            # Translate numeric reference id to its name and convert the
            # 0-based pysam position to 1-based (GTF coordinates).
            split[2] = ref_map[int(split[2])]
            split[3] = int(split[3]) + 1
        elif not line.startswith("@"):  # SAM input line
            split = line.split("\t")
        else:
            # SAM header line -- skip.
            continue
        read_name = split[0]
        flag_code = int(split[1])
        chrom = split[2]
        pos = split[3]
        errors = split[5]
        errors_a = list(errors)
        number = ""
        num = 0
        error_table = defaultdict(int)
        name_and_flag = read_name
        # CHECK IF READ MAPPED OR UNMAPPED
        # IT US UNMAPPED
        if flag_code & 0x0004 != 0:
            output_table["unmapped"] += 1
            output_table["total"] += 1
            # Sentinel so the CIGAR classification below can recognise
            # unmapped reads (their CIGAR is "*").
            error_table["*"] += 1
        # IT IS MAPPED
        else:
            if flag_code & 0x0001 != 0:  # This is paired end
                # Distinguish the two mates so they are counted separately.
                if flag_code & 0x0040 != 0:  # 1st read
                    name_and_flag += ";first"
                if flag_code & 0x0080 != 0:  # 2nd read
                    name_and_flag += ";second"
            # CHECK TO WHICH GENE(S) IT MAPPED TO
            genes_info, num_genes, num_exons = get_gene(gtf_dict, [chrom, pos])
            output_table["alignments"] += 1.0
            # STATS: first time we see this read -> counted as unique so far
            if name_and_flag not in reads:
                reads[name_and_flag] += 1
                output_table["unique"] += 1
                output_table["total"] += 1
                output_table["mapped"] += 1
                if num_genes == 0:
                    output_table["intergenic"] += 1
                elif num_genes == 1:
                    output_table["intragenic"] += 1
                    if num_exons == 0:
                        output_table["intronic"] += 1
                    else:
                        output_table["exonic"] += 1
                        output_table["exonicU"] += 1
                        exons = []
                        if name_and_flag in exonic_mappings_temp:
                            exons = exonic_mappings_temp[name_and_flag]
                        exons.append([genes_info[0], chrom, pos])
                        exonic_mappings_temp[name_and_flag] = exons
                elif num_genes > 1:
                    output_table["ambigious"] += 1
            # READ IS MULTI-MAPPED
            else:
                # Second sighting: reclassify from unique to multi.
                # NOTE(review): exonicU is decremented even when the first
                # alignment of this read was not exonic -- confirm against
                # the upstream celloline implementation.
                if reads[name_and_flag] == 1:
                    output_table["unique"] -= 1
                    output_table["exonicU"] -= 1
                    output_table["multi"] += 1
                reads[name_and_flag] += 1
                exons = []
                # GET KNOWLEDGE IF FIRST MAPPING EXONIC OR INTRONIC
                if name_and_flag in exonic_mappings_temp:
                    exons = exonic_mappings_temp[name_and_flag]
                if num_genes == 0:
                    output_table["multi-intergenic"] += (1)
                elif num_genes == 1:
                    output_table["multi-intragenic"] += (1)
                    if num_exons == 0:
                        output_table["multi-intronic"] += (1)
                    else:
                        output_table["multi-exonic"] += (1)
                        exons.append([genes_info[0], chrom, pos])
                elif num_genes > 1:
                    output_table["multi-ambigious"] += (1)
                # IF AT LEAST ONE EXONIC ALIGNMENT
                if len(exons) > 0:
                    exonic_multi_table[name_and_flag] = exons
        # PARSE MAPPING ERRORS: accumulate CIGAR operation lengths, e.g.
        # "8S92M" -> {"S": 8, "M": 92}. Unmapped reads have CIGAR "*",
        # which matches neither branch and leaves only the "*" sentinel.
        for i in errors_a:
            if re.match("[0-9]", i):
                number += (i)
            elif re.match("[A-Z]", i):
                num = int(number)
                error_table[i] += num
                number = ""
        # TABLE OF HOW MANY READS MAP PERFECT, PARTLY PERFECT ETC
        if "M" in error_table and len(error_table) == 1:
            output_table["perfect"] += 1
        elif "M" in error_table and len(error_table) > 1:
            output_table["partly_perfect"] += 1
        elif "M" not in error_table and "*" not in error_table:
            output_table["mapped_no_correct"] += 1
        # Soft-clipping histogram (bucketed at 10+).
        if "S" in error_table:
            if int(error_table["S"]) < 10:
                output_table["S_" + str(error_table["S"])] += 1
            else:
                output_table["S_10+"] += 1
        elif "S" not in error_table:
            output_table["S_0"] += 1
        if "I" in error_table:
            output_table["I"] += 1
        if "D" in error_table:
            output_table["D"] += 1
        if "I" in error_table or "D" in error_table:
            output_table["INDEL"] += 1
        # Progress logging for very large inputs.
        if (line_count % 1000000) == 0:
            logger.debug(sample_name + " line " + str(line_count) + "...")
    # Number of distinct reads with at least one exonic multi-mapping.
    output_table["exonicM"] = len(exonic_multi_table.keys())
    return output_table
def get_stats_line(stats_table):
    """
    Get an output line from a stats table.

    Percentage statistics are computed on the fly: per-read categories are
    divided by the number of mapped reads, per-alignment ("multi-")
    categories by the number of alignments.

    Args:
        stats_table: Dictionary of alignment statistics. Must contain the
            keys "mapped" and "alignments"; iteration order of the keys
            determines column order, so the same dictionary layout must be
            used for every sample (write_stats derives the header from it).

    Returns:
        String representing the results for one file: comma separated,
        newline terminated.
    """
    logger = logging.getLogger("stats.extract")
    # Collect fields and join once at the end.  The original code prepended
    # "," to every field except the one named "sample", which silently
    # produced misaligned output whenever "sample" was not the first key.
    fields = []
    # .items() instead of .iteritems() keeps this working on Python 2 and 3.
    for stat, value in stats_table.items():
        if stat in ["unique", "multi", "intragenic", "intergenic",
                    "exonic", "intronic", "ambigious", "exonicM", "exonicU"]:
            # Read-level categories: percentage of mapped reads.
            value = (value + 0.0) / (stats_table["mapped"] + 0.0)
            value = "%.2f" % (100.0 * (value))
        elif stat in ["multi-intragenic", "multi-intergenic", "multi-exonic",
                      "multi-intronic", "multi-ambigious"]:
            # Alignment-level categories: percentage of all alignments;
            # guard against division by zero when nothing aligned.
            value = (value + 0.0)
            if stats_table["alignments"] != 0:
                value = value / (stats_table["alignments"] + 0.0)
            value = "%.2f" % (100.0 * (value))
        value = str(value)
        fields.append(value)
        logger.debug(stat + " : " + value)
    return ",".join(fields) + "\n"
def write_stats(output_path, stats_list):
    """
    Write a series of results to a file.

    The column header is taken from the keys of the first results table, so
    every table in the list is expected to share the same layout.

    Args:
        output_path: Path to write results to.
        stats_list: List of dictionaries containing results from input files.
    """
    header = ",".join(stats_list[0].keys()) + "\n"
    body = [get_stats_line(table) for table in stats_list]
    with open(output_path, "w") as out_file:
        out_file.writelines([header] + body)
def get_gene(gtf_dict, pos_pair):
    """
    Identify which genes overlap a given position.

    Args:
        gtf_dict: Dictionary containing GTF index, mapping chromosome names
            to interval structures exposing a ``search(pos)`` method whose
            hits carry the annotation info at index 2.
        pos_pair: Tuple containing genomic position (chrom, pos).

    Returns:
        Tuple containing the list of overlapping genes, the number of
        overlapping genes and the number of overlapping exons.
    """
    chrom = pos_pair[0]
    if chrom not in gtf_dict:
        # Position on a chromosome absent from the annotation: no overlaps.
        return ([], 0, 0)
    gene_list = []
    num_genes = 0
    num_exons = 0
    for hit in gtf_dict[chrom].search(int(pos_pair[1])):
        info = hit[2]
        feature = info[0]
        if feature == "gene":
            gene_list.append(info)
            num_genes += 1
        elif feature == "exon":
            num_exons += 1
    return (gene_list, num_genes, num_exons)
def process_file(input_file, input_type, index, is_parallel):
    """
    Process an individual SAM/BAM file.

    How we want to process the file depends on the input type and whether we
    are operating in parallel. If in parallel the index must be loaded for each
    input file. If the input is a BAM file it needs to be read using Pysam, if
    SAM it can be read directly as a text file.

    Args:
        input_file: Path to the input file.
        input_type: Whether the file is 'bam' or 'sam'.
        index: If operating in parallel a string to the index file, if not the
            loaded GTF index dictionary.
        is_parallel: Whether to operate in parallel.

    Returns:
        Dictionary containing alignment statistics for the input file.
    """
    sample_name = input_file.split("/")[-1]
    # Child logger named after the sample so log lines are attributable.
    logger = logging.getLogger("stats." + sample_name[0:10])
    logger.info("Processing " + sample_name + "...")
    if is_parallel:
        # Workers receive the index *path* and each unpickle their own copy;
        # the dictionary itself is not shared across processes.
        logger.info("Loading index...")
        with open(index, "rb") as index_file:
            loaded_index = pickle.load(index_file)
        logger.info("Loaded.")
    else:
        loaded_index = index
    if input_type == "sam":
        logger.info("Parsing SAM file...")
        with open(input_file) as sam:
            output_table = gen_stats(sam, input_type, sample_name, loaded_index)
    elif input_type == "bam":
        logger.info("Parsing BAM file...")
        # NOTE(review): the AlignmentFile is never closed explicitly;
        # presumably relying on garbage collection -- confirm.
        bam = pysam.AlignmentFile(input_file, "rb")
        output_table = gen_stats(bam, input_type, sample_name, loaded_index)
    logger.info("Finished " + sample_name)
    return output_table
def get_index(args):
    """
    Load a GTF index if available or create from GTF file if not found.

    If a valid path to an index file is given that file will be loaded. If no
    index file was specified or the file does not exist the annotation will be
    read from a GTF file. It will then be pickled if an index file is specified.
    When running in parallel the path to the index file is returned rather than
    the index dictionary itself.

    Args:
        args: Options from the command line.

    Returns:
        Dictionary containing GTF index or path to index file if in parallel.
    """
    logger = logging.getLogger("stats.index")
    if args.index and os.path.isfile(args.index):
        logger.info("Index found at " + args.index)
        if not args.is_parallel:
            logger.info("Loading index...")
            with open(args.index, "rb") as index_file:
                index = pickle.load(index_file)
            logger.info("Loaded.")
        else:
            # Parallel workers unpickle the file themselves (see process_file).
            index = args.index
    elif args.gtf and os.path.isfile(args.gtf):
        logger.info("No index file found.")
        logger.info("Loading GTF file...")
        gtf_dict = load_gtf(args.gtf)
        logger.info("Loaded.")
        if args.index:
            # Persist the parsed annotation so later runs can skip the GTF.
            # Protocol -1 selects the highest pickle protocol available.
            logger.info("Saving index to " + args.index + "...")
            with open(args.index, "wb") as index_file:
                pickle.dump(gtf_dict, index_file, -1)
            logger.info("Saved.")
        if not args.is_parallel:
            index = gtf_dict
        else:
            index = args.index
    # NOTE(review): if neither branch is taken 'index' is unbound; get_args
    # is expected to have validated that one of the files exists -- confirm.
    return index
def get_args():
    """
    Read arguments from the command line and check they are valid.

    Returns:
        Parsed argparse namespace with an extra ``is_parallel`` boolean.
        Exits the process on inconsistent options: non-positive job count,
        parallel mode without an index, or neither GTF nor index available.
    """
    logger = logging.getLogger("stats.args")
    parser = argparse.ArgumentParser(
        description="Extract alignment statistics from a SAM/BAM file")
    parser.add_argument("inputs",
                        metavar="SAM/BAM",
                        nargs="+",
                        help="Input SAM or BAM files")
    parser.add_argument("-o", "--out",
                        help="Output file",
                        required=True)
    parser.add_argument("-g", "--gtf",
                        help="GTF annotation file")
    parser.add_argument("-i", "--index",
                        help="""Annotation index file. Required when
                        operating in parallel.""")
    parser.add_argument("-t", "--type",
                        choices=["sam", "bam"],
                        help="Type of input file",
                        required=True)
    parser.add_argument("-p", "--parallel",
                        type=int,
                        default=1,
                        help="""Number of files to process in parallel.
                        Requires N + 1 threads if greater than 1.""")
    args = parser.parse_args()
    args.is_parallel = False
    if args.parallel < 1:
        logger.error("Number of parallel files must be positive")
        sys.exit()
    elif args.parallel > 1:
        args.is_parallel = True
        logger.info("Running with " + str(args.parallel) + " jobs")
    # Parallel workers each unpickle the index themselves, so a path to an
    # index file is mandatory in that mode.
    if args.is_parallel and not args.index:
        logger.error("Index file is required when running in parallel.")
        sys.exit()
    # At least one annotation source (prebuilt index or GTF) must exist.
    if not (args.index and os.path.isfile(args.index)):
        if not (args.gtf and os.path.isfile(args.gtf)):
            logger.error("No GTF or index file found.")
            sys.exit()
    return args
def setup_logging():
    """
    Setup logging system.

    Log is written to 'alignmentStats.log'.

    The "stats" logger accepts everything (DEBUG); the log file keeps INFO
    and above with full timestamps while the console shows DEBUG and above
    with a short time-only format.
    """
    logger = logging.getLogger("stats")
    logger.setLevel(logging.DEBUG)
    # File handler: INFO+, long timestamp with level and logger name.
    file_handler = logging.FileHandler('alignmentStats.log')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(logging.Formatter(
        "[%(asctime)s] %(levelname)s %(name)s: %(message)s",
        "%Y-%m-%d %H:%M:%S"))
    # Console handler: DEBUG+, compact time-only prefix.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.DEBUG)
    console_handler.setFormatter(
        logging.Formatter("[%(asctime)s] %(message)s", "%H:%M:%S"))
    logger.addHandler(console_handler)
    logger.addHandler(file_handler)
def main():
    """
    Main function.

    1. Setup logging
    2. Get arguments
    3. Get index
    4. Process files
    5. Write output
    """
    setup_logging()
    logger = logging.getLogger("stats." + __name__)
    args = get_args()
    index = get_index(args)
    logger.warning("Positions not in annotation will be ignored.")
    logger.info("Found " + str(len(args.inputs)) + " input file(s):")
    for input_file in sorted(args.inputs):
        logger.debug(input_file)
    if args.is_parallel:
        # joblib fan-out: each worker receives the index *path* and loads
        # its own copy (see process_file); batch_size=1 keeps one file per
        # task so progress reporting (verbose=100) is per-file.
        stats = Parallel(n_jobs=args.parallel,
                         verbose=100,
                         batch_size=1)(delayed(process_file)(input_file,
                                                             args.type,
                                                             index,
                                                             args.is_parallel)
                                       for input_file in args.inputs)
    else:
        # Serial path: reuse the single loaded index for every file.
        stats = []
        for input_file in args.inputs:
            output_table = process_file(input_file, args.type, index,
                                        args.is_parallel)
            stats.append(output_table)
    write_stats(args.out, stats)
# Script entry point: run the full pipeline only when executed directly.
if __name__ == "__main__":
    main()
| lazappi/binf-scripts | alignStats.py | Python | mit | 18,860 | [
"pysam"
] | 5597529dcdc22f4eddd06f1a030d701c7824f13e1f1df7bb38f5bf827ffe2ef6 |
#!/usr/local/bin/python3.4.2
# ----Copyright (c) 2016 Carnegie Hall | The MIT License (MIT)----
# ----For the full license terms, please visit https://github.com/CarnegieHall/linked-data/blob/master/LICENSE----
## Argument[0] is script to run
## Argument[1] is path to Venues CSV
## Argument[2] is path to entityDict
## Argument[3] is path to worksDict
## Argument[4] is path to Events CSV -- if more CSVs needed, add as additional args
import csv
import json
import datetime as dt
from pytz import timezone
import pytz
import rdflib
from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import DCTERMS, RDF, RDFS, SKOS, XSD
from rdflib.plugins.serializers.nt import NTSerializer
import re
import os
import sys
utc = pytz.timezone("UTC")
eastern = timezone('US/Eastern')


class Event(object):
    """A performance event occurring on a given date and time (US/Eastern)."""

    def __init__(self, date, time):
        self.date = date  # e.g. "3/15/1891" (leading zeros already stripped)
        self.time = time  # e.g. "8:00 PM"

    def create_event_dateTime(self):
        """Return the event's date/time as an ISO 8601 string in US/Eastern.

        Bug fix: the original implementation read the module-level globals
        ``date`` and ``time`` instead of the instance attributes, so it only
        worked by accident while the CSV loop happened to have identically
        named globals in scope.
        """
        dateTimeString = ' '.join([self.date, self.time])
        # localize() attaches the zone to the naive parsed datetime,
        # handling historical US/Eastern UTC offsets correctly.
        isoDateTime = eastern.localize(
            dt.datetime.strptime(dateTimeString, '%m/%d/%Y %I:%M %p')).isoformat()
        return isoDateTime
# Lookup tables built during parsing: venue id -> URI, event id -> metadata.
venueDict = {}
eventDict = {}
# Separate graphs for venue triples and event triples.
gEvents = Graph()
gVenues = Graph()
# Namespaces used throughout the conversion.
chvenues = Namespace('http://data.carnegiehall.org/venues/')
chevents = Namespace('http://data.carnegiehall.org/events/')
dbo = Namespace('http://dbpedia.org/ontology/')
ecrm = Namespace('http://erlangen-crm.org/160714/')
geonames = Namespace('http://www.geonames.org/ontology#')
event = Namespace('http://purl.org/NET/c4dm/event.owl#')
mo = Namespace ('http://purl.org/ontology/mo/')
schema = Namespace('http://schema.org/')
# Carnegie Hall itself (venue id 96397) with its GeoNames counterparts.
ch = chvenues['96397']
geonamesCH = URIRef('http://sws.geonames.org/5111573/')
geonames_chCinema = URIRef('http://sws.geonames.org/7255414/')
# Base triples for the building as a whole.
gVenues.add( (ch, RDF.type, schema.EventVenue ) )
gVenues.add( (ch, RDF.type, dbo.ArchitecturalStructure ) )
gVenues.add( (ch, SKOS.exactMatch, geonamesCH))
gVenues.add( (ch, RDFS.label, Literal('Carnegie Hall', lang='en') ) )
filePath_1 = sys.argv[1]
filePath_2 = sys.argv[2]
filePath_3 = sys.argv[3]

# Former names of each venue, keyed by venue code.  CIN is handled
# separately below because it also carries extra GeoNames triples.
OLD_VENUE_NAMES = {
    'ISA': 'Main Hall',
    'ZH': 'Carnegie Hall Cinema',
    'CHPL': 'Carnegie Lyceum',
    'CL': 'Recital Hall',
    'WRH': 'Carnegie Recital Hall',
    'CRH': 'Chamber Music Hall',
}

with open(filePath_1, 'rU') as f1:
    venues = csv.reader(f1, dialect='excel', delimiter=',', quotechar='"')
    for row in venues:
        # Column layout: venue id, venue code, name, notes.
        venue_id = row[0]
        venue_uri = chvenues[str(venue_id)]
        venue_code = row[1]
        venue_name = row[2]
        venue_notes = row[3]
        # Bug fix: the two statements below originally carried one extra
        # closing parenthesis each, which made the whole script a
        # SyntaxError.
        gVenues.add( (URIRef(venue_uri), RDF.type, schema.EventVenue) )
        gVenues.add( (URIRef(venue_uri), RDF.type, dbo.Venue) )
        gVenues.add( (URIRef(venue_uri), RDFS.label, Literal(venue_name, lang='en') ) )
        gVenues.add( (URIRef(venue_uri), geonames.parentFeature, URIRef(ch) ) )
        gVenues.add( (URIRef(ch), schema.containsPlace, URIRef(venue_uri) ) )
        if venue_notes:
            gVenues.add( (URIRef(venue_uri), RDFS.comment, Literal(venue_notes, lang='en') ) )
        # Historical names, formerly a chain of near-identical if blocks.
        if venue_code in OLD_VENUE_NAMES:
            gVenues.add( (URIRef(venue_uri), geonames.historicalName,
                          Literal(OLD_VENUE_NAMES[venue_code], lang='en') ) )
        if venue_code == 'CIN':
            gVenues.add( (URIRef(venue_uri), SKOS.exactMatch, geonames_chCinema))
            gVenues.add( (URIRef(venue_uri), geonames.parentFeature, ch ) )
            gVenues.add( (URIRef(venue_uri), geonames.historicalName, Literal('Carnegie Hall Playhouse', lang='en') ) )
        venueDict[str(venue_id)] = venue_uri
with open(filePath_2, 'rU') as f2:
    # entityDict: maps OPAS entity id -> {"uri": ...} for people/ensembles.
    entities = json.load(f2)
for item in sys.argv[4:]:
    ## Blank lists of various IDs to check for repeat rows while iterating each CSV
    idList = []
    work_idList = []
    soloist_idList = []
    programList = []
    work_perfDict = {}
    with open(item, 'rU') as csvfile:
        events = csv.reader(csvfile, dialect='excel', delimiter=',', quotechar='"')
        for row in events:
            # Column layout: event id, date, time, venue id, title,
            # orchestra id, conductor id, work id, soloist id, program order.
            event_id = row[0]
            date = row[1].lstrip('0')
            time = row[2]
            venue_id = row[3]
            venue_uri = venueDict[str(venue_id)]
            event_uri = chevents[str(event_id)]
            event_title = row[4]
            orchestra_id = row[5]
            orchestra_uri = ''
            if orchestra_id != '0':  # '0' is the "not assigned" sentinel
                orchestra_uri = entities[str(orchestra_id)]['uri']
            conductor_id = row[6]
            conductor_uri = ''
            if conductor_id != '0':
                conductor_uri = entities[str(conductor_id)]['uri']
            work_id = row[7]
            soloist_id = row[8]
            work_order = row[9]
            new_event = Event(date, time)
            isoDateTime = new_event.create_event_dateTime()
            if event_id not in idList:
                # First row for this event: emit its base triples once.
                idList.append(event_id)
                gEvents.add( (URIRef(event_uri), RDF.type, event.Event) )
                gEvents.add( (URIRef(event_uri), RDF.type, ecrm.E7_Activity) )
                gEvents.add( (URIRef(event_uri), RDF.type, schema.Event) )
                gEvents.add( (URIRef(event_uri), RDFS.label, Literal(event_title, lang='en') ) )
                gEvents.add( (URIRef(event_uri), DCTERMS.date, Literal(isoDateTime, datatype=XSD.dateTime) ) )
                gEvents.add( (URIRef(event_uri), event.place, URIRef(venue_uri)) )
                if conductor_uri:
                    gEvents.add( (URIRef(event_uri), mo.conductor, URIRef(conductor_uri)) )
                if orchestra_uri:
                    gEvents.add( (URIRef(event_uri), mo.performer, URIRef(orchestra_uri)) )
                # Start fresh per-event bookkeeping for works and soloists.
                work_idList = []
                work_idList.append(work_id)
                soloist_idList = []
                soloist_idList.append(soloist_id)
                work_perfDict = {}
                work_perfDict['soloists'] = soloist_idList
                work_perfDict['order'] = work_order
                eventDict[str(event_id)] = {}
                eventDict[str(event_id)]['isoDateTime'] = isoDateTime
                eventDict[str(event_id)]['venue id'] = venue_id
                eventDict[str(event_id)]['title'] = event_title
                eventDict[str(event_id)]['orchestra id'] = orchestra_id
                eventDict[str(event_id)]['conductor id'] = conductor_id
                eventDict[str(event_id)][str(work_id)] = work_perfDict
                eventDict[str(event_id)]['uri'] = event_uri
            else:
                if work_id not in work_idList:
                    # New work on an already-seen event.
                    work_idList.append(work_id)
                    soloist_idList = []
                    soloist_idList.append(soloist_id)
                    work_perfDict = {}
                    work_perfDict['soloists'] = soloist_idList
                    work_perfDict['order'] = work_order
                    eventDict[str(event_id)][str(work_id)] = work_perfDict
                else:
                    # Same event and work: row only adds another soloist.
                    eventDict[str(event_id)][str(work_id)]['soloists'].append(soloist_id)
# Each work on an event is represented by a subdict containing its program order and list of soloists
# These workDicts are in turn represented by a key (key=work_id) in the subdict for that event
# Works are related via the work_performance URI>event.Product>work URI
# Solists are related via work_performance URI>mo.performer>entity URI
# Create list of work IDs for 'No program' and 'Soloists not assigned'
placeHolders = ['3319', '10862']
with open(filePath_3, 'rU') as f3:
    # worksDict: maps OPAS work id -> {"uri": ...}
    works = json.load(f3)
    for key in eventDict:
        event_uri = eventDict[key]['uri']
        for item in eventDict[key].keys():
            # Numeric keys of the event subdict are work ids; the other keys
            # ('uri', 'title', ...) are event metadata and are skipped.
            if (item.isdigit() and item != '0'):
                programOrder = eventDict[key][str(item)]['order']
                work_performance = URIRef(value=''.join([event_uri, '/work_', str(programOrder).zfill(2)]))
                soloists = eventDict[key][str(item)]['soloists']
                work_uri = works[str(item)]['uri']
                if item not in placeHolders:
                    gEvents.add( (URIRef(event_uri), event.product, URIRef(work_performance)) )
                    gEvents.add( (work_performance, RDF.type, event.Product) )
                    gEvents.add( (work_performance, RDF.type, schema.subEvent) )
                    gEvents.add( (work_performance, event.product, URIRef(work_uri)) )
                    if '0' not in soloists:
                        for soloist in soloists:
                            soloist_uri = entities[str(soloist)]['uri']
                            gEvents.add( (work_performance, mo.performer, URIRef(soloist_uri)) )
                else:
                    # Placeholder works: attach soloists directly to the event.
                    if '0' not in soloists:
                        for soloist in soloists:
                            soloist_uri = entities[str(soloist)]['uri']
                            gEvents.add( (URIRef(event_uri), mo.performer, URIRef(soloist_uri)) )
# Output locations, relative to the repository layout.
event_dict_path = os.path.join(os.path.dirname(__file__), os.pardir, 'JSON_dicts', 'eventDict.json')
event_graph_path = os.path.join(os.path.dirname(__file__), os.pardir, 'Graphs', 'eventGraph.nt')
venue_dict_path = os.path.join(os.path.dirname(__file__), os.pardir, 'JSON_dicts', 'venueDict.json')
venue_graph_path = os.path.join(os.path.dirname(__file__), os.pardir, 'Graphs', 'venueGraph.nt')
# Bind prefixes for readability of serialized output.
gVenues.bind("geonames", geonames)
gVenues.bind("skos", SKOS)
gVenues.bind("rdfs", RDFS)
gEvents.bind("chevents", chevents)
gEvents.bind("chvenues", chvenues)
gEvents.bind("dcterms", DCTERMS)
gEvents.bind("event", event)
gEvents.bind("mo", mo)
gEvents.bind("rdf", RDF)
gEvents.bind("rdfs", RDFS)
# NOTE(review): serialize() rebinds gVenues/gEvents away from the Graph
# objects; harmless here since the script ends immediately afterwards.
gVenues = gVenues.serialize(destination=venue_graph_path, format='nt')
gEvents = gEvents.serialize(destination=event_graph_path, format='nt')
with open(venue_dict_path, 'w') as f5:
    json.dump(venueDict, f5)
with open(event_dict_path, 'w') as f6:
    json.dump(eventDict, f6)
print("Finished with Venues and Events")
| CarnegieHall/linked-data | scripts/OPAS_toRDF_events.py | Python | mit | 11,216 | [
"VisIt"
] | 2bc4ddf7a344f1cb5014c81ef7c6e216d3b4012b5266f265e26db960ae100abd |
#!/usr/bin/env python
# encoding: utf-8
'''
Created by Brian Cherinka on 2016-03-28 23:30:14
Licensed under a 3-clause BSD license.
Revision History:
Initial Version: 2016-03-28 23:30:14 by Brian Cherinka
Last Modified On: 2016-03-28 23:30:14 by Brian
'''
from __future__ import print_function
from __future__ import division
from brain.db.modelGraph import ModelGraph
from marvin import config, log
import inspect
__author__ = 'Brian Cherinka'
class MarvinDB(object):
    ''' Class designed to handle database related things with Marvin

    Wraps the optional local Marvin database: sets up the connection,
    imports the SQLAlchemy model classes, opens a session, verifies the
    connection and builds the model graph and relationship caches.  Every
    piece degrades gracefully to None when the database is unavailable.
    '''
    def __init__(self, *args, **kwargs):
        # dbtype: which database to set up; falsy means "no database".
        self.dbtype = kwargs.get('dbtype', None)
        self.db = None
        self.log = kwargs.get('log', None)
        # Human-readable connection errors collected by testDbConnection.
        self.error = []
        self.__init_the_db()
    def __init_the_db(self):
        ''' Initialize the db: connection, models, session, graph, caches '''
        if self.dbtype:
            self._setupDB()
        if self.db:
            self._importModels()
        else:
            # No connection: null out the model modules so downstream
            # attribute access stays safe.
            self.datadb = None
            self.sampledb = None
            self.dapdb = None
        self._setSession()
        self.testDbConnection()
        self._setModelGraph()
        self.cache_bits = []
        if self.db:
            self._addCache()
    def _setupDB(self):
        ''' Try to import the database; self.db stays None on failure '''
        try:
            from marvin.db.database import db
        except RuntimeError as e:
            log.debug('RuntimeError raised: Problem importing db: {0}'.format(e))
            self.db = None
        except ImportError as e:
            log.debug('ImportError raised: Problem importing db: {0}'.format(e))
            self.db = None
        else:
            self.db = db
    def _importModels(self):
        ''' Try to import the sql alchemy model classes

        Each of the three model modules (sample, data, dap) is imported
        independently; a failure nulls only that module.
        '''
        try:
            import marvin.db.models.SampleModelClasses as sampledb
        except Exception as e:
            log.debug('Exception raised: Problem importing mangadb SampleModelClasses: {0}'.format(e))
            self.sampledb = None
        else:
            self.sampledb = sampledb
        try:
            import marvin.db.models.DataModelClasses as datadb
        except Exception as e:
            log.debug('Exception raised: Problem importing mangadb DataModelClasses: {0}'.format(e))
            self.datadb = None
        else:
            self.datadb = datadb
        try:
            import marvin.db.models.DapModelClasses as dapdb
        except Exception as e:
            log.debug('Exception raised: Problem importing mangadb DapModelClasses: {0}'.format(e))
            self.dapdb = None
            self.spaxelpropdict = None
        else:
            self.dapdb = dapdb
            self.spaxelpropdict = self._setSpaxelPropDict()
    def _setSpaxelPropDict(self):
        ''' Set the SpaxelProp lookup dictionary (MPL release -> class name) '''
        # NOTE(review): 'm > "MPL-4"' is a lexicographic string comparison;
        # it holds for MPL-5..MPL-9 but would misclassify e.g. 'MPL-10' --
        # confirm against the release naming scheme.
        newmpls = [m for m in config._mpldict.keys() if m > 'MPL-4']
        spdict = {'MPL-4': 'SpaxelProp'}
        newdict = {mpl: 'SpaxelProp{0}'.format(mpl.split('-')[1]) for mpl in newmpls}
        spdict.update(newdict)
        return spdict
    def _getSpaxelProp(self):
        ''' Get the correct SpaxelProp class names given an MPL

        Returns a dict with the 'full' and 'clean' table class names for
        self._release (set by buildUberClassDict).
        '''
        return {'full': self.spaxelpropdict[self._release], 'clean':
                'Clean{0}'.format(self.spaxelpropdict[self._release])}
    def _setSession(self):
        ''' Sets the database session '''
        self.session = self.db.Session() if self.db else None
    def testDbConnection(self):
        ''' Test the database connection to ensure it works. Sets a boolean variable isdbconnected '''
        if self.db and self.datadb:
            try:
                # Cheap round-trip query proves the connection is live.
                tmp = self.session.query(self.datadb.PipelineVersion).first()
            except Exception as e:
                self.isdbconnected = False
                self.error.append('Error connecting to manga database: {0}'.format(str(e)))
            else:
                self.isdbconnected = True
        else:
            self.isdbconnected = False
    def forceDbOff(self):
        ''' Force the database to turn off '''
        self.db = None
        self.session = None
        self.isdbconnected = False
        self.datadb = None
        self.dapdb = None
        self.sampledb = None
    def forceDbOn(self, dbtype=None):
        ''' Force the database to turn on

        NOTE(review): the dbtype argument is currently ignored; the
        re-initialization uses self.dbtype set at construction -- confirm.
        '''
        self.__init_the_db()
    def generateClassDict(self, module=None, lower=None):
        ''' Generates a dictionary of the Model Classes, based on class name as key, to the object class.
        Selects only those classes in the module with attribute __tablename__
        lower = True makes class name key all lowercase
        '''
        if not module:
            module = self.datadb
        classdict = {}
        for model in inspect.getmembers(module, inspect.isclass):
            keyname = model[0].lower() if lower else model[0]
            if hasattr(model[1], '__tablename__'):
                # only include the spaxelprop table matching the MPL version
                if 'SpaxelProp' in keyname:
                    if keyname in self._getSpaxelProp().values():
                        classdict[keyname] = model[1]
                else:
                    classdict[keyname] = model[1]
        return classdict
    def buildUberClassDict(self, **kwargs):
        ''' Builds an uber class dictionary from all modelclasses

        Keyword Args:
            release: MPL release string; defaults to config.release.
        '''
        self._release = kwargs.get('release', config.release)
        classdict = {}
        models = [self.datadb, self.sampledb, self.dapdb]
        for model in models:
            if model:
                modelclasses = self.generateClassDict(module=model)
                classdict.update(modelclasses)
        return classdict
    def _setModelGraph(self):
        ''' Initiates the ModelGraph using all available ModelClasses '''
        models = list(filter(None, [self.datadb, self.sampledb, self.dapdb]))
        if models:
            self.modelgraph = ModelGraph(models)
        else:
            self.modelgraph = None
    def _addCache(self):
        ''' Initialize dogpile caching for relationships

        Caching options. A set of three RelationshipCache options
        which can be applied to Query(), causing the "lazy load"
        of these attributes to be loaded from cache.
        '''
        if self.datadb:
            self.cache_bits.append(self.datadb.data_cache)
        if self.sampledb:
            self.cache_bits.append(self.sampledb.sample_cache)
        if self.dapdb:
            self.cache_bits.append(self.dapdb.dap_cache)
| bretthandrews/marvin | python/marvin/db/marvindb.py | Python | bsd-3-clause | 6,528 | [
"Brian"
] | 84d7f8da490aa7ec7d7022da848ebc05a6c032cfbe5dc32a5fa0f4e4557b3a2d |
"""
Converting the 'parse-tree' output of pyparsing to a SPARQL Algebra expression
http://www.w3.org/TR/sparql11-query/#sparqlQuery
"""
import functools
import operator
import collections
from rdflib import Literal, Variable, URIRef, BNode
from rdflib.plugins.sparql.sparql import Prologue, Query
from rdflib.plugins.sparql.parserutils import CompValue, Expr
from rdflib.plugins.sparql.operators import (
and_, TrueFilter, simplify as simplifyFilters)
from rdflib.paths import (
InvPath, AlternativePath, SequencePath, MulPath, NegatedPath)
from pyparsing import ParseResults
# ---------------------------
# Some convenience methods
# Thin factories for SPARQL algebra nodes: each returns a CompValue tagged
# with the operator name and carrying its operands as attributes.
def OrderBy(p, expr):
    """Order the solutions of *p* by the conditions in *expr*."""
    return CompValue('OrderBy', p=p, expr=expr)
def ToMultiSet(p):
    """Coerce the solution sequence of *p* to a multiset."""
    return CompValue('ToMultiSet', p=p)
def Union(p1, p2):
    """Union of the solutions of *p1* and *p2*."""
    return CompValue('Union', p1=p1, p2=p2)
def Join(p1, p2):
    """Join (conjunction) of *p1* and *p2*."""
    return CompValue('Join', p1=p1, p2=p2)
def Minus(p1, p2):
    """Solutions of *p1* not compatible with any solution of *p2*."""
    return CompValue('Minus', p1=p1, p2=p2)
def Graph(term, graph):
    """Evaluate *graph* against the named graph denoted by *term*."""
    return CompValue('Graph', term=term, p=graph)
def BGP(triples=None):
    """Basic graph pattern over *triples* (empty pattern when None)."""
    return CompValue('BGP', triples=triples or [])
def LeftJoin(p1, p2, expr):
    """OPTIONAL: left-join *p2* onto *p1*, conditioned on *expr*."""
    return CompValue('LeftJoin', p1=p1, p2=p2, expr=expr)
def Filter(expr, p):
    """Keep only solutions of *p* for which *expr* evaluates to true."""
    return CompValue('Filter', expr=expr, p=p)
def Extend(p, expr, var):
    """BIND: extend solutions of *p* with *var* bound to *expr*."""
    return CompValue('Extend', p=p, expr=expr, var=var)
def Project(p, PV):
    """Project the solutions of *p* onto the variable list *PV*."""
    return CompValue('Project', p=p, PV=PV)
def Group(p, expr=None):
    """Group the solutions of *p* by the expressions in *expr*."""
    return CompValue('Group', p=p, expr=expr)
def _knownTerms(triple, varsknown, varscount):
    """Sort key for a triple: (number of still-unbound variables, negated
    total usage count of its variables, object-is-not-a-Literal).

    Lower tuples sort first, so triples with fewer unknowns, more heavily
    used variables and Literal objects are evaluated earlier.
    """
    # NOTE: Python 2 filter() returns a list here, so len() is valid.
    return (len(filter(None, (x not in varsknown and
                              isinstance(
                                  x, (Variable, BNode)) for x in triple))),
            -sum(varscount.get(x, 0) for x in triple),
            not isinstance(triple[2], Literal),
            )
def reorderTriples(l):
    """
    Reorder triple patterns so that we execute the
    ones with most bindings first
    """
    def _addvar(term, varsknown):
        # Only variables and blank nodes become bound during matching.
        if isinstance(term, (Variable, BNode)):
            varsknown.add(term)
    # Decorate each triple with a slot for its (recomputed) sort key.
    l = [(None, x) for x in l]
    varsknown = set()
    varscount = collections.defaultdict(int)
    for t in l:
        for c in t[1]:
            if isinstance(c, (Variable, BNode)):
                varscount[c] += 1
    i = 0
    # Done in steps, sort by number of bound terms
    # the top block of patterns with the most bound terms is kept
    # the rest is resorted based on the vars bound after the first
    # block is evaluated
    # we sort by decorate/undecorate, since we need the value of the sort keys
    while i < len(l):
        l[i:] = sorted((_knownTerms(x[
            1], varsknown, varscount), x[1]) for x in l[i:])
        t = l[i][0][0]  # top block has this many terms bound
        j = 0
        while i+j < len(l) and l[i+j][0][0] == t:
            for c in l[i+j][1]:
                _addvar(c, varsknown)
            j += 1
        i += 1
    # Undecorate: drop the sort keys again.
    return [x[1] for x in l]
def triples(l):
    """Flatten nested term lists into (s, p, o) triples and reorder them.

    Raises:
        Exception: if the flattened term count is not a multiple of three.
    """
    # NOTE: relies on the Python 2 builtin reduce().
    l = reduce(lambda x, y: x + y, l)
    if (len(l) % 3) != 0:
        raise Exception('these aint triples')
    return reorderTriples((l[x], l[x + 1], l[x + 2])
                          for x in range(0, len(l), 3))
def translatePName(p, prologue):
    """
    Expand prefixed/relative URIs

    Returns the expanded node, or None for node types that should be left
    untouched by the calling traversal.
    """
    if isinstance(p, CompValue):
        if p.name == 'pname':
            return prologue.absolutize(p)
        if p.name == 'literal':
            # The datatype may itself be a prefixed name; absolutize it too.
            return Literal(p.string, lang=p.lang,
                           datatype=prologue.absolutize(p.datatype))
    elif isinstance(p, URIRef):
        return prologue.absolutize(p)
def translatePath(p):
    """
    Translate PropertyPath expressions
    """
    if isinstance(p, CompValue):
        if p.name == 'PathAlternative':
            # Single alternative: unwrap instead of building an AlternativePath.
            if len(p.part) == 1:
                return p.part[0]
            else:
                return AlternativePath(*p.part)
        elif p.name == 'PathSequence':
            if len(p.part) == 1:
                return p.part[0]
            else:
                return SequencePath(*p.part)
        elif p.name == 'PathElt':
            # p.mod is the cardinality modifier: '?', '*' or '+'.
            if not p.mod:
                return p.part
            else:
                if isinstance(p.part, list):
                    if len(p.part) != 1:
                        raise Exception('Denkfehler!')
                    return MulPath(p.part[0], p.mod)
                else:
                    return MulPath(p.part, p.mod)
        elif p.name == 'PathEltOrInverse':
            # '^path' — inverse property path.
            if isinstance(p.part, list):
                if len(p.part) != 1:
                    raise Exception('Denkfehler!')
                return InvPath(p.part[0])
            else:
                return InvPath(p.part)
        elif p.name == 'PathNegatedPropertySet':
            # '!(:p|:q)' — negated property set.
            if isinstance(p.part, list):
                return NegatedPath(AlternativePath(*p.part))
            else:
                return NegatedPath(p.part)
def translateExists(e):
    """
    Translate the graph pattern used by EXISTS and NOT EXISTS

    http://www.w3.org/TR/sparql11-query/#sparqlCollectFilters
    """
    def _c(n):
        # Rewrite the inner graph pattern of EXISTS/NOT EXISTS builtins in
        # place; all other nodes pass through unchanged.
        if isinstance(n, CompValue):
            if n.name in ('Builtin_EXISTS', 'Builtin_NOTEXISTS'):
                n.graph = translateGroupGraphPattern(n.graph)
    e = traverse(e, visitPost=_c)
    return e
def collectAndRemoveFilters(parts):
    """
    FILTER expressions apply to the whole group graph pattern in which
    they appear.

    http://www.w3.org/TR/sparql11-query/#sparqlCollectFilters

    Removes every Filter node from *parts* in place and returns the
    conjunction of their (EXISTS-translated) expressions, or None when the
    group contains no filters.
    """
    filters = [translateExists(part.expr)
               for part in parts if part.name == 'Filter']
    # Slice assignment mutates the caller's list, matching the original
    # pop-based removal.
    parts[:] = [part for part in parts if part.name != 'Filter']
    if filters:
        return and_(*filters)
    return None
def translateGroupOrUnionGraphPattern(graphPattern):
    """Fold the alternatives of a UNION pattern into nested Union nodes.

    A single alternative is returned unchanged (no Union wrapper).
    """
    A = None
    for g in graphPattern.graph:
        g = translateGroupGraphPattern(g)
        if not A:
            A = g
        else:
            A = Union(A, g)
    return A
def translateGraphGraphPattern(graphPattern):
    """Translate GRAPH <term> { ... } into a Graph algebra node."""
    return Graph(graphPattern.term,
                 translateGroupGraphPattern(graphPattern.graph))
def translateInlineData(graphPattern):
    """Translate an inline VALUES block into a multiset of bindings."""
    return ToMultiSet(translateValues(graphPattern))
def translateGroupGraphPattern(graphPattern):
    """
    Translate a group graph pattern to the SPARQL algebra.

    http://www.w3.org/TR/sparql11-query/#convertGraphPattern
    """
    if graphPattern.name == 'SubSelect':
        return ToMultiSet(translate(graphPattern)[0])
    if not graphPattern.part:
        graphPattern.part = []  # empty { }
    # Filters apply to the whole group; they are detached here and wrapped
    # around the final result at the bottom of this function.
    filters = collectAndRemoveFilters(graphPattern.part)
    g = []
    for p in graphPattern.part:
        if p.name == 'TriplesBlock':
            # merge adjacent TripleBlocks
            if not (g and g[-1].name == 'BGP'):
                g.append(BGP())
            g[-1]["triples"] += triples(p.triples)
        else:
            g.append(p)
    # Fold the parts left-to-right, starting from the empty BGP.
    G = BGP()
    for p in g:
        if p.name == 'OptionalGraphPattern':
            A = translateGroupGraphPattern(p.graph)
            if A.name == 'Filter':
                # Fuse the optional pattern's filter into the left-join
                # condition, per the algebra translation rules.
                G = LeftJoin(G, A.p, A.expr)
            else:
                G = LeftJoin(G, A, TrueFilter)
        elif p.name == 'MinusGraphPattern':
            G = Minus(p1=G, p2=translateGroupGraphPattern(p.graph))
        elif p.name == 'GroupOrUnionGraphPattern':
            G = Join(p1=G, p2=translateGroupOrUnionGraphPattern(p))
        elif p.name == 'GraphGraphPattern':
            G = Join(p1=G, p2=translateGraphGraphPattern(p))
        elif p.name == 'InlineData':
            G = Join(p1=G, p2=translateInlineData(p))
        elif p.name == 'ServiceGraphPattern':
            G = Join(p1=G, p2=p)
        elif p.name in ('BGP', 'Extend'):
            G = Join(p1=G, p2=p)
        elif p.name == 'Bind':
            G = Extend(G, p.expr, p.var)
        else:
            raise Exception('Unknown part in GroupGraphPattern: %s - %s' %
                            (type(p), p.name))
    if filters:
        G = Filter(expr=filters, p=G)
    return G
class StopTraversal(Exception):
    """Raised by a visit function to abort traversal; *rv* is the value
    that traverse() should return instead."""
    def __init__(self, rv):
        self.rv = rv
def _traverse(e, visitPre=lambda n: None, visitPost=lambda n: None):
    """
    Traverse a parse-tree, visit each node

    if visit functions return a value, replace current node
    """
    _e = visitPre(e)
    if _e is not None:
        return _e
    if e is None:
        return None
    # Containers are rebuilt with their children traversed recursively.
    if isinstance(e, (list, ParseResults)):
        return [_traverse(x, visitPre, visitPost) for x in e]
    elif isinstance(e, tuple):
        return tuple([_traverse(x, visitPre, visitPost) for x in e])
    elif isinstance(e, CompValue):
        # NOTE: Python 2 dict iteration; CompValue is a dict subclass and
        # is updated in place.
        for k, val in e.iteritems():
            e[k] = _traverse(val, visitPre, visitPost)
    _e = visitPost(e)
    if _e is not None:
        return _e
    return e
def _traverseAgg(e, visitor=lambda n, v: None):
    """
    Traverse a parse-tree bottom-up, aggregating the visitor's results.

    The visitor is called as ``visitor(node, child_results)`` for every
    node; its return value is propagated upwards through *child_results*.
    """
    res = []
    if isinstance(e, (list, ParseResults, tuple)):
        res = [_traverseAgg(x, visitor) for x in e]
    elif isinstance(e, CompValue):
        for k, val in e.iteritems():
            # 'is not None' instead of '!= None': identity is the correct
            # (PEP 8 mandated) way to test for None and avoids triggering
            # custom __eq__ implementations on parse-tree values.
            if val is not None:
                res.append(_traverseAgg(val, visitor))
    return visitor(e, res)
def traverse(
        tree, visitPre=lambda n: None,
        visitPost=lambda n: None, complete=None):
    """
    Traverse tree, visit each node with visit function

    visit function may raise StopTraversal to stop traversal
    if complete!=None, it is returned on complete traversal,
    otherwise the transformed tree is returned
    """
    try:
        r = _traverse(tree, visitPre, visitPost)
        if complete is not None:
            return complete
        return r
    # 'except ... as ...' works on Python 2.6+ and Python 3; the legacy
    # comma form used previously is a SyntaxError under Python 3.
    except StopTraversal as st:
        return st.rv
def _hasAggregate(x):
"""
Traverse parse(sub)Tree
return true if any aggregates are used
"""
if isinstance(x, CompValue):
if x.name.startswith('Aggregate_'):
raise StopTraversal(True)
def _aggs(e, A):
"""
Collect Aggregates in A
replaces aggregates with variable references
"""
# TODO: nested Aggregates?
if isinstance(e, CompValue) and e.name.startswith('Aggregate_'):
A.append(e)
aggvar = Variable('__agg_%d__' % len(A))
e["res"] = aggvar
return aggvar
def _findVars(x, res):
"""
Find all variables in a tree
"""
if isinstance(x, Variable):
res.add(x)
if isinstance(x, CompValue):
if x.name == "Bind":
res.add(x.var)
return x # stop recursion and finding vars in the expr
elif x.name == 'SubSelect':
if x.projection:
res.update(v.var or v.evar for v in x.projection)
return x
def _addVars(x, children):
# import pdb; pdb.set_trace()
if isinstance(x, Variable):
return set([x])
elif isinstance(x, CompValue):
x["_vars"] = set(reduce(operator.or_, children, set()))
if x.name == "Bind":
return set([x.var])
elif x.name == 'SubSelect':
if x.projection:
s = set(v.var or v.evar for v in x.projection)
else:
s = set()
return s
return reduce(operator.or_, children, set())
def _sample(e, v=None):
    """
    Visitor: wrap every variable other than *v* in an Aggregate_Sample
    node; subtrees that already are aggregates are left untouched.
    """
    if isinstance(e, CompValue):
        if e.name.startswith("Aggregate_"):
            # already an aggregate -- keep the subtree as-is
            return e
    elif isinstance(e, Variable):
        if e != v:
            return CompValue('Aggregate_Sample', vars=e)
def _simplifyFilters(e):
    # Visitor: fold nested filter expressions into a simpler form.
    if isinstance(e, Expr):
        return simplifyFilters(e)
def translateAggregates(q, M):
    """
    Collect every aggregate used in the projection, HAVING and ORDER BY
    clauses, replace each by an internal __agg_N__ variable and wrap the
    pattern *M* in an AggregateJoin.

    Returns (AggregateJoin CompValue, E) where E is a list of
    (result-variable, projection-variable) pairs for later Extends.
    """
    E = []
    A = []
    # collect/replace aggs in :
    #    select expr as ?var
    if q.projection:
        for v in q.projection:
            if v.evar:
                v.expr = traverse(v.expr, functools.partial(_sample, v=v.evar))
                v.expr = traverse(v.expr, functools.partial(_aggs, A=A))
    # having clause
    if traverse(q.having, _hasAggregate, complete=False):
        q.having = traverse(q.having, _sample)
        traverse(q.having, functools.partial(_aggs, A=A))
    # order by
    if traverse(q.orderby, _hasAggregate, complete=False):
        q.orderby = traverse(q.orderby, _sample)
        traverse(q.orderby, functools.partial(_aggs, A=A))
    # sample all other select vars
    # TODO: only allowed for vars in group-by?
    if q.projection:
        for v in q.projection:
            if v.var:
                rv = Variable('__agg_%d__' % (len(A) + 1))
                A.append(CompValue('Aggregate_Sample', vars=v.var, res=rv))
                E.append((rv, v.var))
    return CompValue('AggregateJoin', A=A, p=M), E
def translateValues(v):
    """
    Turn a VALUES clause parse node into a 'values' CompValue holding a
    list of solution-binding dicts (one dict per row).
    """
    # if len(v.var)!=len(v.value):
    #     raise Exception("Unmatched vars and values in ValueClause: "+str(v))
    bindings = []
    if not v.var:
        return bindings
    if not v.value:
        return bindings
    if isinstance(v.value[0], list):
        # multi-variable rows: zip each row against the variable list
        for row in v.value:
            bindings.append(dict(zip(v.var, row)))
    else:
        # single variable: every value forms a row of its own
        for val in v.value:
            bindings.append({v.var[0]: val})
    return CompValue('values', res=bindings)
def translate(q):
    """
    http://www.w3.org/TR/sparql11-query/#convertSolMod

    Translate the WHERE part of a query parse tree into algebra and
    apply the solution modifiers (GROUP BY, HAVING, VALUES, ORDER BY,
    projection, DISTINCT/REDUCED, LIMIT/OFFSET) in spec order.
    Returns (algebra expression M, list of projected variables PV).
    """
    _traverse(q, _simplifyFilters)
    q.where = traverse(q.where, visitPost=translatePath)
    # TODO: Var scope test
    VS = set()
    traverse(q.where, functools.partial(_findVars, res=VS))
    # all query types have a where part
    M = translateGroupGraphPattern(q.where)
    aggregate = False
    if q.groupby:
        conditions = []
        # convert "GROUP BY (?expr as ?var)" to an Extend
        for c in q.groupby.condition:
            if isinstance(c, CompValue) and c.name == 'GroupAs':
                M = Extend(M, c.expr, c.var)
                c = c.var
            conditions.append(c)
        M = Group(p=M, expr=conditions)
        aggregate = True
    elif traverse(q.having, _hasAggregate, complete=False) or \
            traverse(q.orderby, _hasAggregate, complete=False) or \
            any(traverse(x.expr, _hasAggregate, complete=False)
                for x in q.projection or [] if x.evar):
        # if any aggregate is used, implicit group by
        M = Group(p=M)
        aggregate = True
    if aggregate:
        M, E = translateAggregates(q, M)
    else:
        E = []
    # HAVING
    if q.having:
        M = Filter(expr=and_(*q.having.condition), p=M)
    # VALUES
    if q.valuesClause:
        M = Join(p1=M, p2=ToMultiSet(translateValues(q.valuesClause)))
    if not q.projection:
        # select *
        PV = list(VS)
    else:
        PV = list()
        for v in q.projection:
            if v.var:
                if v not in PV:
                    PV.append(v.var)
            elif v.evar:
                if v not in PV:
                    PV.append(v.evar)
                E.append((v.expr, v.evar))
            else:
                raise Exception("I expected a var or evar here!")
    # bind computed projection expressions (and aggregate results)
    for e, v in E:
        M = Extend(M, e, v)
    # ORDER BY
    if q.orderby:
        M = OrderBy(M, [CompValue('OrderCondition', expr=c.expr,
                                  order=c.order) for c in q.orderby.condition])
    # PROJECT
    M = Project(M, PV)
    if q.modifier:
        if q.modifier == 'DISTINCT':
            M = CompValue('Distinct', p=M)
        elif q.modifier == 'REDUCED':
            M = CompValue('Reduced', p=M)
    if q.limitoffset:
        offset = 0
        if q.limitoffset.offset != None:
            offset = q.limitoffset.offset.toPython()
        if q.limitoffset.limit != None:
            M = CompValue('Slice', p=M, start=offset,
                          length=q.limitoffset.limit.toPython())
        else:
            M = CompValue('Slice', p=M, start=offset)
    return M, PV
def simplify(n):
    """Remove joins to empty BGPs and reorder the triples of BGP nodes."""
    if not isinstance(n, CompValue):
        return n
    if n.name == 'Join':
        # a join against an empty basic graph pattern is the other side
        if n.p1.name == 'BGP' and not n.p1.triples:
            return n.p2
        if n.p2.name == 'BGP' and not n.p2.triples:
            return n.p1
    elif n.name == 'BGP':
        n["triples"] = reorderTriples(n.triples)
    return n
def analyse(n, children):
    """
    Bottom-up visitor for _traverseAgg: marks Join nodes as lazily
    evaluable ("lazy") when all their children are, and reports whether
    the current subtree may be evaluated lazily.
    """
    if not isinstance(n, CompValue):
        return True
    if n.name == 'Join':
        n["lazy"] = all(children)
        return False
    if n.name in ('Slice', 'Distinct'):
        return False
    return all(children)
def translatePrologue(p, base, initNs=None, prologue=None):
    """
    Build (or extend) a Prologue from the BASE/PREFIX declarations in *p*.

    *base* and *initNs* supply externally provided defaults; explicit
    declarations in *p* override them.  Passing an existing *prologue*
    accumulates declarations across multiple operations.
    """
    if prologue is None:
        prologue = Prologue()
        prologue.base = ""
    if base:
        prologue.base = base
    if initNs:
        for k, v in initNs.iteritems():
            prologue.bind(k, v)
    for x in p:
        if x.name == 'Base':
            prologue.base = x.iri
        elif x.name == 'PrefixDecl':
            prologue.bind(x.prefix, prologue.absolutize(x.iri))
    return prologue
def translateQuads(quads):
    """
    Split a Quads parse node into plain triples and per-graph quads.

    Returns (alltriples, allquads) where allquads maps each GRAPH term
    to the list of triples inside that graph.
    """
    alltriples = triples(quads.triples) if quads.triples else []
    allquads = collections.defaultdict(list)
    for q in quads.quadsNotTriples or []:
        if q.triples:
            allquads[q.term] += triples(q.triples)
    return alltriples, allquads
def translateUpdate1(u, prologue):
    """
    Translate a single SPARQL Update operation parse node in place.

    Quad patterns are flattened into triples/quads; for Modify the
    WHERE part is converted to an algebra expression.  The prologue is
    attached to the node for later IRI resolution.
    """
    if u.name in ('Load', 'Clear', 'Drop', 'Create'):
        pass  # no translation needed
    elif u.name in ('Add', 'Move', 'Copy'):
        pass
    elif u.name in ('InsertData', 'DeleteData', 'DeleteWhere'):
        t, q = translateQuads(u.quads)
        u["quads"] = q
        u["triples"] = t
        if u.name in ('DeleteWhere', 'DeleteData'):
            pass  # TODO: check for bnodes in triples
    elif u.name == 'Modify':
        if u.delete:
            u.delete["triples"], u.delete[
                "quads"] = translateQuads(u.delete.quads)
        if u.insert:
            u.insert["triples"], u.insert[
                "quads"] = translateQuads(u.insert.quads)
        u["where"] = translateGroupGraphPattern(u.where)
    else:
        raise Exception('Unknown type of update operation: %s' % u)
    u.prologue = prologue
    return u
def translateUpdate(q, base=None, initNs=None):
    """
    Returns a list of SPARQL Update Algebra expressions
    """
    res = []
    prologue = None
    if not q.request:
        return res
    for p, u in zip(q.prologue, q.request):
        # prologues are cumulative across the operations of one request
        prologue = translatePrologue(p, base, initNs, prologue)
        # absolutize/resolve prefixes
        u = traverse(
            u, visitPost=functools.partial(translatePName, prologue=prologue))
        u = _traverse(u, _simplifyFilters)
        u = traverse(u, visitPost=translatePath)
        res.append(translateUpdate1(u, prologue))
    return res
def translateQuery(q, base=None, initNs=None):
    """
    Translate a query-parsetree to a SPARQL Algebra Expression

    Return a rdflib.plugins.sparql.sparql.Query object
    """
    # We get in: (prologue, query)
    prologue = translatePrologue(q[0], base, initNs)
    # absolutize/resolve prefixes
    q[1] = traverse(
        q[1], visitPost=functools.partial(translatePName, prologue=prologue))
    P, PV = translate(q[1])
    datasetClause = q[1].datasetClause
    if q[1].name == 'ConstructQuery':
        template = triples(q[1].template) if q[1].template else None
        res = CompValue(q[1].name, p=P,
                        template=template,
                        datasetClause=datasetClause)
    else:
        res = CompValue(q[1].name, p=P, datasetClause=datasetClause, PV=PV)
    # post-processing passes: drop joins to empty BGPs, mark lazy joins,
    # annotate nodes with their variable sets
    res = traverse(res, visitPost=simplify)
    _traverseAgg(res, visitor=analyse)
    _traverseAgg(res, _addVars)
    return Query(prologue, res)
def pprintAlgebra(q):
    # Debug helper: pretty-print an algebra tree (or, for updates, a
    # list of operations) with increasing indentation per level.
    def pp(p, ind="    "):
        # if isinstance(p, list):
        #     print "[ "
        #     for x in p: pp(x,ind)
        #     print "%s ]"%ind
        #     return
        if not isinstance(p, CompValue):
            print p
            return
        print "%s(" % (p.name, )
        for k in p:
            print "%s%s =" % (ind, k,),
            pp(p[k], ind + "    ")
        print "%s)" % ind
    try:
        pp(q.algebra)
    except AttributeError:
        # it's update, just a list
        for x in q:
            pp(x)
if __name__ == '__main__':
    # Ad-hoc CLI: argv[1] is either a file containing a query or the
    # query text itself; parse, translate and pretty-print the algebra.
    import sys
    from . import parser
    import os.path
    if os.path.exists(sys.argv[1]):
        q = file(sys.argv[1])
    else:
        q = sys.argv[1]
    pq = parser.parseQuery(q)
    print pq
    tq = translateQuery(pq)
    print pprintAlgebra(tq)
| gloaec/trifle | src/rdflib/plugins/sparql/algebra.py | Python | gpl-3.0 | 20,924 | [
"VisIt"
] | 6a727d3d32312b90aeaf9d2a3deae4583a04d91af0caa1a3101a623780eea9d1 |
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import espressomd._system as es
import espressomd
from espressomd import thermostat
from espressomd import code_info
import numpy
print("""
=======================================================
= lj_liquid_distribution.py =
=======================================================
Program Information:""")
print(code_info.features())
dev = "cpu"
# System parameters
#############################################################
# 10 000  Particles
box_l = 10.7437
density = 0.7
# Interaction parameters (repulsive Lennard Jones)
#############################################################
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 1.12246
lj_cap = 20
# Integration parameters
#############################################################
system = espressomd.System()
system.time_step = 0.01
system.cell_system.skin = 0.4
# Langevin thermostat keeps the system at reduced temperature kT=1
system.thermostat.set_langevin(kT=1.0, gamma=1.0)
# warmup integration (with capped LJ potential)
warm_steps = 100
warm_n_times = 30
# do the warmup until the particles have at least the distance min__dist
min_dist = 0.9
# integration
int_steps = 1000
int_n_times = 5
#############################################################
#  Setup System                                             #
#############################################################
# distribution file: radial distribution of type-1 around type-0 particles
distr_type_list_a = [0]
distr_type_list_b = [1]
distr_r_min = 0.1
distr_r_max = box_l/2.0
distr_r_bins = 200
distr_log_flag = 0
distr_int_flag = 1
distr_file = open("pylj_liquid_distribution.dat", "w")
distr_file.write("# r\tdistribution\n")
distr_r = numpy.zeros(distr_r_bins)
distr_values = numpy.zeros(distr_r_bins)
# Interaction setup
#############################################################
system.box_l = [box_l, box_l, box_l]
system.non_bonded_inter[0, 0].lennard_jones.set_params(
    epsilon=lj_eps, sigma=lj_sig,
    cutoff=lj_cut, shift="auto")
system.non_bonded_inter.set_force_cap(lj_cap)
print("LJ-parameters:")
print(system.non_bonded_inter[0, 0].lennard_jones.get_params())
# Particle setup
#############################################################
volume = box_l * box_l * box_l
n_part = int(volume * density)
# first half of the particles is type 0, the second half type 1
for i in range(n_part):
    if i < n_part/2.0:
        system.part.add(type=0, id=i, pos=numpy.random.random(3) * system.box_l)
    else:
        system.part.add(type=1, id=i, pos=numpy.random.random(3) * system.box_l)
print("Simulate {} particles in a cubic simulation box {} at density {}."
      .format(n_part, box_l, density).strip())
print("Interactions:\n")
act_min_dist = system.analysis.mindist()
print("Start with minimal distance {}".format(act_min_dist))
system.cell_system.max_num_cells = 2744
#############################################################
#  Warmup Integration                                       #
#############################################################
# open Observable file
obs_file = open("pylj_liquid.obs", "w")
obs_file.write("# Time\tE_tot\tE_kin\tE_pot\n")
# set obs_file [open "$name$ident.obs" "w"]
# puts $obs_file "\# System: $name$ident"
# puts $obs_file "\# Time\tE_tot\tE_kin\t..."
print("""
Start warmup integration:
At maximum {} times {} steps
Stop if minimal distance is larger than {}
""".strip().format(warm_n_times, warm_steps, min_dist))
# set LJ cap
lj_cap = 20
system.non_bonded_inter.set_force_cap(lj_cap)
print(system.non_bonded_inter[0, 0].lennard_jones)
# Warmup Integration Loop: integrate with a capped potential until the
# particles have pushed apart to at least min_dist
i = 0
while (i < warm_n_times and act_min_dist < min_dist):
    system.integrator.run(warm_steps)
    # Warmup criterion
    act_min_dist = system.analysis.mindist()
    # print("\rrun %d at time=%f (LJ cap=%f) min dist = %f\r" % (i,system.time,lj_cap,act_min_dist), end=' ')
    i += 1
    # write observables
    # puts $obs_file "{ time [setmd time] } [analyze energy]"
    # Increase LJ cap
    lj_cap = lj_cap + 10
    system.non_bonded_inter.set_force_cap(lj_cap)
# Just to see what else we may get from the c code
import pprint
pprint.pprint(system.cell_system.get_state(), width=1)
# pprint.pprint(system.part.__getstate__(), width=1)
pprint.pprint(system.__getstate__(), width=1)
# write parameter file
# polyBlockWrite "$name$ident.set" {box_l time_step skin} ""
set_file = open("pylj_liquid.set", "w")
set_file.write("box_l %s\ntime_step %s\nskin %s\n" %
               (box_l, system.time_step, system.cell_system.skin))
#############################################################
#      Integration                                          #
#############################################################
print("\nStart integration: run %d times %d steps" % (int_n_times, int_steps))
# remove force capping
lj_cap = 0
system.non_bonded_inter.set_force_cap(lj_cap)
print(system.non_bonded_inter[0, 0].lennard_jones)
# print(initial energies)
energies = system.analysis.energy()
print(energies)
j = 0
for i in range(0, int_n_times):
    print("run %d at time=%f " % (i, system.time))
    system.integrator.run(int_steps)
    # accumulate the pair distribution over all production runs
    r, dist = system.analysis.distribution(type_list_a=distr_type_list_a, type_list_b=distr_type_list_b,
                                           r_min=distr_r_min, r_max=distr_r_max, r_bins=distr_r_bins,
                                           log_flag=distr_log_flag, int_flag=distr_int_flag)
    distr_r = r
    distr_values += dist
    energies = system.analysis.energy()
    print(energies)
    obs_file.write('{ time %s } %s\n' % (system.time, energies))
    linear_momentum = system.analysis.analyze_linear_momentum()
    print(linear_momentum)
    # write observables
    # set energies [analyze energy]
    # puts $obs_file "{ time [setmd time] } $energies"
    # puts -nonewline "temp = [expr [lindex $energies 1 1]/(([degrees_of_freedom]/2.0)*[setmd n_part])]\r"
    # flush stdout
    # write intermediate configuration
    # if { $i%10==0 } {
    #     polyBlockWrite "$name$ident.[format %04d $j]" {time box_l} {id pos type}
    #     incr j
    # }
#rescale distribution values (average over runs) and write out data
distr_values /= int_n_times
for i in range(distr_r_bins):
    distr_file.write("{0}\t{1}\n".format(distr_r[i], distr_values[i]))
distr_file.close()
# write end configuration
end_file = open("pylj_liquid.end", "w")
end_file.write("{ time %f } \n { box_l %f }\n" % (system.time, box_l))
end_file.write("{ particles {id pos type} }")
for i in range(n_part):
    end_file.write("%s\n" % system.part[i].pos)
# id & type not working yet
obs_file.close()
set_file.close()
end_file.close()
# es._espressoHandle.die()
# terminate program
print("\nFinished.")
| lahnerml/espresso | samples/python/lj_liquid_distribution.py | Python | gpl-3.0 | 7,270 | [
"ESPResSo"
] | 02607c30ea46571d27f2e79678702e59fdcf16270c188cee5599ab5384d3eca4 |
__author__ = 'jules'
#see http://nbviewer.ipython.org/github/timstaley/ipython-notebooks/blob/compiled/probabilistic_programming/convolving_distributions_illustration.ipynb
import deepThought.ORM.ORM as ORM
from deepThought.util import list_to_cdf
from deepThought.stats.phase_type import infer_distribution
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
from deepThought.stats.customDistributions import MixtureDist2
def main():
    # Load a previously simulated job and compare the empirical CDF of
    # one task's execution history against an inferred phase-type
    # distribution, a Gaussian error model and their mixture.
    job = ORM.deserialize("/tmp/output.pickle")
    results = sorted(job.tasks.values(), key=lambda x: len(x.execution_history), reverse=True)
    jobs = []
    for x in results:
        mean = np.mean(x.execution_history)
        # cx = coefficient of variation; keep only high-variance tasks
        cx = np.sqrt(np.var(x.execution_history)) / mean
        if cx >= 1:
            jobs.append(x)
    #set1 = jobs[1].execution_history
    set1 = results[1].execution_history
    data = np.array(set1)
    mean = np.mean(data)
    var = np.var(data)
    dist = infer_distribution(data)
    errscale = 10
    # NOTE(review): scale of a normal is a standard deviation, yet the
    # variance is passed here -- confirm this is intended
    err = stats.norm(loc=mean , scale=var)
    delta = 1
    sum_rv_delta_size = 1e2#1e-2
    # mixture: 10% error model + 90% inferred distribution
    mixt = MixtureDist2(0.1, err, dist)
    data_x, data_y = list_to_cdf(set1)
    new_grid = np.arange(0,90000, 100)
    plt.plot(new_grid, dist.cdf(new_grid), label='uniform')
    plt.plot(new_grid, err.cdf(new_grid), label='gaussian')
    plt.plot(new_grid, mixt.cdf(new_grid), label='Sum')
    plt.plot(data_x, data_y, label="data")
    #plt.xlim(0,max(new_grid))
    plt.legend(loc='best'), plt.suptitle('CDFs')
    #plt.ylim(-0.1,1.1)
    plt.show()
if __name__ == '__main__':
    main()
"Gaussian"
] | f337221a6151d0c97a683c02324a9a4666e571da7d1e88c964d170f9c3a6f682 |
#!/usr/bin/env python
# Copyright (C) 2012, 2014 Moritz Huetten
import sys
import getopt
import math
#from numpy import *
import numpy as np
try:
from netCDF4 import Dataset as NC
except:
print "netCDF4 is not installed!"
sys.exit(1)
# geometry setup MISMIP3D Stnd-experiment
WRIT_FILE = 'MISMIP3D_Stnd_initialSetup.nc'
accumrate = 0.5  # accumulation rate in m/a
#### command line arguments ####
try:
    opts, args = getopt.getopt(sys.argv[1:], "a:r:", ["accumrate=", "resolution="])
    for opt, arg in opts:
        if opt in ("-a", "--accumulation"):
            accumrate = arg
        if opt in ("-r", "--resolution"):  # resolution in km
            boxWidth = arg
except getopt.GetoptError:
    print 'Incorrect command line arguments'
    sys.exit(2)
######## geometry setup (moritz.huetten@pik) #########
boxWidth = float(boxWidth)
accumrate = float(accumrate)
### CONSTANTS ###
secpera = 31556926.
ice_density = 910.0  # [kg m-3]
yExtent = 2 * boxWidth  # in km
xExtent = 2 * 800  # in km
# grid size: # of boxes
ny = int(np.floor(yExtent / boxWidth / 2) * 2 + 1)  # make it an odd number
nx = int(np.floor(xExtent / boxWidth / 2) * 2 + 1)  # make it an odd number
# grid size: extent in km's, origin (0,0) in the center of the domain
x = np.linspace(-xExtent / 2, xExtent / 2, nx) * 1000.0
y = np.linspace(-yExtent / 2, yExtent / 2, ny) * 1000.0
nxcenter = int(np.floor(0.5 * nx))
nycenter = int(np.floor(0.5 * ny))
# model fields on the (ny, nx) grid
thk = np.zeros((ny, nx))
topg = np.zeros((ny, nx))
ice_surface_temp = np.zeros((ny, nx))
precip = np.zeros((ny, nx))
# define bed elevation:
# linear sloping bed in x-direction with top in the middle, symmetric in y-direction
# print range(0,int(np.floor(0.5*nx)))
print "Informations from createSetup_Stnd.py:"
print "grid size (pixel):"
print ny
print nx
print "grid size center:"
print nxcenter
print nycenter
print "domain range in meters:"
print "x-dir:"
print x[0]
print x[nx - 1]
print "y-dir:"
print y[0]
print y[ny - 1]
# define bedrock geometry topg:
for i in range(0, nx):
    for j in range(0, ny):
        topg[j, i] = -100.0 - abs(x[i]) / 1.0e3
# define constant initial ice-thickness and extent:
thickness = 500.0  # initial ice thickness in meters
xfront = 700.0  # x-position of fixed calving front in km
nxfront = int(xfront / boxWidth)
for i in range(nxcenter - nxfront, nxcenter + nxfront):
    for j in range(0, ny):
        thk[j, i] = thickness
# define precipitation field:
for i in range(0, nx):
    for j in range(0, ny):
        precip[j, i] = accumrate / secpera
# defining dummy temperature:
for i in range(0, nx):
    for j in range(0, ny):
        ice_surface_temp[j, i] = 268.15
##### define dimensions in NetCDF file #####
ncfile = NC(WRIT_FILE, 'w', format='NETCDF3_CLASSIC')
xdim = ncfile.createDimension('x', nx)
ydim = ncfile.createDimension('y', ny)
##### define variables, set attributes, write data #####
# format: ['units', 'long_name', 'standard_name', '_FillValue', array]
vars = {'y': ['m',
              'y-coordinate in Cartesian system',
              'projection_y_coordinate',
              None,
              y],
        'x': ['m',
              'x-coordinate in Cartesian system',
              'projection_x_coordinate',
              None,
              x],
        'thk': ['m',
                'floating ice shelf thickness',
                'land_ice_thickness',
                1.0,
                thk],
        'topg': ['m',
                 'bedrock surface elevation',
                 'bedrock_altitude',
                 -600.0,
                 topg],
        'ice_surface_temp': ['K',
                             'annual mean air temperature at ice surface',
                             'surface_temperature',
                             248.0,
                             ice_surface_temp],
        'climatic_mass_balance': ['kg m-2 year-1',
                                  'mean annual net ice equivalent accumulation rate',
                                  'land_ice_surface_specific_mass_balance_flux',
                                  0.2 * ice_density,
                                  precip],
        }
# coordinate variables are 1-D; all other fields live on the (y, x) grid
for name in vars.keys():
    [_, _, _, fill_value, data] = vars[name]
    if name in ['x', 'y']:
        var = ncfile.createVariable(name, 'f4', (name,))
    else:
        var = ncfile.createVariable(name, 'f4', ('y', 'x'), fill_value=fill_value)
    for each in zip(['units', 'long_name', 'standard_name'], vars[name]):
        if each[1]:
            setattr(var, each[0], each[1])
    var[:] = data
# finish up
ncfile.close()
print "NetCDF file ", WRIT_FILE, " created"
print ''
| talbrecht/pism_pik07 | examples/mismip/mismip3d/setup_Stnd.py | Python | gpl-3.0 | 4,700 | [
"NetCDF"
] | 05e54d08fc8528ec1c76d02cc092aa2e88a9ced8c0e1dae174f946a760478eae |
#!/usr/bin/env python
# Description: run job for the backend. result will not be cached
import os
import sys
import subprocess
from libpredweb import myfunc
from libpredweb import webserver_common as webcom
import glob
import hashlib
import shutil
from datetime import datetime
from dateutil import parser as dtparser
from pytz import timezone
import time
import site
import fcntl
import json
import urllib.request, urllib.parse, urllib.error
FORMAT_DATETIME = webcom.FORMAT_DATETIME
TZ = webcom.TZ
progname = os.path.basename(sys.argv[0])
wspace = ''.join([" "]*len(progname))
rundir = os.path.dirname(os.path.realpath(__file__))
webserver_root = os.path.realpath("%s/../../../"%(rundir))
activate_env="%s/env/bin/activate_this.py"%(webserver_root)
exec(compile(open(activate_env, "rb").read(), activate_env, 'exec'), dict(__file__=activate_env))
site.addsitedir("%s/env/lib/python2.7/site-packages/"%(webserver_root))
sys.path.append("/usr/local/lib/python2.7/dist-packages")
basedir = os.path.realpath("%s/.."%(rundir)) # path of the application, i.e. pred/
path_md5cache = "%s/static/md5"%(basedir)
path_cache = "%s/static/result/cache"%(basedir)
path_result = "%s/static/result/"%(basedir)
gen_errfile = "%s/static/log/%s.err"%(basedir, progname)
gen_logfile = "%s/static/log/%s.log"%(basedir, progname)
contact_email = "nanjiang.shu@scilifelab.se"
vip_email_file = "%s/config/vip_email.txt"%(basedir)
# note that here the url should be without http://
usage_short="""
Usage: %s seqfile_in_fasta
%s -jobid JOBID -outpath DIR -tmpdir DIR
%s -email EMAIL -baseurl BASE_WWW_URL
%s -only-get-cache
"""%(progname, wspace, wspace, wspace)
usage_ext="""\
Description:
run job
OPTIONS:
-h, --help Print this help message and exit
Created 2016-12-01, 2017-05-26, Nanjiang Shu
"""
usage_exp="""
Examples:
%s /data3/tmp/tmp_dkgSD/query.fa -outpath /data3/result/rst_mXLDGD -tmpdir /data3/tmp/tmp_dkgSD
"""%(progname)
def PrintHelp(fpout=sys.stdout):#{{{
    """Write the short usage, the extended description and the usage
    examples to *fpout* (default: stdout)."""
    for section in (usage_short, usage_ext, usage_exp):
        print(section, file=fpout)#}}}
def _remove_path(path, kind, remover, runjob_errfile):#{{{
    """Remove *path* with *remover* (shutil.rmtree for a 'folder',
    os.remove for a 'file') if it exists; failures are logged to
    runjob_errfile instead of being raised."""
    if os.path.exists(path):
        try:
            remover(path)
        except Exception as e:
            msg = "Failed to delete the %s %s with message \"%s\""%(
                    kind, path, str(e))
            webcom.loginfo(msg, runjob_errfile)
#}}}
def CleanResult(name_software, query_para, outpath_this_seq, runjob_logfile, runjob_errfile):#{{{
    """Delete per-sequence temporary results produced by *name_software*.

    Temporary folders and intermediate output files are removed only
    when the user did not ask to keep them (query_para['isKeepTempFile']).
    Deletion errors are logged to runjob_errfile, never raised.
    """
    # same semantics as the historical check: keep only when the key is
    # present and its value is not equal to False
    keep_temp = ('isKeepTempFile' in query_para
                 and query_para['isKeepTempFile'] != False)
    if name_software in ["prodres", "docker_prodres"]:
        if not keep_temp:
            _remove_path("%s/temp"%(outpath_this_seq), "folder",
                         shutil.rmtree, runjob_errfile)
            # intermediate text outputs are large and reproducible
            for basename in ["Alignment.txt", "tableOut.txt", "fullOut.txt"]:
                _remove_path("%s/outputs/%s"%(outpath_this_seq, basename),
                             "file", os.remove, runjob_errfile)
    elif name_software in ["subcons", "docker_subcons"]:
        if not keep_temp:
            _remove_path("%s/tmp"%(outpath_this_seq), "folder",
                         shutil.rmtree, runjob_errfile)
#}}}
def GetCommand(name_software, seqfile_this_seq, tmp_outpath_result, tmp_outpath_this_seq, query_para):#{{{
    """Return the command for subprocess

    Builds the argv list for the selected *name_software* backend
    (native run, docker exec or singularity exec).  *query_para* carries
    user-selected options that are appended as extra flags.
    """
    # Re-root host paths below the 'static' component so they resolve
    # inside the container mounts; raises ValueError when 'static' is
    # not part of the path.
    try:
        docker_tmp_outpath_result = os.sep + os.sep.join(tmp_outpath_result.split(os.sep)[tmp_outpath_result.split(os.sep).index("static"):])
        docker_seqfile_this_seq = os.sep + os.sep.join(seqfile_this_seq.split(os.sep)[seqfile_this_seq.split(os.sep).index("static"):])
        docker_tmp_outpath_this_seq = os.sep + os.sep.join(tmp_outpath_this_seq.split(os.sep)[tmp_outpath_this_seq.split(os.sep).index("static"):])
    except:
        raise
    cmd = []
    if name_software in ['dummy']:
        runscript = "%s/%s"%(rundir, "soft/dummyrun.sh")
        cmd = ["bash", runscript, seqfile_this_seq, tmp_outpath_this_seq]
    elif name_software in ['scampi2-single']:
        if not os.path.exists(tmp_outpath_this_seq):
            os.makedirs(tmp_outpath_this_seq)
        runscript = "%s/%s"%(rundir, "soft/scampi2/bin/scampi/SCAMPI_run.pl")
        outtopfile = "%s/query.top"%(tmp_outpath_this_seq)
        cmd = [runscript, seqfile_this_seq, outtopfile]
    elif name_software in ['scampi2-msa']:
        if not os.path.exists(tmp_outpath_this_seq):
            os.makedirs(tmp_outpath_this_seq)
        runscript = "%s/%s"%(rundir,
                "soft/scampi2/bin/scampi-msa/run_SCAMPI_multi.pl")
        outtopfile = "%s/query.top"%(tmp_outpath_this_seq)
        # legacy NCBI blast needs these environment variables
        blastdir = "%s/%s"%(rundir, "soft/blast/blast-2.2.26")
        os.environ['BLASTMAT'] = "%s/data"%(blastdir)
        os.environ['BLASTBIN'] = "%s/bin"%(blastdir)
        os.environ['BLASTDB'] = "%s/%s"%(rundir, "soft/blastdb")
        blastdb = "%s/%s"%(os.environ['BLASTDB'], "uniref90.fasta" )
        cmd = [runscript, seqfile_this_seq, outtopfile, blastdir, blastdb]
    elif name_software in ['topcons2']:
        runscript = "%s/%s"%(rundir,
                "soft/topcons2_webserver/workflow/pfam_workflow.py")
        blastdir = "%s/%s"%(rundir, "soft/blast/blast-2.2.26")
        os.environ['BLASTMAT'] = "%s/data"%(blastdir)
        os.environ['BLASTBIN'] = "%s/bin"%(blastdir)
        os.environ['BLASTDB'] = "%s/%s"%(rundir, "soft/blastdb")
        blastdb = "%s/%s"%(os.environ['BLASTDB'], "uniref90.fasta" )
        cmd = ["python", runscript, seqfile_this_seq, tmp_outpath_result, blastdir, blastdb]
    elif name_software in ['docker_topcons2']:
        # run inside the long-lived 'topcons2' container via docker exec
        containerID = 'topcons2'
        apppath = '/app/topcons2'
        runscript = '%s/workflow/pfam_workflow.py'%(apppath)
        blastdir = "%s/%s"%(apppath, "tools/blast-2.2.26")
        blastdb = "%s/%s/%s"%(apppath, "database/blast", "uniref90.fasta" )
        cmd = ["/usr/bin/docker", "exec", "--user", "user", containerID,
                "script", "/dev/null", "-c",
                "cd %s; export BLASTMAT=%s/data; export BLASTBIN=%s/bin; export BLASTDB=%s/database/blast; export HOME=/home/user; python %s %s %s %s %s"%(
                    docker_tmp_outpath_result, blastdir, blastdir, apppath,
                    runscript, docker_seqfile_this_seq,
                    docker_tmp_outpath_result, blastdir, blastdb)]
    elif name_software in ['singularity_topcons2']:
        path_image = '/data/singularity_images/topcons2.img'
        apppath = '/app/topcons2'
        runscript = '%s/workflow/pfam_workflow.py'%(apppath)
        blastdir = "%s/%s"%(apppath, "tools/blast-2.2.26")
        blastdb = "%s/%s/%s"%(apppath, "database/blast", "uniref90.fasta" )
        os.environ['BLASTMAT'] = "%s/data"%(blastdir)
        os.environ['BLASTBIN'] = "%s/bin"%(blastdir)
        os.environ['BLASTDB'] = "%s/%s"%(apppath, "database/blast")
        # bind-mount scratch, data and the webserver static tree
        cmd = ["singularity", "exec",
                "-B", "%s:%s"%('/scratch', '/scratch'),
                "-B", "%s:%s"%('/data', '/data'),
                "-B", "%s:%s"%('%s/static'%(basedir), '/static'),
                path_image,
                "python", runscript, docker_seqfile_this_seq, docker_tmp_outpath_result, blastdir, blastdb]
    elif name_software in ['subcons']:
        runscript = "%s/%s"%(rundir, "soft/subcons/master_subcons.sh")
        cmd = ["bash", runscript, seqfile_this_seq, tmp_outpath_this_seq,
                "-verbose"]
    elif name_software in ['docker_subcons']:
        containerID = 'subcons'
        cmd = ["/usr/bin/docker", "exec", "--user", "user", containerID,
                "script", "/dev/null", "-c",
                "cd %s; export HOME=/home/user; /app/subcons/master_subcons.sh %s %s"%(
                    docker_tmp_outpath_result, docker_seqfile_this_seq,
                    docker_tmp_outpath_this_seq)]
    elif name_software in ['singularity_subcons']:
        path_image = '/data/singularity_images/subcons.img'
        apppath = '/app/subcons'
        runscript = '%s/master_subcons.sh'%(apppath)
        cmd = ["singularity", "exec",
                "-B", "%s:%s"%('/scratch', '/scratch'),
                "-B", "%s:%s"%('/data', '/data'),
                "-B", "%s:%s"%('%s/static'%(basedir), '/static'),
                path_image,
                runscript, docker_seqfile_this_seq, docker_tmp_outpath_result]
    elif name_software in ['docker_boctopus2']:
        containerID = 'boctopus2'
        cmd = ["/usr/bin/docker", "exec", "--user", "user", containerID,
                "script", "/dev/null", "-c",
                "cd %s; export HOME=/home/user; python /app/boctopus2/boctopus_main.py %s %s"%(
                    docker_tmp_outpath_result, docker_seqfile_this_seq,
                    docker_tmp_outpath_this_seq)]
    elif name_software in ['docker_pathopred']:
        if not os.path.exists(tmp_outpath_this_seq):
            os.makedirs(tmp_outpath_this_seq)
        # write the user's variant list next to the query sequence
        variant_text = query_para['variants']
        variant_file = "%s/variants.fa" % tmp_outpath_result
        myfunc.WriteFile(variant_text, variant_file)
        docker_variant_file = os.sep + os.sep.join(variant_file.split(os.sep)[variant_file.split(os.sep).index("static"):])
        identifier_name = query_para['identifier_name']
        containerID = 'pathopred'
        cmd = ["docker", "exec", "--user", "user", containerID,
                "script", "/dev/null", "-c",
                "cd %s; export HOME=/home/user; /app/pathopred/master_pathopred.sh %s %s %s %s"%(
                    docker_tmp_outpath_result, docker_seqfile_this_seq,
                    docker_variant_file, docker_tmp_outpath_this_seq, identifier_name)]
    elif name_software in ['docker_predzinc']:
        containerID = 'predzinc'
        cmd = ["/usr/bin/docker", "exec", "--user", "user", containerID,
                "script", "/dev/null", "-c",
                "cd %s; export HOME=/home/user; /app/predzinc/predzinc.sh --cpu 4 %s -outpath %s"%(
                    docker_tmp_outpath_result, docker_seqfile_this_seq,
                    docker_tmp_outpath_this_seq)]
    elif name_software in ['docker_frag1d']:
        containerID = 'frag1d'
        cmd = ["/usr/bin/docker", "exec", "--user", "user", containerID,
                "script", "/dev/null", "-c",
                "cd %s; export HOME=/home/user; /app/frag1d/frag1d.sh --cpu 4 %s -outpath %s"%(
                    docker_tmp_outpath_result, docker_seqfile_this_seq,
                    docker_tmp_outpath_this_seq)]
    elif name_software in ['prodres']:#{{{
        runscript = "%s/%s"%(rundir, "soft/PRODRES/PRODRES/PRODRES.py")
        path_pfamscan = "%s/misc/PfamScan"%(webserver_root)
        path_pfamdatabase = "%s/soft/PRODRES/databases"%(rundir)
        path_pfamscanscript = "%s/pfam_scan.pl"%(path_pfamscan)
        blastdb = "%s/soft/PRODRES/databases/blastdb/uniref90.fasta"%(rundir)
        if 'PERL5LIB' not in os.environ:
            os.environ['PERL5LIB'] = ""
        os.environ['PERL5LIB'] = os.environ['PERL5LIB'] + ":" + path_pfamscan
        cmd = ["python", runscript, "--input", seqfile_this_seq, "--output",
                tmp_outpath_this_seq, "--pfam-dir", path_pfamdatabase,
                "--pfamscan-script", path_pfamscanscript, "--fallback-db-fasta",
                blastdb]
        # optional user parameters -- only forwarded when set;
        # e-value takes precedence over bit-score for both tools
        if 'second_method' in query_para and query_para['second_method'] != "":
            cmd += ['--second-search', query_para['second_method']]
        if 'pfamscan_evalue' in query_para and query_para['pfamscan_evalue'] != "":
            cmd += ['--pfamscan_e-val', query_para['pfamscan_evalue']]
        elif ('pfamscan_bitscore' in query_para and
                query_para['pfamscan_bitscore'] != ""):
            cmd += ['--pfamscan_bitscore', query_para['pfamscan_bitscore']]
        if 'pfamscan_clanoverlap' in query_para:
            if query_para['pfamscan_clanoverlap'] == False:
                cmd += ['--pfamscan_clan-overlap', 'no']
            else:
                cmd += ['--pfamscan_clan-overlap', 'yes']
        if ('jackhmmer_iteration' in query_para and
                query_para['jackhmmer_iteration'] != ""):
            cmd += ['--jackhmmer_max_iter', query_para['jackhmmer_iteration']]
        if ('jackhmmer_threshold_type' in query_para and
                query_para['jackhmmer_threshold_type'] != ""):
            cmd += ['--jackhmmer-threshold-type',
                    query_para['jackhmmer_threshold_type']]
        if 'jackhmmer_evalue' in query_para and query_para['jackhmmer_evalue'] != "":
            cmd += ['--jackhmmer_e-val', query_para['jackhmmer_evalue']]
        elif ('jackhmmer_bitscore' in query_para and
                query_para['jackhmmer_bitscore'] != ""):
            cmd += ['--jackhmmer_bit-score', query_para['jackhmmer_bitscore']]
        if ('psiblast_iteration' in query_para and
                query_para['psiblast_iteration'] != ""):
            cmd += ['--psiblast_iter', query_para['psiblast_iteration']]
        if 'psiblast_outfmt' in query_para and query_para['psiblast_outfmt'] != "":
            cmd += ['--psiblast_outfmt', query_para['psiblast_outfmt']]
#}}}
    return cmd
#}}}
def RunJob_proq3(modelfile, targetseq, outpath, tmpdir, email, jobid, query_para, g_params):# {{{
    """Run a ProQ3/ProQ3D model-quality-assessment job for a single PDB model.

    Writes runjob.start/finish/failed tag files under `outpath`, optionally
    retrieves a pre-computed sequence profile from `query_para['url_profile']`,
    runs ProQ3 inside a docker container, collects the global score, zips the
    result folder, and on success deletes `tmpdir`.

    Returns:
        0 on success, 1 on any failure.
    """
    all_begin_time = time.time()
    rootname = os.path.basename(os.path.splitext(modelfile)[0])
    # Tag/log files the web front end polls to track job state.
    starttagfile = "%s/runjob.start"%(outpath)
    runjob_errfile = "%s/runjob.err"%(outpath)
    runjob_logfile = "%s/runjob.log"%(outpath)
    app_logfile = "%s/app.log"%(outpath)
    finishtagfile = "%s/runjob.finish"%(outpath)
    failtagfile = "%s/runjob.failed"%(outpath)
    rmsg = ""
    webcom.WriteDateTimeTagFile(starttagfile, runjob_logfile, runjob_errfile)
    # Scoring method defaults to 'sscore' when not supplied by the query.
    try:
        method_quality = query_para['method_quality']
    except KeyError:
        method_quality = 'sscore'
    try:
        isDeepLearning = query_para['isDeepLearning']
    except KeyError:
        isDeepLearning = True
    # m_str selects the score-file suffix: deep-learning variant "proq3d",
    # classic "proq3".
    if isDeepLearning:
        m_str = "proq3d"
    else:
        m_str = "proq3"
    try:
        name_software = query_para['name_software']
    except KeyError:
        name_software = ""
    resultpathname = jobid
    outpath_result = "%s/%s"%(outpath, resultpathname)
    tmp_outpath_result = "%s/%s"%(tmpdir, resultpathname)
    zipfile = "%s.zip"%(resultpathname)
    zipfile_fullpath = "%s.zip"%(outpath_result)
    resultfile_text = "%s/%s"%(outpath_result, "query.proq3.txt")
    finished_model_file = "%s/finished_models.txt"%(outpath_result)
    # Recreate the temporary result folder from scratch.
    for folder in [tmp_outpath_result]:
        if os.path.exists(folder):
            try:
                shutil.rmtree(folder)
            except Exception as e:
                msg = "Failed to delete folder %s with message %s"%(folder, str(e))
                webcom.loginfo(msg, runjob_errfile)
                webcom.WriteDateTimeTagFile(failtagfile, runjob_logfile, runjob_errfile)
                return 1
        try:
            os.makedirs(folder)
        except Exception as e:
            msg = "Failed to create folder %s with return message \"%s\""%(folder, str(e))
            webcom.loginfo(msg, runjob_errfile)
            webcom.WriteDateTimeTagFile(failtagfile, runjob_logfile, runjob_errfile)
            return 1
    tmp_outpath_this_model = "%s/%s"%(tmp_outpath_result, "model_%d"%(0))
    outpath_this_model = "%s/%s"%(outpath_result, "model_%d"%(0))
    # First try to retrieve the profile from archive
    isGetProfileSuccess = False# {{{
    if 'url_profile' in query_para:
        # try to retrieve the profile
        url_profile = query_para['url_profile']
        remote_id = os.path.splitext(os.path.basename(url_profile))[0]
        outfile_zip = "%s/%s.zip"%(tmp_outpath_result, remote_id)
        webcom.loginfo("Trying to retrieve profile from %s"%(url_profile), runjob_logfile)
        isRetrieveSuccess = False
        if myfunc.IsURLExist(url_profile,timeout=5):
            try:
                myfunc.urlretrieve (url_profile, outfile_zip, timeout=10)
                isRetrieveSuccess = True
            except Exception as e:
                msg = "Failed to retrieve profile from %s. Err = %s"%(url_profile, str(e))
                webcom.loginfo(msg, runjob_errfile)
                pass
        if os.path.exists(outfile_zip) and isRetrieveSuccess:
            msg = "Retrieved profile from %s"%(url_profile)
            webcom.loginfo("Retrieved profile from %s"%(url_profile), runjob_logfile)
            # Unpack the archive and rename it to the canonical "profile_0".
            # Any failure here is deliberately swallowed: the profile will
            # simply be rebuilt below (isGetProfileSuccess stays False).
            cmd = ["unzip", outfile_zip, "-d", tmp_outpath_result]
            try:
                subprocess.check_output(cmd)
                try:
                    os.rename("%s/%s"%(tmp_outpath_result, remote_id),
                              "%s/profile_0"%(tmp_outpath_result))
                    isGetProfileSuccess = True
                    try:
                        os.remove(outfile_zip)
                    except:
                        pass
                except:
                    pass
            except:
                pass
    # }}}
    tmp_seqfile = "%s/query.fasta"%(tmp_outpath_result)
    tmp_outpath_profile = "%s/profile_0"%(tmp_outpath_result)
    # Paths as seen from inside the docker container: everything below the
    # "static" component of the host path, re-rooted at "/".
    docker_tmp_seqfile = os.sep + os.sep.join(tmp_seqfile.split(os.sep)[tmp_seqfile.split(os.sep).index("static"):])
    docker_modelfile= os.sep + os.sep.join(modelfile.split(os.sep)[modelfile.split(os.sep).index("static"):])
    docker_tmp_outpath_profile = os.sep + os.sep.join(tmp_outpath_profile.split(os.sep)[tmp_outpath_profile.split(os.sep).index("static"):])
    docker_tmp_outpath_this_model = os.sep + os.sep.join(tmp_outpath_this_model.split(os.sep)[tmp_outpath_this_model.split(os.sep).index("static"):])
    docker_tmp_outpath_result = os.sep + os.sep.join(tmp_outpath_result.split(os.sep)[tmp_outpath_result.split(os.sep).index("static"):])
    timefile = "%s/time.txt"%(tmp_outpath_result)
    runtime_in_sec_profile = -1.0
    runtime_in_sec_model = -1.0
    if name_software in ['docker_proq3']:
        myfunc.WriteFile(">query\n%s\n"%(targetseq), tmp_seqfile)
        containerID = 'proq3'
        if not isGetProfileSuccess:
            # try to generate profile
            cmd = ["/usr/bin/docker", "exec", "--user", "user", containerID,
                    "script", "/dev/null", "-c",
                    "cd %s; export HOME=/home/user; /app/proq3/run_proq3.sh -fasta %s -outpath %s -only-build-profile"%(
                            docker_tmp_outpath_result, docker_tmp_seqfile,
                            docker_tmp_outpath_profile)]
            (t_success, runtime_in_sec) = webcom.RunCmd(cmd, runjob_logfile, runjob_errfile, verbose=True)
            myfunc.WriteFile("%s;%f\n"%("profile_0",runtime_in_sec), timefile, "a", True)
            runtime_in_sec_profile = runtime_in_sec
        # then run with the pre-created profile
        proq3opt = webcom.GetProQ3Option(query_para)
        cmd = ["/usr/bin/docker", "exec", "--user", "user", containerID,
                "script", "/dev/null", "-c",
                "cd %s; export HOME=/home/user; /app/proq3/run_proq3.sh --profile %s %s -outpath %s -verbose %s"%(
                        docker_tmp_outpath_result, "%s/query.fasta"%(docker_tmp_outpath_profile),
                        docker_modelfile, docker_tmp_outpath_this_model, " ".join(proq3opt))]
        (t_success, runtime_in_sec) = webcom.RunCmd(cmd, runjob_logfile, runjob_errfile, verbose=True)
        webcom.loginfo("cmdline: %s"%(" ".join(cmd)), runjob_logfile)
        myfunc.WriteFile("%s;%f\n"%("model_0",runtime_in_sec), timefile, "a", True)
        runtime_in_sec_model = runtime_in_sec
    # NOTE(review): if name_software is not 'docker_proq3', proq3opt is never
    # assigned, and WriteProQ3TextResultFile below would raise NameError —
    # confirm callers only reach here with docker_proq3.
    if os.path.exists(tmp_outpath_result):
        cmd = ["mv","-f", tmp_outpath_result, outpath_result]
        (isCmdSuccess, t_runtime) = webcom.RunCmd(cmd, runjob_logfile, runjob_errfile, True)
        # copy time.txt to within the model folder
        try:
            shutil.copyfile("%s/time.txt"%(outpath_result), "%s/model_0/time.txt"%(outpath_result))
        except Exception as e:
            webcom.loginfo("Copy time.txt failed with errmsg=%s"%(str(e)), runjob_errfile)
        CleanResult(name_software, query_para, outpath_result, runjob_logfile, runjob_errfile)
        if isCmdSuccess:
            # Collect the global quality score and record the finished model.
            globalscorefile = "%s/%s.%s.%s.global"%(outpath_this_model, "query.pdb", m_str, method_quality)
            (globalscore, itemList) = webcom.ReadProQ3GlobalScore(globalscorefile)
            modelseqfile = "%s/%s.fasta"%(outpath_this_model, "query.pdb")
            modellength = myfunc.GetSingleFastaLength(modelseqfile)
            modelinfo = ["model_0", str(modellength), str(runtime_in_sec_model)]
            if globalscore:
                for i in range(len(itemList)):
                    modelinfo.append(str(globalscore[itemList[i]]))
            myfunc.WriteFile("\t".join(modelinfo)+"\n", finished_model_file, "a")
            modelFileList = ["%s/%s"%(outpath_this_model, "query.pdb")]
            webcom.WriteProQ3TextResultFile(resultfile_text, query_para, modelFileList,
                    runtime_in_sec_model, g_params['base_www_url'], proq3opt, statfile="")
    # make the zip file for all result
    os.chdir(outpath)
    cmd = ["zip", "-rq", zipfile, resultpathname]
    webcom.RunCmd(cmd, runjob_logfile, runjob_errfile)
    webcom.WriteDateTimeTagFile(finishtagfile, runjob_logfile, runjob_errfile)
    isSuccess = False
    if (os.path.exists(finishtagfile) and os.path.exists(zipfile_fullpath)):
        isSuccess = True
    else:
        isSuccess = False
        webcom.WriteDateTimeTagFile(failtagfile, runjob_logfile, runjob_errfile)
    # Success is judged by an empty error file; keep tmpdir for debugging
    # when anything was logged to runjob_errfile.
    if os.path.exists(runjob_errfile) and os.stat(runjob_errfile).st_size > 0:
        return 1
    else: # no error, delete the tmpdir
        try:
            webcom.loginfo("shutil.rmtree(%s)"%(tmpdir), runjob_logfile)
            shutil.rmtree(tmpdir)
            return 0
        except Exception as e:
            msg = "Failed to delete tmpdir %s with message \"%s\""%(tmpdir, str(e))
            webcom.loginfo(msg, runjob_errfile)
            return 1
# }}}
def RunJob(infile, outpath, tmpdir, email, jobid, query_para, g_params):#{{{
    """Run a generic sequence-analysis job for every sequence in `infile`.

    For each FASTA record: write a per-sequence query file, build the backend
    command via GetCommand(), run it, then move the per-sequence result folder
    into `outpath` and append a line to finished_seqs.txt. Finally the whole
    result folder is zipped and, if no errors were logged, `tmpdir` is removed.

    Returns:
        0 on success, 1 on failure.
    """
    all_begin_time = time.time()
    rootname = os.path.basename(os.path.splitext(infile)[0])
    # Tag/log files the web front end polls to track job state.
    starttagfile = "%s/runjob.start"%(outpath)
    runjob_errfile = "%s/runjob.err"%(outpath)
    runjob_logfile = "%s/runjob.log"%(outpath)
    app_logfile = "%s/app.log"%(outpath)
    finishtagfile = "%s/runjob.finish"%(outpath)
    failtagfile = "%s/runjob.failed"%(outpath)
    webcom.WriteDateTimeTagFile(starttagfile, runjob_logfile, runjob_errfile)
    rmsg = ""
    try:
        name_software = query_para['name_software']
    except KeyError:
        name_software = ""
    name_server = webcom.GetNameServerFromNameSoftware(name_software)
    resultpathname = jobid
    outpath_result = "%s/%s"%(outpath, resultpathname)
    tmp_outpath_result = "%s/%s"%(tmpdir, resultpathname)
    zipfile = "%s.zip"%(resultpathname)
    zipfile_fullpath = "%s.zip"%(outpath_result)
    resultfile_text = "%s/%s"%(outpath_result, "query.result.txt")
    finished_seq_file = "%s/finished_seqs.txt"%(outpath_result)
    # Recreate both the final and the temporary result folders from scratch.
    for folder in [outpath_result, tmp_outpath_result]:
        if os.path.exists(folder):
            try:
                shutil.rmtree(folder)
            except Exception as e:
                msg = "Failed to delete folder %s with message %s"%(folder, str(e))
                webcom.loginfo(msg, runjob_errfile)
                webcom.WriteDateTimeTagFile(failtagfile, runjob_logfile, runjob_errfile)
                return 1
        try:
            os.makedirs(folder)
        except Exception as e:
            msg = "Failed to create folder %s with message %s"%(folder, str(e))
            webcom.loginfo(msg, runjob_errfile)
            webcom.WriteDateTimeTagFile(failtagfile, runjob_logfile, runjob_errfile)
            return 1
    # Truncate/create the finished-sequences ledger (best effort).
    try:
        open(finished_seq_file, 'w').close()
    except:
        pass
    (seqIDList , seqAnnoList, seqList) = myfunc.ReadFasta(infile)
    for ii in range(len(seqIDList)):
        origIndex = ii
        seq = seqList[ii]
        seqid = seqIDList[ii]
        description = seqAnnoList[ii]
        subfoldername_this_seq = "seq_%d"%(origIndex)
        outpath_this_seq = "%s/%s"%(outpath_result, subfoldername_this_seq)
        # The temporary per-sequence folder is always "seq_0"; it is moved to
        # its final per-index name after the run.
        tmp_outpath_this_seq = "%s/%s"%(tmp_outpath_result, "seq_%d"%(0))
        if os.path.exists(tmp_outpath_this_seq):
            try:
                shutil.rmtree(tmp_outpath_this_seq)
            except OSError:
                pass
        seqfile_this_seq = "%s/%s"%(tmp_outpath_result, "query_%d.fa"%(origIndex))
        seqcontent = ">query_%d\n%s\n"%(origIndex, seq)
        myfunc.WriteFile(seqcontent, seqfile_this_seq, "w")
        if not os.path.exists(seqfile_this_seq):
            msg = "Failed to generate seq index %d"%(origIndex)
            webcom.loginfo(msg, runjob_errfile)
            continue
        cmd = GetCommand(name_software, seqfile_this_seq, tmp_outpath_result, tmp_outpath_this_seq, query_para)
        if len(cmd) < 1:
            msg = "empty cmd for name_software = %s"%(name_software)
            webcom.loginfo(msg, runjob_errfile)
            pass
        (t_success, runtime_in_sec) = webcom.RunCmd(cmd, runjob_logfile, runjob_errfile, verbose=True)
        # Make sure a seq.fa and time.txt exist in the result folder even if
        # the backend did not produce them.
        aaseqfile = "%s/seq.fa"%(tmp_outpath_this_seq)
        if not os.path.exists(aaseqfile):
            seqcontent = ">%s\n%s\n"%(description, seq)
            myfunc.WriteFile(seqcontent, aaseqfile, "w")
        timefile = "%s/time.txt"%(tmp_outpath_this_seq)
        if not os.path.exists(timefile):
            myfunc.WriteFile("%s;%f\n"%(seqid,runtime_in_sec), timefile, "w")
        if os.path.exists(tmp_outpath_this_seq):
            fromdir = tmp_outpath_this_seq
            if name_software in ["prodres", "docker_prodres", "singularity_prodres"]:
                # PRODRES nests its output one level deeper, under query_0.
                fromdir = fromdir + os.sep + "query_0"
                # for prodres, also copy the aaseqfile and timefile to the
                # fromdir
                try:
                    shutil.copy(aaseqfile, fromdir)
                    shutil.copy(timefile, fromdir)
                except Exception as e:
                    msg = "failed to copy aaseqfile or timefile to the folder %s"%(fromdir)
                    webcom.loginfo(msg, runjob_errfile)
                    pass
            cmd = ["mv","-f", fromdir, outpath_this_seq]
            (isCmdSuccess, t_runtime) = webcom.RunCmd(cmd, runjob_logfile, runjob_errfile, True)
            CleanResult(name_software, query_para, outpath_this_seq, runjob_logfile, runjob_errfile)
            if isCmdSuccess:
                runtime = runtime_in_sec #in seconds
                # Prefer the runtime recorded in time.txt when parseable.
                timefile = "%s/time.txt"%(outpath_this_seq)
                if os.path.exists(timefile):
                    content = myfunc.ReadFile(timefile).split("\n")[0]
                    strs = content.split(";")
                    try:
                        runtime = "%.1f"%(float(strs[1]))
                    except:
                        pass
                extItem1 = None
                extItem2 = None
                info_finish = [ "seq_%d"%origIndex, str(len(seq)),
                        str(extItem1), str(extItem2),
                        "newrun", str(runtime), description]
                myfunc.WriteFile("\t".join(info_finish)+"\n",
                        finished_seq_file, "a", isFlush=True)
                # now write the text output for this seq
                info_this_seq = "%s\t%d\t%s\t%s"%("seq_%d"%origIndex, len(seq), description, seq)
                resultfile_text_this_seq = "%s/%s"%(outpath_this_seq, "query.result.txt")
                webcom.loginfo("Write resultfile_text %s"%(resultfile_text_this_seq), runjob_logfile)
                webcom.WriteDumpedTextResultFile(name_server, resultfile_text_this_seq,
                        outpath_result,
                        [info_this_seq], runtime_in_sec,
                        g_params['base_www_url'])
    all_end_time = time.time()
    all_runtime_in_sec = all_end_time - all_begin_time
    # make the zip file for all result
    statfile = "%s/%s"%(outpath_result, "stat.txt")
    os.chdir(outpath)
    cmd = ["zip", "-rq", zipfile, resultpathname]
    webcom.RunCmd(cmd, runjob_logfile, runjob_errfile)
    webcom.WriteDateTimeTagFile(finishtagfile, runjob_logfile, runjob_errfile)
    isSuccess = False
    if (os.path.exists(finishtagfile) and os.path.exists(zipfile_fullpath)):
        isSuccess = True
    else:
        isSuccess = False
        webcom.WriteDateTimeTagFile(failtagfile, runjob_logfile, runjob_errfile)
    # try to delete the tmpdir if there is no error
    if os.path.exists(runjob_errfile) and os.stat(runjob_errfile).st_size > 0:
        return 1
    else:
        try:
            webcom.loginfo("shutil.rmtree(%s)"%(tmpdir), runjob_logfile)
            shutil.rmtree(tmpdir)
            return 0
        except Exception as e:
            msg = "Failed to delete tmpdir %s with message \"%s\""%(tmpdir, str(e))
            webcom.loginfo(msg, runjob_errfile)
            return 1
#}}}
def main(g_params):#{{{
    """Parse command-line arguments, acquire the per-job lock, and dispatch.

    Dispatches to RunJob_proq3() for proq3-family jobs (model provided in
    query.para.txt) and to RunJob() for everything else.

    Returns:
        The dispatched job's exit status (0 success / 1 failure), or 1 on
        argument/setup errors.
    """
    argv = sys.argv
    numArgv = len(argv)
    if numArgv < 2:
        PrintHelp()
        return 1
    outpath = ""
    infile = ""
    tmpdir = ""
    email = ""
    jobid = ""
    i = 1
    isNonOptionArg=False
    # Hand-rolled option parser; "--" forces the next token to be the infile.
    while i < numArgv:
        if isNonOptionArg == True:
            infile = argv[i]
            isNonOptionArg = False
            i += 1
        elif argv[i] == "--":
            isNonOptionArg = True
            i += 1
        elif argv[i][0] == "-":
            if argv[i] in ["-h", "--help"]:
                PrintHelp()
                return 1
            elif argv[i] in ["-outpath", "--outpath"]:
                (outpath, i) = myfunc.my_getopt_str(argv, i)
            elif argv[i] in ["-tmpdir", "--tmpdir"] :
                (tmpdir, i) = myfunc.my_getopt_str(argv, i)
            elif argv[i] in ["-jobid", "--jobid"] :
                (jobid, i) = myfunc.my_getopt_str(argv, i)
            elif argv[i] in ["-baseurl", "--baseurl"] :
                (g_params['base_www_url'], i) = myfunc.my_getopt_str(argv, i)
            elif argv[i] in ["-email", "--email"] :
                (email, i) = myfunc.my_getopt_str(argv, i)
            elif argv[i] in ["-q", "--q"]:
                g_params['isQuiet'] = True
                i += 1
            else:
                print("Error! Wrong argument:", argv[i], file=sys.stderr)
                return 1
        else:
            infile = argv[i]
            i += 1
    if jobid == "":
        print("%s: jobid not set. exit"%(sys.argv[0]), file=sys.stderr)
        return 1
    g_params['jobid'] = jobid
    # create a lock file in the resultpath when run_job.py is running for this
    # job, so that daemon will not run on this folder
    lockname = "runjob.lock"
    lock_file = "%s/%s/%s"%(path_result, jobid, lockname)
    g_params['lockfile'] = lock_file
    # fp is intentionally left open: the advisory lock is held for the
    # lifetime of this process and released when fp is closed/collected.
    fp = open(lock_file, 'w')
    try:
        fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        print("Another instance of %s is running"%(progname), file=sys.stderr)
        return 1
    if myfunc.checkfile(infile, "infile") != 0:
        return 1
    if outpath == "":
        print("outpath not set. exit", file=sys.stderr)
        return 1
    elif not os.path.exists(outpath):
        try:
            subprocess.check_output(["mkdir", "-p", outpath])
        except subprocess.CalledProcessError as e:
            print(e, file=sys.stderr)
            return 1
    if tmpdir == "":
        print("tmpdir not set. exit", file=sys.stderr)
        return 1
    elif not os.path.exists(tmpdir):
        try:
            subprocess.check_output(["mkdir", "-p", tmpdir])
        except subprocess.CalledProcessError as e:
            print(e, file=sys.stderr)
            return 1
    if os.path.exists(vip_email_file):
        g_params['vip_user_list'] = myfunc.ReadIDList(vip_email_file)
    # Job parameters are stored as JSON in query.para.txt under outpath.
    query_parafile = "%s/query.para.txt"%(outpath)
    query_para = {}
    content = myfunc.ReadFile(query_parafile)
    if content != "":
        query_para = json.loads(content)
    try:
        name_software = query_para['name_software']
    except KeyError:
        name_software = ""
    status = 0
    if name_software in ["proq3", "docker_proq3", "singularity_proq3"]:
        # for proq3, model is provided in query_para
        # provided in the query_para
        runjob_errfile = "%s/runjob.err"%(outpath)
        runjob_logfile = "%s/runjob.log"%(outpath)
        modelfile = "%s/query.pdb"%(outpath)
        if 'pdb_model' in query_para:
            model = query_para['pdb_model']
            myfunc.WriteFile(model, modelfile)
        elif 'url_pdb_model' in query_para:
            url_pdb_model = query_para['url_pdb_model']
            webcom.loginfo("Trying to retrieve profile from %s"%(url_pdb_model), runjob_logfile)
            isRetrieveSuccess = False
            if myfunc.IsURLExist(url_pdb_model,timeout=5):
                try:
                    myfunc.urlretrieve (url_pdb_model, modelfile, timeout=10)
                    isRetrieveSuccess = True
                except Exception as e:
                    msg = "Failed to retrieve modelfile from %s. Err = %s"%(url_pdb_model, e)
                    webcom.loginfo(msg, runjob_logfile)
                    return 1
        else:
            webcom.loginfo("Neither pdb_model nor url_pdb_model are provided. Exit", gen_errfile)
            return 1
        # Target sequence: taken from query_para, or extracted from the model.
        # NOTE(review): if 'targetseq' is absent AND PDB2Seq returns an empty
        # list, targetseq is never bound and the RunJob_proq3 call below
        # raises UnboundLocalError — confirm whether that input is possible.
        try:
            targetseq = query_para['targetseq']
        except:
            seqList = myfunc.PDB2Seq(modelfile)
            if len(seqList) >= 1:
                targetseq = seqList[0]
        print("Run proq3")
        status = RunJob_proq3(modelfile, targetseq, outpath, tmpdir, email, jobid, query_para, g_params)
    else:
        status = RunJob(infile, outpath, tmpdir, email, jobid, query_para, g_params)
    return status
#}}}
def InitGlobalParameter():#{{{
    """Build and return the dictionary of default global parameters."""
    defaults = {
        'isQuiet': True,          # suppress verbose output by default
        'base_www_url': "",       # filled from the -baseurl option
        'jobid': "",              # filled from the -jobid option
        'lockfile': "",           # set in main() once the job lock is created
        'vip_user_list': [],      # loaded from vip_email_file when present
    }
    return dict(defaults)
#}}}
if __name__ == '__main__' :
    # Script entry point: run main(), then remove the per-job lock file
    # (created in main()) before exiting with main()'s status.
    g_params = InitGlobalParameter()
    status = main(g_params)
    if os.path.exists(g_params['lockfile']):
        try:
            os.remove(g_params['lockfile'])
        except:
            # Best effort: record the failure in the global error file.
            myfunc.WriteFile("Failed to delete lockfile %s\n"%(g_params['lockfile']), gen_errfile, "a", True)
    sys.exit(status)
| ElofssonLab/web_common_backend | proj/pred/app/run_job.py | Python | mit | 35,277 | [
"BLAST"
] | 4aefe41e6b0ecc99eb0fae03e18c01a5472e37bb3997c4d5385c03186b50ca29 |
# $Id$
#
# Copyright (C) 2007 greg Landrum
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
import doctest
import unittest
from rdkit import RDConfig
from rdkit.Chem.Fingerprints import DbFpSupplier
from rdkit.Dbase.DbConnection import DbConnect
def load_tests(loader, tests, ignore):
  """Unittest discovery hook: append this module's doctests to the suite."""
  doc_suite = doctest.DocTestSuite(DbFpSupplier, optionflags=doctest.ELLIPSIS)
  tests.addTests(doc_suite)
  return tests
class TestCase(unittest.TestCase):

  def test_ForwardDbFpSupplier(self):
    # Extra coverage: the constructor must reject an unknown fingerprint
    # column, and a valid column must expose the 'ID' column name.
    connection = DbConnect(RDConfig.RDTestDatabase, 'simple_combined')
    with self.assertRaises(ValueError):
      DbFpSupplier.ForwardDbFpSupplier(connection.GetData(), fpColName='typo')
    supplier = DbFpSupplier.ForwardDbFpSupplier(connection.GetData(),
                                                fpColName='AutoFragmentFp')
    self.assertIn('ID', supplier.GetColumnNames())
| bp-kelley/rdkit | rdkit/Chem/Fingerprints/UnitTestDbFpSupplier.py | Python | bsd-3-clause | 1,059 | [
"RDKit"
] | 3eec9a99a840f06cffb1b0257f0c61d4c941555bcd69abe2e667949949ad55bd |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing `LinearOperator` and sub-classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import test
class OperatorBuildInfo(object):
  """Container describing the expected matrix shape for a test case.

  Stores the expected shape of the operator's dense matrix, and attaches any
  extra keyword metadata as attributes for use by the test harness.
  """

  def __init__(self, shape, **extra_metadata):
    self.shape = shape
    for key, value in extra_metadata.items():
      setattr(self, key, value)
@six.add_metaclass(abc.ABCMeta)  # pylint: disable=no-init
class LinearOperatorDerivedClassTest(test.TestCase):
  """Tests for derived classes.
  Subclasses should implement every abstractmethod, and this will enable all
  test methods to work.
  """
  # Absolute/relative tolerance for tests.
  # Keyed by dtype; looser for 32-bit/16-bit types, tighter for 64-bit.
  _atol = {
      dtypes.float16: 1e-3,
      dtypes.float32: 1e-6,
      dtypes.float64: 1e-12,
      dtypes.complex64: 1e-6,
      dtypes.complex128: 1e-12
  }
  _rtol = {
      dtypes.float16: 1e-3,
      dtypes.float32: 1e-6,
      dtypes.float64: 1e-12,
      dtypes.complex64: 1e-6,
      dtypes.complex128: 1e-12
  }
  def assertAC(self, x, y):
    """Derived classes can set _atol, _rtol to get different tolerance."""
    dtype = dtypes.as_dtype(x.dtype)
    atol = self._atol[dtype]
    rtol = self._rtol[dtype]
    self.assertAllClose(x, y, atol=atol, rtol=rtol)
  @property
  def _adjoint_options(self):
    # Values of `adjoint` swept by the matmul/solve tests.
    return [False, True]
  @property
  def _adjoint_arg_options(self):
    # Values of `adjoint_arg` swept by the matmul/solve tests.
    return [False, True]
  @property
  def _dtypes_to_test(self):
    # TODO(langmore) Test tf.float16 once tf.matrix_solve works in 16bit.
    return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
  @property
  def _use_placeholder_options(self):
    # Whether to build the operator from a placeholder of undefined shape.
    return [False, True]
  @abc.abstractproperty
  def _operator_build_infos(self):
    """Returns list of OperatorBuildInfo, encapsulating the shape to test."""
    raise NotImplementedError("operator_build_infos has not been implemented.")
  @abc.abstractmethod
  def _operator_and_matrix(
      self, build_info, dtype, use_placeholder,
      ensure_self_adjoint_and_pd=False):
    """Build a batch matrix and an Operator that should have similar behavior.
    Every operator acts like a (batch) matrix. This method returns both
    together, and is used by tests.
    Args:
      build_info: `OperatorBuildInfo`, encoding shape information about the
        operator.
      dtype: Numpy dtype. Data type of returned array/operator.
      use_placeholder: Python bool. If True, initialize the operator with a
        placeholder of undefined shape and correct dtype.
      ensure_self_adjoint_and_pd: If `True`,
        construct this operator to be Hermitian Positive Definite, as well
        as ensuring the hints `is_positive_definite` and `is_self_adjoint`
        are set.
        This is useful for testing methods such as `cholesky`.
    Returns:
      operator: `LinearOperator` subclass instance.
      mat: `Tensor` representing operator.
    """
    # Create a matrix as a numpy array with desired shape/dtype.
    # Create a LinearOperator that should have the same behavior as the matrix.
    raise NotImplementedError("Not implemented yet.")
  @abc.abstractmethod
  def _make_rhs(self, operator, adjoint, with_batch=True):
    """Make a rhs appropriate for calling operator.solve(rhs).
    Args:
      operator: A `LinearOperator`
      adjoint: Python `bool`. If `True`, we are making a 'rhs' value for the
        adjoint operator.
      with_batch: Python `bool`. If `True`, create `rhs` with the same batch
        shape as operator, and otherwise create a matrix without any batch
        shape.
    Returns:
      A `Tensor`
    """
    raise NotImplementedError("_make_rhs is not defined.")
  @abc.abstractmethod
  def _make_x(self, operator, adjoint, with_batch=True):
    """Make an 'x' appropriate for calling operator.matmul(x).
    Args:
      operator: A `LinearOperator`
      adjoint: Python `bool`. If `True`, we are making an 'x' value for the
        adjoint operator.
      with_batch: Python `bool`. If `True`, create `x` with the same batch shape
        as operator, and otherwise create a matrix without any batch shape.
    Returns:
      A `Tensor`
    """
    raise NotImplementedError("_make_x is not defined.")
  @property
  def _tests_to_skip(self):
    """List of test names to skip."""
    # Subclasses should over-ride if they want to skip some tests.
    # To skip "test_foo", add "foo" to this list.
    return []
  def _skip_if_tests_to_skip_contains(self, test_name):
    """If self._tests_to_skip contains test_name, raise SkipTest exception.
    See tests below for usage.
    Args:
      test_name: String name corresponding to a test.
    Raises:
      SkipTest Exception, if test_name is in self._tests_to_skip.
    """
    if test_name in self._tests_to_skip:
      self.skipTest(
          "{} skipped because it was added to self._tests_to_skip.".format(
              test_name))
  def test_to_dense(self):
    # to_dense() must reproduce the dense matrix the operator was built from.
    self._skip_if_tests_to_skip_contains("to_dense")
    for use_placeholder in self._use_placeholder_options:
      for build_info in self._operator_build_infos:
        for dtype in self._dtypes_to_test:
          with self.session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat = self._operator_and_matrix(
                build_info, dtype, use_placeholder=use_placeholder)
            op_dense = operator.to_dense()
            if not use_placeholder:
              self.assertAllEqual(build_info.shape, op_dense.get_shape())
            op_dense_v, mat_v = sess.run([op_dense, mat])
            self.assertAC(op_dense_v, mat_v)
  def test_det(self):
    # determinant() must match tf's dense matrix_determinant.
    self._skip_if_tests_to_skip_contains("det")
    for use_placeholder in self._use_placeholder_options:
      for build_info in self._operator_build_infos:
        for dtype in self._dtypes_to_test:
          with self.session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat = self._operator_and_matrix(
                build_info, dtype, use_placeholder=use_placeholder)
            op_det = operator.determinant()
            if not use_placeholder:
              self.assertAllEqual(build_info.shape[:-2], op_det.get_shape())
            op_det_v, mat_det_v = sess.run(
                [op_det, linalg_ops.matrix_determinant(mat)])
            self.assertAC(op_det_v, mat_det_v)
  def test_log_abs_det(self):
    # log_abs_determinant() must match the log-abs part of slogdet.
    self._skip_if_tests_to_skip_contains("log_abs_det")
    for use_placeholder in self._use_placeholder_options:
      for build_info in self._operator_build_infos:
        for dtype in self._dtypes_to_test:
          with self.session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat = self._operator_and_matrix(
                build_info, dtype, use_placeholder=use_placeholder)
            op_log_abs_det = operator.log_abs_determinant()
            _, mat_log_abs_det = linalg.slogdet(mat)
            if not use_placeholder:
              self.assertAllEqual(
                  build_info.shape[:-2], op_log_abs_det.get_shape())
            op_log_abs_det_v, mat_log_abs_det_v = sess.run(
                [op_log_abs_det, mat_log_abs_det])
            self.assertAC(op_log_abs_det_v, mat_log_abs_det_v)
  def _test_matmul(self, with_batch):
    """Check operator.matmul against dense matmul over all option sweeps."""
    for use_placeholder in self._use_placeholder_options:
      for build_info in self._operator_build_infos:
        # If batch dimensions are omitted, but there are
        # no batch dimensions for the linear operator, then
        # skip the test case. This is already checked with
        # with_batch=True.
        if not with_batch and len(build_info.shape) <= 2:
          continue
        for dtype in self._dtypes_to_test:
          for adjoint in self._adjoint_options:
            for adjoint_arg in self._adjoint_arg_options:
              with self.session(graph=ops.Graph()) as sess:
                sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
                operator, mat = self._operator_and_matrix(
                    build_info, dtype, use_placeholder=use_placeholder)
                x = self._make_x(
                    operator, adjoint=adjoint, with_batch=with_batch)
                # If adjoint_arg, compute A X^H^H = A X.
                if adjoint_arg:
                  op_matmul = operator.matmul(
                      linalg.adjoint(x),
                      adjoint=adjoint,
                      adjoint_arg=adjoint_arg)
                else:
                  op_matmul = operator.matmul(x, adjoint=adjoint)
                mat_matmul = linear_operator_util.matmul_with_broadcast(
                    mat, x, adjoint_a=adjoint)
                if not use_placeholder:
                  self.assertAllEqual(op_matmul.get_shape(),
                                      mat_matmul.get_shape())
                op_matmul_v, mat_matmul_v = sess.run(
                    [op_matmul, mat_matmul])
                self.assertAC(op_matmul_v, mat_matmul_v)
  def test_matmul(self):
    self._skip_if_tests_to_skip_contains("matmul")
    self._test_matmul(with_batch=True)
  def test_matmul_with_broadcast(self):
    # Same as test_matmul but with x lacking the operator's batch dims.
    self._skip_if_tests_to_skip_contains("matmul_with_broadcast")
    self._test_matmul(with_batch=False)
  def test_adjoint(self):
    # Both operator.adjoint() and the .H alias must match the dense adjoint.
    self._skip_if_tests_to_skip_contains("adjoint")
    for use_placeholder in self._use_placeholder_options:
      for build_info in self._operator_build_infos:
        for dtype in self._dtypes_to_test:
          with self.test_session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat = self._operator_and_matrix(
                build_info, dtype, use_placeholder=use_placeholder)
            op_adjoint = operator.adjoint().to_dense()
            op_adjoint_h = operator.H.to_dense()
            mat_adjoint = linalg.adjoint(mat)
            op_adjoint_v, op_adjoint_h_v, mat_adjoint_v = sess.run(
                [op_adjoint, op_adjoint_h, mat_adjoint])
            self.assertAC(mat_adjoint_v, op_adjoint_v)
            self.assertAC(mat_adjoint_v, op_adjoint_h_v)
  def test_cholesky(self):
    # Requires a Hermitian positive-definite operator (see
    # ensure_self_adjoint_and_pd=True below).
    self._skip_if_tests_to_skip_contains("cholesky")
    for use_placeholder in self._use_placeholder_options:
      for build_info in self._operator_build_infos:
        for dtype in self._dtypes_to_test:
          with self.test_session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat = self._operator_and_matrix(
                build_info, dtype, use_placeholder=use_placeholder,
                ensure_self_adjoint_and_pd=True)
            op_chol = operator.cholesky().to_dense()
            mat_chol = linalg_ops.cholesky(mat)
            op_chol_v, mat_chol_v = sess.run([op_chol, mat_chol])
            self.assertAC(mat_chol_v, op_chol_v)
  def _test_solve(self, with_batch):
    """Check operator.solve against dense matrix solve over all sweeps."""
    for use_placeholder in self._use_placeholder_options:
      for build_info in self._operator_build_infos:
        # If batch dimensions are omitted, but there are
        # no batch dimensions for the linear operator, then
        # skip the test case. This is already checked with
        # with_batch=True.
        if not with_batch and len(build_info.shape) <= 2:
          continue
        for dtype in self._dtypes_to_test:
          for adjoint in self._adjoint_options:
            for adjoint_arg in self._adjoint_arg_options:
              with self.session(graph=ops.Graph()) as sess:
                sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
                operator, mat = self._operator_and_matrix(
                    build_info, dtype, use_placeholder=use_placeholder)
                rhs = self._make_rhs(
                    operator, adjoint=adjoint, with_batch=with_batch)
                # If adjoint_arg, solve A X = (rhs^H)^H = rhs.
                if adjoint_arg:
                  op_solve = operator.solve(
                      linalg.adjoint(rhs),
                      adjoint=adjoint,
                      adjoint_arg=adjoint_arg)
                else:
                  op_solve = operator.solve(
                      rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
                mat_solve = linear_operator_util.matrix_solve_with_broadcast(
                    mat, rhs, adjoint=adjoint)
                if not use_placeholder:
                  self.assertAllEqual(op_solve.get_shape(),
                                      mat_solve.get_shape())
                op_solve_v, mat_solve_v = sess.run([op_solve, mat_solve])
                self.assertAC(op_solve_v, mat_solve_v)
  def test_solve(self):
    self._skip_if_tests_to_skip_contains("solve")
    self._test_solve(with_batch=True)
  def test_solve_with_broadcast(self):
    # Same as test_solve but with rhs lacking the operator's batch dims.
    self._skip_if_tests_to_skip_contains("solve_with_broadcast")
    self._test_solve(with_batch=False)
  def _test_inverse(self):
    """Check operator.inverse() against the dense matrix inverse."""
    for use_placeholder in self._use_placeholder_options:
      for build_info in self._operator_build_infos:
        for dtype in self._dtypes_to_test:
          with self.session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat = self._operator_and_matrix(
                build_info, dtype, use_placeholder=use_placeholder)
            op_inverse_v, mat_inverse_v = sess.run([
                operator.inverse().to_dense(), linalg.inv(mat)])
            self.assertAC(op_inverse_v, mat_inverse_v)
  def test_inverse(self):
    self._skip_if_tests_to_skip_contains("inverse")
    self._test_inverse()
  def test_trace(self):
    # trace() must match the dense trace.
    self._skip_if_tests_to_skip_contains("trace")
    for use_placeholder in self._use_placeholder_options:
      for build_info in self._operator_build_infos:
        for dtype in self._dtypes_to_test:
          with self.session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat = self._operator_and_matrix(
                build_info, dtype, use_placeholder=use_placeholder)
            op_trace = operator.trace()
            mat_trace = math_ops.trace(mat)
            if not use_placeholder:
              self.assertAllEqual(op_trace.get_shape(), mat_trace.get_shape())
            op_trace_v, mat_trace_v = sess.run([op_trace, mat_trace])
            self.assertAC(op_trace_v, mat_trace_v)
  def test_add_to_tensor(self):
    # add_to_tensor(2*mat) must equal 3*mat (operator itself equals mat).
    self._skip_if_tests_to_skip_contains("add_to_tensor")
    for use_placeholder in self._use_placeholder_options:
      for build_info in self._operator_build_infos:
        for dtype in self._dtypes_to_test:
          with self.session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat = self._operator_and_matrix(
                build_info, dtype, use_placeholder=use_placeholder)
            op_plus_2mat = operator.add_to_tensor(2 * mat)
            if not use_placeholder:
              self.assertAllEqual(build_info.shape, op_plus_2mat.get_shape())
            op_plus_2mat_v, mat_v = sess.run([op_plus_2mat, mat])
            self.assertAC(op_plus_2mat_v, 3 * mat_v)
  def test_diag_part(self):
    # diag_part() must match tf's dense matrix_diag_part.
    self._skip_if_tests_to_skip_contains("diag_part")
    for use_placeholder in self._use_placeholder_options:
      for build_info in self._operator_build_infos:
        for dtype in self._dtypes_to_test:
          with self.session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat = self._operator_and_matrix(
                build_info, dtype, use_placeholder=use_placeholder)
            op_diag_part = operator.diag_part()
            mat_diag_part = array_ops.matrix_diag_part(mat)
            if not use_placeholder:
              self.assertAllEqual(mat_diag_part.get_shape(),
                                  op_diag_part.get_shape())
            op_diag_part_, mat_diag_part_ = sess.run(
                [op_diag_part, mat_diag_part])
            self.assertAC(op_diag_part_, mat_diag_part_)
@six.add_metaclass(abc.ABCMeta)
class SquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest):
  """Base test class appropriate for square operators.
  Sub-classes must still define all abstractmethods from
  LinearOperatorDerivedClassTest that are not defined here.
  """
  @property
  def _operator_build_infos(self):
    build_info = OperatorBuildInfo
    # non-batch operators (n, n) and batch operators.
    return [
        build_info((0, 0)),
        build_info((1, 1)),
        build_info((1, 3, 3)),
        build_info((3, 4, 4)),
        build_info((2, 1, 4, 4))]
  def _make_rhs(self, operator, adjoint, with_batch=True):
    # This operator is square, so rhs and x will have same shape.
    # adjoint value makes no difference because the operator shape doesn't
    # change since it is square, but be pedantic.
    return self._make_x(operator, adjoint=not adjoint, with_batch=with_batch)
  def _make_x(self, operator, adjoint, with_batch=True):
    # Value of adjoint makes no difference because the operator is square.
    # Return the number of systems to solve, R, equal to 1 or 2.
    r = self._get_num_systems(operator)
    # If operator.shape = [B1,...,Bb, N, N] this returns a random matrix of
    # shape [B1,...,Bb, N, R], R = 1 or 2.
    if operator.shape.is_fully_defined():
      # Static shape known: build a Python list shape.
      batch_shape = operator.batch_shape.as_list()
      n = operator.domain_dimension.value
      if with_batch:
        x_shape = batch_shape + [n, r]
      else:
        x_shape = [n, r]
    else:
      # Dynamic shape: assemble the shape as a tensor at graph run time.
      batch_shape = operator.batch_shape_tensor()
      n = operator.domain_dimension_tensor()
      if with_batch:
        x_shape = array_ops.concat((batch_shape, [n, r]), 0)
      else:
        x_shape = [n, r]
    return random_normal(x_shape, dtype=operator.dtype)
  def _get_num_systems(self, operator):
    """Get some number, either 1 or 2, depending on operator."""
    # Alternates with tensor rank purely to vary test coverage.
    if operator.tensor_rank is None or operator.tensor_rank % 2:
      return 1
    else:
      return 2
@six.add_metaclass(abc.ABCMeta)
class NonSquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest):
  """Base test class appropriate for generic rectangular operators.
  Square shapes are never tested by this class, so if you want to test your
  operator with a square shape, create two test classes, the other subclassing
  SquareLinearOperatorFullMatrixTest.
  Sub-classes must still define all abstractmethods from
  LinearOperatorDerivedClassTest that are not defined here.
  """
  @property
  def _tests_to_skip(self):
    """List of test names to skip."""
    # These operations are only defined (or only tested) for square operators.
    return [
        "cholesky",
        "inverse",
        "solve",
        "solve_with_broadcast",
        "det",
        "log_abs_det"
    ]
  @property
  def _operator_build_infos(self):
    build_info = OperatorBuildInfo
    # non-batch operators (n, n) and batch operators.
    return [
        build_info((2, 1)),
        build_info((1, 2)),
        build_info((1, 3, 2)),
        build_info((3, 3, 4)),
        build_info((2, 1, 2, 4))]
  def _make_rhs(self, operator, adjoint, with_batch=True):
    # TODO(langmore) Add once we're testing solve_ls.
    raise NotImplementedError(
        "_make_rhs not implemented because we don't test solve")
  def _make_x(self, operator, adjoint, with_batch=True):
    # Return the number of systems for the argument 'x' for .matmul(x)
    r = self._get_num_systems(operator)
    # If operator.shape = [B1,...,Bb, M, N] this returns a random matrix of
    # shape [B1,...,Bb, N, R], R = 1 or 2.
    if operator.shape.is_fully_defined():
      batch_shape = operator.batch_shape.as_list()
      # For A^H x, x must match the range dimension instead of the domain.
      if adjoint:
        n = operator.range_dimension.value
      else:
        n = operator.domain_dimension.value
      if with_batch:
        x_shape = batch_shape + [n, r]
      else:
        x_shape = [n, r]
    else:
      # Dynamic shape: assemble the shape as a tensor at graph run time.
      batch_shape = operator.batch_shape_tensor()
      if adjoint:
        n = operator.range_dimension_tensor()
      else:
        n = operator.domain_dimension_tensor()
      if with_batch:
        x_shape = array_ops.concat((batch_shape, [n, r]), 0)
      else:
        x_shape = [n, r]
    return random_normal(x_shape, dtype=operator.dtype)
  def _get_num_systems(self, operator):
    """Get some number, either 1 or 2, depending on operator."""
    # Alternates with tensor rank purely to vary test coverage.
    if operator.tensor_rank is None or operator.tensor_rank % 2:
      return 1
    else:
      return 2
def random_positive_definite_matrix(shape, dtype, force_well_conditioned=False):
  """[batch] positive definite matrix.
  Args:
    shape: `TensorShape` or Python list. Shape of the returned matrix.
    dtype: `TensorFlow` `dtype` or Python dtype.
    force_well_conditioned: Python bool. If `True`, returned matrix has
      eigenvalues with modulus in `(1, 4)`. Otherwise, eigenvalues are
      chi-squared random variables.
  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)
  if not tensor_util.is_tensor(shape):
    shape = tensor_shape.TensorShape(shape)
    # Matrix must be square.
    shape[-1].assert_is_compatible_with(shape[-2])
  with ops.name_scope("random_positive_definite_matrix"):
    # The Gram matrix L L^H of a random lower-triangular L is positive
    # definite; a well-conditioned L keeps it away from singularity.
    lower = random_tril_matrix(
        shape, dtype, force_well_conditioned=force_well_conditioned)
    return math_ops.matmul(lower, lower, adjoint_b=True)
def random_tril_matrix(shape,
                       dtype,
                       force_well_conditioned=False,
                       remove_upper=True):
  """[batch] lower triangular matrix.
  Args:
    shape: `TensorShape` or Python `list`. Shape of the returned matrix.
    dtype: `TensorFlow` `dtype` or Python dtype
    force_well_conditioned: Python `bool`. If `True`, returned matrix will have
      eigenvalues with modulus in `(1, 2)`. Otherwise, eigenvalues are unit
      normal random variables.
    remove_upper: Python `bool`.
      If `True`, zero out the strictly upper triangle.
      If `False`, the lower triangle of returned matrix will have desired
      properties, but will not have the strictly upper triangle zero'd out.
  Returns:
    `Tensor` with desired shape and dtype.
  """
  with ops.name_scope("random_tril_matrix"):
    # Start from a totally random matrix with no special properties.
    mat = random_normal(shape, dtype=dtype)
    if remove_upper:
      mat = array_ops.matrix_band_part(mat, -1, 0)
    if force_well_conditioned:
      # Overwrite the diagonal with entries of modulus in [1, sqrt(2)],
      # bounding the (triangular) eigenvalues away from zero.
      maxval = ops.convert_to_tensor(np.sqrt(2.), dtype=dtype.real_dtype)
      diag = random_sign_uniform(
          shape[:-1], dtype=dtype, minval=1., maxval=maxval)
      mat = array_ops.matrix_set_diag(mat, diag)
    return mat
def random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None):
  """Tensor with (possibly complex) Gaussian entries.
  Samples are distributed like
  ```
  N(mean, stddev^2), if dtype is real,
  X + iY, where X, Y ~ N(mean, stddev^2) if dtype is complex.
  ```
  Args:
    shape: `TensorShape` or Python list. Shape of the returned tensor.
    mean: `Tensor` giving mean of normal to sample from.
    stddev: `Tensor` giving stdev of normal to sample from.
    dtype: `TensorFlow` `dtype` or numpy dtype
    seed: Python integer seed for the RNG.
  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)
  with ops.name_scope("random_normal"):
    real_part = random_ops.random_normal(
        shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
    if not dtype.is_complex:
      return real_part
    # Offset the seed so the imaginary part is independent of the real part.
    if seed is not None:
      seed += 1234
    imag_part = random_ops.random_normal(
        shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
    return math_ops.complex(real_part, imag_part)
def random_uniform(shape,
                   minval=None,
                   maxval=None,
                   dtype=dtypes.float32,
                   seed=None):
  """Tensor with (possibly complex) Uniform entries.
  Samples are distributed like
  ```
  Uniform[minval, maxval], if dtype is real,
  X + iY, where X, Y ~ Uniform[minval, maxval], if dtype is complex.
  ```
  Args:
    shape: `TensorShape` or Python list. Shape of the returned tensor.
    minval: `0-D` `Tensor` giving the minimum values.
    maxval: `0-D` `Tensor` giving the maximum values.
    dtype: `TensorFlow` `dtype` or Python dtype
    seed: Python integer seed for the RNG.
  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)
  with ops.name_scope("random_uniform"):
    real_part = random_ops.random_uniform(
        shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed)
    if not dtype.is_complex:
      return real_part
    # Offset the seed so the imaginary part is independent of the real part.
    if seed is not None:
      seed += 12345
    imag_part = random_ops.random_uniform(
        shape,
        dtype=dtype.real_dtype,
        minval=minval,
        maxval=maxval,
        seed=seed)
    return math_ops.complex(real_part, imag_part)
def random_sign_uniform(shape,
                        minval=None,
                        maxval=None,
                        dtype=dtypes.float32,
                        seed=None):
  """Tensor with (possibly complex) random entries from a "sign Uniform".
  Letting `Z` be a random variable equal to `-1` and `1` with equal probability,
  Samples from this `Op` are distributed like
  ```
  Z * X, where X ~ Uniform[minval, maxval], if dtype is real,
  Z * (X + iY), where X, Y ~ Uniform[minval, maxval], if dtype is complex.
  ```
  Args:
    shape: `TensorShape` or Python list. Shape of the returned tensor.
    minval: `0-D` `Tensor` giving the minimum values.
    maxval: `0-D` `Tensor` giving the maximum values.
    dtype: `TensorFlow` `dtype` or Python dtype
    seed: Python integer seed for the RNG.
  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)
  with ops.name_scope("random_sign_uniform"):
    magnitudes = random_uniform(
        shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
    # Offset the seed so the sign draw is independent of the magnitude draw.
    if seed is not None:
      seed += 12
    flips = math_ops.sign(
        random_ops.random_uniform(shape, minval=-1., maxval=1., seed=seed))
    return magnitudes * math_ops.cast(flips, magnitudes.dtype)
def random_normal_correlated_columns(shape,
                                     mean=0.0,
                                     stddev=1.0,
                                     dtype=dtypes.float32,
                                     eps=1e-4,
                                     seed=None):
  """Batch matrix with (possibly complex) Gaussian entries and correlated cols.
  Returns random batch matrix `A` with specified element-wise `mean`, `stddev`,
  living close to an embedded hyperplane.
  Suppose `shape[-2:] = (M, N)`.
  If `M < N`, `A` is a random `M x N` [batch] matrix with iid Gaussian entries.
  If `M >= N`, then the colums of `A` will be made almost dependent as follows:
  ```
  L = random normal N x N-1 matrix, mean = 0, stddev = 1 / sqrt(N - 1)
  B = random normal M x N-1 matrix, mean = 0, stddev = stddev.
  G = (L B^H)^H, a random normal M x N matrix, living on N-1 dim hyperplane
  E = a random normal M x N matrix, mean = 0, stddev = eps
  mu = a constant M x N matrix, equal to the argument "mean"
  A = G + E + mu
  ```
  Args:
    shape: Python list of integers.
      Shape of the returned tensor.  Must be at least length two.
    mean: `Tensor` giving mean of normal to sample from.
    stddev: `Tensor` giving stdev of normal to sample from.
    dtype: `TensorFlow` `dtype` or numpy dtype
    eps: Distance each column is perturbed from the low-dimensional subspace.
    seed: Python integer seed for the RNG.
  Returns:
    `Tensor` with desired shape and dtype.
  Raises:
    ValueError: If `shape` is not at least length 2.
  """
  dtype = dtypes.as_dtype(dtype)
  if len(shape) < 2:
    raise ValueError(
        "Argument shape must be at least length 2.  Found: %s" % shape)
  # Shape is the final shape, e.g. [..., M, N]
  shape = list(shape)
  batch_shape = shape[:-2]
  m, n = shape[-2:]
  # If there is only one column, "they" are by definition correlated.
  # NOTE(review): the `n < m` arm returns iid entries, which appears to
  # disagree with the docstring's "M >= N" description — confirm intent.
  if n < 2 or n < m:
    return random_normal(
        shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
  # Shape of the matrix with only n - 1 columns that we will embed in higher
  # dimensional space.
  smaller_shape = batch_shape + [m, n - 1]
  # Shape of the embedding matrix, mapping batch matrices
  # from [..., N-1, M] to [..., N, M]
  embedding_mat_shape = batch_shape + [n, n - 1]
  # This stddev for the embedding_mat ensures final result has correct stddev.
  stddev_mat = 1 / np.sqrt(n - 1)
  with ops.name_scope("random_normal_correlated_columns"):
    smaller_mat = random_normal(
        smaller_shape, mean=0.0, stddev=stddev_mat, dtype=dtype, seed=seed)
    # Offset the seed so the embedding draw is independent of smaller_mat.
    if seed is not None:
      seed += 1287
    embedding_mat = random_normal(embedding_mat_shape, dtype=dtype, seed=seed)
    embedded_t = math_ops.matmul(embedding_mat, smaller_mat, transpose_b=True)
    embedded = array_ops.matrix_transpose(embedded_t)
    mean_mat = array_ops.ones_like(embedded) * mean
| jendap/tensorflow | tensorflow/python/ops/linalg/linear_operator_test_util.py | Python | apache-2.0 | 30,832 | [
"Gaussian"
] | bdb7b314565a6089759f46247e2a22288a1481e7bb2c41a4eb35fe8cb6024b61 |
#!/usr/bin/env python
# Morphological closing of a CT volume: grayscale dilation followed by
# erosion with the same kernel, displayed slice-by-slice.
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Image pipeline
source = vtk.vtkImageReader()
source.SetDataByteOrderToLittleEndian()
source.SetDataExtent(0,63,0,63,1,93)
source.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter")
source.SetDataMask(0x7fff)
# 11x11x1 kernel: the closing acts within each slice (2D), not across slices.
dilation = vtk.vtkImageContinuousDilate3D()
dilation.SetInputConnection(source.GetOutputPort())
dilation.SetKernelSize(11,11,1)
erosion = vtk.vtkImageContinuousErode3D()
erosion.SetInputConnection(dilation.GetOutputPort())
erosion.SetKernelSize(11,11,1)
# Window/level chosen for CT intensity range.
display = vtk.vtkImageViewer()
display.SetInputConnection(erosion.GetOutputPort())
display.SetColorWindow(2000)
display.SetColorLevel(1000)
display.Render()
# --- end of script --
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/Imaging/Core/Testing/Python/ContinuousClose2D.py | Python | gpl-3.0 | 772 | [
"VTK"
] | f8c2221d4d48b3c64944ac37c076ca9674165565aed3c2c8cfb8ca17fccc9eb7 |
from splinter.browser import Browser
from time import sleep
import traceback
class Buy_Tickets(object):
    """Automates buying tickets on 12306.cn via a splinter-driven browser."""
    # Initialize instance attributes.
    def __init__(self, username, passwd, order, passengers, dtime, starts, ends):
        self.username = username
        self.passwd = passwd
        # Train choice: 0 means try every listed train; otherwise the 1-based
        # position of the train in the query results, counted top to bottom.
        self.order = order
        # Passenger names as they appear on the site.
        self.passengers = passengers
        # Origin and destination (as URL-encoded station cookie values).
        self.starts = starts
        self.ends = ends
        # Travel date.
        self.dtime = dtime
        # self.xb = xb
        # self.pz = pz
        self.login_url = 'https://kyfw.12306.cn/otn/test_login/init'
        self.initMy_url = 'https://kyfw.12306.cn/otn/index/initMy12306'
        self.ticket_url = 'https://kyfw.12306.cn/otn/leftTicket/init'
        self.driver_name = 'chrome'
        self.executable_path = 'E:\Program Files\Python36\chromedriver.exe'
    # Log in; assumes self.driver was created by start_buy() first.
    def login(self):
        self.driver.visit(self.login_url)
        self.driver.fill('loginUserDTO.user_name', self.username)
        # sleep(1)
        self.driver.fill('userDTO.password', self.passwd)
        # sleep(1)
        print('请输入验证码...')
        # Poll until the captcha is solved manually and the site redirects
        # to the "my 12306" page, which signals a successful login.
        while True:
            if self.driver.url != self.initMy_url:
                sleep(1)
            else:
                break
    # Main purchase flow: open browser, log in, query, book, submit order.
    def start_buy(self):
        self.driver = Browser(driver_name=self.driver_name, executable_path=self.executable_path)
        # Set the browser window size.
        self.driver.driver.set_window_size(700, 500)
        self.login()
        self.driver.visit(self.ticket_url)
        try:
            print('开始购票...')
            # Preload the query form via cookies (origin/destination/date).
            self.driver.cookies.add({"_jc_save_fromStation": self.starts})
            self.driver.cookies.add({"_jc_save_toStation": self.ends})
            self.driver.cookies.add({"_jc_save_fromDate": self.dtime})
            self.driver.reload()
            count = 0
            if self.order != 0:
                # Book a specific train: keep clicking "query" until the
                # booking click navigates away from the ticket page.
                while self.driver.url == self.ticket_url:
                    self.driver.find_by_text('查询').click()
                    count += 1
                    print('第%d次点击查询...' % count)
                    try:
                        self.driver.find_by_text('预订')[self.order-1].click()
                        sleep(1.5)
                    except Exception as e:
                        print(e)
                        print('预订失败...')
                        continue
            else:
                # order == 0: try every "book" button on the results page.
                while self.driver.url == self.ticket_url:
                    self.driver.find_by_text('查询').click()
                    count += 1
                    print('第%d次点击查询...' % count)
                    try:
                        for i in self.driver.find_by_text('预订'):
                            i.click()
                            sleep(1)
                    except Exception as e:
                        print(e)
                        print('预订失败...')
                        continue
            print('开始预订...')
            sleep(1)
            print('开始选择用户...')
            for p in self.passengers:
                self.driver.find_by_text(p).last.click()
                sleep(0.5)
                # A trailing ')' marks a student ticket (e.g. "name(学生)");
                # confirm the student-ticket dialog in that case.
                if p[-1] == ')':
                    self.driver.find_by_id('dialog_xsertcj_ok').click()
            print('提交订单...')
            # sleep(1)
            # self.driver.find_by_text(self.pz).click()
            # sleep(1)
            # self.driver.find_by_text(self.xb).click()
            # sleep(1)
            self.driver.find_by_id('submitOrder_id').click()
            sleep(2)
            print('确认选座...')
            self.driver.find_by_id('qr_submit_id').click()
            print('预订成功...')
        except Exception as e:
            print(e)
if __name__ == '__main__':
    # Account user name.
    username = '13521895260'
    # Account password.
    password = 'jydn000234'
    # Train choice; 0 means try every train in the results.
    order = 2
    # Passenger names, e.g. passengers = ['丁小红', '丁小明'].
    # Student tickets must be marked: passengers = ['丁小红(学生)', '丁小明'].
    passengers = ['丁彦军']
    # Date, format: '2018-01-20'.
    dtime = '2018-01-19'
    # Origin (URL-encoded station cookie value).
    starts = '%u5434%u5821%2CWUY' # Wubao
    # Destination (URL-encoded station cookie value).
    ends = '%u897F%u5B89%2CXAY' # Xi'an
    # xb =['硬座座']
    # pz=['成人票']
Buy_Tickets(username, password, order, passengers, dtime, starts, ends).start_buy() | lsp84ch83/PyText | 公开课/爬虫/用Python抢火车票完整源代码.py | Python | gpl-3.0 | 4,669 | [
"VisIt"
] | 9cdd68d5202f6557cde1124b647172ef50182e3dcc8af6a1a69d3c54755970b0 |
from sympy.core import S, C, sympify
from sympy.core.function import Function, ArgumentIndexError
from sympy.ntheory import sieve
from math import sqrt as _sqrt
from sympy.core.compatibility import reduce, as_int
from sympy.core.cache import cacheit
class CombinatorialFunction(Function):
    """Base class for combinatorial functions.
    Provides a common superclass so factorials, binomial coefficients and
    related functions can be recognized and handled uniformly.
    """
###############################################################################
######################## FACTORIAL and MULTI-FACTORIAL ########################
###############################################################################
class factorial(CombinatorialFunction):
    """Implementation of factorial function over nonnegative integers.
    For the sake of convenience and simplicity of procedures using
    this function it is defined for negative integers and returns
    zero in this case.
    The factorial is very important in combinatorics where it gives
    the number of ways in which 'n' objects can be permuted. It also
    arises in calculus, probability, number theory etc.
    There is strict relation of factorial with gamma function. In
    fact n! = gamma(n+1) for nonnegative integers. Rewrite of this
    kind is very useful in case of combinatorial simplification.
    Computation of the factorial is done using two algorithms. For
    small arguments naive product is evaluated. However for bigger
    input algorithm Prime-Swing is used. It is the fastest algorithm
    known and computes n! via prime factorization of special class
    of numbers, called here the 'Swing Numbers'.
    Examples
    ========
    >>> from sympy import Symbol, factorial
    >>> n = Symbol('n', integer=True)
    >>> factorial(-2)
    0
    >>> factorial(0)
    1
    >>> factorial(7)
    5040
    >>> factorial(n)
    factorial(n)
    >>> factorial(2*n)
    factorial(2*n)
    See Also
    ========
    factorial2, RisingFactorial, FallingFactorial
    """
    nargs = 1
    def fdiff(self, argindex=1):
        # d/dn n! = gamma(n+1) * psi(n+1), via n! = gamma(n+1).
        if argindex == 1:
            return C.gamma(self.args[0] + 1)*C.polygamma(0, self.args[0] + 1)
        else:
            raise ArgumentIndexError(self, argindex)
    # Precomputed swing numbers for n < 33; avoids the prime machinery
    # for small inputs.
    _small_swing = [
        1, 1, 1, 3, 3, 15, 5, 35, 35, 315, 63, 693, 231, 3003, 429, 6435, 6435, 109395,
        12155, 230945, 46189, 969969, 88179, 2028117, 676039, 16900975, 1300075,
        35102025, 5014575, 145422675, 9694845, 300540195, 300540195
    ]
    @classmethod
    def _swing(cls, n):
        """Compute the odd 'swing number' of n from its prime factorization."""
        if n < 33:
            return cls._small_swing[n]
        else:
            N, primes = int(_sqrt(n)), []
            # Small primes (<= sqrt(n)): accumulate the odd part of the
            # exponent by repeated division.
            for prime in sieve.primerange(3, N + 1):
                p, q = 1, n
                while True:
                    q //= prime
                    if q > 0:
                        if q & 1 == 1:
                            p *= prime
                    else:
                        break
                if p > 1:
                    primes.append(p)
            # Mid-range primes appear with exponent 0 or 1.
            for prime in sieve.primerange(N + 1, n//3 + 1):
                if (n // prime) & 1 == 1:
                    primes.append(prime)
            # Large primes (> n/2) always divide the swing number once.
            L_product = R_product = 1
            for prime in sieve.primerange(n//2 + 1, n + 1):
                L_product *= prime
            for prime in primes:
                R_product *= prime
            return L_product*R_product
    @classmethod
    def _recursive(cls, n):
        """Odd part of n!, via oddfactorial(n) = oddfactorial(n//2)**2 * swing(n)."""
        if n < 2:
            return 1
        else:
            return (cls._recursive(n//2)**2)*cls._swing(n)
    @classmethod
    def eval(cls, n):
        n = sympify(n)
        if n.is_Number:
            if n is S.Zero:
                return S.One
            elif n.is_Integer:
                if n.is_negative:
                    return S.Zero
                else:
                    n, result = n.p, 1
                    if n < 20:
                        # Small n: plain running product is fastest.
                        for i in range(2, n + 1):
                            result *= i
                    else:
                        # Prime-swing: n! = oddpart(n!) * 2**(n - popcount(n)),
                        # where the power of two follows from Legendre's formula.
                        N, bits = n, 0
                        while N != 0:
                            if N & 1 == 1:
                                bits += 1
                            N = N >> 1
                        result = cls._recursive(n)*2**(n - bits)
                    return C.Integer(result)
        # Symbolic but known-negative arguments also evaluate to zero.
        if n.is_negative:
            return S.Zero
    def _eval_rewrite_as_gamma(self, n):
        return C.gamma(n + 1)
    def _eval_is_integer(self):
        return self.args[0].is_integer
class MultiFactorial(CombinatorialFunction):
    # Placeholder for the generalized multifactorial; not yet implemented.
    pass
class subfactorial(CombinatorialFunction):
    """The subfactorial counts the derangements of n items and is
    defined for non-negative integers as::
               ,
              |  1                               for n = 0
        !n = {   0                               for n = 1
              |  (n - 1)*(!(n - 1) + !(n - 2))   for n > 1
               `
    It can also be written as int(round(n!/exp(1))) but the recursive
    definition with caching is implemented for this function.
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Subfactorial
    Examples
    ========
    >>> from sympy import subfactorial
    >>> from sympy.abc import n
    >>> subfactorial(n + 1)
    subfactorial(n + 1)
    >>> subfactorial(5)
    44
    See Also
    ========
    factorial, sympy.utilities.iterables.generate_derangements
    """
    nargs = 1
    @classmethod
    @cacheit
    def _eval(self, n):
        # Memoized recurrence !n = (n-1)(!(n-1) + !(n-2)); cacheit makes
        # the double recursion linear in n.
        if not n:
            return 1
        elif n == 1:
            return 0
        return (n - 1)*(self._eval(n - 1) + self._eval(n - 2))
    @classmethod
    def eval(cls, arg):
        try:
            arg = as_int(arg)
            if arg < 0:
                raise ValueError
            return C.Integer(cls._eval(arg))
        except ValueError:
            # Non-integral numbers are rejected; symbolic arguments are
            # left unevaluated (eval returns None).
            if sympify(arg).is_Number:
                raise ValueError("argument must be a nonnegative integer")
class factorial2(CombinatorialFunction):
    """The double factorial n!!, not to be confused with (n!)!
    The double factorial is defined for integers >= -1 as::
                ,
               |  n*(n - 2)*(n - 4)* ... * 1    for n odd
        n!! = {   n*(n - 2)*(n - 4)* ... * 2    for n even
               |  1                             for n = 0, -1
                `
    Examples
    ========
    >>> from sympy import factorial2, var
    >>> var('n')
    n
    >>> factorial2(n + 1)
    factorial2(n + 1)
    >>> factorial2(5)
    15
    >>> factorial2(-1)
    1
    See Also
    ========
    factorial, RisingFactorial, FallingFactorial
    """
    nargs = 1
    @classmethod
    def eval(cls, arg):
        if arg.is_Number:
            if arg == S.Zero or arg == S.NegativeOne:
                return S.One
            # NOTE(review): recurses with arg - 2 without checking that arg
            # is an integer >= -1; a numeric argument below -1 (or a
            # non-integer) never hits the base case — confirm callers only
            # pass integers >= -1.
            return factorial2(arg - 2)*arg
###############################################################################
######################## RISING and FALLING FACTORIALS ########################
###############################################################################
class RisingFactorial(CombinatorialFunction):
    """Rising factorial (also called Pochhammer symbol) is a double valued
    function arising in concrete mathematics, hypergeometric functions
    and series expansions. It is defined by:
    rf(x, k) = x * (x+1) * ... * (x + k-1)
    where 'x' can be arbitrary expression and 'k' is an integer. For
    more information check "Concrete mathematics" by Graham, pp. 66
    or visit http://mathworld.wolfram.com/RisingFactorial.html page.
    Examples
    ========
    >>> from sympy import rf
    >>> from sympy.abc import x
    >>> rf(x, 0)
    1
    >>> rf(1, 5)
    120
    >>> rf(x, 5) == x*(1 + x)*(2 + x)*(3 + x)*(4 + x)
    True
    See Also
    ========
    factorial, factorial2, FallingFactorial
    """
    nargs = 2
    @classmethod
    def eval(cls, x, k):
        x = sympify(x)
        k = sympify(k)
        if x is S.NaN:
            return S.NaN
        elif x is S.One:
            # rf(1, k) = 1 * 2 * ... * k = k!
            return factorial(k)
        elif k.is_Integer:
            if k is S.NaN:
                return S.NaN
            elif k is S.Zero:
                # Empty product.
                return S.One
            else:
                if k.is_positive:
                    if x is S.Infinity:
                        return S.Infinity
                    elif x is S.NegativeInfinity:
                        # Sign of the product of k factors of -oo.
                        if k.is_odd:
                            return S.NegativeInfinity
                        else:
                            return S.Infinity
                    else:
                        return reduce(lambda r, i: r*(x + i), xrange(0, int(k)), 1)
                else:
                    if x is S.Infinity:
                        return S.Infinity
                    elif x is S.NegativeInfinity:
                        return S.Infinity
                    else:
                        # Negative k: rf(x, -k) = 1 / ((x-1)*(x-2)*...*(x-k)).
                        return 1/reduce(lambda r, i: r*(x - i), xrange(1, abs(int(k)) + 1), 1)
    def _eval_rewrite_as_gamma(self, x, k):
        return C.gamma(x + k) / C.gamma(x)
class FallingFactorial(CombinatorialFunction):
    """Falling factorial (related to rising factorial) is a double valued
    function arising in concrete mathematics, hypergeometric functions
    and series expansions. It is defined by
    ff(x, k) = x * (x-1) * ... * (x - k+1)
    where 'x' can be arbitrary expression and 'k' is an integer. For
    more information check "Concrete mathematics" by Graham, pp. 66
    or visit http://mathworld.wolfram.com/FallingFactorial.html page.
    >>> from sympy import ff
    >>> from sympy.abc import x
    >>> ff(x, 0)
    1
    >>> ff(5, 5)
    120
    >>> ff(x, 5) == x*(x-1)*(x-2)*(x-3)*(x-4)
    True
    See Also
    ========
    factorial, factorial2, RisingFactorial
    """
    nargs = 2
    @classmethod
    def eval(cls, x, k):
        x = sympify(x)
        k = sympify(k)
        if x is S.NaN:
            return S.NaN
        elif k.is_Integer:
            if k is S.NaN:
                return S.NaN
            elif k is S.Zero:
                # Empty product.
                return S.One
            else:
                if k.is_positive:
                    if x is S.Infinity:
                        return S.Infinity
                    elif x is S.NegativeInfinity:
                        # Sign of the product of k factors of -oo.
                        if k.is_odd:
                            return S.NegativeInfinity
                        else:
                            return S.Infinity
                    else:
                        return reduce(lambda r, i: r*(x - i), xrange(0, int(k)), 1)
                else:
                    if x is S.Infinity:
                        return S.Infinity
                    elif x is S.NegativeInfinity:
                        return S.Infinity
                    else:
                        # Negative k: ff(x, -k) = 1 / ((x+1)*(x+2)*...*(x+k)).
                        return 1/reduce(lambda r, i: r*(x + i), xrange(1, abs(int(k)) + 1), 1)
    def _eval_rewrite_as_gamma(self, x, k):
        return (-1)**k * C.gamma(-x + k) / C.gamma(-x)
# Conventional short aliases: rf(x, k) rising, ff(x, k) falling factorial.
rf = RisingFactorial
ff = FallingFactorial
###############################################################################
########################### BINOMIAL COEFFICIENTS #############################
###############################################################################
class binomial(CombinatorialFunction):
    """Implementation of the binomial coefficient. It can be defined
    in two ways depending on its desired interpretation:
    C(n,k) = n!/(k!(n-k)!) or C(n, k) = ff(n, k)/k!
    First, in a strict combinatorial sense it defines the
    number of ways we can choose 'k' elements from a set of
    'n' elements. In this case both arguments are nonnegative
    integers and binomial is computed using an efficient
    algorithm based on prime factorization.
    The other definition is generalization for arbitrary 'n',
    however 'k' must also be nonnegative. This case is very
    useful when evaluating summations.
    For the sake of convenience for negative 'k' this function
    will return zero no matter what valued is the other argument.
    To expand the binomial when n is a symbol, use either
    expand_func() or expand(func=True). The former will keep the
    polynomial in factored form while the latter will expand the
    polynomial itself. See examples for details.
    Examples
    ========
    >>> from sympy import Symbol, Rational, binomial, expand_func
    >>> n = Symbol('n', integer=True)
    >>> binomial(15, 8)
    6435
    >>> binomial(n, -1)
    0
    >>> [ binomial(0, i) for i in range(1)]
    [1]
    >>> [ binomial(1, i) for i in range(2)]
    [1, 1]
    >>> [ binomial(2, i) for i in range(3)]
    [1, 2, 1]
    >>> [ binomial(3, i) for i in range(4)]
    [1, 3, 3, 1]
    >>> [ binomial(4, i) for i in range(5)]
    [1, 4, 6, 4, 1]
    >>> binomial(Rational(5,4), 3)
    -5/128
    >>> binomial(n, 3)
    binomial(n, 3)
    >>> binomial(n, 3).expand(func=True)
    n**3/6 - n**2/2 + n/3
    >>> expand_func(binomial(n, 3))
    n*(n - 2)*(n - 1)/6
    """
    nargs = 2
    def fdiff(self, argindex=1):
        if argindex == 1:
            # http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/01/
            n, k = self.args
            return binomial(n, k)*(C.polygamma(0, n + 1) - C.polygamma(0, n - k + 1))
        elif argindex == 2:
            # http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/02/
            n, k = self.args
            return binomial(n, k)*(C.polygamma(0, n - k + 1) - C.polygamma(0, k + 1))
        else:
            raise ArgumentIndexError(self, argindex)
    @classmethod
    def eval(cls, n, k):
        n, k = map(sympify, (n, k))
        if k.is_Number:
            if k.is_Integer:
                if k < 0:
                    return S.Zero
                elif k == 0 or n == k:
                    return S.One
                elif n.is_Integer and n >= 0:
                    n, k = int(n), int(k)
                    if k > n:
                        return S.Zero
                    elif k > n // 2:
                        # Symmetry C(n, k) = C(n, n - k).
                        k = n - k
                    # Build the result from the exponent of each prime in
                    # C(n, k); only primes up to n can divide it.
                    M, result = int(_sqrt(n)), 1
                    for prime in sieve.primerange(2, n + 1):
                        if prime > n - k:
                            # Primes in (n-k, n] divide exactly once.
                            result *= prime
                        elif prime > n // 2:
                            # Primes in (n/2, n-k] do not divide C(n, k).
                            continue
                        elif prime > M:
                            if n % prime < k % prime:
                                result *= prime
                        else:
                            # Small primes: count carries when adding k and
                            # n - k in base `prime` (Kummer's theorem).
                            N, K = n, k
                            exp = a = 0
                            while N > 0:
                                a = int((N % prime) < (K % prime + a))
                                N, K = N // prime, K // prime
                                exp = a + exp
                            if exp > 0:
                                result *= prime**exp
                    return C.Integer(result)
                elif n.is_Number:
                    # Non-integer numeric n: falling-factorial product
                    # C(n, k) = n*(n-1)*...*(n-k+1) / k!.
                    result = n - k + 1
                    for i in xrange(2, k + 1):
                        result *= n - k + i
                        result /= i
                    return result
        elif k.is_negative:
            return S.Zero
        elif (n - k).simplify().is_negative:
            return S.Zero
        else:
            # Replace k by the (integer) difference n - k to use symmetry.
            d = n - k
            if d.is_Integer:
                return cls.eval(n, d)
    def _eval_expand_func(self, **hints):
        """
        Function to expand binomial(n,k) when k is a positive integer.
        Also,
        n is self.args[0] and k is self.args[1] while using binomial(n, k)
        """
        n = self.args[0]
        if n.is_Number:
            return binomial(*self.args)
        k = self.args[1]
        # If k = n - m with integer m, expand via the smaller index m.
        if k.is_Add and n in k.args:
            k = n - k
        if k.is_Integer:
            if k == S.Zero:
                return S.One
            elif k < 0:
                return S.Zero
            else:
                # Factored falling-factorial form: n*(n-1)*...*(n-k+1) / k!.
                n = self.args[0]
                result = n - k + 1
                for i in xrange(2, k + 1):
                    result *= n - k + i
                    result /= i
                return result
        else:
            return binomial(*self.args)
    def _eval_rewrite_as_factorial(self, n, k):
        return C.factorial(n)/(C.factorial(k)*C.factorial(n - k))
    def _eval_rewrite_as_gamma(self, n, k):
        return C.gamma(n + 1)/(C.gamma(k + 1)*C.gamma(n - k + 1))
    def _eval_is_integer(self):
        return self.args[0].is_integer and self.args[1].is_integer
| lidavidm/mathics-heroku | venv/lib/python2.7/site-packages/sympy/functions/combinatorial/factorials.py | Python | gpl-3.0 | 16,857 | [
"VisIt"
] | fe787db0fe256eb4a3f7a3b6d58f0dd4ff9632ca48b3bde2539015079d13d71f |
#!/usr/bin/env python
import sys, os, gzip
from galaxy.datatypes.checkers import is_gzip
def main():
    """
    The format of the file is JSON::
    { "sections" : [
        { "start" : "x", "end" : "y", "sequences" : "z" },
        ...
    ]}
    This works only for UNCOMPRESSED fastq files. The Python GzipFile does not provide seekable
    offsets via tell(), so clients just have to split the slow way
    """
    # Usage: argv[1] = input FASTQ path, argv[2] = output JSON path.
    # (Python 2 script: note the print statement below.)
    input_fname = sys.argv[1]
    if is_gzip(input_fname):
        print 'Conversion is only possible for uncompressed files'
        sys.exit(1)
    out_file = open(sys.argv[2], 'w')
    current_line = 0
    # One section per million sequences; a FASTQ record is exactly 4 lines.
    sequences=1000000
    lines_per_chunk = 4*sequences
    chunk_begin = 0
    in_file = open(input_fname)
    out_file.write('{"sections" : [');
    for line in in_file:
        current_line += 1
        if 0 == current_line % lines_per_chunk:
            # Full chunk complete: record its byte range [chunk_begin, chunk_end).
            chunk_end = in_file.tell()
            out_file.write('{"start":"%s","end":"%s","sequences":"%s"},' % (chunk_begin, chunk_end, sequences))
            chunk_begin = chunk_end
    # Final, possibly partial, section.
    # NOTE(review): when the line count is an exact multiple of lines_per_chunk
    # this emits a zero-length trailing section ("sequences":"0") — confirm
    # downstream consumers tolerate that.
    chunk_end = in_file.tell()
    out_file.write('{"start":"%s","end":"%s","sequences":"%s"}' % (chunk_begin, chunk_end, (current_line % lines_per_chunk) / 4))
    out_file.write(']}\n')
if __name__ == "__main__":
    # Entry point when run as a Galaxy datatype converter script.
    main()
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/datatypes/converters/fastq_to_fqtoc.py | Python | gpl-3.0 | 1,317 | [
"Galaxy"
] | 82c9b67c9e7a194ee98ddbecdb783b9324cb453c13c8af56cfd688b7ce08d805 |
"""Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import parallel_helper
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
# Public API of this module: the concrete forest estimators.
__all__ = ["RandomForestClassifier",
           "RandomForestRegressor",
           "ExtraTreesClassifier",
           "ExtraTreesRegressor",
           "RandomTreesEmbedding"]
# Largest value representable by int32; used as the upper bound when
# drawing integer random seeds for sub-estimators.
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
    """Draw ``n_samples`` bootstrap indices (sampling with replacement).

    Helper for :func:`_parallel_build_trees`; the ``random_state`` of the
    tree being fitted is passed in so the draw is reproducible per tree.
    """
    rng = check_random_state(random_state)
    return rng.randint(0, n_samples, n_samples)
def _generate_unsampled_indices(random_state, n_samples):
    """Return the out-of-bag indices for one tree's bootstrap draw.

    Helper for ``forest._set_oob_score``: an index is out-of-bag when it
    was never drawn by :func:`_generate_sample_indices` for this tree.
    """
    drawn = _generate_sample_indices(random_state, n_samples)
    draw_counts = np.bincount(drawn, minlength=n_samples)
    # Out-of-bag samples are those drawn zero times.
    return np.flatnonzero(draw_counts == 0)
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
                          verbose=0, class_weight=None):
    """Fit one tree of the forest; executed inside the Parallel loop."""
    if verbose > 1:
        print("building tree %d of %d" % (tree_idx + 1, n_trees))
    if not forest.bootstrap:
        # No resampling: fit on the full training set with the given weights.
        tree.fit(X, y, sample_weight=sample_weight, check_input=False)
        return tree
    # Bootstrap: emulate sampling with replacement by multiplying each
    # sample's weight by the number of times it was drawn.
    n_samples = X.shape[0]
    if sample_weight is None:
        weights = np.ones((n_samples,), dtype=np.float64)
    else:
        weights = sample_weight.copy()
    indices = _generate_sample_indices(tree.random_state, n_samples)
    weights *= np.bincount(indices, minlength=n_samples)
    if class_weight == 'subsample':
        # Deprecated alias of 'balanced_subsample'; silence the warning
        # emitted by compute_sample_weight for the 'auto' preset.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            weights *= compute_sample_weight('auto', y, indices)
    elif class_weight == 'balanced_subsample':
        # Balance class weights on this tree's bootstrap sample only.
        weights *= compute_sample_weight('balanced', y, indices)
    tree.fit(X, y, sample_weight=weights, check_input=False)
    return tree
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble)):
    """Base class for forests of trees.
    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        super(BaseForest, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params)
        # scikit-learn convention: store constructor arguments verbatim,
        # with no validation; parameters are checked in fit().
        self.bootstrap = bootstrap
        self.oob_score = oob_score
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose
        self.warm_start = warm_start
        self.class_weight = class_weight
    def apply(self, X):
        """Apply trees in the forest to X, return leaf indices.
        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.
        Returns
        -------
        X_leaves : array_like, shape = [n_samples, n_estimators]
            For each datapoint x in X and for each tree in the forest,
            return the index of the leaf x ends up in.
        """
        X = self._validate_X_predict(X)
        # Threading backend: the Cython tree code releases the GIL, so
        # threads run concurrently without pickling X per worker.
        results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                           backend="threading")(
            delayed(parallel_helper)(tree, 'apply', X, check_input=False)
            for tree in self.estimators_)
        return np.array(results).T
    def decision_path(self, X):
        """Return the decision path in the forest
        .. versionadded:: 0.18
        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.
        Returns
        -------
        indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non zero elements
            indicates that the samples goes through the nodes.
        n_nodes_ptr : array of size (n_estimators + 1, )
            The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
            gives the indicator value for the i-th estimator.
        """
        X = self._validate_X_predict(X)
        indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                              backend="threading")(
            delayed(parallel_helper)(tree, 'decision_path', X,
                                     check_input=False)
            for tree in self.estimators_)
        # Per-tree indicator matrices are stacked horizontally; n_nodes_ptr
        # records the column offset where each tree's nodes begin.
        n_nodes = [0]
        n_nodes.extend([i.shape[1] for i in indicators])
        n_nodes_ptr = np.array(n_nodes).cumsum()
        return sparse_hstack(indicators).tocsr(), n_nodes_ptr
    def fit(self, X, y, sample_weight=None):
        """Build a forest of trees from the training set (X, y).
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The training input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csc_matrix``.
        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (class labels in classification, real numbers in
            regression).
        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.
        Returns
        -------
        self : object
            Returns self.
        """
        # Validate or convert input data
        X = check_array(X, accept_sparse="csc", dtype=DTYPE)
        y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
        if sample_weight is not None:
            sample_weight = check_array(sample_weight, ensure_2d=False)
        if issparse(X):
            # Pre-sort indices to avoid that each individual tree of the
            # ensemble sorts the indices.
            X.sort_indices()
        # Remap output
        n_samples, self.n_features_ = X.shape
        y = np.atleast_1d(y)
        if y.ndim == 2 and y.shape[1] == 1:
            warn("A column-vector y was passed when a 1d array was"
                 " expected. Please change the shape of y to "
                 "(n_samples,), for example using ravel().",
                 DataConversionWarning, stacklevel=2)
        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))
        self.n_outputs_ = y.shape[1]
        y, expanded_class_weight = self._validate_y_class_weight(y)
        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)
        if expanded_class_weight is not None:
            # Fold class weights into the per-sample weights.
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight
        # Check parameters
        self._validate_estimator()
        if not self.bootstrap and self.oob_score:
            raise ValueError("Out of bag estimation only available"
                             " if bootstrap=True")
        random_state = check_random_state(self.random_state)
        if not self.warm_start or not hasattr(self, "estimators_"):
            # Free allocated memory, if any
            self.estimators_ = []
        n_more_estimators = self.n_estimators - len(self.estimators_)
        if n_more_estimators < 0:
            raise ValueError('n_estimators=%d must be larger or equal to '
                             'len(estimators_)=%d when warm_start==True'
                             % (self.n_estimators, len(self.estimators_)))
        elif n_more_estimators == 0:
            warn("Warm-start fitting without increasing n_estimators does not "
                 "fit new trees.")
        else:
            if self.warm_start and len(self.estimators_) > 0:
                # We draw from the random state to get the random state we
                # would have got if we hadn't used a warm_start.
                random_state.randint(MAX_INT, size=len(self.estimators_))
            trees = []
            for i in range(n_more_estimators):
                tree = self._make_estimator(append=False,
                                            random_state=random_state)
                trees.append(tree)
            # Parallel loop: we use the threading backend as the Cython code
            # for fitting the trees is internally releasing the Python GIL
            # making threading always more efficient than multiprocessing in
            # that case.
            trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             backend="threading")(
                delayed(_parallel_build_trees)(
                    t, self, X, y, sample_weight, i, len(trees),
                    verbose=self.verbose, class_weight=self.class_weight)
                for i, t in enumerate(trees))
            # Collect newly grown trees
            self.estimators_.extend(trees)
        if self.oob_score:
            self._set_oob_score(X, y)
        # Decapsulate classes_ attributes
        if hasattr(self, "classes_") and self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]
        return self
    @abstractmethod
    def _set_oob_score(self, X, y):
        """Calculate out of bag predictions and score."""
    def _validate_y_class_weight(self, y):
        # Default implementation: no re-encoding, no class weights
        # (regressors); classifiers override this.
        return y, None
    def _validate_X_predict(self, X):
        """Validate X whenever one tries to predict, apply, predict_proba"""
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise NotFittedError("Estimator not fitted, "
                                 "call `fit` before exploiting the model.")
        # Delegate input validation to the first tree; all trees share the
        # same expected feature count.
        return self.estimators_[0]._validate_X_predict(X, check_input=True)
    @property
    def feature_importances_(self):
        """Return the feature importances (the higher, the more important the
           feature).
        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        check_is_fitted(self, 'estimators_')
        all_importances = Parallel(n_jobs=self.n_jobs,
                                   backend="threading")(
            delayed(getattr)(tree, 'feature_importances_')
            for tree in self.estimators_)
        # Forest importance is the mean of the per-tree importances.
        return sum(all_importances) / len(self.estimators_)
# This is a utility function for joblib's Parallel. It can't go locally in
# ForestClassifier or ForestRegressor, because joblib complains that it cannot
# pickle it when placed there.
def accumulate_prediction(predict, X, out):
    """Add one estimator's prediction for ``X`` into the running sums.

    ``out`` holds one pre-allocated accumulator array per output; each is
    updated in place so the caller never stores individual predictions.
    """
    result = predict(X, check_input=False)
    if len(out) == 1:
        out[0] += result
    else:
        for output_idx, partial in enumerate(result):
            out[output_idx] += partial
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
                                          ClassifierMixin)):
    """Base class for forest of trees-based classifiers.
    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        super(ForestClassifier, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight)
    def _set_oob_score(self, X, y):
        """Compute out-of-bag score"""
        X = check_array(X, dtype=DTYPE, accept_sparse='csr')
        n_classes_ = self.n_classes_
        n_samples = y.shape[0]
        oob_decision_function = []
        oob_score = 0.0
        predictions = []
        # One probability accumulator per output; each row sums the class
        # probabilities voted by trees that did NOT see that sample.
        for k in range(self.n_outputs_):
            predictions.append(np.zeros((n_samples, n_classes_[k])))
        for estimator in self.estimators_:
            unsampled_indices = _generate_unsampled_indices(
                estimator.random_state, n_samples)
            p_estimator = estimator.predict_proba(X[unsampled_indices, :],
                                                  check_input=False)
            if self.n_outputs_ == 1:
                # Normalize to a list-of-arrays, one entry per output.
                p_estimator = [p_estimator]
            for k in range(self.n_outputs_):
                predictions[k][unsampled_indices, :] += p_estimator[k]
        for k in range(self.n_outputs_):
            if (predictions[k].sum(axis=1) == 0).any():
                warn("Some inputs do not have OOB scores. "
                     "This probably means too few trees were used "
                     "to compute any reliable oob estimates.")
            # Normalize the accumulated votes into a probability per row.
            decision = (predictions[k] /
                        predictions[k].sum(axis=1)[:, np.newaxis])
            oob_decision_function.append(decision)
            oob_score += np.mean(y[:, k] ==
                                 np.argmax(predictions[k], axis=1), axis=0)
        if self.n_outputs_ == 1:
            self.oob_decision_function_ = oob_decision_function[0]
        else:
            self.oob_decision_function_ = oob_decision_function
        # Average accuracy across outputs.
        self.oob_score_ = oob_score / self.n_outputs_
    def _validate_y_class_weight(self, y):
        check_classification_targets(y)
        y = np.copy(y)
        expanded_class_weight = None
        if self.class_weight is not None:
            # Keep the original labels: compute_sample_weight needs them,
            # while the trees are fit on the encoded y below.
            y_original = np.copy(y)
        self.classes_ = []
        self.n_classes_ = []
        # Encode each output's labels as consecutive integers 0..n_classes-1.
        y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
        for k in range(self.n_outputs_):
            classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
            self.classes_.append(classes_k)
            self.n_classes_.append(classes_k.shape[0])
        y = y_store_unique_indices
        if self.class_weight is not None:
            valid_presets = ('balanced', 'balanced_subsample')
            if isinstance(self.class_weight, six.string_types):
                if self.class_weight not in valid_presets:
                    raise ValueError('Valid presets for class_weight include '
                                     '"balanced" and "balanced_subsample". Given "%s".'
                                     % self.class_weight)
                if self.warm_start:
                    warn('class_weight presets "balanced" or "balanced_subsample" are '
                         'not recommended for warm_start if the fitted data '
                         'differs from the full dataset. In order to use '
                         '"balanced" weights, use compute_class_weight("balanced", '
                         'classes, y). In place of y you can use a large '
                         'enough sample of the full training set target to '
                         'properly estimate the class frequency '
                         'distributions. Pass the resulting weights as the '
                         'class_weight parameter.')
            # 'balanced_subsample' with bootstrap is applied per-tree in
            # _parallel_build_trees; only pre-expand weights otherwise.
            if (self.class_weight != 'balanced_subsample' or
                    not self.bootstrap):
                if self.class_weight == "balanced_subsample":
                    class_weight = "balanced"
                else:
                    class_weight = self.class_weight
                expanded_class_weight = compute_sample_weight(class_weight,
                                                              y_original)
        return y, expanded_class_weight
    def predict(self, X):
        """Predict class for X.
        The predicted class of an input sample is a vote by the trees in
        the forest, weighted by their probability estimates. That is,
        the predicted class is the one with highest mean probability
        estimate across the trees.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.
        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes.
        """
        proba = self.predict_proba(X)
        if self.n_outputs_ == 1:
            # Map argmax indices back to the original class labels.
            return self.classes_.take(np.argmax(proba, axis=1), axis=0)
        else:
            n_samples = proba[0].shape[0]
            predictions = np.zeros((n_samples, self.n_outputs_))
            for k in range(self.n_outputs_):
                predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
                                                                    axis=1),
                                                          axis=0)
            return predictions
    def predict_proba(self, X):
        """Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
        the mean predicted class probabilities of the trees in the forest. The
        class probability of a single tree is the fraction of samples of the same
        class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.
        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        check_is_fitted(self, 'estimators_')
        # Check data
        X = self._validate_X_predict(X)
        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
        # avoid storing the output of every estimator by summing them here
        all_proba = [np.zeros((X.shape[0], j), dtype=np.float64)
                     for j in np.atleast_1d(self.n_classes_)]
        Parallel(n_jobs=n_jobs, verbose=self.verbose, backend="threading")(
            delayed(accumulate_prediction)(e.predict_proba, X, all_proba)
            for e in self.estimators_)
        # Turn summed votes into the mean over trees.
        for proba in all_proba:
            proba /= len(self.estimators_)
        if len(all_proba) == 1:
            return all_proba[0]
        else:
            return all_proba
    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample is computed as
        the log of the mean predicted class probabilities of the trees in the
        forest.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.
        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)
        if self.n_outputs_ == 1:
            return np.log(proba)
        else:
            for k in range(self.n_outputs_):
                proba[k] = np.log(proba[k])
            return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
    """Base class for forest of trees-based regressors.
    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        super(ForestRegressor, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)
    def predict(self, X):
        """Predict regression target for X.
        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the trees in the forest.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, its dtype will be converted to
            ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.
        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted values.
        """
        check_is_fitted(self, 'estimators_')
        # Check data
        X = self._validate_X_predict(X)
        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
        # avoid storing the output of every estimator by summing them here
        if self.n_outputs_ > 1:
            y_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64)
        else:
            y_hat = np.zeros((X.shape[0]), dtype=np.float64)
        # Parallel loop
        Parallel(n_jobs=n_jobs, verbose=self.verbose, backend="threading")(
            delayed(accumulate_prediction)(e.predict, X, [y_hat])
            for e in self.estimators_)
        # Turn the summed predictions into the mean over trees.
        y_hat /= len(self.estimators_)
        return y_hat
    def _set_oob_score(self, X, y):
        """Compute out-of-bag scores"""
        X = check_array(X, dtype=DTYPE, accept_sparse='csr')
        n_samples = y.shape[0]
        # predictions sums per-sample OOB predictions; n_predictions counts
        # how many trees contributed, so the mean can be taken at the end.
        predictions = np.zeros((n_samples, self.n_outputs_))
        n_predictions = np.zeros((n_samples, self.n_outputs_))
        for estimator in self.estimators_:
            unsampled_indices = _generate_unsampled_indices(
                estimator.random_state, n_samples)
            p_estimator = estimator.predict(
                X[unsampled_indices, :], check_input=False)
            if self.n_outputs_ == 1:
                p_estimator = p_estimator[:, np.newaxis]
            predictions[unsampled_indices, :] += p_estimator
            n_predictions[unsampled_indices, :] += 1
        if (n_predictions == 0).any():
            warn("Some inputs do not have OOB scores. "
                 "This probably means too few trees were used "
                 "to compute any reliable oob estimates.")
            # Avoid division by zero for samples with no OOB prediction.
            n_predictions[n_predictions == 0] = 1
        predictions /= n_predictions
        self.oob_prediction_ = predictions
        if self.n_outputs_ == 1:
            self.oob_prediction_ = \
                self.oob_prediction_.reshape((n_samples, ))
        self.oob_score_ = 0.0
        for k in range(self.n_outputs_):
            self.oob_score_ += r2_score(y[:, k],
                                        predictions[:, k])
        # Average R^2 across outputs.
        self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
    """A random forest classifier.
    A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and use averaging to
    improve the predictive accuracy and control over-fitting.
    The sub-sample size is always the same as the original
    input sample size but the samples are drawn with replacement if
    `bootstrap=True` (default).
    Read more in the :ref:`User Guide <forest>`.
    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.
        Note: this parameter is tree-specific.
    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
    min_samples_split : int, float, optional (default=2)
        The minimum number of samples required to split an internal node:
        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a percentage and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.
        .. versionchanged:: 0.18
           Added float values for percentages.
    min_samples_leaf : int, float, optional (default=1)
        The minimum number of samples required to be at a leaf node:
        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a percentage and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.
        .. versionchanged:: 0.18
           Added float values for percentages.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the sum total of weights (of all
        the input samples) required to be at a leaf node. Samples have
        equal weight when sample_weight is not provided.
    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
    min_impurity_decrease : float, optional (default=0.)
        A node will be split if this split induces a decrease of the impurity
        greater than or equal to this value.
        The weighted impurity decrease equation is the following::
            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)
        where ``N`` is the total number of samples, ``N_t`` is the number of
        samples at the current node, ``N_t_L`` is the number of samples in the
        left child, and ``N_t_R`` is the number of samples in the right child.
        ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
        if ``sample_weight`` is passed.
        .. versionadded:: 0.19
    min_impurity_split : float,
        Threshold for early stopping in tree growth. A node will split
        if its impurity is above the threshold, otherwise it is a leaf.
        .. deprecated:: 0.19
           ``min_impurity_split`` has been deprecated in favor of
           ``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
           Use ``min_impurity_decrease`` instead.
    bootstrap : boolean, optional (default=True)
        Whether bootstrap samples are used when building trees.
    oob_score : bool (default=False)
        Whether to use out-of-bag samples to estimate
        the generalization accuracy.
    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.
    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.
    class_weight : dict, list of dicts, "balanced",
        "balanced_subsample" or None, optional (default=None)
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.
        Note that for multioutput (including multilabel) weights should be
        defined for each class of every column in its own dict. For example,
        for four-class multilabel classification weights should be
        [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
        [{1:1}, {2:5}, {3:1}, {4:1}].
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
        The "balanced_subsample" mode is the same as "balanced" except that
        weights are computed based on the bootstrap sample for every tree
        grown.
        For multi-output, the weights of each column of y will be multiplied.
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
    Attributes
    ----------
    estimators_ : list of DecisionTreeClassifier
        The collection of fitted sub-estimators.
    classes_ : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem), or a list of arrays of
        class labels (multi-output problem).
    n_classes_ : int or list
        The number of classes (single output problem), or a list containing the
        number of classes for each output (multi-output problem).
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.
    oob_decision_function_ : array of shape = [n_samples, n_classes]
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN.
    Examples
    --------
    >>> from sklearn.ensemble import RandomForestClassifier
    >>> from sklearn.datasets import make_classification
    >>>
    >>> X, y = make_classification(n_samples=1000, n_features=4,
    ...                            n_informative=2, n_redundant=0,
    ...                            random_state=0, shuffle=False)
    >>> clf = RandomForestClassifier(max_depth=2, random_state=0)
    >>> clf.fit(X, y)
    RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
                max_depth=2, max_features='auto', max_leaf_nodes=None,
                min_impurity_decrease=0.0, min_impurity_split=None,
                min_samples_leaf=1, min_samples_split=2,
                min_weight_fraction_leaf=0.0, n_estimators=10, n_jobs=1,
                oob_score=False, random_state=0, verbose=0, warm_start=False)
    >>> print(clf.feature_importances_)
    [ 0.17287856 0.80608704 0.01884792 0.00218648]
    >>> print(clf.predict([[0, 0, 0, 0]]))
    [1]
    Notes
    -----
    The default values for the parameters controlling the size of the trees
    (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
    unpruned trees which can potentially be very large on some data sets. To
    reduce memory consumption, the complexity and size of the trees should be
    controlled by setting those parameter values.
    The features are always randomly permuted at each split. Therefore,
    the best found split may vary, even with the same training data,
    ``max_features=n_features`` and ``bootstrap=False``, if the improvement
    of the criterion is identical for several splits enumerated during the
    search of the best split. To obtain a deterministic behaviour during
    fitting, ``random_state`` has to be fixed.
    References
    ----------
    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
    See also
    --------
    DecisionTreeClassifier, ExtraTreesClassifier
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 min_impurity_decrease=0.,
                 min_impurity_split=None,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        super(RandomForestClassifier, self).__init__(
            base_estimator=DecisionTreeClassifier(),
            n_estimators=n_estimators,
            # estimator_params lists the attributes of *this* estimator that
            # are forwarded to each DecisionTreeClassifier when it is cloned.
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "min_impurity_decrease", "min_impurity_split",
                              "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight)
        # Tree-specific parameters are stored on the forest so they can be
        # propagated to the sub-estimators via estimator_params above.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.min_impurity_split = min_impurity_split
class RandomForestRegressor(ForestRegressor):
    """A random forest regressor.

    Fits ``n_estimators`` decision-tree regressors on sub-samples of the
    data and averages their predictions to improve accuracy and limit
    over-fitting.  Sub-samples are the size of the input and are drawn
    with replacement when ``bootstrap=True`` (the default).

    Read more in the :ref:`User Guide <forest>`.

    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        Number of trees in the forest.

    criterion : string, optional (default="mse")
        Split quality measure: "mse" for mean squared error (equivalent
        to variance reduction) or "mae" for mean absolute error.

        .. versionadded:: 0.18
           Mean Absolute Error (MAE) criterion.

    max_depth : integer or None, optional (default=None)
        Maximum depth of each tree.  If None, nodes are expanded until
        all leaves are pure or contain fewer than ``min_samples_split``
        samples.

    min_samples_split : int, float, optional (default=2)
        Minimum number of samples required to split an internal node.
        A float is interpreted as a percentage:
        ``ceil(min_samples_split * n_samples)``.

        .. versionchanged:: 0.18
           Added float values for percentages.

    min_samples_leaf : int, float, optional (default=1)
        Minimum number of samples required at a leaf node.  A float is
        interpreted as a percentage:
        ``ceil(min_samples_leaf * n_samples)``.

        .. versionchanged:: 0.18
           Added float values for percentages.

    min_weight_fraction_leaf : float, optional (default=0.)
        Minimum weighted fraction of the total sample weight required at
        a leaf node.  Samples have equal weight when ``sample_weight``
        is not provided.

    max_features : int, float, string or None, optional (default="auto")
        Number of features considered when looking for the best split:
        an int is taken literally, a float as a percentage
        (``int(max_features * n_features)``), "auto" and None mean all
        features, "sqrt" means ``sqrt(n_features)`` and "log2" means
        ``log2(n_features)``.  The search does not stop until at least
        one valid partition is found, even if that requires inspecting
        more than ``max_features`` features.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees best-first with at most this many leaves (leaves
        ranked by relative impurity reduction); None means unlimited.

    min_impurity_decrease : float, optional (default=0.)
        A node is split only if the split induces an impurity decrease
        of at least this value, computed as::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` the samples
        at the node and ``N_t_L``/``N_t_R`` the samples in the left and
        right children (all weighted sums if ``sample_weight`` is
        passed).

        .. versionadded:: 0.19

    min_impurity_split : float,
        Threshold for early stopping in tree growth.

        .. deprecated:: 0.19
           ``min_impurity_split`` has been deprecated in favor of
           ``min_impurity_decrease`` in 0.19 and will be removed in
           0.21.  Use ``min_impurity_decrease`` instead.

    bootstrap : boolean, optional (default=True)
        Whether bootstrap samples are used when building trees.

    oob_score : bool, optional (default=False)
        Whether to use out-of-bag samples to estimate the R^2 on unseen
        data.

    n_jobs : integer, optional (default=1)
        Number of jobs run in parallel for both `fit` and `predict`;
        -1 uses all cores.

    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator, or None (use the global ``np.random`` state).

    verbose : int, optional (default=0)
        Verbosity of the tree-building process.

    warm_start : bool, optional (default=False)
        When True, reuse the solution of the previous call to ``fit``
        and add more estimators to the ensemble instead of refitting.

    Attributes
    ----------
    estimators_ : list of DecisionTreeRegressor
        The collection of fitted sub-estimators.

    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important).

    n_features_ : int
        Number of features seen during ``fit``.

    n_outputs_ : int
        Number of outputs seen during ``fit``.

    oob_score_ : float
        Training-set score obtained from the out-of-bag estimate.

    oob_prediction_ : array of shape = [n_samples]
        Out-of-bag predictions on the training set.

    Notes
    -----
    The default tree-size parameters (``max_depth``,
    ``min_samples_leaf``, etc.) grow full, unpruned trees that can be
    very large on some data sets; constrain them to reduce memory use.
    Features are randomly permuted at each split, so the best split may
    vary between runs even with ``max_features=n_features`` and
    ``bootstrap=False``; fix ``random_state`` for determinism.

    References
    ----------
    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.

    See also
    --------
    DecisionTreeRegressor, ExtraTreesRegressor
    """

    def __init__(self,
                 n_estimators=10,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 min_impurity_decrease=0.,
                 min_impurity_split=None,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        # Attribute names forwarded to every tree's constructor.
        tree_params = ("criterion", "max_depth", "min_samples_split",
                       "min_samples_leaf", "min_weight_fraction_leaf",
                       "max_features", "max_leaf_nodes",
                       "min_impurity_decrease", "min_impurity_split",
                       "random_state")
        super(RandomForestRegressor, self).__init__(
            base_estimator=DecisionTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=tree_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)

        # Tree-level hyper-parameters, read back via ``estimator_params``.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.min_impurity_split = min_impurity_split
class ExtraTreesClassifier(ForestClassifier):
    """An extra-trees classifier.

    Meta estimator that fits a number of randomized decision trees
    (a.k.a. extra-trees) on various sub-samples of the dataset and uses
    averaging to improve the predictive accuracy and control
    over-fitting.

    Read more in the :ref:`User Guide <forest>`.

    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        Number of trees in the forest.

    criterion : string, optional (default="gini")
        Split quality measure: "gini" for Gini impurity or "entropy"
        for information gain.

    max_depth : integer or None, optional (default=None)
        Maximum depth of each tree.  If None, nodes are expanded until
        all leaves are pure or contain fewer than ``min_samples_split``
        samples.

    min_samples_split : int, float, optional (default=2)
        Minimum number of samples required to split an internal node.
        A float is interpreted as a percentage:
        ``ceil(min_samples_split * n_samples)``.

        .. versionchanged:: 0.18
           Added float values for percentages.

    min_samples_leaf : int, float, optional (default=1)
        Minimum number of samples required at a leaf node.  A float is
        interpreted as a percentage:
        ``ceil(min_samples_leaf * n_samples)``.

        .. versionchanged:: 0.18
           Added float values for percentages.

    min_weight_fraction_leaf : float, optional (default=0.)
        Minimum weighted fraction of the total sample weight required at
        a leaf node.  Samples have equal weight when ``sample_weight``
        is not provided.

    max_features : int, float, string or None, optional (default="auto")
        Number of features considered when looking for the best split:
        an int is taken literally, a float as a percentage
        (``int(max_features * n_features)``), "auto" and "sqrt" mean
        ``sqrt(n_features)``, "log2" means ``log2(n_features)`` and
        None means all features.  The search does not stop until at
        least one valid partition is found, even if that requires
        inspecting more than ``max_features`` features.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees best-first with at most this many leaves (leaves
        ranked by relative impurity reduction); None means unlimited.

    min_impurity_decrease : float, optional (default=0.)
        A node is split only if the split induces an impurity decrease
        of at least this value, computed as::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` the samples
        at the node and ``N_t_L``/``N_t_R`` the samples in the left and
        right children (all weighted sums if ``sample_weight`` is
        passed).

        .. versionadded:: 0.19

    min_impurity_split : float,
        Threshold for early stopping in tree growth.

        .. deprecated:: 0.19
           ``min_impurity_split`` has been deprecated in favor of
           ``min_impurity_decrease`` in 0.19 and will be removed in
           0.21.  Use ``min_impurity_decrease`` instead.

    bootstrap : boolean, optional (default=False)
        Whether bootstrap samples are used when building trees.

    oob_score : bool, optional (default=False)
        Whether to use out-of-bag samples to estimate the
        generalization accuracy.

    n_jobs : integer, optional (default=1)
        Number of jobs run in parallel for both `fit` and `predict`;
        -1 uses all cores.

    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator, or None (use the global ``np.random`` state).

    verbose : int, optional (default=0)
        Verbosity of the tree-building process.

    warm_start : bool, optional (default=False)
        When True, reuse the solution of the previous call to ``fit``
        and add more estimators to the ensemble instead of refitting.

    class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None)
        Weights associated with classes as ``{class_label: weight}``;
        unset means all classes have weight one.  For multi-output
        problems, pass a list of dicts in the order of the columns of
        ``y``, with weights defined for every class of every column.
        "balanced" adjusts weights inversely proportional to class
        frequencies, ``n_samples / (n_classes * np.bincount(y))``;
        "balanced_subsample" does the same per bootstrap sample.  These
        weights are multiplied with ``sample_weight`` when both are
        given.

    Attributes
    ----------
    estimators_ : list of DecisionTreeClassifier
        The collection of fitted sub-estimators.

    classes_ : array of shape = [n_classes] or a list of such arrays
        Class labels (single output) or list of label arrays
        (multi-output).

    n_classes_ : int or list
        Number of classes (single output) or list thereof
        (multi-output).

    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important).

    n_features_ : int
        Number of features seen during ``fit``.

    n_outputs_ : int
        Number of outputs seen during ``fit``.

    oob_score_ : float
        Training-set score obtained from the out-of-bag estimate.

    oob_decision_function_ : array of shape = [n_samples, n_classes]
        Decision function computed with the out-of-bag estimate on the
        training set; may contain NaN for points never left out when
        ``n_estimators`` is small.

    Notes
    -----
    The default tree-size parameters (``max_depth``,
    ``min_samples_leaf``, etc.) grow full, unpruned trees that can be
    very large on some data sets; constrain them to reduce memory use.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.

    See also
    --------
    sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
    RandomForestClassifier : Ensemble Classifier based on trees with optimal
        splits.
    """

    def __init__(self,
                 n_estimators=10,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 min_impurity_decrease=0.,
                 min_impurity_split=None,
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        # Attribute names forwarded to every tree's constructor.
        tree_params = ("criterion", "max_depth", "min_samples_split",
                       "min_samples_leaf", "min_weight_fraction_leaf",
                       "max_features", "max_leaf_nodes",
                       "min_impurity_decrease", "min_impurity_split",
                       "random_state")
        super(ExtraTreesClassifier, self).__init__(
            base_estimator=ExtraTreeClassifier(),
            n_estimators=n_estimators,
            estimator_params=tree_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight)

        # Tree-level hyper-parameters, read back via ``estimator_params``.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.min_impurity_split = min_impurity_split
class ExtraTreesRegressor(ForestRegressor):
    """An extra-trees regressor.

    Meta estimator that fits a number of randomized decision trees
    (a.k.a. extra-trees) on various sub-samples of the dataset and uses
    averaging to improve the predictive accuracy and control
    over-fitting.

    Read more in the :ref:`User Guide <forest>`.

    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        Number of trees in the forest.

    criterion : string, optional (default="mse")
        Split quality measure: "mse" for mean squared error (equivalent
        to variance reduction) or "mae" for mean absolute error.

        .. versionadded:: 0.18
           Mean Absolute Error (MAE) criterion.

    max_depth : integer or None, optional (default=None)
        Maximum depth of each tree.  If None, nodes are expanded until
        all leaves are pure or contain fewer than ``min_samples_split``
        samples.

    min_samples_split : int, float, optional (default=2)
        Minimum number of samples required to split an internal node.
        A float is interpreted as a percentage:
        ``ceil(min_samples_split * n_samples)``.

        .. versionchanged:: 0.18
           Added float values for percentages.

    min_samples_leaf : int, float, optional (default=1)
        Minimum number of samples required at a leaf node.  A float is
        interpreted as a percentage:
        ``ceil(min_samples_leaf * n_samples)``.

        .. versionchanged:: 0.18
           Added float values for percentages.

    min_weight_fraction_leaf : float, optional (default=0.)
        Minimum weighted fraction of the total sample weight required at
        a leaf node.  Samples have equal weight when ``sample_weight``
        is not provided.

    max_features : int, float, string or None, optional (default="auto")
        Number of features considered when looking for the best split:
        an int is taken literally, a float as a percentage
        (``int(max_features * n_features)``), "auto" and None mean all
        features, "sqrt" means ``sqrt(n_features)`` and "log2" means
        ``log2(n_features)``.  The search does not stop until at least
        one valid partition is found, even if that requires inspecting
        more than ``max_features`` features.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees best-first with at most this many leaves (leaves
        ranked by relative impurity reduction); None means unlimited.

    min_impurity_decrease : float, optional (default=0.)
        A node is split only if the split induces an impurity decrease
        of at least this value, computed as::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` the samples
        at the node and ``N_t_L``/``N_t_R`` the samples in the left and
        right children (all weighted sums if ``sample_weight`` is
        passed).

        .. versionadded:: 0.19

    min_impurity_split : float,
        Threshold for early stopping in tree growth.

        .. deprecated:: 0.19
           ``min_impurity_split`` has been deprecated in favor of
           ``min_impurity_decrease`` in 0.19 and will be removed in
           0.21.  Use ``min_impurity_decrease`` instead.

    bootstrap : boolean, optional (default=False)
        Whether bootstrap samples are used when building trees.

    oob_score : bool, optional (default=False)
        Whether to use out-of-bag samples to estimate the R^2 on unseen
        data.

    n_jobs : integer, optional (default=1)
        Number of jobs run in parallel for both `fit` and `predict`;
        -1 uses all cores.

    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator, or None (use the global ``np.random`` state).

    verbose : int, optional (default=0)
        Verbosity of the tree-building process.

    warm_start : bool, optional (default=False)
        When True, reuse the solution of the previous call to ``fit``
        and add more estimators to the ensemble instead of refitting.

    Attributes
    ----------
    estimators_ : list of DecisionTreeRegressor
        The collection of fitted sub-estimators.

    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important).

    n_features_ : int
        The number of features.

    n_outputs_ : int
        The number of outputs.

    oob_score_ : float
        Training-set score obtained from the out-of-bag estimate.

    oob_prediction_ : array of shape = [n_samples]
        Out-of-bag predictions on the training set.

    Notes
    -----
    The default tree-size parameters (``max_depth``,
    ``min_samples_leaf``, etc.) grow full, unpruned trees that can be
    very large on some data sets; constrain them to reduce memory use.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.

    See also
    --------
    sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
    RandomForestRegressor: Ensemble regressor using trees with optimal splits.
    """

    def __init__(self,
                 n_estimators=10,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 min_impurity_decrease=0.,
                 min_impurity_split=None,
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        # Attribute names forwarded to every tree's constructor.
        tree_params = ("criterion", "max_depth", "min_samples_split",
                       "min_samples_leaf", "min_weight_fraction_leaf",
                       "max_features", "max_leaf_nodes",
                       "min_impurity_decrease", "min_impurity_split",
                       "random_state")
        super(ExtraTreesRegressor, self).__init__(
            base_estimator=ExtraTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=tree_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)

        # Tree-level hyper-parameters, read back via ``estimator_params``.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.min_impurity_split = min_impurity_split
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : integer, optional (default=10)
Number of trees in the forest.
max_depth : integer, optional (default=5)
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` is the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` is the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
X = check_array(X, accept_sparse=['csc'])
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
    """Transform dataset.

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_samples, n_features)
        Input data to be transformed. Use ``dtype=np.float32`` for maximum
        efficiency. Sparse matrices are also supported, use sparse
        ``csr_matrix`` for maximum efficiency.

    Returns
    -------
    X_transformed : sparse matrix, shape=(n_samples, n_out)
        Transformed dataset.
    """
    # Map each sample to its leaf indices, then one-hot encode them.
    leaves = self.apply(X)
    return self.one_hot_encoder_.transform(leaves)
| clemkoa/scikit-learn | sklearn/ensemble/forest.py | Python | bsd-3-clause | 78,779 | [
"Brian"
] | d3d059f561a955a205103c5ffb6c485e00406697715c85afaf10e6ead9df2204 |
import os
import re
import errno
def colorText(string, color, **kwargs):
    """
    Color a string for terminal (ANSI) or "html"-tagged display.

    Args:
        string[str]: The string to color.
        color[str]: The color to use (see color_codes below).

    Kwargs:
        html[bool]: If True, <r>/<c>/<g>/<y>/<b> tags inside the string are
            converted to ANSI codes (or stripped when coloring is disabled).
        code[bool]: When True (default) use the conventional ANSI mapping for
            GREEN/CYAN/MAGENTA; otherwise keep the legacy swapped mapping.
        colored[bool]: When False the coloring is not applied.
    """
    # Get the properties
    html = kwargs.pop('html', False)
    code = kwargs.pop('code', True)
    colored = kwargs.pop('colored', True)

    # ANSI color codes for colored terminal output
    color_codes = {'RESET':'\033[0m','BOLD':'\033[1m','RED':'\033[31m','GREEN':'\033[35m','CYAN':'\033[34m','YELLOW':'\033[33m','MAGENTA':'\033[32m'}
    if code:
        color_codes['GREEN'] = '\033[32m'
        color_codes['CYAN'] = '\033[36m'
        color_codes['MAGENTA'] = '\033[35m'

    # BUGFIX: dict.has_key() was removed in Python 3; os.environ.get() is
    # equivalent here and works on both Python 2 and 3.
    if colored and os.environ.get('BITTEN_NOCOLOR') != 'true':
        if html:
            string = string.replace('<r>', color_codes['BOLD']+color_codes['RED'])
            string = string.replace('<c>', color_codes['BOLD']+color_codes['CYAN'])
            string = string.replace('<g>', color_codes['BOLD']+color_codes['GREEN'])
            string = string.replace('<y>', color_codes['BOLD']+color_codes['YELLOW'])
            string = string.replace('<b>', color_codes['BOLD'])
            string = re.sub(r'</[rcgyb]>', color_codes['RESET'], string)
        else:
            string = color_codes[color] + string + color_codes['RESET']
    elif html:
        string = re.sub(r'</?[rcgyb]>', '', string) # strip all "html" tags

    return string
def str2bool(string):
    """
    Convert a string to a boolean.

    Args:
        string[str]: The text to convert (e.g., 'true' or '1').

    Returns:
        bool: True when the lower-cased input is 'true' or '1'.
    """
    # BUGFIX: the original used "string is 'true'", which compares object
    # identity, not value; str.lower() generally returns a new object, so
    # the check failed even for valid input. Use value equality instead.
    return string.lower() in ('true', '1')
def find_moose_executable(loc, **kwargs):
    """
    Locate a MOOSE executable within the given directory.

    Args:
        loc[str]: The directory containing the MOOSE executable.

    Kwargs:
        methods[list]: (Default: ['opt', 'oprof', 'dbg', 'devel']) The list
            of build types to consider.
        name[str]: (Default: os.path.basename(loc)) The name of the
            executable to locate.

    Returns:
        The full path of the first matching executable, or an errno code
        (errno.ENOTDIR / errno.ENOENT) on failure.
    """
    # Set the methods and name local variables
    methods = kwargs.pop('methods', ['opt', 'oprof', 'dbg', 'devel'])
    name = kwargs.pop('name', os.path.basename(loc))

    # Handle 'combined' and 'tests', which build differently named binaries.
    if os.path.isdir(loc):
        if name == 'combined':
            name = 'modules'
        elif name == 'tests':
            name = 'moose_test'

    # Check that the location exists and that it is a directory
    loc = os.path.abspath(loc)
    if not os.path.isdir(loc):
        print('ERROR: The supplied path must be a valid directory: ' + loc)
        return errno.ENOTDIR

    # Return the first executable that exists for the preferred methods.
    for method in methods:
        exe = os.path.join(loc, name + '-' + method)
        if os.path.isfile(exe):
            return exe

    # BUGFIX: the original tested "if not errno.ENOENT", which is always
    # False (ENOENT is a non-zero constant), so the failure was silently
    # ignored and a non-existent path could be returned.
    print('ERROR: Unable to locate a valid MOOSE executable in directory')
    return errno.ENOENT
def runExe(app_path, args):
    """
    Run an executable and capture its output.

    Args:
        app_path[str]: The application to execute.
        args[list|str]: The argument(s) to pass to the executable.

    Returns:
        str: The combined stdout/stderr of the process, decoded as UTF-8.
    """
    import subprocess

    # Build the argument vector; a bare string becomes a single argument.
    cmd = [str(app_path)]
    if isinstance(args, str):
        cmd.append(args)
    else:
        cmd.extend(args)

    # Merge stderr into stdout so callers see one interleaved stream.
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    raw_output = process.communicate()[0]
    return raw_output.decode("utf-8")
def check_configuration(packages):
    """
    Check that the supplied packages can be imported.

    Args:
        packages[list]: Names of the required python packages.

    Return:
        [int]: 0 = Success; 1 = Missing package(s)
    """
    missing = []
    for package in packages:
        try:
            __import__(package)
        except ImportError:
            missing.append(package)

    if missing:
        # FIX: the original used Python-2-only 'print' statements; the
        # parenthesized single-argument form below produces byte-identical
        # output on both Python 2 and Python 3.
        print("The following packages are missing but required:")
        for m in missing:
            print(' ' * 4 + ' - ' + m)
        return 1
    return 0
def touch(fname):
    """
    Touch a file so to update modified time (creates it when missing).
    """
    # Opening in append mode creates the file without truncating it.
    handle = open(fname, 'a')
    try:
        os.utime(fname, None)
    finally:
        handle.close()
def gold(filename):
    """
    Return the 'gold' counterpart of a filename, or None.

    The gold file lives at <dirname>/gold/<basename>; None is returned when
    either the input file or the gold file does not exist.
    """
    if not os.path.exists(filename):
        return None
    candidate = os.path.join(os.path.dirname(filename), 'gold',
                             os.path.basename(filename))
    return candidate if os.path.exists(candidate) else None
def unique_list(output, input):
    """
    Append the items of ``input`` to ``output`` in place, skipping any
    item already present.

    Note: membership is tested with ``in`` (not a set) so unhashable
    items are supported and the original order is preserved.
    """
    for candidate in input:
        if candidate in output:
            continue
        output.append(candidate)
| stimpsonsg/moose | python/mooseutils/mooseutils.py | Python | lgpl-2.1 | 4,939 | [
"MOOSE"
] | d817b1544112fc0dcc0f65d5110eafd0bc0b38e966d0cd25d7a29e4c1063e029 |
""" 2-input XOR example using Izhikevich's spiking neuron model. """
from __future__ import print_function
import multiprocessing
import os
from matplotlib import patches
from matplotlib import pylab as plt
import visualize
import neat
# Network inputs and expected outputs.
xor_inputs = ((0, 0), (0, 1), (1, 0), (1, 1))
xor_outputs = (0, 1, 1, 0)
# Maximum amount of simulated time (in milliseconds) to wait for the network to produce an output.
max_time_msec = 20.0
def compute_output(t0, t1):
    """Map the two output neurons' first-spike times to a response in [0, 1].

    Returns -1.0 when either neuron never fired within the allotted time
    (producing a large error). Otherwise the response decays linearly from
    1 (spikes within 1 ms of each other) down to 0 (spikes 11 ms or more
    apart).
    """
    if t0 is None or t1 is None:
        return -1.0
    spread = abs(t0 - t1)
    # Clamp the linear ramp into [0, 1].
    return min(1.0, max(0.0, 1.1 - 0.1 * spread))
def simulate(genome, config):
    """Simulate the spiking network on every XOR input pattern.

    Returns a ``(sum_square_error, simulated)`` pair where ``simulated``
    holds one tuple per pattern: the inputs, the expected output, the
    first-spike times and potentials of both output neurons, and the full
    per-neuron traces.
    """
    # Create a network of "fast spiking" Izhikevich neurons.
    net = neat.iznn.IZNN.create(genome, config)
    dt = net.get_time_step_msec()
    sum_square_error = 0.0
    simulated = []
    for idata, odata in zip(xor_inputs, xor_outputs):
        neuron_data = {}
        for i, n in net.neurons.items():
            neuron_data[i] = []

        # Reset the network, apply the XOR inputs, and run for the maximum
        # allowed time.  (FIX: the original called set_inputs twice; once
        # is sufficient.)
        net.reset()
        net.set_inputs(idata)
        t0 = None
        t1 = None
        v0 = None
        v1 = None
        num_steps = int(max_time_msec / dt)
        for j in range(num_steps):
            t = dt * j
            output = net.advance(dt)

            # Capture the time and neuron membrane potential for later use if desired.
            for i, n in net.neurons.items():
                neuron_data[i].append((t, n.current, n.v, n.u, n.fired))

            # Remember time and value of the first output spikes from each neuron.
            # Index -2 picks the sample just before the spike was reported.
            if t0 is None and output[0] > 0:
                t0, I0, v0, u0, f0 = neuron_data[net.outputs[0]][-2]

            if t1 is None and output[1] > 0:
                # FIX: the original unpacked this flag into f0, shadowing
                # the first neuron's flag; neither is used afterwards.
                t1, I1, v1, u1, f1 = neuron_data[net.outputs[1]][-2]

        response = compute_output(t0, t1)
        sum_square_error += (response - odata) ** 2

        simulated.append((idata, odata, t0, t1, v0, v1, neuron_data))

    return sum_square_error, simulated
def eval_genome(genome, config):
    """Fitness of a single genome: 10 minus the summed squared error."""
    error, _traces = simulate(genome, config)
    return 10.0 - error
def eval_genomes(genomes, config):
    """Serial fallback: assign a fitness to every genome in the population."""
    for _genome_id, genome in genomes:
        genome.fitness = eval_genome(genome, config)
def run(config_path):
    """Evolve a spiking-network XOR solver and plot the winning genome.

    Loads the NEAT configuration from ``config_path``, evolves for up to
    3000 generations with a parallel evaluator, then visualizes the winner
    and plots the membrane-potential traces for all four XOR inputs.
    """
    # Load the config file, which is assumed to live in
    # the same directory as this script.
    config = neat.Config(neat.iznn.IZGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         config_path)

    # For this network, we use two output neurons and use the difference between
    # the "time to first spike" to determine the network response.  There are
    # probably a great many different choices one could make for an output encoding,
    # and this choice may not be the best for tackling a real problem.
    config.output_nodes = 2

    pop = neat.population.Population(config)

    # Add a stdout reporter to show progress in the terminal.
    pop.add_reporter(neat.StdOutReporter(True))
    stats = neat.StatisticsReporter()
    pop.add_reporter(stats)

    # Evaluate genomes in parallel, one worker per CPU core.
    pe = neat.ParallelEvaluator(multiprocessing.cpu_count(), eval_genome)
    winner = pop.run(pe.evaluate, 3000)

    # Display the winning genome.
    print('\nBest genome:\n{!s}'.format(winner))

    node_names = {-1: 'A', -2: 'B'}
    visualize.draw_net(config, winner, True, node_names=node_names)
    visualize.plot_stats(stats, ylog=False, view=True)
    visualize.plot_species(stats, view=True)

    # Show output of the most fit genome against training data, and create
    # a plot of the traces out to the max time for each set of inputs.
    print('\nBest network output:')
    plt.figure(figsize=(12, 12))
    sum_square_error, simulated = simulate(winner, config)
    for r, (inputData, outputData, t0, t1, v0, v1, neuron_data) in enumerate(simulated):
        response = compute_output(t0, t1)
        print("{0!r} expected {1:.3f} got {2:.3f}".format(inputData, outputData, response))

        # One subplot per XOR input pattern (4 rows).
        axes = plt.subplot(4, 1, r + 1)
        plt.title("Traces for XOR input {{{0:.1f}, {1:.1f}}}".format(*inputData), fontsize=12)
        for i, s in neuron_data.items():
            # Only neurons 0 and 1 are output neurons; plot just those.
            if i in [0, 1]:
                t, I, v, u, fired = zip(*s)
                plt.plot(t, v, "-", label="neuron {0:d}".format(i))

        # Circle the first peak of each output.
        circle0 = patches.Ellipse((t0, v0), 1.0, 10.0, color='r', fill=False)
        circle1 = patches.Ellipse((t1, v1), 1.0, 10.0, color='r', fill=False)
        axes.add_artist(circle0)
        axes.add_artist(circle1)

        plt.ylabel("Potential (mv)", fontsize=10)
        plt.ylim(-100, 50)
        plt.tick_params(labelsize=8)
        plt.grid()

    plt.xlabel("Time (in ms)", fontsize=10)
    plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
    plt.savefig("traces.png", dpi=90)
    plt.show()
if __name__ == '__main__':
    # Resolve the config file relative to this script's location.
    run(os.path.join(os.path.dirname(__file__), 'config-spiking'))
| CodeReclaimers/neat-python | examples/xor/evolve-spiking.py | Python | bsd-3-clause | 5,782 | [
"NEURON"
] | ab0085c939dad11e978250ec56cff296c872803f9ec435ea91f1b3940703c20d |
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_gslbservice
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of GslbService Avi RESTful Object
description:
- This module is used to configure GslbService object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
controller_health_status_enabled:
description:
- Gs member's overall health status is derived based on a combination of controller and datapath health-status inputs.
- Note that the datapath status is determined by the association of health monitor profiles.
- Only the controller provided status is determined through this configuration.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
created_by:
description:
- Creator name.
- Field introduced in 17.1.2.
version_added: "2.4"
description:
description:
- User defined description for the object.
domain_names:
description:
- Fully qualified domain name of the gslb service.
down_response:
description:
- Response to the client query when the gslb service is down.
enabled:
description:
- Enable or disable the gslb service.
- If the gslb service is enabled, then the vips are sent in the dns responses based on reachability and configured algorithm.
- If the gslb service is disabled, then the vips are no longer available in the dns response.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
groups:
description:
- Select list of pools belonging to this gslb service.
health_monitor_refs:
description:
- Verify vs health by applying one or more health monitors.
- Active monitors generate synthetic traffic from dns service engine and to mark a vs up or down based on the response.
- It is a reference to an object of type gslbhealthmonitor.
health_monitor_scope:
description:
- Health monitor probe can be executed for all the members or it can be executed only for third-party members.
- This operational mode is useful to reduce the number of health monitor probes in case of a hybrid scenario.
- In such a case, avi members can have controller derived status while non-avi members can be probed by via health monitor probes in dataplane.
- Enum options - GSLB_SERVICE_HEALTH_MONITOR_ALL_MEMBERS, GSLB_SERVICE_HEALTH_MONITOR_ONLY_NON_AVI_MEMBERS.
- Default value when not specified in API or module is interpreted by Avi Controller as GSLB_SERVICE_HEALTH_MONITOR_ALL_MEMBERS.
name:
description:
- Name for the gslb service.
required: true
num_dns_ip:
description:
- Number of ip addresses of this gslb service to be returned by the dns service.
- Enter 0 to return all ip addresses.
- Allowed values are 1-20.
- Special values are 0- 'return all ip addresses'.
tenant_ref:
description:
- It is a reference to an object of type tenant.
ttl:
description:
- Ttl value (in seconds) for records served for this gslb service by the dns service.
- Allowed values are 1-86400.
url:
description:
- Avi controller URL of the object.
use_edns_client_subnet:
description:
- Use the client ip subnet from the edns option as source ipaddress for client geo-location and consistent hash algorithm.
- Default is true.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
uuid:
description:
- Uuid of the gslb service.
wildcard_match:
description:
- Enable wild-card match of fqdn if an exact match is not found in the dns table, the longest match is chosen by wild-carding the fqdn in the dns
- request.
- Default is false.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create GslbService object
avi_gslbservice:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_gslbservice
"""
RETURN = '''
obj:
description: GslbService (api/gslbservice) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Entry point: declare the GslbService options and drive the Avi API."""
    # Module option schema (plain literals instead of dict() calls).
    argument_specs = {
        'state': {'default': 'present', 'choices': ['absent', 'present']},
        'controller_health_status_enabled': {'type': 'bool'},
        'created_by': {'type': 'str'},
        'description': {'type': 'str'},
        'domain_names': {'type': 'list'},
        'down_response': {'type': 'dict'},
        'enabled': {'type': 'bool'},
        'groups': {'type': 'list'},
        'health_monitor_refs': {'type': 'list'},
        'health_monitor_scope': {'type': 'str'},
        'name': {'type': 'str', 'required': True},
        'num_dns_ip': {'type': 'int'},
        'tenant_ref': {'type': 'str'},
        'ttl': {'type': 'int'},
        'url': {'type': 'str'},
        'use_edns_client_subnet': {'type': 'bool'},
        'uuid': {'type': 'str'},
        'wildcard_match': {'type': 'bool'},
    }
    # Common controller/credential options shared by all Avi modules.
    argument_specs.update(avi_common_argument_spec())
    ansible_module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    if not HAS_AVI:
        return ansible_module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(ansible_module, 'gslbservice',
                           set([]))


if __name__ == '__main__':
    main()
| MiLk/ansible | lib/ansible/modules/network/avi/avi_gslbservice.py | Python | gpl-3.0 | 7,459 | [
"VisIt"
] | 9d1179829b66472f6a85c677039d5323a651bf58173ab899e409994ca67af376 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
OTBUtils.py
---------------------
Date : 11-12-13
Copyright : (C) 2013 by CS Systemes d'information (CS SI)
Email : otb at c-s dot fr (CS SI)
Contributors : Julien Malik (CS SI) - creation of otbspecific
Oscar Picas (CS SI) -
Alexia Mondot (CS SI) - split otbspecific into 2 files
add functions
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
When QGIS is run, OTB algorithms are created according to xml files from description/ directory.
"""
__author__ = 'Julien Malik, Oscar Picas, Alexia Mondot'
__date__ = 'December 2013'
__copyright__ = '(C) 2013, CS Systemes d\'information (CS SI)'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
__version__ = "3.8"
import copy
from processing.algs.otb.OTBUtils import (renameValueField,
remove_dependent_choices,
remove_other_choices,
remove_parameter_by_key,
defaultSplit,
split_by_choice,
defaultWrite,
remove_choice,
remove_independent_choices)
def getBinaryMorphologicalOperation(available_app, original_dom_document):
    """
    Let ball as only available structype.
    Split the application according to its filter dilate, erode, opening, closing.
    """
    the_root = original_dom_document
    # A ball with a single radius: rename the x-radius field to act as
    # "the" radius (the y-radius parameter is removed below).
    renameValueField(the_root, 'structype.ball.xradius', 'name', 'The Structuring Element Radius')
    renameValueField(the_root, 'structype.ball.xradius', 'description', 'The Structuring Element Radius')
    # Keep only the 'ball' structuring element choice.
    remove_dependent_choices(the_root, 'structype', 'ball')
    remove_other_choices(the_root, 'structype', 'ball')
    # 'dilate' exposes no extra parameters of its own.
    remove_dependent_choices(the_root, 'filter', 'dilate')
    remove_parameter_by_key(the_root, 'structype.ball.yradius')
    # Emit one application per morphological filter choice.
    the_list = defaultSplit(available_app, the_root, 'filter')
    return the_list
def getEdgeExtraction(available_app, original_dom_document):
    """
    Rename the touzi x-radius to a single 'The Radius' field (the y-radius
    is dropped) and emit one application per edge filter (gradient, sobel,
    touzi).
    """
    root = original_dom_document
    renameValueField(root, 'filter.touzi.xradius', 'name', 'The Radius')
    renameValueField(root, 'filter.touzi.xradius', 'description', 'The Radius')
    remove_parameter_by_key(root, 'filter.touzi.yradius')
    documents = []
    for choice, doc in split_by_choice(root, 'filter').items():
        defaultWrite('%s-%s' % (available_app, choice), doc)
        documents.append(doc)
    return documents
def getGrayScaleMorphologicalOperation(available_app, original_dom_document):
    """
    Let ball as only available structype.
    Split the application according to its filter dilate, erode, opening, closing.
    """
    the_root = original_dom_document
    # A ball with a single radius: rename the x-radius field to act as
    # "the" radius (the y-radius parameter is removed below).
    renameValueField(the_root, 'structype.ball.xradius', 'name', 'The Structuring Element Radius')
    renameValueField(the_root, 'structype.ball.xradius', 'description', 'The Structuring Element Radius')
    # Keep only the 'ball' structuring element choice.
    remove_dependent_choices(the_root, 'structype', 'ball')
    remove_other_choices(the_root, 'structype', 'ball')
    remove_parameter_by_key(the_root, 'structype.ball.yradius')
    # Emit one application per morphological filter choice.
    split = defaultSplit(available_app, the_root, 'filter')
    return split
def getOrthoRectification(available_app, original_dom_document):
    """
    Let only mode auto.
    Remove all parameters which should be updated once the input file given.
    Split by SRS : EPSG, fit to ortho, lambert-wgs84 and UTM.
    Each of these SRS have their own parameters modified in this fonction.
    Delete GEOID and DEM parameter as they are not updated at the creation of the otb algorithms when you launch QGIS.
    The values are picked from the settings.
    """
    the_root = original_dom_document
    # Drop the 'auto' and 'outputroi' output modes entirely.
    remove_choice(the_root, 'outputs.mode', 'auto')
    remove_independent_choices(the_root, 'outputs.mode', 'auto')

    remove_choice(the_root, 'outputs.mode', 'outputroi')
    remove_independent_choices(the_root, 'outputs.mode', 'outputroi')

    # These geometry parameters are derived from the input image at run
    # time, so they are not exposed in the generated algorithm.
    remove_parameter_by_key(the_root, 'outputs.ulx')
    remove_parameter_by_key(the_root, 'outputs.uly')
    remove_parameter_by_key(the_root, 'outputs.sizex')
    remove_parameter_by_key(the_root, 'outputs.sizey')
    remove_parameter_by_key(the_root, 'outputs.spacingx')
    remove_parameter_by_key(the_root, 'outputs.spacingy')
    remove_parameter_by_key(the_root, 'outputs.lrx')
    remove_parameter_by_key(the_root, 'outputs.lry')
    remove_parameter_by_key(the_root, 'opt.rpc')
    # GEOID/DEM values come from the QGIS settings instead.
    deleteGeoidSrtm(the_root)
    remove_parameter_by_key(the_root, 'outputs.isotropic')

    # Snapshot before the ortho-fit mode is stripped; used below for the
    # 'fit-to-ortho' variant.
    emptyMap = copy.deepcopy(the_root)

    remove_parameter_by_key(the_root, 'outputs.ortho')
    remove_choice(the_root, 'outputs.mode', 'orthofit')
    remove_independent_choices(the_root, 'outputs.mode', 'orthofit')

    # Snapshot used below for the 'lambert-WGS84' variant.
    merged = copy.deepcopy(the_root)

    # One application per map projection choice; utm and epsg keep only
    # their own projection parameters.
    split = split_by_choice(the_root, 'map')
    the_list = []
    for key in split:
        if key == 'utm':
            the_doc = split[key]
            remove_parameter_by_key(the_doc, 'map.epsg.code')
            defaultWrite('%s-%s' % (available_app, key), the_doc)
            the_list.append(the_doc)
        elif key == 'epsg':
            the_doc = split[key]
            remove_parameter_by_key(the_doc, 'map.utm.northhem')
            remove_parameter_by_key(the_doc, 'map.utm.zone')
            defaultWrite('%s-%s' % (available_app, key), the_doc)
            the_list.append(the_doc)

    # 'lambert-WGS84' variant: no utm/epsg choices or parameters at all.
    remove_choice(merged, 'map', 'utm')
    remove_choice(merged, 'map', 'epsg')
    remove_parameter_by_key(merged, 'map.epsg.code')
    remove_parameter_by_key(merged, 'map.utm.northhem')
    remove_parameter_by_key(merged, 'map.utm.zone')
    old_app_name = merged.find('key').text
    merged.find('key').text = '%s-%s' % (old_app_name, 'lambert-WGS84')
    merged.find('longname').text = '%s (%s)' % (old_app_name, 'lambert-WGS84')
    defaultWrite('%s-%s' % (available_app, 'lambert-WGS84'), merged)
    the_list.append(merged)

    # 'fit-to-ortho' variant: geometry comes from the reference ortho
    # image, so the map and auto-size/spacing modes are removed.
    remove_parameter_by_key(emptyMap, 'map')
    remove_parameter_by_key(emptyMap, 'map.epsg.code')
    remove_parameter_by_key(emptyMap, 'map.utm.northhem')
    remove_parameter_by_key(emptyMap, 'map.utm.zone')
    remove_choice(emptyMap, 'outputs.mode', 'autosize')
    remove_independent_choices(emptyMap, 'outputs.mode', 'autosize')
    remove_choice(emptyMap, 'outputs.mode', 'autospacing')
    remove_independent_choices(emptyMap, 'outputs.mode', 'autospacing')
    old_app_name = emptyMap.find('key').text
    emptyMap.find('key').text = '%s-%s' % (old_app_name, 'fit-to-ortho')
    emptyMap.find('longname').text = '%s (%s)' % (old_app_name, 'fit-to-ortho')
    defaultWrite('%s-%s' % (available_app, 'fit-to-ortho'), emptyMap)
    the_list.append(emptyMap)
    return the_list
def getDimensionalityReduction(available_app, original_dom_document):
    """
    Drop the rescale output bounds and emit one application per reduction
    method (ica, maf, napca, pca).  The 'maf' method has no inverse
    output, so its 'outinv' parameter is removed as well.
    """
    root = original_dom_document
    remove_parameter_by_key(root, 'rescale.outmin')
    remove_parameter_by_key(root, 'rescale.outmax')
    documents = []
    for method, doc in split_by_choice(root, 'method').items():
        if method == 'maf':
            remove_parameter_by_key(doc, 'outinv')
        defaultWrite('%s-%s' % (available_app, method), doc)
        documents.append(doc)
    return documents
def getPansharpening(available_app, original_dom_document):
    """Emit one application per pan-sharpening method (bayes, lmvm, rcs)."""
    documents = []
    for method, doc in split_by_choice(original_dom_document, 'method').items():
        defaultWrite('%s-%s' % (available_app, method), doc)
        documents.append(doc)
    return documents
def getPixelValue(available_app, original_dom_document):
    """Drop the channel-list parameter and write the app otherwise unchanged."""
    root = original_dom_document
    remove_parameter_by_key(root, 'cl')
    defaultWrite(available_app, root)
    return [root]
def getExtractROI(available_app, original_dom_document):
    """
    Split by mode (standard, fit) and adapt the parameters of each
    resulting application.

    GEOID and DEM parameters are deleted because they are not updated at
    the creation of the OTB algorithms when QGIS is launched; their values
    are picked from the settings instead.
    """
    the_root = original_dom_document
    remove_parameter_by_key(the_root, 'cl')
    deleteGeoidSrtm(the_root)
    split = split_by_choice(the_root, 'mode')
    the_list = []
    for key in split:
        if key == 'standard':
            the_doc = split[key]
            # 'standard' mode uses explicit start/size coordinates, so the
            # 'fit' elevation and reference parameters do not apply.
            remove_parameter_by_key(the_doc, 'mode.fit.elev.dem')
            remove_parameter_by_key(the_doc, 'mode.fit.elev.geoid')
            remove_parameter_by_key(the_doc, 'mode.fit.elev.default')
            remove_parameter_by_key(the_doc, 'mode.fit.ref')
            defaultWrite('%s-%s' % (available_app, key), the_doc)
            the_list.append(the_doc)
        else:
            # key == 'fit': the extent comes from the reference image, so
            # the explicit start/size parameters are dropped.
            the_doc = split[key]
            remove_parameter_by_key(the_doc, 'startx')
            remove_parameter_by_key(the_doc, 'starty')
            remove_parameter_by_key(the_doc, 'sizex')
            remove_parameter_by_key(the_doc, 'sizey')
            defaultWrite('%s-%s' % (available_app, key), the_doc)
            the_list.append(split[key])
    return the_list
def getQuicklook(available_app, original_dom_document):
    """Drop the channel-list parameter and write the app otherwise unchanged."""
    root = original_dom_document
    remove_parameter_by_key(root, 'cl')
    defaultWrite(available_app, root)
    return [root]
def getRigidTransformResample(available_app, original_dom_document):
    """Emit one application per transform type (id, rotation, translation)."""
    documents = []
    for kind, doc in split_by_choice(original_dom_document, 'transform.type').items():
        defaultWrite('%s-%s' % (available_app, kind), doc)
        documents.append(doc)
    return documents
def getHomologousPointsExtraction(available_app, original_dom_document):
    """Emit one application per extraction mode."""
    return defaultSplit(available_app, original_dom_document, 'mode')
def getGenerateRPCSensorModel(available_app, original_dom_document):
    """Keep only the 'wgs' map projection and write the app as a single entry."""
    root = original_dom_document
    remove_dependent_choices(root, 'map', 'wgs')
    remove_other_choices(root, 'map', 'wgs')
    defaultWrite(available_app, root)
    return [root]
def getRefineSensorModel(available_app, original_dom_document):
    """Keep only the 'wgs' map projection and write the app as a single entry."""
    root = original_dom_document
    remove_dependent_choices(root, 'map', 'wgs')
    remove_other_choices(root, 'map', 'wgs')
    defaultWrite(available_app, root)
    return [root]
def getSegmentation(available_app, original_dom_document):
    """
    Keep only the vector output mode (the 'raster' choice is removed) and
    emit one application per segmentation filter (cc, edison, meanshift,
    mprofiles, watershed).
    """
    root = original_dom_document
    remove_choice(root, 'mode', 'raster')
    remove_independent_choices(root, 'mode', 'raster')
    documents = []
    for filter_name, doc in split_by_choice(root, 'filter').items():
        defaultWrite('%s-%s' % (available_app, filter_name), doc)
        documents.append(doc)
    return documents
def getKMeansClassification(available_app, original_dom_document):
    """Drop the random-seed parameter and write the app otherwise unchanged."""
    root = original_dom_document
    remove_parameter_by_key(root, 'rand')
    defaultWrite(available_app, root)
    return [root]
def getTrainSVMImagesClassifier(available_app, original_dom_document):
    """Drop the random-seed parameter and write the app otherwise unchanged."""
    root = original_dom_document
    remove_parameter_by_key(root, 'rand')
    defaultWrite(available_app, root)
    return [root]
def getComputeConfusionMatrix(available_app, original_dom_document):
    """
    Split by reference type (raster, vector), writing one application per
    choice.
    """
    the_root = original_dom_document
    split = split_by_choice(the_root, 'ref')
    the_list = []
    for key in split:
        defaultWrite('%s-%s' % (available_app, key), split[key])
        the_list.append(split[key])
    # FIX: the original had an unreachable 'return [the_root]' after this
    # return, plus commented-out experiments; both have been removed.
    return the_list
def getOpticalCalibration(available_app, original_dom_document):
    """
    Keep only the 'toa' calibration level and drop every atmospheric
    correction parameter (those only apply to the removed 'toc' level).
    """
    root = original_dom_document
    remove_independent_choices(root, 'level', 'toc')
    remove_choice(root, 'level', 'toc')
    for key in ('atmo.aerosol', 'atmo.oz', 'atmo.wa', 'atmo.pressure',
                'atmo.opt', 'atmo.aeronet', 'radius'):
        remove_parameter_by_key(root, key)
    defaultWrite(available_app, root)
    return [root]
def getSarRadiometricCalibration(available_app, original_dom_document):
    """Write the application unchanged.

    TODO: before doing anything more, check support for SAR data in QGIS.
    """
    defaultWrite(available_app, original_dom_document)
    return [original_dom_document]
def getSmoothing(available_app, original_dom_document):
    """Emit one application per smoothing type (anidif, gaussian, mean)."""
    documents = []
    for kind, doc in split_by_choice(original_dom_document, 'type').items():
        defaultWrite('%s-%s' % (available_app, kind), doc)
        documents.append(doc)
    return documents
def getColorMapping(available_app, original_dom_document):
    """
    Keep only the label-to-color operation and emit one application per
    mapping method (custom, continuous, optimal, image).  For each method,
    the parameters belonging to every other method are removed.
    """
    root = original_dom_document
    remove_independent_choices(root, 'op', 'colortolabel')
    remove_choice(root, 'op', 'colortolabel')

    # Parameters owned by each method; a given split document keeps only
    # its own method's parameters.
    method_parameters = {
        'custom': ('method.custom.lut',),
        'continuous': ('method.continuous.lut', 'method.continuous.min',
                       'method.continuous.max'),
        'optimal': ('method.optimal.background',),
        'image': ('method.image.in', 'method.image.low', 'method.image.up'),
    }

    documents = []
    for method, doc in split_by_choice(root, 'method').items():
        for other, parameters in method_parameters.items():
            if other == method:
                continue
            for parameter in parameters:
                remove_parameter_by_key(doc, parameter)
        defaultWrite('%s-%s' % (available_app, method), doc)
        documents.append(doc)
    return documents
def getFusionOfClassifications(available_app, original_dom_document):
    """
    Split the FusionOfClassifications descriptor by its fusion method
    (dempstershafer, majorityvoting), write one descriptor per variant
    and return the list of variants.
    """
    variants = split_by_choice(original_dom_document, 'method')
    result = []
    for method in variants:
        doc = variants[method]
        defaultWrite('%s-%s' % (available_app, method), doc)
        result.append(doc)
    return result
def getTrainImagesClassifier(available_app, original_dom_document):
    """
    Split the TrainImagesClassifier descriptor by classifier
    (ann, bayes, boost, dt, gbt, knn, libsvm, rf, svm).

    The GEOID/DEM parameters are removed first: they are not updated at
    the creation of the OTB algorithms when QGIS is launched, their
    values come from the settings instead.
    """
    doc = original_dom_document
    deleteGeoidSrtm(doc)
    variants = split_by_choice(doc, 'classifier')
    result = []
    for classifier in variants:
        defaultWrite('%s-%s' % (available_app, classifier), variants[classifier])
        result.append(variants[classifier])
    return result
def getLineSegmentDetection(available_app, original_dom_document):
    """
    Strip the elevation parameters (default height, GEOID, DEM) from the
    LineSegmentDetection descriptor: they are not updated at the creation
    of the OTB algorithms when QGIS is launched, their values come from
    the settings instead.
    """
    doc = original_dom_document
    for elev_key in ('elev.default', 'elev.geoid', 'elev.dem'):
        remove_parameter_by_key(doc, elev_key)
    defaultWrite(available_app, doc)
    return [doc]
def getImageEnvelope(available_app, original_dom_document):
    """
    Strip the elevation parameters (default height, GEOID, DEM) from the
    ImageEnvelope descriptor: they are not updated at the creation of the
    OTB algorithms when QGIS is launched, their values come from the
    settings instead.
    """
    doc = original_dom_document
    for elev_key in ('elev.default', 'elev.geoid', 'elev.dem'):
        remove_parameter_by_key(doc, elev_key)
    defaultWrite(available_app, doc)
    return [doc]
def getReadImageInfo(available_app, original_dom_document):
    """
    Adapt the ReadImageInfo descriptor by removing every parameter that
    is an *output* of the application (values computed from the input
    image at run time, not user inputs).
    """
    the_root = original_dom_document
    # All of these keys are produced by the application, so they must not
    # be exposed as editable parameters.
    output_keys = (
        'outkwl', 'indexx', 'indexy', 'sizex', 'sizey',
        'spacingx', 'spacingy', 'originx', 'originy',
        'estimatedgroundspacingx', 'estimatedgroundspacingy',
        'numberbands', 'sensor', 'id', 'time',
        'ullat', 'ullon', 'urlat', 'urlon',
        'lrlat', 'lrlon', 'lllat', 'lllon',
        'town', 'country',
        'rgb.r', 'rgb.g', 'rgb.b',
        'projectionref', 'keyword',
        'gcp.count', 'gcp.proj',
    )
    for key in output_keys:
        remove_parameter_by_key(the_root, key)
    defaultWrite(available_app, the_root)
    return [the_root]
def getComputeModulusAndPhase(available_app, original_dom_document):
    """
    Split the ComputeModulusAndPhase descriptor by the ``nbinput`` field
    and rename each resulting application: "OneEntry" for the single
    input variant (choice ``one``), "TwoEntries" otherwise.
    """
    the_root = original_dom_document
    split = split_by_choice(the_root, 'nbinput')
    the_list = []
    for key in split:
        # Derive the variant label from the choice instead of duplicating
        # the renaming logic in two identical branches.
        label = 'OneEntry' if key == 'one' else 'TwoEntries'
        the_doc = split[key]
        old_app_name = the_doc.find('key').text
        the_doc.find('key').text = '%s-%s' % (old_app_name, label)
        the_doc.find('longname').text = '%s (%s)' % (old_app_name, label)
        defaultWrite('%s-%s' % (available_app, label), the_doc)
        the_list.append(the_doc)
    return the_list
def getCompareImages(available_app, original_dom_document):
    """
    Remove ``mse``, ``mae`` and ``psnr`` from the CompareImages
    descriptor: they are outputs of the algorithm, not user inputs.
    """
    doc = original_dom_document
    for metric in ('mse', 'mae', 'psnr'):
        remove_parameter_by_key(doc, metric)
    defaultWrite(available_app, doc)
    return [doc]
def getRadiometricIndices(available_app, original_dom_document):
    """
    Drop the three LAI indices that are missing from the application,
    then write the default descriptor.
    """
    doc = original_dom_document
    for missing_index in ('laindvilog', 'lairefl', 'laindviformo'):
        remove_choice(doc, 'list', missing_index)
    defaultWrite(available_app, doc)
    return [doc]
def getConnectedComponentSegmentation(available_app, original_dom_document):
    """
    Strip the GEOID/DEM parameters (their values come from the QGIS
    settings, not from the descriptor) and write the default descriptor.
    """
    doc = original_dom_document
    deleteGeoidSrtm(doc)
    defaultWrite(available_app, doc)
    return [doc]
def getKmzExport(available_app, original_dom_document):
    """
    Strip the GEOID/DEM parameters (their values come from the QGIS
    settings, not from the descriptor) and write the default descriptor.
    """
    doc = original_dom_document
    deleteGeoidSrtm(doc)
    defaultWrite(available_app, doc)
    return [doc]
def getSuperimpose(available_app, original_dom_document):
    """
    Strip the GEOID/DEM parameters (their values come from the QGIS
    settings, not from the descriptor) and write the default descriptor.
    """
    doc = original_dom_document
    deleteGeoidSrtm(doc)
    defaultWrite(available_app, doc)
    return [doc]
def getStereoFramework(available_app, original_dom_document):
    """
    Strip the GEOID/DEM parameters (their values come from the QGIS
    settings, not from the descriptor) and write the default descriptor.
    """
    doc = original_dom_document
    deleteGeoidSrtm(doc)
    defaultWrite(available_app, doc)
    return [doc]
def getRasterization(available_app, original_dom_document):
    """
    Build two variants of the Rasterization descriptor:

    * ``image`` — uses a reference image, so the manual geometry
      parameters (size, EPSG code, origin, spacing) are removed;
    * ``manual`` — no reference image, so the ``im`` parameter is
      removed instead.

    Both variants are renamed, written out with ``defaultWrite`` and
    returned.
    """
    import copy
    rasterization_image = original_dom_document
    rasterization_manual = copy.deepcopy(original_dom_document)
    old_app_name = rasterization_image.find('key').text
    # In the "image" variant the geometry comes from the reference image,
    # so the explicit geometry parameters must not be exposed.
    for key in ('szx', 'szy', 'epsg', 'orx', 'ory', 'spx', 'spy'):
        remove_parameter_by_key(rasterization_image, key)
    # The "manual" variant has no reference image.
    remove_parameter_by_key(rasterization_manual, 'im')
    for doc, label in ((rasterization_image, 'image'),
                       (rasterization_manual, 'manual')):
        # Rename the app according to the variant and write its descriptor.
        doc.find('key').text = '%s-%s' % (old_app_name, label)
        doc.find('longname').text = '%s (%s)' % (old_app_name, label)
        defaultWrite('%s-%s' % (old_app_name, label), doc)
    return [rasterization_image, rasterization_manual]
def getVectorDataExtractROI(available_app, original_dom_document):
    """
    Strip the GEOID/DEM parameters (their values come from the QGIS
    settings, not from the descriptor) and write the default descriptor.
    """
    doc = original_dom_document
    deleteGeoidSrtm(doc)
    defaultWrite(available_app, doc)
    return [doc]
def getVectorDataReprojection(available_app, original_dom_document):
    """
    Strip the GEOID/DEM parameters, then split the descriptor by the
    output projection choice (``out.proj``).
    """
    doc = original_dom_document
    deleteGeoidSrtm(doc)
    return defaultSplit(available_app, doc, 'out.proj')
def getComputePolylineFeatureFromImage(available_app, original_dom_document):
    """
    Strip the GEOID/DEM parameters (their values come from the QGIS
    settings, not from the descriptor) and write the default descriptor.
    """
    doc = original_dom_document
    deleteGeoidSrtm(doc)
    defaultWrite(available_app, doc)
    return [doc]
def getDespeckle(available_app, original_dom_document):
    """
    Split the Despeckle descriptor by its speckle ``filter`` choice and
    return the resulting variants.
    """
    return defaultSplit(available_app, original_dom_document, 'filter')
def deleteGeoidSrtm(doc):
    """
    Remove the ``elev.geoid`` and ``elev.dem`` parameters from *doc*.

    These parameters are not updated at the creation of the OTB
    algorithms when QGIS is launched; their values are picked from the
    settings instead, so they must not be exposed in the descriptor.
    """
    for suffix in ("elev.geoid", "elev.dem"):
        # Materialize the matches first so the document is not mutated
        # while it is being searched.
        doomed = [item for item in doc.findall('.//parameter')
                  if item.find('key').text.endswith(suffix)]
        for item in doomed:
            doc.remove(item)
| AsgerPetersen/QGIS | python/plugins/processing/algs/otb/maintenance/OTBSpecific_XMLcreation.py | Python | gpl-2.0 | 29,063 | [
"Gaussian"
] | 85c566b95051d29c0b1ffa16559a23f4178d6149e98a24f666c8fc0f1446b624 |
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import numpy as np
import espressomd
from espressomd import thermostat
from espressomd import electrostatics
from samples_common import open
print("""
=======================================================
= debye_hueckel.py =
=======================================================
Program Information:""")
print(espressomd.features())
dev = "cpu"
# Constants
#############################################################
N_A = 6.022e23          # Avogadro's number [1/mol]
pi = 3.14159265359
# System parameters
#############################################################
box_l = 10              # cubic box edge length (reduced units)
# Molar salt concentration
mol_dens = 0.1
# Number density of ions
num_dens = mol_dens * N_A
# Convert to MD units with lj_sig = 7.14 Angstrom
num_dens = num_dens * 3.64e-25
volume = box_l * box_l * box_l
n_part = int(volume * num_dens)   # total number of ions in the box
# Interaction parameters (repulsive Lennard Jones)
#############################################################
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 1.12246        # 2^(1/6)*sigma: purely repulsive (WCA) cutoff
lj_cap = 20             # initial force cap used during warmup
# Integration parameters
#############################################################
system = espressomd.System(box_l=[box_l]*3)
system.set_random_state_PRNG()
np.random.seed(seed=system.seed)
system.time_step = 0.01
system.cell_system.skin = 0.4
thermostat.Thermostat().set_langevin(1.0, 1.0)
# warmup integration (with capped LJ potential)
warm_steps = 100
warm_n_times = 30
# do the warmup until the particles have at least the distance min_dist
min_dist = 0.9
# integration
int_steps = 1000
int_n_times = 10
#############################################################
#  Setup System                                             #
#############################################################
# Interaction setup
#############################################################
system.non_bonded_inter[0, 0].lennard_jones.set_params(
    epsilon=lj_eps, sigma=lj_sig,
    cutoff=lj_cut, shift="auto")
system.force_cap = lj_cap
print("LJ-parameters:")
print(system.non_bonded_inter[0, 0].lennard_jones.get_params())
# Particle setup
#############################################################
# Place all ions at random positions, then assign alternating charges so
# that even ids are anions (type 1) and odd ids are cations (type 2).
for i in range(n_part):
    system.part.add(id=i, pos=np.random.random(3) * system.box_l)
for i in range(n_part // 2):
    system.part[2 * i].q = -1.0
    system.part[2 * i].type = 1
    system.part[2 * i + 1].q = 1.0
    system.part[2 * i + 1].type = 2
# Activating the Debye-Hueckel interaction
# The Coulomb prefactor is set to one. Assuming the solvent is water, this
# means that lj_sig is 0.714 nm in SI units.
coulomb_prefactor =1
# inverse Debye length for 1:1 electrolyte in water at room temperature (nm)
dh_kappa = np.sqrt(mol_dens) / 0.304
# convert to MD units
dh_kappa = dh_kappa / 0.714
dh = electrostatics.DH(
    prefactor=coulomb_prefactor, kappa=dh_kappa, r_cut=int(5 / dh_kappa))
system.actors.add(dh)
print(system.actors)
system.analysis.dist_to(0)
print("Simulate {} monovalent salt in a cubic simulation box {} at molar concentration {}."
      .format(n_part, box_l, mol_dens).strip())
print("Interactions:\n")
act_min_dist = system.analysis.min_dist()
print("Start with minimal distance {}".format(act_min_dist))
system.cell_system.max_num_cells = 2744
#############################################################
#  Warmup Integration                                       #
#############################################################
# open Observable file
obs_file = open("pydebye_hueckel.obs", "w")
obs_file.write("# Time\tE_tot\tE_kin\tE_pot\n")
print("""
Start warmup integration:
At maximum {} times {} steps
Stop if minimal distance is larger than {}
""".strip().format(warm_n_times, warm_steps, min_dist))
# set LJ cap
lj_cap = 20
system.force_cap = lj_cap
print(system.non_bonded_inter[0, 0].lennard_jones)
# Warmup Integration Loop
# Integrate with a capped LJ force until particles are at least min_dist
# apart (or the iteration budget is exhausted), raising the cap each pass.
i = 0
while (i < warm_n_times and act_min_dist < min_dist):
    system.integrator.run(steps=warm_steps)
    # Warmup criterion
    act_min_dist = system.analysis.min_dist()
    i += 1
    # Increase LJ cap
    lj_cap = lj_cap + 10
    system.force_cap = lj_cap
import pprint
pprint.pprint(system.cell_system.get_state(), width=1)
# pprint.pprint(system.part.__getstate__(), width=1)
pprint.pprint(system.__getstate__())
# write parameter file
set_file = open("pydebye_hueckel.set", "w")
set_file.write("box_l %s\ntime_step %s\nskin %s\n" %
               (box_l, system.time_step, system.cell_system.skin))
#############################################################
#      Integration                                          #
#############################################################
print("\nStart integration: run %d times %d steps" % (int_n_times, int_steps))
# remove force capping
lj_cap = 0
system.force_cap = lj_cap
print(system.non_bonded_inter[0, 0].lennard_jones)
# print(initial energies)
energies = system.analysis.energy()
print(energies)
j = 0
# Production run: log the energies to the observable file after each batch.
for i in range(0, int_n_times):
    print("run %d at time=%f " % (i, system.time))
    system.integrator.run(steps=int_steps)
    energies = system.analysis.energy()
    print(energies)
    obs_file.write('{ time %s } %s\n' % (system.time, energies))
# write end configuration
end_file = open("pydebye_hueckel.end", "w")
end_file.write("{ time %f } \n { box_l %f }\n" % (system.time, box_l))
# Terminate the header line so the first particle record starts on its
# own line.
end_file.write("{ particles {type q pos} }\n")
# Write every particle; the previous range(n_part - 1) dropped the last one.
for i in range(n_part):
    end_file.write("%s\t%s\t%s\n" %
                   (system.part[i].type, system.part[i].q, system.part[i].pos))
obs_file.close()
set_file.close()
end_file.close()
# terminate program
print("\nFinished.")
| KonradBreitsprecher/espresso | samples/debye_hueckel.py | Python | gpl-3.0 | 6,305 | [
"ESPResSo"
] | ffe655962b5eab08db8c30c233f0ce5cb3e42e14d53a0a0228fc52644c31f473 |
"""
Viewing MRI data with cut plane and iso surface.
This example downloads an MRI scan, turns it into a 3D numpy array and
visualizes it.
First we extract some internal structures of the brain by defining a
volume of interest around them, and using iso surfaces.
Then we display two cut planes to show the raw MRI data itself.
Finally we display the outer surface, but we restrict it to volume of
interest to leave a cut for the cut planes.
For an example of feature extraction from MRI data using Mayavi and vtk,
see :ref:`example_tvtk_segmentation`.
"""
### Download the data, if not already on disk #################################
import os
if not os.path.exists('MRbrain.tar.gz'):
# Download the data
import urllib
print "Downloading data, Please Wait (7.8MB)"
opener = urllib.urlopen(
'http://graphics.stanford.edu/data/voldata/MRbrain.tar.gz')
open('MRbrain.tar.gz', 'wb').write(opener.read())
# Extract the data
import tarfile
tar_file = tarfile.open('MRbrain.tar.gz')
try:
os.mkdir('mri_data')
except:
pass
tar_file.extractall('mri_data')
tar_file.close()
### Read the data in a numpy 3D array #########################################
import numpy as np
# Each MRbrain.<i> file is one axial slice of big-endian 16-bit integers.
data = np.array([np.fromfile(os.path.join('mri_data', 'MRbrain.%i' % i),
                             dtype='>u2') for i in range(1, 110)])
data.shape = (109, 256, 256)
data = data.T
# Display the data ############################################################
from mayavi import mlab
mlab.figure(bgcolor=(0, 0, 0), size=(400, 400))
src = mlab.pipeline.scalar_field(data)
# Our data is not equally spaced in all directions:
src.spacing = [1, 1, 1.5]
src.update_image_data = True
# Extract some inner structures: the ventricles and the inter-hemisphere
# fibers. We define a volume of interest (VOI) that restricts the
# iso-surfaces to the inner of the brain. We do this with the ExtractGrid
# filter.
blur = mlab.pipeline.user_defined(src, filter='ImageGaussianSmooth')
voi = mlab.pipeline.extract_grid(blur)
voi.set(x_min=125, x_max=193, y_min=92, y_max=125, z_min=34, z_max=75)
mlab.pipeline.iso_surface(voi, contours=[1610, 2480], colormap='Spectral')
# Add two cut planes to show the raw MRI data. We use a threshold filter
# to remove cut the planes outside the brain.
thr = mlab.pipeline.threshold(src, low=1120)
cut_plane = mlab.pipeline.scalar_cut_plane(thr,
                                           plane_orientation='y_axes',
                                           colormap='black-white',
                                           vmin=1400,
                                           vmax=2600)
cut_plane.implicit_plane.origin = (136, 111.5, 82)
cut_plane.implicit_plane.widget.enabled = False
cut_plane2 = mlab.pipeline.scalar_cut_plane(thr,
                                            plane_orientation='z_axes',
                                            colormap='black-white',
                                            vmin=1400,
                                            vmax=2600)
cut_plane2.implicit_plane.origin = (136, 111.5, 82)
cut_plane2.implicit_plane.widget.enabled = False
# Extract two views of the outside surface. We need to define VOIs in
# order to leave out a cut in the head.
voi2 = mlab.pipeline.extract_grid(src)
voi2.set(y_min=112)
outer = mlab.pipeline.iso_surface(voi2, contours=[1776, ],
                                  color=(0.8, 0.7, 0.6))
voi3 = mlab.pipeline.extract_grid(src)
voi3.set(y_max=112, z_max=53)
outer3 = mlab.pipeline.iso_surface(voi3, contours=[1776, ],
                                   color=(0.8, 0.7, 0.6))
# Position the camera and roll so the cut faces the viewer.
mlab.view(-125, 54, 326, (145.5, 138, 66.5))
mlab.roll(-175)
mlab.show()
# Clean up the extracted data directory.
import shutil
shutil.rmtree('mri_data')
| liulion/mayavi | examples/mayavi/mlab/mri.py | Python | bsd-3-clause | 3,664 | [
"Mayavi",
"VTK"
] | 68d398e2e06b7b03f7b5f9c1b381c6f8d76dcee0dbe385418eda9510a5d110a4 |
"""
gribTools
======
Import GRIB2 data (must be converted to netCDF) and return a dictionary of variables.
gribImport
gribLevs
"""
import numpy as np
import warnings
warnings.filterwarnings("ignore",category=FutureWarning)
import xarray as xr
from datetime import datetime as dt
def gribImport(gribFile):
    """
    Import select variables from a GRIB2 (netCDF) file, convert them to
    masked arrays, and return these data within a dictionary.

    Parameters
    ----------
    gribFile : string
        Path and filename of the GRIB2 netCDF file.

    Returns
    -------
    gribDict : dict
        Dictionary containing all imported GRIB2 data.
    """
    dataset = xr.open_dataset(gribFile)
    gribDict = {
        'lon': dataset.gridlon_0.to_masked_array().squeeze(),
        'lat': dataset.gridlat_0.to_masked_array().squeeze(),
        'geoHght': dataset.HGT_P0_L100_GLC0.to_masked_array().squeeze(),
        # Convert pressure levels from Pa to hPa.
        'presLev': dataset.lv_ISBL0.to_masked_array()/100,
        'u': dataset.UGRD_P0_L100_GLC0.to_masked_array().squeeze(),
        'v': dataset.VGRD_P0_L100_GLC0.to_masked_array().squeeze(),
        # Timestamp is embedded in the grib_source attribute.
        'time': dt.strptime(dataset.grib_source[8:21], '%Y%m%d_%H%M'),
        'allData': dataset,
    }
    return gribDict
def gribLevs(levs, geoHght):
    """
    Search through the geopotential height variable from model data
    to find the indices pertaining to the closest match to the levels
    which were given. Geopotential heights for each vertical level will
    be given as an average of all heights at that level.

    Parameters
    ----------
    levs : array-like
        List or 1-D array of levels (km) to be compared against.
    geoHght : array
        3-D array of geopotential heights (m) to search through.
        These are converted to km.

    Returns
    -------
    geoHtIx : int array
        1-D array containing vertical level indices which most closely
        match the input array of levels to be plotted.
    """
    # Convert m -> km to match the units of `levs`.
    geoHght = geoHght / 1000
    # Mean geopotential height of each vertical level.
    geoHtAvg = np.array([np.mean(geoHght[ix, :, :])
                         for ix in range(geoHght.shape[0])])
    geoHtIx = np.zeros([len(levs), ], dtype=int)
    for iL in range(len(levs)):
        # argmin gives the index of the closest mean height directly and
        # avoids the removed `np.int` alias (NumPy >= 1.24) used before.
        geoHtIx[iL] = int(np.argmin(np.abs(geoHtAvg - levs[iL])))
    return geoHtIx
"NetCDF"
] | d737984745f31a87b55b7de68565d3e83fbca4efbc4b02af33ce4f187064de02 |
#!/usr/bin/env python
import sys, traceback
import pysam
import numpy
from scipy.stats import histogram
import matplotlib.pyplot as plt
# Exactly one argument (the BAM file) is required.
if len(sys.argv) != 2:
    print 'Usage: [bam file]'
    sys.exit(1)
samfile = pysam.AlignmentFile(sys.argv[1], 'rb')
# Per-read tuples of (mapping quality, matched length, read length) plus
# global flag counters.
info = []
counts = {'sec': 0, 'unmap': 0, 'rev': 0, 'dup': 0, 'qcf': 0}
for read in samfile.fetch():
    try:
        if read.cigarstring is not None:
            # CIGAR op code 0 is 'M' (alignment match) in pysam's encoding.
            match_len = sum([cig_i[1] for cig_i in read.cigartuples if cig_i[0] == 0])
            read_len = sum([cig_i[1] for cig_i in read.cigartuples])
        else:
            # Unaligned read: lengths are unknown (become NaN later).
            match_len, read_len = (None, None)
        counts['sec'] += int(read.is_secondary)
        counts['unmap'] += int(read.is_unmapped)
        counts['rev'] += int(read.is_reverse)
        counts['dup'] += int(read.is_duplicate)
        counts['qcf'] += int(read.is_qcfail)
        info.append((read.mapping_quality, match_len, read_len))
    except TypeError as e:
        print 'exception {0}'.format(e)
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
info = numpy.array(info, dtype=numpy.float)
print 'Counts'
print '\taligned reads= {0}\n\tsecondary= {1:.0f}\n\tunmapped= {2:.0f}\n\tdup= {2:.0f}\n\tqcfail= {2:.0f}'.format(
len(info), counts['sec'], counts['unmap'], counts['dup'], counts['qcf'])
dat = info[:, 0]
dat = dat[~numpy.isnan(dat)]
print 'MapQ stats\n\tmean= {0:.2f} sd= {1:.2f}'.format(numpy.mean(dat), numpy.std(dat))
print '\t50th percentile mapQ= {0}'.format(numpy.percentile(dat, q=50))
#plt.figure(1)
#plt.subplot(211)
#plt.title('Mapping quality distribution')
#plt.hist(dat, bins=20, range=(0, 60))
dat = info[:, 1:3]
dat = dat[~numpy.isnan(dat).any(1)]
rellen = dat[:, 0] / dat[:, 1]
print 'Matching length\n\tmean= {0:.2f} sd= {1:.2f}'.format(numpy.mean(dat[:, 0]), numpy.std(dat[:, 0]))
print 'Fully aligning reads\n\tcount= {0}\n\t50th percentile= {1}'.format(numpy.sum(rellen >= 1.0),
numpy.percentile(dat[:, 0], q=50))
#plt.subplot(212)
#plt.title('Distribution of relative mapped read length')
#plt.hist(rellen, bins=20, range=(0, 1))
#plt.show()
| koadman/proxigenomics | simulation/pipeline/bin/info_sam.py | Python | gpl-2.0 | 2,165 | [
"pysam"
] | 19a7a0eeeac9b3342142a89b4588c30db13a3305ef74afc0183fce8491de4250 |
"""
Tests for discussion pages
"""
from uuid import uuid4
from .helpers import UniqueCourseTest
from ..pages.lms.auto_auth import AutoAuthPage
from ..pages.lms.courseware import CoursewarePage
from ..pages.lms.discussion import (
DiscussionTabSingleThreadPage,
InlineDiscussionPage,
InlineDiscussionThreadPage,
DiscussionUserProfilePage,
DiscussionTabHomePage,
DiscussionSortPreferencePage,
)
from ..fixtures.course import CourseFixture, XBlockFixtureDesc
from ..fixtures.discussion import (
SingleThreadViewFixture,
UserProfileViewFixture,
SearchResultFixture,
Thread,
Response,
Comment,
SearchResult,
)
class DiscussionResponsePaginationTestMixin(object):
    """
    A mixin containing tests for response pagination for use by both inline
    discussion and the discussion tab
    """
    def setup_thread(self, num_responses, **thread_kwargs):
        """
        Create a test thread with the given number of responses, passing all
        keyword arguments through to the Thread fixture, then invoke
        setup_thread_page.
        """
        # The host class is expected to provide discussion_id and
        # setup_thread_page().
        thread_id = "test_thread_{}".format(uuid4().hex)
        thread_fixture = SingleThreadViewFixture(
            Thread(id=thread_id, commentable_id=self.discussion_id, **thread_kwargs)
        )
        for i in range(num_responses):
            thread_fixture.addResponse(Response(id=str(i), body=str(i)))
        thread_fixture.push()
        self.setup_thread_page(thread_id)
    def assert_response_display_correct(self, response_total, displayed_responses):
        """
        Assert that various aspects of the display of responses are all correct:
        * Text indicating total number of responses
        * Presence of "Add a response" button
        * Number of responses actually displayed
        * Presence and text of indicator of how many responses are shown
        * Presence and text of button to load more responses
        """
        self.assertEqual(
            self.thread_page.get_response_total_text(),
            str(response_total) + " responses"
        )
        # The "Add a response" button is hidden for an empty thread.
        self.assertEqual(self.thread_page.has_add_response_button(), response_total != 0)
        self.assertEqual(self.thread_page.get_num_displayed_responses(), displayed_responses)
        # "Showing ..." text: absent when empty, "all" when everything is
        # visible, otherwise "first N".
        self.assertEqual(
            self.thread_page.get_shown_responses_text(),
            (
                None if response_total == 0 else
                "Showing all responses" if response_total == displayed_responses else
                "Showing first {} responses".format(displayed_responses)
            )
        )
        # Load button: absent when everything is shown; "all" when fewer
        # than 100 remain, otherwise "next 100".
        self.assertEqual(
            self.thread_page.get_load_responses_button_text(),
            (
                None if response_total == displayed_responses else
                "Load all responses" if response_total - displayed_responses < 100 else
                "Load next 100 responses"
            )
        )
    def test_pagination_no_responses(self):
        self.setup_thread(0)
        self.assert_response_display_correct(0, 0)
    def test_pagination_few_responses(self):
        self.setup_thread(5)
        self.assert_response_display_correct(5, 5)
    def test_pagination_two_response_pages(self):
        # First page shows 25; one click loads the rest.
        self.setup_thread(50)
        self.assert_response_display_correct(50, 25)
        self.thread_page.load_more_responses()
        self.assert_response_display_correct(50, 50)
    def test_pagination_exactly_two_response_pages(self):
        self.setup_thread(125)
        self.assert_response_display_correct(125, 25)
        self.thread_page.load_more_responses()
        self.assert_response_display_correct(125, 125)
    def test_pagination_three_response_pages(self):
        self.setup_thread(150)
        self.assert_response_display_correct(150, 25)
        self.thread_page.load_more_responses()
        self.assert_response_display_correct(150, 125)
        self.thread_page.load_more_responses()
        self.assert_response_display_correct(150, 150)
    def test_add_response_button(self):
        self.setup_thread(5)
        self.assertTrue(self.thread_page.has_add_response_button())
        self.thread_page.click_add_response_button()
    def test_add_response_button_closed_thread(self):
        # Closed threads must not offer the "Add a response" button.
        self.setup_thread(5, closed=True)
        self.assertFalse(self.thread_page.has_add_response_button())
class DiscussionTabSingleThreadTest(UniqueCourseTest, DiscussionResponsePaginationTestMixin):
    """
    Tests for the discussion page displaying a single thread
    """
    def setUp(self):
        super(DiscussionTabSingleThreadTest, self).setUp()
        # Give each run its own commentable id so fixtures never collide.
        self.discussion_id = "test_discussion_%s" % uuid4().hex
        # Install the course and log a test user into it.
        CourseFixture(**self.course_info).install()
        AutoAuthPage(self.browser, course_id=self.course_id).visit()
    def setup_thread_page(self, thread_id):
        """Open the single-thread page for `thread_id` (mixin hook)."""
        page = DiscussionTabSingleThreadPage(self.browser, self.course_id, thread_id)
        self.thread_page = page  # pylint:disable=W0201
        page.visit()
class DiscussionCommentDeletionTest(UniqueCourseTest):
    """
    Tests for deleting comments displayed beneath responses in the single thread view.
    """
    def setUp(self):
        super(DiscussionCommentDeletionTest, self).setUp()
        # Create a course to register for
        CourseFixture(**self.course_info).install()
    def setup_user(self, roles=None):
        """
        Create and log in a test user, optionally granting the given list
        of discussion roles (e.g. ["Moderator"]).
        """
        # `roles=None` avoids the mutable-default-argument pitfall of the
        # previous `roles=[]` signature.
        roles_str = ','.join(roles or [])
        self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
    def setup_view(self):
        """
        Push a thread fixture with one response carrying two comments:
        one authored by another user and one by the test user.
        """
        view = SingleThreadViewFixture(Thread(id="comment_deletion_test_thread"))
        view.addResponse(
            Response(id="response1"),
            [Comment(id="comment_other_author", user_id="other"), Comment(id="comment_self_author", user_id=self.user_id)])
        view.push()
    def test_comment_deletion_as_student(self):
        # A student may delete only his or her own comment.
        self.setup_user()
        self.setup_view()
        page = DiscussionTabSingleThreadPage(self.browser, self.course_id, "comment_deletion_test_thread")
        page.visit()
        self.assertTrue(page.is_comment_deletable("comment_self_author"))
        self.assertTrue(page.is_comment_visible("comment_other_author"))
        self.assertFalse(page.is_comment_deletable("comment_other_author"))
        page.delete_comment("comment_self_author")
    def test_comment_deletion_as_moderator(self):
        # A moderator may delete anybody's comment.
        self.setup_user(roles=['Moderator'])
        self.setup_view()
        page = DiscussionTabSingleThreadPage(self.browser, self.course_id, "comment_deletion_test_thread")
        page.visit()
        self.assertTrue(page.is_comment_deletable("comment_self_author"))
        self.assertTrue(page.is_comment_deletable("comment_other_author"))
        page.delete_comment("comment_self_author")
        page.delete_comment("comment_other_author")
class DiscussionCommentEditTest(UniqueCourseTest):
"""
Tests for editing comments displayed beneath responses in the single thread view.
"""
def setUp(self):
super(DiscussionCommentEditTest, self).setUp()
# Create a course to register for
CourseFixture(**self.course_info).install()
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self):
view = SingleThreadViewFixture(Thread(id="comment_edit_test_thread"))
view.addResponse(
Response(id="response1"),
[Comment(id="comment_other_author", user_id="other"), Comment(id="comment_self_author", user_id=self.user_id)])
view.push()
def edit_comment(self, page, comment_id):
page.start_comment_edit(comment_id)
new_comment = "edited body"
page.set_comment_editor_value(comment_id, new_comment)
page.submit_comment_edit(comment_id, new_comment)
def test_edit_comment_as_student(self):
self.setup_user()
self.setup_view()
page = DiscussionTabSingleThreadPage(self.browser, self.course_id, "comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_visible("comment_other_author"))
self.assertFalse(page.is_comment_editable("comment_other_author"))
self.edit_comment(page, "comment_self_author")
def test_edit_comment_as_moderator(self):
self.setup_user(roles=["Moderator"])
self.setup_view()
page = DiscussionTabSingleThreadPage(self.browser, self.course_id, "comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_editable("comment_other_author"))
self.edit_comment(page, "comment_self_author")
self.edit_comment(page, "comment_other_author")
def test_cancel_comment_edit(self):
self.setup_user()
self.setup_view()
page = DiscussionTabSingleThreadPage(self.browser, self.course_id, "comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
page.set_comment_editor_value("comment_self_author", "edited body")
page.cancel_comment_edit("comment_self_author", original_body)
def test_editor_visibility(self):
    """Only one editor should be visible at a time within a single response"""
    self.setup_user(roles=["Moderator"])
    self.setup_view()
    page = DiscussionTabSingleThreadPage(self.browser, self.course_id, "comment_edit_test_thread")
    page.visit()
    self.assertTrue(page.is_comment_editable("comment_self_author"))
    self.assertTrue(page.is_comment_editable("comment_other_author"))
    self.assertTrue(page.is_add_comment_visible("response1"))
    original_body = page.get_comment_body("comment_self_author")
    # Opening a comment editor hides the "add comment" form.
    page.start_comment_edit("comment_self_author")
    self.assertFalse(page.is_add_comment_visible("response1"))
    self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
    page.set_comment_editor_value("comment_self_author", "edited body")
    # Opening a second comment editor closes the first and discards its edits.
    page.start_comment_edit("comment_other_author")
    self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
    self.assertTrue(page.is_comment_editor_visible("comment_other_author"))
    self.assertEqual(page.get_comment_body("comment_self_author"), original_body)
    # Opening the response editor closes any open comment editor.
    page.start_response_edit("response1")
    self.assertFalse(page.is_comment_editor_visible("comment_other_author"))
    self.assertTrue(page.is_response_editor_visible("response1"))
    original_body = page.get_comment_body("comment_self_author")
    # And opening a comment editor closes the response editor again.
    page.start_comment_edit("comment_self_author")
    self.assertFalse(page.is_response_editor_visible("response1"))
    self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
    # Cancelling restores the original body and the "add comment" form.
    page.cancel_comment_edit("comment_self_author", original_body)
    self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
    self.assertTrue(page.is_add_comment_visible("response1"))
class InlineDiscussionTest(UniqueCourseTest, DiscussionResponsePaginationTestMixin):
    """
    Tests for inline discussions
    """

    def setUp(self):
        super(InlineDiscussionTest, self).setUp()
        self.discussion_id = "test_discussion_{}".format(uuid4().hex)
        # Build a course containing a single inline discussion component.
        CourseFixture(**self.course_info).add_children(
            XBlockFixtureDesc("chapter", "Test Section").add_children(
                XBlockFixtureDesc("sequential", "Test Subsection").add_children(
                    XBlockFixtureDesc("vertical", "Test Unit").add_children(
                        XBlockFixtureDesc(
                            "discussion",
                            "Test Discussion",
                            metadata={"discussion_id": self.discussion_id}
                        )
                    )
                )
            )
        ).install()
        AutoAuthPage(self.browser, course_id=self.course_id).visit()
        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.courseware_page.visit()
        self.discussion_page = InlineDiscussionPage(self.browser, self.discussion_id)

    def setup_thread_page(self, thread_id):
        """Expand the inline discussion and open the (single) thread with the given id."""
        self.discussion_page.expand_discussion()
        self.assertEqual(self.discussion_page.get_num_displayed_threads(), 1)
        self.thread_page = InlineDiscussionThreadPage(self.browser, thread_id)  # pylint:disable=W0201
        self.thread_page.expand()

    def test_initial_render(self):
        """The inline discussion starts out collapsed."""
        self.assertFalse(self.discussion_page.is_discussion_expanded())

    def test_expand_discussion_empty(self):
        """Expanding an empty discussion shows no threads."""
        self.discussion_page.expand_discussion()
        self.assertEqual(self.discussion_page.get_num_displayed_threads(), 0)

    def check_anonymous_to_peers(self, is_staff):
        """Anonymous-to-peers threads hide the author from peers but not from staff."""
        thread = Thread(id=uuid4().hex, anonymous_to_peers=True, commentable_id=self.discussion_id)
        thread_fixture = SingleThreadViewFixture(thread)
        thread_fixture.push()
        self.setup_thread_page(thread.get("id"))
        self.assertEqual(self.thread_page.is_thread_anonymous(), not is_staff)

    def test_anonymous_to_peers_threads_as_staff(self):
        AutoAuthPage(self.browser, course_id=self.course_id, roles="Administrator").visit()
        self.courseware_page.visit()
        self.check_anonymous_to_peers(True)

    def test_anonymous_to_peers_threads_as_peer(self):
        self.check_anonymous_to_peers(False)
class DiscussionUserProfileTest(UniqueCourseTest):
    """
    Tests for user profile page in discussion tab.
    """

    PAGE_SIZE = 20  # django_comment_client.forum.views.THREADS_PER_PAGE
    PROFILED_USERNAME = "profiled-user"

    def setUp(self):
        super(DiscussionUserProfileTest, self).setUp()
        CourseFixture(**self.course_info).install()
        # The following line creates a user enrolled in our course, whose
        # threads will be viewed, but not the one who will view the page.
        # It isn't necessary to log them in, but using the AutoAuthPage
        # saves a lot of code.
        self.profiled_user_id = AutoAuthPage(
            self.browser,
            username=self.PROFILED_USERNAME,
            course_id=self.course_id
        ).visit().get_user_id()
        # now create a second user who will view the profile.
        self.user_id = AutoAuthPage(
            self.browser,
            course_id=self.course_id
        ).visit().get_user_id()

    def check_pages(self, num_threads):
        """
        Exercise the profile page's pagination for a profile holding
        num_threads threads: verify the shown threads, the clickable page
        numbers, and the prev/next buttons on every page, walking all the
        way up and then back down the page range.
        """
        # set up the stub server to return the desired amount of thread results
        threads = [Thread(id=uuid4().hex) for _ in range(num_threads)]
        UserProfileViewFixture(threads).push()
        # navigate to default view (page 1)
        page = DiscussionUserProfilePage(
            self.browser,
            self.course_id,
            self.profiled_user_id,
            self.PROFILED_USERNAME
        )
        page.visit()
        current_page = 1
        # NOTE: Python 2 integer division — this is the ceiling of
        # num_threads / PAGE_SIZE, with a minimum of one page.
        total_pages = max(num_threads - 1, 1) / self.PAGE_SIZE + 1
        all_pages = range(1, total_pages + 1)

        def _check_page():
            # ensure the page being displayed as "current" is the expected one
            self.assertEqual(page.get_current_page(), current_page)
            # ensure the expected threads are being shown in the right order
            threads_expected = threads[(current_page - 1) * self.PAGE_SIZE:current_page * self.PAGE_SIZE]
            self.assertEqual(page.get_shown_thread_ids(), [t["id"] for t in threads_expected])
            # ensure the clickable page numbers are the expected ones:
            # a window of +/-2 around the current page, plus first/last.
            self.assertEqual(page.get_clickable_pages(), [
                p for p in all_pages
                if p != current_page
                and p - 2 <= current_page <= p + 2
                or (current_page > 2 and p == 1)
                or (current_page < total_pages and p == total_pages)
            ])
            # ensure the previous button is shown, but only if it should be.
            # when it is shown, make sure it works.
            if current_page > 1:
                self.assertTrue(page.is_prev_button_shown(current_page - 1))
                page.click_prev_page()
                self.assertEqual(page.get_current_page(), current_page - 1)
                page.click_next_page()
                self.assertEqual(page.get_current_page(), current_page)
            else:
                self.assertFalse(page.is_prev_button_shown())
            # ensure the next button is shown, but only if it should be.
            if current_page < total_pages:
                self.assertTrue(page.is_next_button_shown(current_page + 1))
            else:
                self.assertFalse(page.is_next_button_shown())

        # click all the way up through each page
        for i in range(current_page, total_pages):
            _check_page()
            if current_page < total_pages:
                page.click_on_page(current_page + 1)
                current_page += 1

        # click all the way back down
        for i in range(current_page, 0, -1):
            _check_page()
            if current_page > 1:
                page.click_on_page(current_page - 1)
                current_page -= 1

    def test_0_threads(self):
        self.check_pages(0)

    def test_1_thread(self):
        self.check_pages(1)

    def test_20_threads(self):
        self.check_pages(20)

    def test_21_threads(self):
        self.check_pages(21)

    def test_151_threads(self):
        self.check_pages(151)
class DiscussionSearchAlertTest(UniqueCourseTest):
    """
    Tests for spawning and dismissing alerts related to user search actions and their results.
    """

    SEARCHED_USERNAME = "gizmo"

    def setUp(self):
        super(DiscussionSearchAlertTest, self).setUp()
        CourseFixture(**self.course_info).install()
        # first auto auth call sets up a user that we will search for in some tests
        self.searched_user_id = AutoAuthPage(
            self.browser,
            username=self.SEARCHED_USERNAME,
            course_id=self.course_id
        ).visit().get_user_id()
        # this auto auth call creates the actual session user
        AutoAuthPage(self.browser, course_id=self.course_id).visit()
        self.page = DiscussionTabHomePage(self.browser, self.course_id)
        self.page.visit()

    def setup_corrected_text(self, text):
        """Stub the search backend to report the given spelling correction."""
        SearchResultFixture(SearchResult(corrected_text=text)).push()

    def check_search_alert_messages(self, expected):
        # Pairwise check: each displayed alert message must contain the
        # corresponding expected substring (case-insensitive).
        actual = self.page.get_search_alert_messages()
        self.assertTrue(all(map(lambda msg, sub: msg.lower().find(sub.lower()) >= 0, actual, expected)))

    def test_no_rewrite(self):
        """Without a spelling correction, an empty result shows 'no threads'."""
        self.setup_corrected_text(None)
        self.page.perform_search()
        self.check_search_alert_messages(["no threads"])

    def test_rewrite_dismiss(self):
        """A correction alert can be dismissed."""
        self.setup_corrected_text("foo")
        self.page.perform_search()
        self.check_search_alert_messages(["foo"])
        self.page.dismiss_alert_message("foo")
        self.check_search_alert_messages([])

    def test_new_search(self):
        """Each new search replaces the previous alert."""
        self.setup_corrected_text("foo")
        self.page.perform_search()
        self.check_search_alert_messages(["foo"])
        self.setup_corrected_text("bar")
        self.page.perform_search()
        self.check_search_alert_messages(["bar"])
        self.setup_corrected_text(None)
        self.page.perform_search()
        self.check_search_alert_messages(["no threads"])

    def test_rewrite_and_user(self):
        """Correction and matched-user alerts can appear together."""
        self.setup_corrected_text("foo")
        self.page.perform_search(self.SEARCHED_USERNAME)
        self.check_search_alert_messages(["foo", self.SEARCHED_USERNAME])

    def test_user_only(self):
        """A username match alone yields a 'no threads' alert plus a profile link."""
        self.setup_corrected_text(None)
        self.page.perform_search(self.SEARCHED_USERNAME)
        self.check_search_alert_messages(["no threads", self.SEARCHED_USERNAME])
        # make sure clicking the link leads to the user profile page
        UserProfileViewFixture([]).push()
        self.page.get_search_alert_links().first.click()
        DiscussionUserProfilePage(
            self.browser,
            self.course_id,
            self.searched_user_id,
            self.SEARCHED_USERNAME
        ).wait_for_page()
class DiscussionSortPreferenceTest(UniqueCourseTest):
    """
    Tests for the user's discussion sort preference.
    """

    def setUp(self):
        super(DiscussionSortPreferenceTest, self).setUp()
        # Create a course to register for.
        CourseFixture(**self.course_info).install()
        AutoAuthPage(self.browser, course_id=self.course_id).visit()
        self.sort_page = DiscussionSortPreferencePage(self.browser, self.course_id)
        self.sort_page.visit()

    def test_default_sort_preference(self):
        """
        Test to check the default sorting preference of user. (Default = date )
        """
        selected_sort = self.sort_page.get_selected_sort_preference()
        self.assertEqual(selected_sort, "date")

    def test_change_sort_preference(self):
        """
        Test that if user sorting preference is changing properly.
        """
        selected_sort = ""
        # Cycle through each sort option and verify the UI reflects it.
        for sort_type in ["votes", "comments", "date"]:
            self.assertNotEqual(selected_sort, sort_type)
            self.sort_page.change_sort_preference(sort_type)
            selected_sort = self.sort_page.get_selected_sort_preference()
            self.assertEqual(selected_sort, sort_type)

    def test_last_preference_saved(self):
        """
        Test that user last preference is saved.
        """
        selected_sort = ""
        for sort_type in ["votes", "comments", "date"]:
            self.assertNotEqual(selected_sort, sort_type)
            self.sort_page.change_sort_preference(sort_type)
            selected_sort = self.sort_page.get_selected_sort_preference()
            self.assertEqual(selected_sort, sort_type)
            # A page refresh must keep the chosen preference.
            self.sort_page.refresh_page()
            selected_sort = self.sort_page.get_selected_sort_preference()
            self.assertEqual(selected_sort, sort_type)
| LICEF/edx-platform | common/test/acceptance/tests/test_discussion.py | Python | agpl-3.0 | 22,502 | [
"VisIt"
] | 6569fdfa341231db78c3526621ebca9aaa9b44ff2f0ae30ffdec5a50a96ae05a |
# $HeadURL$
"""
Collection of DIRAC useful operating system related modules
by default on Error they return None
"""
__RCSID__ = "$Id$"
from types import StringTypes
import os
import DIRAC
from DIRAC.Core.Utilities.Subprocess import shellCall, systemCall
from DIRAC.Core.Utilities import List
DEBUG = 0
def uniquePath( path = None ):
  """
  Squeeze a PATH-like, colon-separated string down to its unique elements,
  preserving the order of first appearance.

  Returns None if path is not a string or cannot be processed.
  """
  if type( path ) not in StringTypes:
    return None
  try:
    return ':'.join( List.uniqueElements( List.fromChar( path, ":" ) ) )
  except Exception:
    return None
def getDiskSpace( path = '.' ):
  """ Get the free disk space in the partition containing the path.
      The disk space is reported in MBytes. Returns -1 in case of any
      error, e.g. path does not exist.
  """
  if not os.path.exists( path ):
    return -1
  # POSIX df with 1 MB blocks; the last output line holds the partition summary.
  comm = 'df -P -m %s | tail -1' % path
  resultDF = shellCall( 10, comm )
  if resultDF['OK'] and not resultDF['Value'][0]:
    output = resultDF['Value'][1]
    if output.find( ' /afs' ) >= 0 : # AFS disk space
      # df is meaningless on AFS volumes: query the AFS quota instead.
      comm = 'fs lq | tail -1'
      resultAFS = shellCall( 10, comm )
      if resultAFS['OK'] and not resultAFS['Value'][0]:
        output = resultAFS['Value'][1]
        fields = output.split()
        # 'fs lq' reports quota and usage in KB; convert free space to MB.
        quota = long( fields[1] )
        used = long( fields[2] )
        space = ( quota - used ) / 1024
        return int( space )
      else:
        return -1
    else:
      fields = output.split()
      try:
        # Fourth df column is the available space in MB.
        value = int( fields[3] )
      except Exception, error:
        print "Exception during disk space evaluation:", str( error )
        value = -1
      return value
  else:
    return -1
def getDirectorySize( path ):
  """ Get the total size of the given directory in MB.

      Uses 'du -s -m'; returns 0 in case of any error (command failure
      or unparsable output).
  """
  comm = "du -s -m %s" % path
  result = shellCall( 10, comm )
  if not result['OK'] or result['Value'][0] != 0:
    return 0
  output = result['Value'][1]
  # First whitespace-separated token of the du output is the size in MB.
  # Fixed: removed a leftover debug 'print output' and guarded the parse.
  try:
    return int( output.split()[0] )
  except ( IndexError, ValueError ):
    return 0
def sourceEnv( timeout, cmdTuple, inputEnv = None ):
  """ Function to source configuration files in a platform dependent way and get
      back the environment.

      WARNING: mutates cmdTuple in place (a platform extension is appended to
      its first element; 'source' is prepended on non-Windows). The resulting
      environment is recovered by eval()-ing the repr of os.environ that a
      helper python process prints to stderr, so the sourced script's own
      stderr must not interfere with the last line.
  """
  # add appropriate extension to first element of the tuple (the command)
  envAsDict = '&& python -c "import os,sys ; print >> sys.stderr, os.environ"'
  # 1.- Choose the right version of the configuration file
  if DIRAC.platformTuple[0] == 'Windows':
    cmdTuple[0] += '.bat'
  else:
    cmdTuple[0] += '.sh'
  # 2.- Check that it exists
  if not os.path.exists( cmdTuple[0] ):
    result = DIRAC.S_ERROR( 'Missing script: %s' % cmdTuple[0] )
    result['stdout'] = ''
    result['stderr'] = 'Missing script: %s' % cmdTuple[0]
    return result
  # Source it in a platform dependent way:
  # On windows the execution makes the environment to be inherit
  # On Linux or Darwin use bash and source the file.
  if DIRAC.platformTuple[0] == 'Windows':
    # this needs to be tested
    cmd = ' '.join( cmdTuple ) + envAsDict
    ret = shellCall( timeout, [ cmd ], env = inputEnv )
  else:
    cmdTuple.insert( 0, 'source' )
    cmd = ' '.join( cmdTuple ) + envAsDict
    ret = systemCall( timeout, [ '/bin/bash', '-c', cmd ], env = inputEnv )
  # 3.- Now get back the result
  stdout = ''
  stderr = ''
  result = DIRAC.S_OK()
  if ret['OK']:
    # The Command has not timeout, retrieve stdout and stderr
    stdout = ret['Value'][1]
    stderr = ret['Value'][2]
    if ret['Value'][0] == 0:
      # execution was OK
      try:
        # NOTE(review): eval of subprocess output — acceptable here only
        # because the sourced scripts are trusted local configuration.
        result['outputEnv'] = eval( stderr.split( '\n' )[-2] + '\n' )
        stderr = '\n'.join( stderr.split( '\n' )[:-2] )
      except Exception:
        stdout = cmd + '\n' + stdout
        result = DIRAC.S_ERROR( 'Could not parse Environment dictionary from stderr' )
    else:
      # execution error
      stdout = cmd + '\n' + stdout
      result = DIRAC.S_ERROR( 'Execution returns %s' % ret['Value'][0] )
  else:
    # Timeout
    stdout = cmd
    stderr = ret['Message']
    result = DIRAC.S_ERROR( stderr )
  # 4.- Put stdout and stderr in result structure
  result['stdout'] = stdout
  result['stderr'] = stderr
  return result
#FIXME: this is not used !
def unifyLdLibraryPath( path, newpath ):
  """ for Linux and MacOS link all the files in the path in a single directory
      newpath. For that we go along the path in a reverse order and link all files
      from the path, the latest appearance of a file will take precedence.

      Returns newpath on success; returns the original path unchanged on
      Windows or if newpath cannot be prepared.
  """
  if not DIRAC.platformTuple[0] == 'Windows':
    # Make sure newpath exists and is a directory; bail out (returning the
    # original path) if we cannot arrange that.
    if os.path.exists( newpath ):
      if not os.path.isdir( newpath ):
        try:
          os.remove( newpath )
        except Exception:
          return path
    else:
      try:
        os.makedirs( newpath )
      except Exception:
        return path
    pathList = path.split( ':' )
    # Walk the path entries from last to first so that links created for
    # earlier entries overwrite those of later ones (first wins).
    for dummy in pathList[:]:
      ldDir = pathList.pop()
      if not os.path.isdir( ldDir ):
        continue
      ldLibs = os.listdir( ldDir )
      for lib in ldLibs:
        newF = os.path.join( newpath, lib )
        ldF = os.path.join( ldDir, lib )
        # 1. Check if the file exist (broken links will return False)
        if os.path.isfile( ldF ):
          ldF = os.path.realpath( ldF )
          # 2. Check if the link is present already
          if os.path.exists( newF ):
            # 3. Check is the point to the same file
            if os.path.samefile( newF, ldF ):
              continue
            else:
              os.remove( newF )
          # 4. Create the link
          os.symlink( ldF, newF )
    return newpath
  else:
    # Windows does nothing for the moment
    return path
def which( program ):
  """ Utility that mimics the 'which' command from the shell:
      return the full path of the first executable matching 'program',
      or None if nothing suitable is found.
  """
  def is_exe( fpath ):
    # Executable means: a regular file with the execute bit for this user.
    return os.path.isfile( fpath ) and os.access( fpath, os.X_OK )

  dirName, _baseName = os.path.split( program )
  if dirName:
    # An explicit path was given: accept it only if it is executable.
    if is_exe( program ):
      return program
    return None
  for pathDir in os.environ["PATH"].split( os.pathsep ):
    candidate = os.path.join( pathDir.strip( '"' ), program )
    if is_exe( candidate ):
      return candidate
  return None
| avedaee/DIRAC | Core/Utilities/Os.py | Python | gpl-3.0 | 6,236 | [
"DIRAC"
] | a6d8df382e5a8d38c09d5fda419d89a624d522e4fe02b9ed704758f92e4d7a79 |
"""Helper functions common to all mission tsaks"""
import itertools
from math import radians, sqrt, cos, sin, tan, atan, degrees, atan2
import numpy
import scipy.stats
import shm
from mission.framework.helpers import get_sub_position
# Function returns a list where elements are sorted by their distance from element t in list x
# Used for pipe finding logic
def priority_list_gen(x, t):
    """Return the elements of x reordered by their index distance from t.

    t comes first; then, for each increasing distance, the element just
    before t followed by the element just after it, until both sides of
    the list are exhausted. Used for pipe finding logic.
    """
    center = x.index(t)
    after = x[center + 1:]  # elements following t, nearest first
    result = [t]
    for step in range(max(center, len(after))):
        back = center - 1 - step
        if back >= 0:
            result.append(x[back])
        if step < len(after):
            result.append(after[step])
    return result
# Used for determining offsets while approaching a target and holding it in a certain spot in the camera view.
# Returns x and y offsets which scale based on distance from the target, keeping a roughly constant real world offset
# between the axis of the camera and of the target.
#
# final_offsets are the desired offsets (x,y) in pixels when current_area == final_area.
# Will not return offsets larger than final_offsets.
def scaled_offsets(vehicle, final_area, final_offsets, current_area, forward=True):
    """
    Compute pixel offsets (x, y) that keep a roughly constant real-world
    offset between the camera axis and a target while approaching it.

    final_offsets are the desired pixel offsets when current_area equals
    final_area; the returned offsets scale with apparent distance and are
    clamped so they never exceed final_offsets. Returns (0, 0) when any
    input makes the computation meaningless (zero areas, zero offsets,
    or a zero-sized screen).
    """
    if (final_area == 0) or (current_area == 0) or (final_offsets == (0, 0)):
        return 0, 0

    # Pick the camera geometry. The max angles are the view angles (degrees)
    # from the center of the screen to its edge for each camera.
    if forward:
        screen_width = vehicle.sensors.forward_width
        screen_height = vehicle.sensors.forward_height
        max_angle_x = 35
        max_angle_y = 26.25
    else:
        screen_width = vehicle.sensors.downward_width
        screen_height = vehicle.sensors.downward_height
        max_angle_x = 27.5
        max_angle_y = 20.625

    screen_area = screen_width * screen_height
    # Fixed: this used to be "screen_area is 0", an identity test that fails
    # for 0.0 and is unreliable for numbers in general; use equality.
    if screen_area == 0:
        return 0, 0

    final_x, final_y = final_offsets

    # Constant 'real' offsets/area derived from the given final conditions.
    # All 'real' values use arbitrary distance units in which distance == 1
    # when current_area == final_area.
    real_x = tan(radians(2 * max_angle_x * (float(final_x) / float(screen_width))))
    real_y = tan(radians(2 * max_angle_y * (float(final_y) / float(screen_height))))
    real_area = (screen_area / final_area) / (4 * tan(radians(max_angle_x)) * tan(radians(max_angle_y)))

    # Distance is 1 when current_area == final_area and decreases as
    # current_area approaches screen_area (as we get closer).
    distance = sqrt(
        (screen_area / current_area) / (real_area * (4 * tan(radians(max_angle_x)) * tan(radians(max_angle_y)))))

    def get_offset(screen_height_or_width, real_offset, distance, max_angle, final_offset):
        # Project the constant real-world offset back into pixels at this distance.
        offset = ((.5 * screen_height_or_width) * degrees(atan2(real_offset, distance)) / max_angle)
        # Do not return an offset larger than the final offset.
        if abs(offset) > abs(final_offset):
            return final_offset
        return offset

    x_offset = get_offset(screen_width, real_x, distance, max_angle_x, final_x)
    y_offset = get_offset(screen_height, real_y, distance, max_angle_y, final_y)
    return (x_offset, y_offset)
# Will return a speed between min and max speeds based on value. Can also be used for other variables besides speed.
def scaled_speed(final_value=0, initial_value=0, final_speed=0, initial_speed=0, current_value=0):
    """Linearly interpolate a speed from current_value's position inside
    [initial_value, final_value], clamped to [initial_speed, final_speed].

    If the value bounds are given in reverse order, both the values and the
    speeds are swapped so the mapping stays consistent. Can be used for
    other variables besides speed.
    """
    # Normalize so that initial_value <= final_value.
    if initial_value > final_value:
        initial_value, final_value = final_value, initial_value
        initial_speed, final_speed = final_speed, initial_speed

    # Clamp outside the value range.
    if current_value >= final_value:
        return final_speed
    if current_value <= initial_value:
        return initial_speed

    # Linear interpolation between the two speeds.
    fraction = (current_value - initial_value) / float(final_value - initial_value)
    return initial_speed + fraction * (final_speed - initial_speed)
def downward_position(camera_position=(0, 0)):
    """
    Convert a pixel position in the downward camera into a world-frame
    (north, east) position on the bottom, using the sub's position, heading
    and DVL altitude. Returns a numpy array (north, east); returns (0, 0)
    if the camera reports a zero-sized image.

    Geometry: distance = (objsize) * (camsize) / (image size * tan(theta)),
    very similar to distance_from_size.
    """
    # TODO: add pitch and roll compensation
    pos = get_sub_position()[:2]
    altitude = shm.dvl.savg_altitude.get()
    heading_rad = radians(shm.kalman.heading.get())
    # Unit vectors along the sub's heading and 90 degrees to its right.
    heading_v = numpy.array((cos(heading_rad), sin(heading_rad)))
    heading_yv = numpy.array((-heading_v[1], heading_v[0]))
    screen_width = shm.camera.downward_width.get()
    screen_height = shm.camera.downward_height.get()
    screen_area = screen_width * screen_height
    if screen_area == 0:
        return numpy.array((0, 0))
    # Local import to avoid a hard dependency on the locator package at
    # module import time.
    from locator.camera import compute_fov_from_lens_params, DOWNWARD_LENS_PARAMS
    h_fov, v_fov = compute_fov_from_lens_params(*DOWNWARD_LENS_PARAMS)
    max_angle_x = 2 * tan(h_fov / 2)
    max_angle_y = 2 * tan(v_fov / 2)
    # Re-center pixel coordinates about the middle of the image.
    camera_x, camera_y = camera_position
    camera_x -= .5 * screen_width
    camera_y -= .5 * screen_height
    x_scale = (camera_x / screen_width) * max_angle_x
    y_scale = (camera_y / screen_height) * max_angle_y
    # Camera y maps to (negative) forward distance, camera x to sway.
    x_distance = -1 * altitude * y_scale
    y_distance = altitude * x_scale
    return pos + (x_distance * heading_v) + (y_distance * heading_yv)
def distance_from_size(camera_size=0, field_of_view=0, object_size=0, image_size=0):
    """Estimate the distance to an object from its apparent size in pixels.

    Pinhole-camera model; works for either the width or the height
    dimension. field_of_view is in degrees; camera_size and image_size are
    in pixels; object_size is in any real-world length unit, and the
    returned distance is expressed in that same unit.
    """
    half_fov_tan = tan(radians(field_of_view / 2.0))
    return (object_size * camera_size) / (image_size * 2 * half_fov_tan)
# Uses downward_position because something is wrong :(
# But should be revisited perhaps after this summer (2013)
# to take into account pitch and roll
def screen_point_to_world_point(x, y):
    """
    Converts a point (x,y) in the downward camera to a point (north, east)
    on the ground, taking into account altitude and attitude (heading,
    pitch, roll). Returns a numpy array (north, east).
    """
    # Measured by inspection of bins
    pitch_fov = radians(41.)
    roll_fov = radians(32.5)
    screen_height = shm.camera.downward_height.get()
    screen_width = shm.camera.downward_width.get()
    # Centers the coordinates, flips y
    x = (x - screen_width / 2.)
    y = (screen_height / 2. - y)
    # Angle from sub to the point
    # (opp/adj)/(opp/adj) =
    # tan(angle)/tan(fov) = x/(half screen size)
    # so angle = atan( x*tan(fov)*2/screen size)
    eff_pitch = shm.kalman.pitch.get() + degrees(atan(y * tan(pitch_fov) * 2 / screen_height))
    eff_roll = -shm.kalman.roll.get() + degrees(atan(x * tan(roll_fov) * 2 / screen_width))
    # Calculate displacements in vehicle frame
    h = shm.dvl.savg_altitude.get()
    forward = tan(radians(eff_pitch)) * h
    sway = tan(radians(eff_roll)) * h
    # Fixed: shm.kalman.heading is in degrees (it is converted with
    # radians() in downward_position, and kalman pitch/roll are treated as
    # degrees above), but it was previously fed to cos()/sin() unconverted.
    heading = radians(shm.kalman.heading.get())
    n = shm.kalman.north.get() + forward * cos(heading) - sway * sin(heading)
    e = shm.kalman.east.get() + forward * sin(heading) + sway * cos(heading)
    return numpy.array((n, e))
def cluster(point_list, std, chance_correct):
    """
    Given a list of points (x,y), the standard deviation of the point position measurements,
    and the chance that the point is correctly identified as part of the group,
    this returns the expectation point of where the object is

    This is useful for the following situation:
    We are getting points, say from vision, that are either approximately the correct point or are something being
    mis-identified.
    We don't want to simply take the average of the points, since that would include the 'bad' points along with the
    good. This gives a way to average only the good points, assuming that the bad points are infrequent enough.

    This was originally written for SlagathorIV, the bins 2013 task, but was eventually replaced
    with it's 'binning' approach.

    NOTE: tries all subsets of the input, so it is exponential in
    len(point_list); only suitable for small point lists.
    """
    # Probability time!
    # We assume points are either 'correct' or 'wrong'
    # if they're correct then they're drawn from a Gaussian distribution
    # with standard deviation std
    # We try each possible combination of 'correct' points and take the
    # 'true position' to be the average of correct points
    # then test how likely it is to get that collection about this true point
    best_probability = 0
    best = (0, 0)
    # N is total number of points, K is number of 'correct' points
    N = len(point_list)
    for K in range(N, 0, -1):
        # Chance of getting this many 'correct' points
        p_H = chance_correct ** K * (1 - chance_correct) ** (N - K)
        # Short-circuit if we're doing better than we can ever
        # possibly do with fewer 'correct' points
        if best_probability > p_H:
            break
        # try all subsets of possible 'correct' points
        for subset in itertools.combinations(point_list, K):
            # std of average is std/sqrt(k), so
            std_total = sqrt((1. + 1. / K)) * std
            avg_n = float(sum([n for n, e in subset])) / len(subset)
            avg_e = float(sum([e for n, e in subset])) / len(subset)
            # Two-sided tail probability of each point's distance from the
            # subset mean under the Gaussian model.
            dists = [sqrt((n - avg_n) ** 2 + (e - avg_e) ** 2) for n, e in subset]
            z_scores = [d / std_total for d in dists]
            ps = [2 * scipy.stats.norm.sf(z) for z in z_scores]
            p_E_H = numpy.product(ps)
            p_H_E = p_H * p_E_H
            if p_H_E > best_probability:
                best_probability = p_H_E
                best = (avg_n, avg_e)
    return best
# must be used with continuous data streams
class ProgressiveMedianFilterer:
    """Rejects sudden jumps in a continuous stream of readings.

    Keeps a window of the last n accepted values. A new value is accepted
    (and returned) only if its relative deviation from the window mean is
    below the threshold t; otherwise the most recently evicted accepted
    value is returned instead. Must be used with continuous data streams.
    """

    def __init__(self, n=10, t=0.2):
        self.n = n
        self.t = t
        self.vals = []

    def call(self, val):
        # Warm-up: accept everything until the window is full.
        if len(self.vals) < self.n:
            self.vals.append(val)
            return val
        deviation = abs(val / numpy.mean(self.vals) - 1)
        if deviation >= self.t:
            # Outlier: hold the oldest value in the window instead.
            return self.vals[self.n - 1]
        # Accept: rotate the new value in at the front.
        self.vals.pop()
        self.vals.insert(0, val)
        return val
def filtered(func):
    """Decorator that passes func's return value through a
    ProgressiveMedianFilterer, rejecting sudden jumps in an otherwise
    continuous stream of readings.

    Note: one filter instance is shared by all calls to the wrapped
    function, so decorate one continuous data stream per function.
    """
    # Local import keeps the module's top-level imports unchanged.
    from functools import wraps

    f = ProgressiveMedianFilterer()

    # Fixed: apply functools.wraps so the wrapped function keeps its
    # name and docstring.
    @wraps(func)
    def wrapper(*args, **kwargs):
        return f.call(func(*args, **kwargs))
    return wrapper
| cuauv/software | mission/helpers.py | Python | bsd-3-clause | 10,992 | [
"Gaussian"
] | 5eb2834f67947390566a556fc281e59a17ae9fd1b096dd07d40b3d3db00f36e5 |
"""
This module defines a classs for a generic Workflow Parameter. It also defines
a ParameterCollection class as a list of parameters as well as an AttributeCollection
class which is the base class for the main Workflow classes.
"""
from DIRAC.Core.Workflow.Utility import *
__RCSID__ = "$Id$"
# unbound method, returns indentated string
def indent( indent = 0 ):
  """ Return a string of 2*indent spaces, used when generating code. """
  return '  ' * indent
class Parameter( object ):
def __init__( self, name = None, value = None, type = None, linked_module = None,
linked_parameter = None, typein = None, typeout = None, description = None, parameter = None ):
# the priority to assign values
# if parameter exists all values taken from there
# and then owerriten by values taken from the arguments
if isinstance( parameter, Parameter ):
self.name = parameter.name
self.type = parameter.type
self.value = parameter.value
self.description = parameter.description
self.linked_module = parameter.linked_module
self.linked_parameter = parameter.linked_parameter
self.typein = bool( parameter.typein )
self.typeout = bool( parameter.typeout )
else:
# default values
self.name = ""
self.type = "string"
self.value = ""
self.description = ""
self.linked_module = ""
self.linked_parameter = ""
self.typein = False
self.typeout = False
if name != None:
self.name = name
if type != None:
self.type = type
if value != None:
self.setValue( value )
if description != None:
self.description = description
if linked_module != None:
self.linked_module = linked_module
if linked_parameter != None:
self.linked_parameter = linked_parameter
if typein != None:
self.setInput( typein )
if typeout != None:
self.setOutput( typeout )
def getName( self ):
return self.name
def setName( self, n ):
self.name = n # if collection=None it still will work fine
def getValue( self ):
return self.value
def getValueTypeCorrected( self ):
# this method used to generate code for the workflow
# it NOT used to geterate XML!!!
if self.isTypeString():
return '"""' + str( self.value ).replace( '"', r'\"' ).replace( "'", r"\'" ) + '"""'
return self.value
def setValue( self, value, type_ = None ):
if type_ != None:
self.setType( type_ )
self.setValueByType( value )
def setValueByType( self, value ):
type = self.type.lower() # change the register
if self.isTypeString():
self.value = str( value )
elif type == 'float':
self.value = float( value )
elif type == 'int':
self.value = int( value )
elif type == 'bool':
self.value = bool( value )
else:
#raise TypeError('Can not assing value '+value+' of unknown type '+ self.type + ' to the Parameter '+ str(self.name))
#print 'WARNING: we do not have established conversion algorithm to assing value ',value,' of unknown type ',self.type, ' to the Parameter ', str(self.name)
self.value = value
def getType( self ):
return self.type
def setType( self, type_ ):
self.type = type_
def isTypeString( self ):
"""returns True if type is the string kind"""
type = self.type.lower() # change the register
if type == 'string' or type == 'jdl' or \
type == 'option' or type == 'parameter' or \
type == 'jdlreqt':
return True
return False
def getDescription( self ):
return self.description
def setDescription( self, descr ):
self.description = descr
def link( self, module, parameter ):
self.linked_module = module
self.linked_parameter = parameter
def unlink( self ):
self.linked_module = ""
self.linked_parameter = ""
def getLinkedModule( self ):
return self.linked_module
def getLinkedParameter( self ):
return self.linked_parameter
def getLink( self ):
# we have 4 possibilities
# two fields can be filled independently
# it is possible to fill one field with the valid information
# spaces shall be ignored ( using strip() function)
if ( self.linked_module == None ) or ( self.linked_module.strip() == '' ):
if ( self.linked_parameter == None ) or ( self.linked_parameter.strip() == '' ):
# both empty
return ""
else:
# parameter filled
return self.linked_parameter
else:
if ( self.linked_parameter == None ) or ( self.linked_parameter.strip() == '' ):
return self.linked_module
return self.linked_module + '.' + self.linked_parameter
def isLinked( self ):
if ( self.linked_module == None ) or ( self.linked_module.strip() == '' ):
if ( self.linked_parameter == None ) or ( self.linked_parameter.strip() == '' ):
return False
return True
def preExecute( self ):
""" method to request watever parameter need to be defined before calling execute method
returns TRUE if it needs to be done, FALSE otherwise
PS: parameters with the output status only going to be left out"""
return ( not self.isOutput() ) or self.isInput()
def isInput( self ):
return self.typein
def isOutput( self ):
return self.typeout
def setInput( self, i ):
if isinstance( i, str ) or isinstance( i, unicode ):
self.typein = self.__setBooleanFromString( i )
else:
self.typein = bool( i )
def setOutput( self, i ):
if isinstance( i, str ) or isinstance( i, unicode ):
self.typeout = self.__setBooleanFromString( i )
else:
self.typeout = bool( i )
def __setBooleanFromString( self, i ):
if i.upper() == "TRUE":
return True
else:
return False
def __str__( self ):
return str( type( self ) ) + ": name=" + self.name + " value=" + str( self.getValueTypeCorrected() ) + " type=" + str( self.type )\
+ " linked_module=" + str( self.linked_module ) + " linked_parameter=" + str( self.linked_parameter )\
+ " in=" + str( self.typein ) + " out=" + str( self.typeout )\
+ " description=" + str( self.description )
def toXML( self ):
return '<Parameter name="' + self.name + '" type="' + str( self.type )\
+ '" linked_module="' + str( self.linked_module ) + '" linked_parameter="' + str( self.linked_parameter )\
+ '" in="' + str( self.typein ) + '" out="' + str( self.typeout )\
+ '" description="' + str( self.description ) + '">'\
+ '<value><![CDATA[' + str( self.getValue() ) + ']]></value>'\
+ '</Parameter>\n'
# we got a problem with the index() function
# def __eq__(self, s):
def compare( self, s ):
    """Field-by-field equality check against another Parameter.

    Deliberately NOT implemented as __eq__ — per the author's note above,
    overriding __eq__ broke list.index() on containers of parameters.
    Returns False for anything that is not a Parameter.
    """
    if isinstance( s, Parameter ):
        return ( self.name == s.name ) and \
               ( self.value == s.value ) and \
               ( self.type == s.type ) and \
               ( self.linked_module == s.linked_module ) and \
               ( self.linked_parameter == s.linked_parameter ) and \
               ( self.typein == s.typein ) and \
               ( self.typeout == s.typeout ) and \
               ( self.description == s.description )
    else:
        return False
#
# def __deepcopy__(self, memo):
# return Parameter(parameter=self)
#
# def __copy__(self):
# return self.__deepcopy__({})
def copy( self, parameter ):
    """Copy every field of another Parameter into self, in place.

    Raises
    ------
    TypeError
        When the argument is not a Parameter instance.
    """
    if isinstance( parameter, Parameter ):
        self.name = parameter.name
        self.value = parameter.value
        self.type = parameter.type
        self.description = parameter.description
        self.linked_module = parameter.linked_module
        self.linked_parameter = parameter.linked_parameter
        self.typein = parameter.typein
        self.typeout = parameter.typeout
    else:
        raise TypeError( 'Can not make a copy of object ' + str( type( self ) ) + ' from the ' + str( type( parameter ) ) )
def createParameterCode( self, ind = 0, instance_name = None ):
    """Emit one line of Python code assigning this parameter.

    Without an instance name a bare ``name = value`` line is produced;
    with one, ``instance.name = ...`` is produced, where a linked
    parameter is assigned from its link target rather than its value.
    A trailing comment records type/in/out/description.

    NOTE(review): ``indent()`` is a module-level helper defined elsewhere
    in this file (not visible in this chunk).
    """
    if ( instance_name == None ) or ( instance_name == '' ):
        ret = indent( ind ) + self.getName() + ' = ' + self.getValueTypeCorrected()
    else:
        if self.isLinked():
            ret = indent( ind ) + instance_name + '.' + self.getName() + ' = ' + self.getLink()
        else:
            ret = indent( ind ) + instance_name + '.' + self.getName() + ' = ' + str( self.getValueTypeCorrected() )
    return ret + ' # type=' + self.getType() + ' in=' + str( self.isInput() ) + ' out=' + str( self.isOutput() ) + ' ' + self.getDescription() + '\n'
class ParameterCollection( list ):
""" Parameter collection class representing a list of Parameters
"""
def __init__( self, coll = None ):
list.__init__( self )
if isinstance( coll, ParameterCollection ):
# makes a deep copy of the parameters
for v in coll:
self.append( Parameter( parameter = v ) )
elif coll != None:
raise TypeError( 'Can not create object type ' + str( type( self ) ) + ' from the ' + str( type( coll ) ) )
def appendOrOverwrite( self, opt ):
index = self.findIndex( opt.getName() )
if index > -1:
#print "Warning: Overriting Parameter %s = \"%s\" with the value \"%s\""%(self[index].getName(), self[index].getValue(), opt.getValue())
self[index] = opt
else:
list.append( self, opt )
def append( self, opt ):
if isinstance( opt, ParameterCollection ):
for p in opt:
self.appendOrOverwrite( p )
elif isinstance( opt, Parameter ):
self.appendOrOverwrite( opt )
return opt
else:
raise TypeError( 'Can not append object type ' + str( type( opt ) ) + ' to the ' + str( type( self ) ) + '. Parameter type appendable only' )
def appendCopy( self, opt, prefix = "", postfix = "" ):
if isinstance( opt, ParameterCollection ):
for p in opt:
self.appendOrOverwrite( Parameter( name = prefix + p.getName() + postfix, parameter = p ) )
elif isinstance( opt, Parameter ):
self.appendOrOverwrite( Parameter( name = prefix + opt.getName() + postfix, parameter = opt ) )
else:
raise TypeError( 'Can not append object type ' + str( type( opt ) ) + ' to the ' + str( type( self ) ) + '. Parameter type appendable only' )
def appendCopyLinked( self, opt, prefix = "", postfix = "" ):
if isinstance( opt, ParameterCollection ):
for p in opt:
if p.isLinked():
self.appendOrOverwrite( Parameter( name = prefix + p.getName() + postfix, parameter = p ) )
elif isinstance( opt, Parameter ):
if opt.isLinked():
self.appendOrOverwrite( Parameter( name = prefix + opt.getName() + postfix, parameter = opt ) )
else:
raise TypeError( 'Can not append object type ' + str( type( opt ) ) + ' to the ' + str( type( self ) ) + '. Parameter type appendable only' )
def setValue( self, name, value, vtype = None ):
""" Method finds parameter with the name "name" and if exists its set value
Returns True if sucsessfull
"""
par = self.find( name )
if par == None:
print "ERROR ParameterCollection.setValue() can not find parameter with the name=%s to set Value=%s" % ( name, value )
return False
else:
par.setValue( value, vtype )
return True
def getInput( self ):
""" Get input linked parameters
"""
return self.get( input = True )
def getOutput( self ):
""" Get output linked parameters
"""
return self.get( output = True )
def getLinked( self ):
""" Get linked parameters
"""
return self.get( input = True, output = True )
def get( self, input = False, output = False ):
""" Get a copy of parameters. If input or output is True, get corresponding
io type parameters only. Otherwise, get all the parameters
"""
all = not input and not output
params = ParameterCollection()
for p in self:
OK = False
if all:
OK = True
elif input and p.isInput():
OK = True
elif output and p.isOutput():
OK = True
if OK:
params.append( Parameter( parameter = p ) )
return params
def setLink( self, name, module_name, parameter_name ):
""" Method finds parameter with the name "name" and if exists its set value
Returns True if sucsessfull
"""
par = self.find( name )
if par == None:
print "ERROR ParameterCollection.setLink() can not find parameter with the name=%s to link it with %s.%s" % ( name, module_name, parameter_name )
return False
else:
par.link( module_name, parameter_name )
return True
def linkUp( self, opt, prefix = "", postfix = "", objname = "self" ):
""" This is a GROUP method operates on the 'obj' parameters using only parameters listed in 'opt' list
Method will link self.parameters with the outer object (self) perameters using prefix and postfix
for example if we want to link module instance with the step or step instance with the workflow
opt - ParameterCollection or sigle Parameter (WARNING!! used as reference to get a names!!! opt is not changing!!!)
opt ALSO can be a list of string with the names of parameters to link
objname - name of the object to connect with, usually 'self'
"""
if isinstance( opt, ParameterCollection ):
# if parameter in the list opt is not present in the self
# we are going to ignore this
for p in opt:
par = self.find( p.getName() )
if par == None:
print "WARNING ParameterCollection.linkUp can not find parameter with the name=", p.getName(), " IGNORING"
else:
par.link( objname, prefix + p.getName() + postfix )
elif isinstance( opt, Parameter ):
self.setLink( opt.getName(), objname, prefix + opt.getName() + postfix )
elif isinstance( opt, list ) and isinstance( opt[0], str ):
for s in opt:
par = self.find( s )
if par == None:
print "ERROR ParameterCollection.linkUp() can not find parameter with the name=%s" % ( s )
else:
par.link( objname, prefix + p.getName() + postfix )
elif isinstance( opt, str ):
par = self.find( opt )
if par == None:
print "ERROR ParameterCollection.linkUp() can not find parameter with the name=%s" % ( par )
else:
par.link( objname, prefix + par.getName() + postfix )
else:
raise TypeError( 'Can not link object type ' + str( type( opt ) ) + ' to the ' + str( type( self ) ) + '.' )
def unlink( self, opt ):
""" This is a GROUP method operates on the 'obj' parameters using only parameters listed in 'opt' list
Method will unlink some self.parameters
opt - ParameterCollection or sigle Parameter (WARNING!! used as reference to get a names!!! opt is not changing!!!)
opt ALSO can be a list of string with the names of parameters to link
objname - name of the object to connect with, usually 'self'
"""
if isinstance( opt, ParameterCollection ):
# if parameter in the list opt is not present in the self
# we are going to ignore this
for p in opt:
par = self.find( p.getName() )
if par == None:
print "WARNING ParameterCollection.linkUp can not find parameter with the name=", p.getName(), " IGNORING"
else:
par.unlink()
elif isinstance( opt, Parameter ):
opt.unlink()
elif isinstance( opt, list ) and isinstance( opt[0], str ):
for s in opt:
par = self.find( s )
if par == None:
print "ERROR ParameterCollection.unlink() can not find parameter with the name=%s" % ( s )
else:
par.unlink()
elif isinstance( opt, str ):
par = self.find( opt )
if par == None:
print "ERROR ParameterCollection.unlink() can not find parameter with the name=%s" % ( s )
else:
par.unlink()
else:
raise TypeError( 'Can not unlink object type ' + str( type( opt ) ) + ' to the ' + str( type( self ) ) + '.' )
def removeAllParameters( self ):
self[:] = []
def remove( self, name_or_ind ):
""" Removes a parameter given its name, or the index (the latter is not suggested), and only if it exists
If there are 2 parameters with the same name, only the first will be removed
"""
if isinstance( name_or_ind, list ) and isinstance( name_or_ind[0], str ):
for s in name_or_ind:
par = self.find( s )
if par == None:
print "ERROR ParameterCollection.remove() can not find parameter with the name=%s" % ( s )
else:
index = self.findIndex( s )
if index > -1:
del self[index]
elif isinstance( name_or_ind, str ): # we give a name
index = self.findIndex( name_or_ind )
elif isinstance( name_or_ind, int ): # we give the index
index = name_or_ind
if index > -1:
del self[index]
def find( self, name_or_ind ):
""" Method to find Parameters
Return: Parameter """
# work for index as well as for the string
if isinstance( name_or_ind, str ): # we given name
for v in self:
if v.getName() == name_or_ind:
return v
return None
elif isinstance( name_or_ind, int ) or isinstance( name_or_ind, long ): # we given index
return self[name_or_ind]
return self[int( name_or_ind )]
def findLinked( self, name_or_ind, linked_status = True ):
""" Method to find Parameters
if linked_status is True it returns only linked Var from the list
if linked_status is False it returns only NOTlinked Var from the list
Return: Parameter """
v = self.find( name_or_ind )
if ( v != None ) and ( v.isLinked() != linked_status ):
return None
return v
def findIndex( self, name ):
i = 0
for v in self:
if v.getName() == name:
return i
i = i + 1
return - 1
def getParametersNames( self ):
l = []
for v in self:
l.append( v.getName() )
return l
def compare( self, s ):
# we comparing parameters only, the attributes will be compared in hierarhy above
# we ignore the position of the Parameter in the list
# we assume that names of the Parameters are DIFFERENT otherwise we have to change alghorithm!!!
if ( not isinstance( s, ParameterCollection ) ) or ( len( s ) != len( self ) ):
return False
for v in self:
for i in s:
if v.getName() == i.getName():
if not v.compare( i ):
return False
else:
break
else:
#if we reached this place naturally we can not find matching name
return False
return True
def __str__( self ):
ret = str( type( self ) ) + ':\n'
for v in self:
ret = ret + str( v ) + '\n'
return ret
def toXML( self ):
ret = ""
for v in self:
ret = ret + v.toXML()
return ret
def createParametersCode( self, indent = 0, instance_name = None ):
str = ''
for v in self:
if v.preExecute():
str = str + v.createParameterCode( indent, instance_name )
return str
def resolveGlobalVars( self, wf_parameters = None, step_parameters = None ):
"""This function resolves global parameters of type @{value} within the ParameterCollection
"""
recurrency_max = 12
for v in self:
recurrency = 0
skip_list = []
substitute_vars = getSubstitute( v.value )
while True:
for substitute_var in substitute_vars:
# looking in the current scope
v_other = self.find( substitute_var )
# looking in the scope of step instance
if v_other == None and step_parameters != None :
v_other = step_parameters.findLinked( substitute_var, False )
# looking in the scope of workflow
if v_other == None and wf_parameters != None :
v_other = wf_parameters.findLinked( substitute_var, False )
# finaly the action itself
if v_other != None and not v_other.isLinked():
v.value = substitute( v.value, substitute_var, v_other.value )
elif v_other != None:
print "Leaving %s variable for dynamic resolution" % substitute_var
skip_list.append( substitute_var )
else: # if nothing helped tough!
print "Can not resolve ", substitute_var, str( v )
recurrency += 1
if recurrency > recurrency_max:
# must be an exception
print "ERROR! reached maximum recurrency level", recurrency, "within the parameter ", str( v )
if step_parameters == None:
if wf_parameters == None:
print "on the level of Workflow"
else:
print "on the level of Step"
else:
if wf_parameters != None:
print "on the level of Module"
break
else:
substitute_vars = getSubstitute( v.value, skip_list )
if not substitute_vars:
break
class AttributeCollection( dict ):
""" Attribute Collection class contains Parameter Collection as a data member
"""
def __init__( self ):
dict.__init__( self )
self.parameters = None
self.parent = None
def __str__( self ):
ret = ''
for v in self.keys():
ret = ret + v + ' = ' + str( self[v] ) + '\n'
return ret
def toXMLString( self ):
return self.toXML()
def toXMLFile( self, filename ):
with open( filename, 'w+' ) as f:
sarray = self.toXML()
for element in sarray:
f.write( element )
return
def toXML( self ):
ret = ""
for v in self.keys():
if v == 'parent':
continue # doing nothing
elif v == 'body' or v == 'description':
ret = ret + '<' + v + '><![CDATA[' + str( self[v] ) + ']]></' + v + '>\n'
else:
ret = ret + '<' + v + '>' + str( self[v] ) + '</' + v + '>\n'
return ret
def addParameter( self, opt, prefix = "", postfix = "" ):
self.parameters.appendCopy( opt, prefix, postfix )
def addParameterLinked( self, opt, prefix = "", postfix = "" ):
self.parameters.appendCopyLinked( opt, prefix, postfix )
def linkUp( self, opt, prefix = "", postfix = "", objname = "self" ):
self.parameters.linkUp( opt, prefix, postfix, objname )
def unlink( self, opt ):
self.parameters.unlink( opt )
def removeParameter( self, name_or_ind ):
self.parameters.remove( name_or_ind )
def removeAllParameters( self ):
self.parameters.removeAllParameters()
def findParameter( self, name_or_ind ):
return self.parameters.find( name_or_ind )
def findParameterIndex( self, ind ):
return self.parameters.findIndex( ind )
def compareParameters( self, s ):
return self.parameters.compare( s )
def setValue( self, name, value, type_ = None ):
if not self.parameters.setValue( name, value, type_ ):
print " in the object=", type( self ), "with name=", self.getName(), "of type=", self.getType()
def setLink( self, name, module_name, parameter_name ):
if not self.parameters.setLink( name, module_name, parameter_name ):
print " in the object=", type( self ), "with name=", self.getName(), "of type=", self.getType()
def compare( self, s ):
return ( self == s ) and self.parameters.compare( s.parameters )
def setParent( self, parent ):
self.parent = parent
def getParent( self ):
return self.parent
# ------------- common functions -----------
def setName( self, name ):
self['name'] = name
def getName( self ):
return self.get('name', '')
def setType( self, att_type ):
self['type'] = att_type
def getType( self ):
return self.get('type', '')
def setRequired( self, required ):
self['required'] = required
def getRequired( self ):
return self['required']
def setDescription( self, description ):
self['description'] = description
def getDescription( self ):
return self['description']
def setDescrShort( self, descr_short ):
self['descr_short'] = descr_short
def getDescrShort( self ):
return self['descr_short']
def setBody( self, body ):
self['body'] = body
def getBody( self ):
return self['body']
def setOrigin( self, origin ):
self['origin'] = origin
def getOrigin( self ):
return self['origin']
def setVersion( self, ver ):
self['version'] = ver
def getVersion( self ):
return self['version']
def resolveGlobalVars( self, wf_parameters = None, step_parameters = None ):
self.parameters.resolveGlobalVars( wf_parameters, step_parameters )
| arrabito/DIRAC | Core/Workflow/Parameter.py | Python | gpl-3.0 | 24,393 | [
"DIRAC"
] | 8e7bfa97ef9afbf100fbd5df6fa6224670c7e1e22638d0b9973c4b6a5f0d7f2e |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace for ops used in imperative programming."""
from ..ndarray import numpy as _mx_nd_np
# Public API of this module. The wrapper functions defined below delegate to
# the imperative implementations imported above as _mx_nd_np (names listed
# here but not defined in this chunk are presumably defined further down —
# TODO confirm).
__all__ = ["randint", "uniform", "normal", "choice", "rand", "multinomial", "multivariate_normal",
           "logistic", "gumbel", "f",
           "laplace",
           "shuffle", "randn", "gamma", "beta", "chisquare", "exponential", "lognormal",
           "weibull", "pareto", "power", "rayleigh"]
def randint(low, high=None, size=None, dtype=None, ctx=None, out=None):
    r"""Return random integers from the "discrete uniform" distribution.

    Samples are drawn from the half-open interval ``[low, high)``; when
    ``high`` is None, the interval ``[0, low)`` is used instead.

    Parameters
    ----------
    low : int
        Lowest (signed) integer drawn (or one above the *highest* such
        integer when ``high=None``).
    high : int, optional
        One above the largest (signed) integer to be drawn.
    size : int or tuple of ints, optional
        Output shape; ``(m, n, k)`` draws ``m * n * k`` samples. Default
        None returns a single value.
    dtype : dtype, optional
        Desired dtype of the result, e.g. 'int64', 'int'. Default 'np.int'.
    ctx : Context, optional
        Device context of output. Default is current context.
    out : ndarray, optional
        The output ndarray (default is `None`).

    Returns
    -------
    out : ndarray of ints
        `size`-shaped array of random integers, or a single int when `size`
        is not provided.

    Examples
    --------
    >>> np.random.randint(2, size=10)
    array([1, 0, 0, 0, 1, 1, 0, 0, 1, 0])
    >>> np.random.randint(5, size=(2, 4))
    array([[4, 0, 2, 1],
           [3, 2, 2, 0]])
    """
    # Forward to the imperative ndarray implementation.
    return _mx_nd_np.random.randint(low, high=high, size=size, dtype=dtype,
                                    ctx=ctx, out=out)
def uniform(low=0.0, high=1.0, size=None, dtype=None, ctx=None, out=None):
    r"""Draw samples uniformly distributed over the half-open ``[low, high)``.

    Every value in the interval is equally likely. The density is
    :math:`p(x) = \frac{1}{b - a}` inside ``[a, b)`` and zero elsewhere.
    When ``high == low``, values of ``low`` are returned; ``high < low``
    is officially undefined behaviour.

    Parameters
    ----------
    low : float, ndarray, optional
        Inclusive lower boundary of the interval (default 0).
    high : float, ndarray, optional
        Exclusive upper boundary of the interval (default 1.0).
    size : int or tuple of ints, optional
        Output shape; ``(m, n, k)`` draws ``m * n * k`` samples. When None,
        a scalar tensor is returned if both bounds are scalars, otherwise
        ``np.broadcast(low, high).size`` samples are drawn.
    dtype : {'float16', 'float32', 'float64'}, optional
        Data type of the samples; the framework default float type is used
        when not given (float32 unless npx.is_np_default_dtype()).
    ctx : Context, optional
        Device context of output. Default is current context.

    Returns
    -------
    out : ndarray
        Drawn samples from the parameterized uniform distribution.

    See Also
    --------
    randint : Discrete uniform distribution, yielding integers.
    rand : Uniform samples over ``[0, 1)`` with dimensions as arguments.
    """
    # Forward to the imperative ndarray implementation.
    return _mx_nd_np.random.uniform(low, high, size=size, dtype=dtype,
                                    ctx=ctx, out=out)
def normal(loc=0.0, scale=1.0, size=None, dtype=None, ctx=None, out=None):
    r"""Draw random samples from a normal (Gaussian) distribution.

    The distribution is parametrized by *loc* (mean :math:`\mu`) and
    *scale* (standard deviation :math:`\sigma`), with density

    .. math:: p(x) = \frac{1}{\sqrt{ 2 \pi \sigma^2 }}
                     e^{ - \frac{ (x - \mu)^2 } {2 \sigma^2} }.

    Parameters
    ----------
    loc : float, optional
        Mean (centre) of the distribution.
    scale : float, optional
        Standard deviation (spread or "width") of the distribution.
    size : int or tuple of ints, optional
        Output shape; ``(m, n, k)`` draws ``m * n * k`` samples. When None,
        a scalar tensor is returned if both parameters are scalars,
        otherwise their broadcast size determines the sample count.
    dtype : {'float16', 'float32', 'float64'}, optional
        Data type of the samples; the framework default float type is used
        when not given (float32 unless npx.is_np_default_dtype()).
    ctx : Context, optional
        Device context of output, default is current context.
    out : ``ndarray``, optional
        Store output to an existing ``ndarray``.

    Returns
    -------
    out : ndarray
        Drawn samples from the parameterized normal distribution.

    References
    ----------
    .. [1] Wikipedia, "Normal distribution",
           https://en.wikipedia.org/wiki/Normal_distribution

    Examples
    --------
    >>> mu, sigma = 0, 0.1
    >>> s = np.random.normal(mu, sigma, 1000)
    >>> np.abs(mu - np.mean(s)) < 0.01
    array(True)
    """
    # Forward to the imperative ndarray implementation.
    return _mx_nd_np.random.normal(loc, scale, size=size, dtype=dtype,
                                   ctx=ctx, out=out)
def lognormal(mean=0.0, sigma=1.0, size=None, dtype=None, ctx=None, out=None):
    r"""Draw samples from a log-normal distribution.

    A variable `x` is log-normal when `log(x)` is normally distributed;
    `mean` and `sigma` describe that *underlying* normal distribution,
    not the log-normal itself. The density is

    .. math:: p(x) = \frac{1}{\sigma x \sqrt{2\pi}}
                     e^{(-\frac{(ln(x)-\mu)^2}{2\sigma^2})}.

    Parameters
    ----------
    mean : float or array_like of floats, optional
        Mean of the underlying normal distribution. Default is 0.
    sigma : float or array_like of floats, optional
        Standard deviation of the underlying normal distribution; must be
        non-negative. Default is 1.
    size : int or tuple of ints, optional
        Output shape; ``(m, n, k)`` draws ``m * n * k`` samples. When None,
        a single value is returned if both parameters are scalars,
        otherwise ``np.broadcast(mean, sigma).size`` samples are drawn.
    dtype : {'float16', 'float32', 'float64'}, optional
        Data type of output samples. Default is 'float32'.
    ctx : Context, optional
        Device context of output. Default is current context.
    out : ``ndarray``, optional
        Store output to an existing ``ndarray``.

    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the parameterized log-normal distribution.

    Examples
    --------
    >>> mu, sigma = 3., 1.
    >>> s = np.random.lognormal(mu, sigma, 1000)
    """
    # Forward to the imperative ndarray implementation.
    return _mx_nd_np.random.lognormal(mean, sigma, size=size, dtype=dtype,
                                      ctx=ctx, out=out)
def logistic(loc=0.0, scale=1.0, size=None, ctx=None, out=None):
    r"""Draw samples from a logistic distribution.

    Samples come from a logistic distribution with location `loc`
    (mean, also median) and scale parameter `scale` (> 0).

    Parameters
    ----------
    loc : float or array_like of floats, optional
        Parameter of the distribution. Default is 0.
    scale : float or array_like of floats, optional
        Parameter of the distribution; must be non-negative. Default is 1.
    size : int or tuple of ints, optional
        Output shape; ``(m, n, k)`` draws ``m * n * k`` samples. When None,
        a single value is returned if both parameters are scalars,
        otherwise ``np.broadcast(loc, scale).size`` samples are drawn.
    ctx : Context, optional
        Device context of output, default is current context.
    out : ``ndarray``, optional
        Store output to an existing ``ndarray``.

    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the parameterized logistic distribution.

    Examples
    --------
    >>> loc, scale = 10, 1
    >>> s = np.random.logistic(loc, scale, 10000)
    """
    # Forward to the imperative ndarray implementation.
    return _mx_nd_np.random.logistic(loc, scale, size=size, ctx=ctx, out=out)
def gumbel(loc=0.0, scale=1.0, size=None, ctx=None, out=None):
    r"""Draw samples from a Gumbel (extreme-value) distribution.

    Parameters
    ----------
    loc : float or array_like of floats, optional
        Location of the mode of the distribution. Default is 0.
    scale : float or array_like of floats, optional
        Scale parameter of the distribution; must be non-negative.
        Default is 1.
    size : int or tuple of ints, optional
        Output shape; ``(m, n, k)`` draws ``m * n * k`` samples. When None,
        a single value is returned if both parameters are scalars,
        otherwise ``np.broadcast(loc, scale).size`` samples are drawn.
    ctx : Context, optional
        Device context of output, default is current context.
    out : ``ndarray``, optional
        Store output to an existing ``ndarray``.

    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the parameterized Gumbel distribution.

    Examples
    --------
    Draw samples and compare with the density:

    >>> mu, beta = 0, 0.1  # location and scale
    >>> s = np.random.gumbel(mu, beta, 1000)
    >>> import matplotlib.pyplot as plt
    >>> count, bins, ignored = plt.hist(s, 30, density=True)
    >>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)
    ...          * np.exp( -np.exp( -(bins - mu) /beta) ),
    ...          linewidth=2, color='r')
    >>> plt.show()
    """
    # Forward to the imperative ndarray implementation.
    return _mx_nd_np.random.gumbel(loc, scale, size=size, ctx=ctx, out=out)
def multinomial(n, pvals, size=None, **kwargs):
    r"""Draw samples from a multinomial distribution.

    The multinomial distribution generalises the binomial to ``p`` possible
    outcomes (e.g. the faces of a die). Each drawn sample
    ``X_i = [X_0, X_1, ..., X_p]`` records how many of the ``n`` trials
    produced each outcome.

    Parameters
    ----------
    n : int
        Number of experiments.
    pvals : sequence of floats, length p
        Probabilities of the p different outcomes; these should sum to 1.
    size : int or tuple of ints, optional
        Output shape; ``(m, n, k)`` draws ``m * n * k`` samples. Default is
        None, in which case a single value is returned.

    Returns
    -------
    out : ndarray
        The drawn samples, of shape size if provided, otherwise ``(N,)``;
        each entry ``out[i,j,...,:]`` is one N-dimensional draw.

    Examples
    --------
    Throw a dice 1000 times, and 1000 times again:

    >>> np.random.multinomial(1000, [1/6.]*6, size=2)
    array([[164, 161, 179, 158, 150, 188],
           [178, 162, 177, 143, 163, 177]])
    """
    # Forward to the imperative ndarray implementation.
    return _mx_nd_np.random.multinomial(n, pvals, size=size, **kwargs)
# pylint: disable=unused-argument
def multivariate_normal(mean, cov, size=None, check_valid=None, tol=None):
    """
    multivariate_normal(mean, cov, size=None, check_valid=None, tol=None)

    Draw random samples from a multivariate normal distribution.

    The multivariate normal (multinormal, Gaussian) distribution is the
    generalization of the one-dimensional normal distribution to higher
    dimensions.  It is specified by a mean vector and a covariance
    matrix, which are analogous to the scalar mean (the "center") and
    variance (squared "width") of the one-dimensional normal.

    Unlike the official NumPy operator, which only accepts a 1-D ndarray
    as `mean` and a 2-D ndarray as `cov`, this operator supports batch
    operation with auto-broadcasting: both `mean` and `cov` may carry
    any number of leading (batch) dimensions, not necessarily equal,
    as long as those batch shapes can be broadcast together.

    Parameters
    ----------
    mean : K-D ndarray, of shape (..., N)
        Mean of the N-dimensional distribution.
    cov : (K+1)-D ndarray, of shape (..., N, N)
        Covariance matrix of the distribution.  The last two dimensions
        must be symmetric and positive-semidefinite for proper sampling.
    size : int or tuple of ints, optional
        Given a shape of, for example, ``(m,n,k)``, ``m*n*k`` identically
        distributed batches of samples are generated and packed in an
        `m`-by-`n`-by-`k` arrangement.  If no shape is specified, a batch
        of (`N`-D) sample is returned.
    check_valid : { 'warn', 'raise', 'ignore' }, optional
        Behavior when the covariance matrix is not positive semidefinite.
        (Not supported)
    tol : float, optional
        Tolerance when checking the singular values in covariance matrix;
        `cov` is cast to double before the check.  (Not supported)

    Returns
    -------
    out : ndarray
        The input shapes of `mean` and `cov` must satisfy the
        requirements of broadcasting.  If `size` is not provided, the
        output shape is ``np.broadcast(mean.shape, cov.shape[:-1])``;
        otherwise it is ``size + np.broadcast(mean.shape, cov.shape[:-1])``.

    Examples
    --------
    >>> mean = np.array([1, 2])
    >>> cov = np.array([[1, 0], [0, 1]])
    >>> x = np.random.multivariate_normal(mean, cov, (3, 3))
    >>> x.shape
    (3, 3, 2)

    The following is probably true, given that 0.6 is roughly twice the
    standard deviation:

    >>> list((x[0,0,:] - mean) < 0.6)
    [True, True]  # random

    Auto-broadcasting when the batch shapes of `mean` and `cov` differ
    but are compatible:

    >>> mean = np.zeros((3,2))                # shape (3, 2)
    >>> cov = np.array([[1, 0], [0, 100]])    # shape (2, 2)
    >>> np.random.multivariate_normal(mean, cov)
    array([[-1.6115597 , -8.726251  ],
           [ 2.2425299 ,  2.8104177 ],
           [ 0.36229908, -8.386591  ]])
    """
    # NOTE: `check_valid` and `tol` exist only for NumPy-signature
    # compatibility; the backend does not support them, so the literal
    # defaults are forwarded unchanged (hence the pylint disable above).
    return _mx_nd_np.random.multivariate_normal(mean, cov, size=size,
                                                check_valid=None, tol=None)
def choice(a, size=None, replace=True, p=None, ctx=None, out=None):
    r"""Generate a random sample from a given 1-D array.

    Parameters
    -----------
    a : 1-D array-like or int
        If an ndarray, the random sample is drawn from its elements.
        If an int, the random sample is drawn as if `a` were
        ``np.arange(a)``.
    size : int or tuple of ints, optional
        Output shape.  If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn.  Default is None, in which case
        a single value is returned.
    replace : boolean, optional
        Whether the sample is drawn with or without replacement.
    p : 1-D array-like, optional
        The probabilities associated with each entry in `a`.  If not
        given, the sample assumes a uniform distribution over all
        entries in `a`.
    ctx : Context, optional
        Device context of output.  Default is current context.

    Returns
    --------
    samples : ndarray
        The generated random samples.

    Examples
    ---------
    A uniform random sample from ``np.arange(5)`` of size 3 (equivalent
    to ``np.random.randint(0, 5, 3)``):

    >>> np.random.choice(5, 3)
    array([0, 3, 4])

    A non-uniform random sample of size 3:

    >>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0])
    array([3, 3, 0])

    Without replacement (equivalent to
    ``np.random.permutation(np.arange(5))[:3]``):

    >>> np.random.choice(5, 3, replace=False)
    array([3,1,0])

    Non-uniform and without replacement:

    >>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
    array([2, 3, 0])
    """
    # Forward every argument explicitly by keyword to the ndarray backend.
    return _mx_nd_np.random.choice(a, size=size, replace=replace, p=p,
                                   ctx=ctx, out=out)
def rayleigh(scale=1.0, size=None, ctx=None, out=None):
    r"""Draw samples from a Rayleigh distribution.

    The :math:`\chi` and Weibull distributions are generalizations of
    the Rayleigh.

    Parameters
    ----------
    scale : float, optional
        Scale, which also equals the mode.  Must be non-negative.
        Default is 1.
    size : int or tuple of ints, optional
        Output shape.  If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn.  If size is ``None`` (default),
        a single value is returned when ``scale`` is a scalar; otherwise
        ``np.array(scale).size`` samples are drawn.
    ctx : Context, optional
        Device context of output, default is current context.
    out : ``ndarray``, optional
        Store output to an existing ``ndarray``.

    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the parameterized Rayleigh distribution.
    """
    # Thin delegation to the imperative ndarray implementation.
    return _mx_nd_np.random.rayleigh(scale, size=size, ctx=ctx, out=out)
def rand(*size, **kwargs):
    r"""Random values in a given shape.

    Create an array of the given shape and populate it with random
    samples from a uniform distribution over [0, 1).

    Parameters
    ----------
    d0, d1, ..., dn : int, optional
        The dimensions of the returned array, should be all positive.
        If no argument is given a single Python float is returned.

    Returns
    -------
    out : ndarray
        Random values.

    Examples
    --------
    >>> np.random.rand(3,2)
    array([[ 0.14022471,  0.96360618],  #random
           [ 0.37601032,  0.25528411],  #random
           [ 0.49313049,  0.94909878]]) #random
    """
    # Collapse the positional dimensions into a shape tuple directly
    # (tuple(size)) instead of accumulating them one at a time in a loop.
    return _mx_nd_np.random.uniform(0, 1, size=tuple(size), **kwargs)
def exponential(scale=1.0, size=None, ctx=None, out=None):
    r"""Draw samples from an exponential distribution.

    Parameters
    ----------
    scale : float or array_like of floats
        The scale parameter, :math:`\beta = 1/\lambda`.  Must be
        non-negative.
    size : int or tuple of ints, optional
        Output shape.  If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn.  If size is ``None`` (default),
        a single value is returned when ``scale`` is a scalar; otherwise
        ``np.array(scale).size`` samples are drawn.
    ctx : Context, optional
        Device context of output, default is current context.
    out : ``ndarray``, optional
        Store output to an existing ``ndarray``.

    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the parameterized exponential distribution.
    """
    # Thin delegation to the imperative ndarray implementation.
    return _mx_nd_np.random.exponential(scale=scale, size=size,
                                        ctx=ctx, out=out)
def weibull(a, size=None, ctx=None, out=None):
    r"""Draw samples from a 1-parameter Weibull distribution with given parameter a
    via inversion.

    Parameters
    ----------
    a : float or array_like of floats
        Shape of the distribution. Must be non-negative.
    size : int or tuple of ints, optional
        Output shape. If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn. If size is ``None`` (default),
        a single value is returned if ``a`` is a scalar. Otherwise,
        ``np.array(a).size`` samples are drawn.
    ctx : Context, optional
        Device context of output. Default is current context.
    out : ``ndarray``, optional
        Store output to an existing ``ndarray``.

    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the 1-parameter Weibull distribution.

    Examples
    --------
    >>> np.random.weibull(a=5)
    array(0.9553641)
    >>> np.random.weibull(a=5, size=[2,3])
    array([[1.0466299 , 1.1320982 , 0.98415005],
           [1.1430776 , 0.9532727 , 1.1344457 ]])
    >>> np.random.weibull(a=np.array([2,3]))
    array([0.98843634, 1.0125613 ])

    The Weibull distribution is one of a class of Generalized Extreme
    Value (GEV) distributions. This class includes the Gumbel and Frechet
    distributions.

    The probability density for the Weibull distribution is

    f(x) = \frac{a}{\lambda}(\frac{x}{\lambda})^{a-1}e^{-(x/\lambda)^a},

    where a is the shape and \lambda the scale. The generated 1-parameter Weibull
    sample has the scale parameter \lambda = 1.

    The Weibull distribution is commonly used in reliability engineering to
    model time to failure, in modeling particle sizes, in information retrieval
    to model dwell time on pages, in quantitative finance to model risk etc.
    """
    # Fixed the doctest example above: it was missing its closing paren.
    return _mx_nd_np.random.weibull(a, size=size, ctx=ctx, out=out)
def pareto(a, size=None, ctx=None, out=None):
    r"""Draw samples from a Pareto II or Lomax distribution with specified shape a.

    Parameters
    ----------
    a : float or array_like of floats
        Shape of the distribution. Must be > 0.
    size : int or tuple of ints, optional
        Output shape. If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn. If size is ``None`` (default),
        a single value is returned if ``a`` is a scalar. Otherwise,
        ``np.array(a).size`` samples are drawn.
    ctx : Context, optional
        Device context of output. Default is current context.
    out : ``ndarray``, optional
        Store output to an existing ``ndarray``.

    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the Pareto distribution.

    Examples
    --------
    >>> np.random.pareto(a=5)
    array(0.12749612)
    >>> np.random.pareto(a=5, size=[2,3])
    array([[0.06933999, 0.0344373 , 0.10654891],
           [0.0311172 , 0.12911797, 0.03370714]])
    >>> np.random.pareto(a=np.array([2,3]))
    array([0.26636696, 0.15685666])

    The probability density for the Pareto distribution is f(x) = \frac{am^a}{x^{a+1}}
    where a is the shape and m the scale. Here m is assumed 1. The Pareto distribution
    is a power law distribution. Pareto created it to describe the wealth in the economy.
    """
    # Fixed the doctest examples above: one was missing its closing paren
    # and one used the inconsistent ``mx.numpy.random`` prefix.
    return _mx_nd_np.random.pareto(a, size=size, ctx=ctx, out=out)
def power(a, size=None, ctx=None, out=None):
    r"""Draw samples in [0, 1] from a power distribution with given parameter a.

    Parameters
    ----------
    a : float or array_like of floats
        Shape of the distribution. Must be > 0.
    size : int or tuple of ints, optional
        Output shape. If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn. If size is ``None`` (default),
        a single value is returned if ``a`` is a scalar. Otherwise,
        ``np.array(a).size`` samples are drawn.
    ctx : Context, optional
        Device context of output. Default is current context.
    out : ``ndarray``, optional
        Store output to an existing ``ndarray``.

    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the power distribution.

    Examples
    --------
    >>> np.random.power(a=5)
    array(0.8602478)
    >>> np.random.power(a=5, size=[2,3])
    array([[0.988391  , 0.5153122 , 0.9383134 ],
           [0.9078098 , 0.87819266, 0.730635]])
    >>> np.random.power(a=np.array([2,3]))
    array([0.7499419 , 0.88894516])

    The probability density function is f(x; a) = ax^{a-1}, 0 \le x \le 1, a>0.
    The power distribution is just the inverse of the Pareto distribution and
    a special case of the Beta distribution.
    """
    # Fixed the doctest example above: it was missing its closing paren.
    return _mx_nd_np.random.power(a, size=size, ctx=ctx, out=out)
def shuffle(x):
    """
    Modify a sequence in-place by shuffling its contents.

    Only the first axis of a multi-dimensional array is shuffled: the
    order of sub-arrays is changed, but their contents remain the same.

    Parameters
    ----------
    x: ndarray
        The array or list to be shuffled.

    Returns
    -------
    None

    Examples
    --------
    >>> arr = np.arange(10)
    >>> np.random.shuffle(arr)
    >>> arr
    array([5., 1., 0., 6., 7., 3., 9., 8., 4., 2.])  # random

    Multi-dimensional arrays are only shuffled along the first axis:

    >>> arr = np.arange(9).reshape((3, 3))
    >>> np.random.shuffle(arr)
    >>> arr
    array([[6., 7., 8.],  # random
           [3., 4., 5.],
           [0., 1., 2.]])
    """
    # Shuffles `x` in place; deliberately returns nothing.
    _mx_nd_np.random.shuffle(x)
def gamma(shape, scale=1.0, size=None, dtype=None, ctx=None, out=None):
    """Draw samples from a Gamma distribution.

    Samples are drawn from a Gamma distribution with the specified
    `shape` (sometimes designated "k") and `scale` (sometimes designated
    "theta") parameters, both of which must be > 0.  The Gamma
    distribution is often used to model the times to failure of
    electronic components, and arises naturally in processes for which
    the waiting times between Poisson distributed events are relevant.

    Parameters
    ----------
    shape : float or array_like of floats
        The shape of the gamma distribution.  Should be greater than zero.
    scale : float or array_like of floats, optional
        The scale of the gamma distribution.  Should be greater than
        zero.  Default is equal to 1.
    size : int or tuple of ints, optional
        Output shape.  If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn.  If size is ``None`` (default),
        a single value is returned when ``shape`` and ``scale`` are both
        scalars; otherwise ``np.broadcast(shape, scale).size`` samples
        are drawn.
    dtype : {'float16', 'float32', 'float64'}, optional
        Data type of output samples.  Default is 'float32'.
    ctx : Context, optional
        Device context of output.  Default is current context.
    out : ``ndarray``, optional
        Store output to an existing ``ndarray``.

    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the parameterized gamma distribution.
    """
    # Forward everything by keyword for readability.
    return _mx_nd_np.random.gamma(shape, scale=scale, size=size,
                                  dtype=dtype, ctx=ctx, out=out)
def beta(a, b, size=None, dtype=None, ctx=None):
    r"""Draw samples from a Beta distribution.

    The Beta distribution is a special case of the Dirichlet
    distribution, and is related to the Gamma distribution.  It has the
    probability distribution function

    .. math:: f(x; a,b) = \frac{1}{B(\alpha, \beta)} x^{\alpha - 1}
                                                     (1 - x)^{\beta - 1},

    where the normalisation, B, is the beta function,

    .. math:: B(\alpha, \beta) = \int_0^1 t^{\alpha - 1}
                                 (1 - t)^{\beta - 1} dt.

    It is often seen in Bayesian inference and order statistics.

    Parameters
    ----------
    a : float or array_like of floats
        Alpha, positive (>0).
    b : float or array_like of floats
        Beta, positive (>0).
    size : int or tuple of ints, optional
        Output shape.  If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn.  If size is ``None`` (default),
        a single value is returned when ``a`` and ``b`` are both
        scalars; otherwise ``np.broadcast(a, b).size`` samples are drawn.
    dtype : {'float16', 'float32', 'float64'}, optional
        Data type of output samples.  Default is 'float32'.  'float32'
        or 'float64' is strongly recommended, since lower precision
        might lead to out-of-range issues.
    ctx : Context, optional
        Device context of output.  Default is current context.

    Notes
    -------
    To use this operator with scalars as input, please run
    ``npx.set_np()`` first.

    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the parameterized beta distribution.
    """
    # Thin delegation to the imperative ndarray implementation.
    return _mx_nd_np.random.beta(a, b, size=size, dtype=dtype, ctx=ctx)
def f(dfnum, dfden, size=None, ctx=None):
    r"""Draw samples from an F distribution.

    Samples are drawn from an F distribution with the specified
    `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of
    freedom in denominator); both parameters must be greater than zero.

    The random variate of the F distribution (also known as the Fisher
    distribution) is a continuous probability distribution that arises
    in ANOVA tests, and is the ratio of two chi-square variates.

    Parameters
    ----------
    dfnum : float or ndarray of floats
        Degrees of freedom in numerator, must be > 0.
    dfden : float or ndarray of float
        Degrees of freedom in denominator, must be > 0.
    size : int or tuple of ints, optional
        Output shape.  If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn.  If size is ``None`` (default),
        a single value is returned when ``dfnum`` and ``dfden`` are both
        scalars; otherwise ``np.broadcast(dfnum, dfden).size`` samples
        are drawn.
    ctx : Context, optional
        Device context of output.  Default is current context.

    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the parameterized Fisher distribution.

    Examples
    --------
    An example from Glantz[1], pp 47-40: two groups of 25 people each
    (children of diabetics and controls) had mean fasting blood glucose
    of 86.1 and 82.2, with standard deviations 2.09 and 2.49
    respectively.  Are these data consistent with the null hypothesis
    that the parents' diabetic status does not affect their children's
    blood glucose levels?  Calculating the F statistic from the data
    gives a value of 36.01.

    Draw samples from the distribution:

    >>> dfnum = 1.  # between group degrees of freedom
    >>> dfden = 48.  # within groups degrees of freedom
    >>> s = np.random.f(dfnum, dfden, 1000)

    The lower bound for the top 1% of the samples is:

    >>> np.sort(s)[-10]
    7.61988120985  # random

    So there is about a 1% chance that the F statistic will exceed 7.62;
    the measured value is 36, so the null hypothesis is rejected at the
    1% level.
    """
    # Thin delegation to the imperative ndarray implementation.
    return _mx_nd_np.random.f(dfnum, dfden, size=size, ctx=ctx)
def chisquare(df, size=None, dtype=None, ctx=None):
    r"""
    chisquare(df, size=None, dtype=None, ctx=None)
    Draw samples from a chi-square distribution.

    When `df` independent random variables, each with standard normal
    distributions (mean 0, variance 1), are squared and summed, the
    resulting distribution is chi-square (see Notes). This distribution
    is often used in hypothesis testing.

    Parameters
    ----------
    df : float or ndarray of floats
        Number of degrees of freedom, must be > 0.
    size : int or tuple of ints, optional
        Output shape. If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn. If size is ``None`` (default),
        a single value is returned if ``df`` is a scalar. Otherwise,
        ``np.array(df).size`` samples are drawn.
    dtype : {'float16', 'float32', 'float64'}, optional
        Data type of output samples. Default is 'float32'.
    ctx : Context, optional
        Device context of output. Default is current context.

    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the parameterized chi-square distribution.

    Raises
    ------
    ValueError
        When `df` <= 0 or when an inappropriate `size`
        is given.

    Notes
    -----
    The variable obtained by summing the squares of `df` independent,
    standard normally distributed random variables:

    .. math:: Q = \sum_{i=0}^{\mathtt{df}} X^2_i

    is chi-square distributed, denoted

    .. math:: Q \sim \chi^2_k.

    The probability density function of the chi-squared distribution is

    .. math:: p(x) = \frac{(1/2)^{k/2}}{\Gamma(k/2)}
                     x^{k/2 - 1} e^{-x/2},

    where :math:`\Gamma` is the gamma function,

    .. math:: \Gamma(x) = \int_0^{\infty} t^{x - 1} e^{-t} dt.

    References
    ----------
    .. [1] NIST "Engineering Statistics Handbook"
           https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm

    Examples
    --------
    >>> np.random.chisquare(2,4)
    array([ 1.89920014,  9.00867716,  3.13710533,  5.62318272])  # random
    """
    # Fixed the gamma-function integral in the Notes: the upper bound is
    # +infinity, not -infinity.
    return _mx_nd_np.random.chisquare(df, size=size, dtype=dtype, ctx=ctx)
def randn(*size, **kwargs):
    r"""Return a sample (or samples) from the "standard normal" distribution.

    If positive, int_like or int-convertible arguments are provided,
    `randn` generates an array of shape ``(d0, d1, ..., dn)``, filled
    with random floats sampled from a univariate "normal" (Gaussian)
    distribution of mean 0 and variance 1 (if any of the :math:`d_i` are
    floats, they are first converted to integers by truncation). A single
    float randomly sampled from the distribution is returned if no
    argument is provided.

    This is a convenience function. If you want an interface that takes a
    tuple as the first argument, use `numpy.random.standard_normal` instead.

    Parameters
    ----------
    d0, d1, ..., dn : int, optional
        The dimensions of the returned array, should be all positive.
        If no argument is given a single Python float is returned.

    Returns
    -------
    Z : ndarray
        A ``(d0, d1, ..., dn)``-shaped array of floating-point samples from
        the standard normal distribution, or a single such float if
        no parameters were supplied.

    Notes
    -----
    For random samples from :math:`N(\mu, \sigma^2)`, use:
    ``sigma * np.random.randn(...) + mu``

    Examples
    --------
    >>> np.random.randn()
    2.1923875335537315  # random

    Two-by-four array of samples from N(3, 6.25):

    >>> 2.5 * np.random.randn(2, 4) + 3
    array([[-4.49401501,  4.00950034, -1.81814867,  7.29718677],  # random
           [ 0.39924804,  4.68456316,  4.99394529,  4.84057254]])  # random
    """
    # Collapse the positional dimensions into a shape tuple directly
    # (tuple(size)) instead of accumulating them one at a time in a loop.
    return _mx_nd_np.random.normal(0, 1, size=tuple(size), **kwargs)
def laplace(loc=0.0, scale=1.0, size=None, dtype=None, ctx=None, out=None):
    r"""Draw random samples from a Laplace distribution.

    Samples are distributed according to a Laplace distribution
    parametrized by *loc* (mean) and *scale* (the exponential decay).

    Parameters
    ----------
    loc : float
        The position of the distribution peak.
    scale : float
        The exponential decay.
    size : int or tuple of ints, optional
        Output shape.  If the given shape is, e.g., (m, n, k), then
        m * n * k samples are drawn.  Default is None, in which case a
        single value is returned.
    dtype : {'float16', 'float32', 'float64'}, optional
        Data type of output samples.  Default is 'float32'.
    ctx : Context, optional
        Device context of output.  Default is current context.
    out : ``ndarray``, optional
        Store output to an existing ``ndarray``.

    Returns
    -------
    out : ndarray
        Drawn samples from the parameterized Laplace distribution.
    """
    # Forward everything by keyword for readability.
    return _mx_nd_np.random.laplace(loc=loc, scale=scale, size=size,
                                    dtype=dtype, ctx=ctx, out=out)
| zhreshold/mxnet | python/mxnet/numpy/random.py | Python | apache-2.0 | 40,820 | [
"Gaussian"
] | 7243a74c0b7914123db09c1cf1b1a660becb0bf4ac5dd713e4d210f54461ec4e |
# ==============================================================================
# plot depth slices
# ==============================================================================
import os
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MultipleLocator
from mtpy.modeling.modem import Data, Model
try:
from pyevtk.hl import gridToVTK, pointsToVTK
except ImportError:
print ('If you want to write a vtk file for 3d viewing,you need to pip install PyEVTK:'
' https://bitbucket.org/pauloh/pyevtk')
print ('Note: if you are using Windows you should build evtk first with'
'either MinGW or cygwin using the command: \n'
' python setup.py build -compiler=mingw32 or \n'
' python setup.py build -compiler=cygwin')
class PlotDepthSlice(object):
    """
    Plots depth slices of resistivity model (file.rho)

    :Example: ::

        >>> import mtpy.modeling.ws3dinv as ws
        >>> mfn = r"/home/MT/ws3dinv/Inv1/Test_model.00"
        >>> sfn = r"/home/MT/ws3dinv/Inv1/WSStationLocations.txt"
        >>> # plot just first layer to check the formatting
        >>> pds = ws.PlotDepthSlice(model_fn=mfn, station_fn=sfn,
        >>> ...                     depth_index=0, save_plots='n')
        >>> #move color bar up
        >>> pds.cb_location
        >>> (0.64500000000000002, 0.14999999999999997, 0.3, 0.025)
        >>> pds.cb_location = (.645, .175, .3, .025)
        >>> pds.redraw_plot()
        >>> #looks good now plot all depth slices and save them to a folder
        >>> pds.save_path = r"/home/MT/ws3dinv/Inv1/DepthSlices"
        >>> pds.depth_index = None
        >>> pds.save_plots = 'y'
        >>> pds.redraw_plot()

    ======================= ===================================================
    Attributes              Description
    ======================= ===================================================
    cb_location             location of color bar (x, y, width, height)
                            *default* is None, automatically locates
    cb_orientation          [ 'vertical' | 'horizontal' ]
                            *default* is horizontal
    cb_pad                  padding between axes and colorbar
                            *default* is None
    cb_shrink               percentage to shrink colorbar by
                            *default* is None
    climits                 (min, max) of resistivity color on log scale
                            *default* is (0, 4)
    cmap                    name of color map *default* is 'jet_r'
    data_fn                 full path to data file
    depth_index             integer value of depth slice index, shallowest
                            layer is 0
    dscale                  scaling parameter depending on map_scale
    ew_limits               (min, max) plot limits in e-w direction in
                            map_scale units. *default* is None, sets viewing
                            area to the station area
    fig_aspect              aspect ratio of plot. *default* is 1
    fig_dpi                 resolution of figure in dots-per-inch. *default* is
                            300
    fig_list                list of matplotlib.figure instances for each
                            depth slice
    fig_size                [width, height] in inches of figure size
                            *default* is [6, 6]
    font_size               size of ticklabel font in points, labels are
                            font_size+2. *default* is 7
    grid_east               relative location of grid nodes in e-w direction
                            in map_scale units
    grid_north              relative location of grid nodes in n-s direction
                            in map_scale units
    grid_z                  relative location of grid nodes in z direction
                            in map_scale units
    initial_fn              full path to initial file
    map_scale               [ 'km' | 'm' ] distance units of map. *default* is
                            km
    mesh_east               np.meshgrid(grid_east, grid_north, indexing='ij')
    mesh_north              np.meshgrid(grid_east, grid_north, indexing='ij')
    model_fn                full path to model file
    nodes_east              relative distance betwen nodes in e-w direction
                            in map_scale units
    nodes_north             relative distance betwen nodes in n-s direction
                            in map_scale units
    nodes_z                 relative distance betwen nodes in z direction
                            in map_scale units
    ns_limits               (min, max) plot limits in n-s direction in
                            map_scale units. *default* is None, sets viewing
                            area to the station area
    plot_grid               [ 'y' | 'n' ] 'y' to plot mesh grid lines.
                            *default* is 'n'
    plot_yn                 [ 'y' | 'n' ] 'y' to plot on instantiation
    res_model               np.ndarray(n_north, n_east, n_vertical) of
                            model resistivity values in linear scale
    save_path               path to save figures to
    save_plots              [ 'y' | 'n' ] 'y' to save depth slices to save_path
    station_east            location of stations in east direction in
                            map_scale units
    station_fn              full path to station locations file
    station_names           station names
    station_north           location of station in north direction in
                            map_scale units
    subplot_bottom          distance between axes and bottom of figure window
    subplot_left            distance between axes and left of figure window
    subplot_right           distance between axes and right of figure window
    subplot_top             distance between axes and top of figure window
    title                   titiel of plot *default* is depth of slice
    xminorticks             location of xminorticks
    yminorticks             location of yminorticks
    ======================= ===================================================
    """

    def __init__(self, model_fn=None, data_fn=None, **kwargs):
        """Read the model (and optional data) files and set up plotting
        defaults.

        Parameters
        ----------
        model_fn : str
            Full path to the ModEM model (.rho) file.  Required.
        data_fn : str, optional
            Full path to the ModEM data file; only used to overlay
            station locations on the slices.
        **kwargs
            Plotting options, see class docstring attribute table.
        """
        self.model_fn = model_fn
        self.data_fn = data_fn  # optional

        self.save_path = kwargs.pop('save_path', None)
        if self.save_path is None and self.model_fn is not None:
            modelfile_path = os.path.dirname(self.model_fn)
            self.save_path = os.path.join(modelfile_path, 'images_mtpy2')
        # guard: save_path may still be None when no model_fn was given;
        # os.path.exists(None) would raise TypeError
        if self.save_path is not None and not os.path.exists(self.save_path):
            os.mkdir(self.save_path)

        self.save_plots = kwargs.pop('save_plots', 'y')
        self.map_scale = kwargs.pop('map_scale', 'km')
        # make map scale; fail fast on an unknown unit instead of leaving
        # self.dscale undefined (which caused AttributeError later)
        if self.map_scale == 'km':
            self.dscale = 1000.
        elif self.map_scale == 'm':
            self.dscale = 1.
        else:
            raise ValueError(
                "map_scale must be 'km' or 'm', got %r" % (self.map_scale,))
        self.ew_limits = kwargs.pop('ew_limits', None)
        self.ns_limits = kwargs.pop('ns_limits', None)
        self.plot_grid = kwargs.pop('plot_grid', 'n')
        self.fig_size = kwargs.pop('fig_size', [5, 5])
        self.fig_dpi = kwargs.pop('dpi', 200)
        self.fig_aspect = kwargs.pop('fig_aspect', 1)
        self.title = kwargs.pop('title', 'on')
        self.fig_list = []
        self.xminorticks = kwargs.pop('xminorticks', 10000)
        self.yminorticks = kwargs.pop('yminorticks', 10000)
        self.climits = kwargs.pop('climits', (0, 4))
        self.cmap = kwargs.pop('cmap', 'jet_r')
        self.font_size = kwargs.pop('font_size', 8)
        self.cb_shrink = kwargs.pop('cb_shrink', .8)
        self.cb_pad = kwargs.pop('cb_pad', .01)
        self.cb_orientation = kwargs.pop(
            'cb_orientation', 'horizontal')  # 'vertical')
        self.cb_location = kwargs.pop('cb_location', None)
        self.subplot_right = .99
        self.subplot_left = .085
        self.subplot_top = .92
        self.subplot_bottom = .1
        self.res_model = None
        self.grid_east = None
        self.grid_north = None
        self.grid_z = None
        self.nodes_east = None
        self.nodes_north = None
        self.nodes_z = None
        self.mesh_east = None
        self.mesh_north = None
        self.station_east = None
        self.station_north = None
        self.station_names = None
        self.plot_yn = kwargs.pop('plot_yn', 'n')

        # read the model data BEFORE any plotting: plot() relies on
        # grid_z/res_model, which were previously still None when
        # plot_yn == 'y' triggered an immediate plot
        self.total_horizontal_slices = self._read_model_data()
        if self.plot_yn == 'y':
            self.plot()
        return

    def _read_model_data(self):
        """
        Read the model (and optional data) files.

        Populates res_model, the grid/node arrays (converted to
        map_scale units), and, if a data file was given, the station
        locations.

        Returns
        -------
        int
            Total number of horizontal (depth) slices in the model.

        Raises
        ------
        Exception
            If the model file is missing or unreadable.
        """
        # --> read in model file (required)
        if self.model_fn is not None and os.path.isfile(self.model_fn):
            md_model = Model()
            md_model.read_model_file(self.model_fn)
            self.res_model = md_model.res_model
            self.grid_east = md_model.grid_east / self.dscale
            self.grid_north = md_model.grid_north / self.dscale
            self.grid_z = md_model.grid_z / self.dscale
            self.nodes_east = md_model.nodes_east / self.dscale
            self.nodes_north = md_model.nodes_north / self.dscale
            self.nodes_z = md_model.nodes_z / self.dscale
        else:
            raise Exception('Error with the Model file: %s. Please check.'
                            % (self.model_fn))

        # --> Optionally: read in data file to get station locations
        if self.data_fn is not None and os.path.isfile(self.data_fn):
            md_data = Data()
            md_data.read_data_file(self.data_fn)
            # convert meters to map_scale units
            self.station_east = md_data.station_locations[
                'rel_east'] / self.dscale
            self.station_north = md_data.station_locations[
                'rel_north'] / self.dscale
            self.station_names = md_data.station_locations['station']
        else:
            print(('Problem with the optional Data file: %s. Please check.'
                   % self.data_fn))

        total_horizontal_slices = self.grid_z.shape[0]
        print(("Total Number of H-slices=", total_horizontal_slices))
        return total_horizontal_slices

    def plot(self, ind=1):
        """
        Plot the ind-th depth slice (or several).

        Parameters
        ----------
        ind : int or list/ndarray of int or None
            Depth index/indices to plot; None plots every slice.
        """
        self.depth_index = ind
        fdict = {'size': self.font_size + 2, 'weight': 'bold'}

        # create a list of depth slices to plot
        if self.depth_index is None:
            zrange = list(range(self.grid_z.shape[0]))
        elif isinstance(self.depth_index, int):
            zrange = [self.depth_index]
        elif isinstance(self.depth_index, (list, np.ndarray)):
            zrange = self.depth_index
        else:
            raise ValueError(
                'depth_index must be None, int, list or ndarray, got %r'
                % (self.depth_index,))
        print(("The depth index list:", zrange))

        # set the limits of the plot; default to the station footprint,
        # then to the interior of the grid (excluding padding cells)
        if self.ew_limits is None:
            if self.station_east is not None:
                xlimits = (np.floor(self.station_east.min()),
                           np.ceil(self.station_east.max()))
            else:
                xlimits = (self.grid_east[5], self.grid_east[-6])
        else:
            xlimits = self.ew_limits

        if self.ns_limits is None:
            if self.station_north is not None:
                ylimits = (np.floor(self.station_north.min()),
                           np.ceil(self.station_north.max()))
            else:
                ylimits = (self.grid_north[5], self.grid_north[-6])
        else:
            ylimits = self.ns_limits

        # make a mesh grid of north and east; fall back to transposing
        # the default 'xy' indexing for old numpy without indexing kwarg
        try:
            self.mesh_east, self.mesh_north = np.meshgrid(self.grid_east,
                                                          self.grid_north,
                                                          indexing='ij')
        except:
            self.mesh_east, self.mesh_north = [
                arr.T for arr in np.meshgrid(self.grid_east, self.grid_north)]

        plt.rcParams['font.size'] = self.font_size

        # --> plot each depth ii into individual figure
        for ii in zrange:
            depth = '{0:.3f} ({1})'.format(self.grid_z[ii],
                                           self.map_scale)
            fig = plt.figure(depth, figsize=self.fig_size, dpi=self.fig_dpi)
            plt.clf()
            ax1 = fig.add_subplot(1, 1, 1, aspect=self.fig_aspect)
            # resistivity is plotted on a log10 color scale
            plot_res = np.log10(self.res_model[:, :, ii].T)
            mesh_plot = ax1.pcolormesh(self.mesh_east,
                                       self.mesh_north,
                                       plot_res,
                                       cmap=self.cmap,
                                       vmin=self.climits[0],
                                       vmax=self.climits[1])

            # plot the stations as '*' markers
            if self.station_east is not None:
                for ee, nn in zip(self.station_east, self.station_north):
                    ax1.text(ee, nn, '*',
                             verticalalignment='center',
                             horizontalalignment='center',
                             fontdict={'size': 5, 'weight': 'bold'})

            # set axis properties
            ax1.set_xlim(xlimits)
            ax1.set_ylim(ylimits)
            ax1.xaxis.set_minor_locator(
                MultipleLocator(
                    self.xminorticks /
                    self.dscale))
            ax1.yaxis.set_minor_locator(
                MultipleLocator(
                    self.yminorticks /
                    self.dscale))
            ax1.set_ylabel('Northing (' + self.map_scale + ')', fontdict=fdict)
            ax1.set_xlabel('Easting (' + self.map_scale + ')', fontdict=fdict)
            ax1.set_title('Depth = {0}'.format(depth), fontdict=fdict)

            # plot the grid if desired; None entries break the line
            # between individual grid segments
            if self.plot_grid == 'y':
                east_line_xlist = []
                east_line_ylist = []
                for xx in self.grid_east:
                    east_line_xlist.extend([xx, xx])
                    east_line_xlist.append(None)
                    east_line_ylist.extend([self.grid_north.min(),
                                            self.grid_north.max()])
                    east_line_ylist.append(None)
                ax1.plot(east_line_xlist,
                         east_line_ylist,
                         lw=.25,
                         color='k')
                north_line_xlist = []
                north_line_ylist = []
                for yy in self.grid_north:
                    north_line_xlist.extend([self.grid_east.min(),
                                             self.grid_east.max()])
                    north_line_xlist.append(None)
                    north_line_ylist.extend([yy, yy])
                    north_line_ylist.append(None)
                ax1.plot(north_line_xlist,
                         north_line_ylist,
                         lw=.25,
                         color='k')

            # FZ: fix miss-placed colorbar: create an axes on the right
            # side of ax; cax width is 5% of ax with a fixed 0.05 inch pad
            from mpl_toolkits.axes_grid1 import make_axes_locatable
            ax = plt.gca()
            divider = make_axes_locatable(ax)
            cax = divider.append_axes("right", size="5%", pad=0.05)
            plt.colorbar(
                mesh_plot,
                cax=cax,
                label='Resistivity ($\Omega \cdot$m)',
                use_gridspec=True
            )

            self.fig_list.append(fig)
            # Figure Objects
            print((self.fig_list))

            # --> save plots to a common folder
            if self.save_plots == 'y':
                out_file_name = "Resistivity_Slice_at_Depth_{}_{:.4f}.png".format(
                    ii, self.grid_z[ii])
                path2outfile = os.path.join(self.save_path, out_file_name)
                fig.savefig(
                    path2outfile,
                    dpi=self.fig_dpi,
                    bbox_inches='tight')
            else:
                pass

            # when run interactively, plt shows a figure
            plt.show()
            plt.close()

        return

    def redraw_plot(self):
        """
        Redraw the plot if parameters were changed.

        Use this function if you updated some attributes and want to
        re-plot.
        """
        for fig in self.fig_list:
            plt.close(fig)
        # drop references to the closed figures before re-plotting
        self.fig_list = []
        self.plot()

    def __str__(self):
        """
        Rewrite the string builtin to give a useful message.
        """
        return ("Plots depth slices of model from INVERSION")
# -------------------------------------------------------------------------
if __name__ == '__main__':
    # NOTE: the string below is a no-op expression statement, not a real
    # docstring -- it only documents intent for readers of the source.
    """
    plot depth slices
    """
    import sys
    # Usage: script.py file.rho [depth_index]; the model file is mandatory.
    if len(sys.argv) < 2:
        print(("Usage: %s file.rho depth_index" % sys.argv[0]))
        sys.exit(1)
    # -1 is the sentinel meaning "no specific depth requested".
    depth_ind = -1
    if len(sys.argv) >= 2:
        modrho = sys.argv[1]
    if len(sys.argv) >= 3:
        depth_ind = int(sys.argv[2])
    # pltObj= PlotDepthSlice(model_fn=modrho, xminorticks=100000, yminorticks=100000, depth_index=di, save_plots='y')
    pltObj = PlotDepthSlice(model_fn=modrho, save_plots='y') # , depth_index=1)
    print (depth_ind)
    if depth_ind >= 0:
        # A non-negative index plots just that single depth slice.
        pltObj.plot(depth_ind)
    else:
        # No index supplied: plot every available horizontal slice in turn.
        print("loop to plot all slices: ************** ")
        max_slices = pltObj.total_horizontal_slices - 2  # 10
        for index in range(1, max_slices):
            pltObj.plot(ind=index)
| MTgeophysics/mtpy | mtpy/imaging/plot_depth_slice.py | Python | gpl-3.0 | 18,092 | [
"VTK"
] | 2045f1d86b7a400f99eed76afc73cce90a99ddf03da438ef7d0fabb6863b9645 |
# -*- coding: utf-8 -*-
# Copyright (c) 2006-2015 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Michael Kefeder <oss@multiwave.ch>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Stephane Wirtel <stephane@wirtel.be>
# Copyright (c) 2015 Nick Bastin <nick.bastin@gmail.com>
# Copyright (c) 2016 Alex Jurkiewicz <alex@jurkiewi.cz>
# Copyright (c) 2016 Yannack <yannack@users.noreply.github.com>
# Copyright (c) 2016 Laura Médioni <lmedioni@logilab.fr>
# Copyright (c) 2016 Ashley Whetter <ashley@awhetter.co.uk>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""basic checker for Python code"""
import collections
import itertools
import sys
import re
import six
from six.moves import zip # pylint: disable=redefined-builtin
import astroid
import astroid.bases
import astroid.scoped_nodes
from pylint import checkers
from pylint import exceptions
from pylint import interfaces
from pylint.checkers import utils
from pylint import reporters
from pylint.reporters.ureports import nodes as reporter_nodes
# regex for class/function/variable/constant name
CLASS_NAME_RGX = re.compile('[A-Z_][a-zA-Z0-9]+$')
MOD_NAME_RGX = re.compile('(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$')
CONST_NAME_RGX = re.compile('(([A-Z_][A-Z0-9_]*)|(__.*__))$')
COMP_VAR_RGX = re.compile('[A-Za-z_][A-Za-z0-9_]*$')
DEFAULT_NAME_RGX = re.compile('(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$')
CLASS_ATTRIBUTE_RGX = re.compile(r'([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$')
# do not require a doc string on private/system methods
NO_REQUIRED_DOC_RGX = re.compile('^_')
REVERSED_PROTOCOL_METHOD = '__reversed__'
SEQUENCE_PROTOCOL_METHODS = ('__getitem__', '__len__')
# Either method group below makes an object acceptable to reversed().
REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS,
                    (REVERSED_PROTOCOL_METHOD, ))
TYPECHECK_COMPARISON_OPERATORS = frozenset(('is', 'is not', '==',
                                            '!=', 'in', 'not in'))
LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set)
UNITTEST_CASE = 'unittest.case'
BUILTINS = six.moves.builtins.__name__
TYPE_QNAME = "%s.type" % BUILTINS
# Interpreter feature flags used throughout the checkers below.
PY33 = sys.version_info >= (3, 3)
PY3K = sys.version_info >= (3, 0)
PY35 = sys.version_info >= (3, 5)
# Name categories that are always consistent with all naming conventions.
EXEMPT_NAME_CATEGORIES = {'exempt', 'ignore'}
# A mapping from builtin-qname -> symbol, to be used when generating messages
# about dangerous default values as arguments
DEFAULT_ARGUMENT_SYMBOLS = dict(
    zip(['.'.join([BUILTINS, x]) for x in ('set', 'dict', 'list')],
        ['set()', '{}', '[]'])
)
REVERSED_COMPS = {'<': '>', '<=': '>=', '>': '<', '>=': '<='}
def _redefines_import(node):
    """Detect that the given node (AssignName) is inside an
    exception handler and redefines an import from the tryexcept body.

    Returns True if the node redefines an import, False otherwise.
    """
    # Walk up until we reach the child of an ExceptHandler (or run out).
    current = node
    while current and not isinstance(current.parent, astroid.ExceptHandler):
        current = current.parent
    if not current or not utils.error_of_type(current.parent, ImportError):
        return False
    try_block = current.parent.parent
    for import_node in try_block.nodes_of_class((astroid.ImportFrom, astroid.Import)):
        for name, alias in import_node.names:
            # An aliased import binds the alias, otherwise the plain name.
            if (alias or name) == node.name:
                return True
    return False
def in_loop(node):
    """Return True if the node is inside a kind of for loop (including
    list/set/dict comprehensions and generator expressions)."""
    loop_types = (astroid.For, astroid.ListComp, astroid.SetComp,
                  astroid.DictComp, astroid.GeneratorExp)
    ancestor = node.parent
    while ancestor is not None:
        if isinstance(ancestor, loop_types):
            return True
        ancestor = ancestor.parent
    return False
def in_nested_list(nested_list, obj):
    """Return true if the object is an element of <nested_list> or of a
    nested list/tuple, searched recursively at any depth."""
    return any(
        in_nested_list(element, obj)
        if isinstance(element, (list, tuple))
        else element == obj
        for element in nested_list
    )
def _loop_exits_early(loop):
    """Return True if a loop has a break statement in its body.

    Breaks belonging to nested loops do not count, except those found in
    a nested loop's ``else`` clause, which still break the outer loop.
    """
    loop_nodes = (astroid.For, astroid.While)
    # Inspect only the body; break statements in this loop's own orelse
    # must not match.
    for child in loop.body:
        if isinstance(child, loop_nodes):
            # A break in the else clause of a nested loop targets us.
            for orelse in (child.orelse or ()):
                if any(orelse.nodes_of_class(astroid.Break,
                                             skip_klass=loop_nodes)):
                    return True
        elif any(child.nodes_of_class(astroid.Break, skip_klass=loop_nodes)):
            return True
    return False
def _is_multi_naming_match(match, node_type, confidence):
    """Return True when *match* hit a named, non-exempt group and the
    result is reliable enough to act upon."""
    if match is None or match.lastgroup is None:
        return False
    if match.lastgroup in EXEMPT_NAME_CATEGORIES:
        return False
    # Method names whose type was inferred with failure are too
    # unreliable to flag.
    return node_type != 'method' or confidence != interfaces.INFERENCE_FAILURE
# Fully-qualified name of the builtin ``property`` type: the builtins
# module is named ``__builtin__`` on Python 2 and ``builtins`` on Python 3.
if sys.version_info < (3, 0):
    BUILTIN_PROPERTY = '__builtin__.property'
else:
    BUILTIN_PROPERTY = 'builtins.property'
def _get_properties(config):
    """Returns a tuple of property classes and names.

    Property classes are fully qualified, such as 'abc.abstractproperty' and
    property names are the actual names, such as 'abstract_property'.
    """
    qualified_classes = {BUILTIN_PROPERTY}
    short_names = set()  # Not returning 'property', it has its own check.
    if config is not None:
        configured = list(config.property_classes)
        qualified_classes.update(configured)
        short_names.update(prop.rsplit('.', 1)[-1] for prop in configured)
    return qualified_classes, short_names
def _determine_function_name_type(node, config=None):
    """Determine the name type whose regex the a function's name should match.

    :param node: A function node.
    :type node: astroid.node_classes.NodeNG
    :param config: Configuration from which to pull additional property classes.
    :type config: :class:`optparse.Values`

    :returns: One of ('function', 'method', 'attr')
    :rtype: str
    """
    property_classes, property_names = _get_properties(config)
    if not node.is_method():
        return 'function'
    decorator_nodes = node.decorators.nodes if node.decorators else []
    for decorator in decorator_nodes:
        # A method decorated with @property (or @abc.abstractproperty and
        # friends) should be named like an attribute.
        if (isinstance(decorator, astroid.Name) or
                (isinstance(decorator, astroid.Attribute) and
                 decorator.attrname in property_names)):
            inferred = utils.safe_infer(decorator)
            if inferred and inferred.qname() in property_classes:
                return 'attr'
        # The prop_method.setter / prop_method.deleter form is likewise
        # treated as an attribute.
        elif (isinstance(decorator, astroid.Attribute) and
              decorator.attrname in ('setter', 'deleter')):
            return 'attr'
    return 'method'
def _has_abstract_methods(node):
    """
    Determine if the given `node` has abstract methods.

    The methods should be made abstract by decorating them
    with `abc` decorators.
    """
    # Non-empty container of unimplemented abstract methods -> True.
    return bool(utils.unimplemented_abstract_methods(node))
def report_by_type_stats(sect, stats, old_stats):
    """make a report of
    * percentage of different types documented
    * percentage of different types with a bad name
    """
    # percentage of different types documented and/or with a bad name
    nice_stats = {}
    for node_type in ('module', 'class', 'method', 'function'):
        try:
            total = stats[node_type]
        except KeyError:
            raise exceptions.EmptyReportError()
        nice_stats[node_type] = {}
        if total != 0:
            # 'NC' (not computed) is used whenever a needed counter
            # ('undocumented_*' / 'badname_*') is missing from stats.
            try:
                documented = total - stats['undocumented_'+node_type]
                percent = (documented * 100.) / total
                nice_stats[node_type]['percent_documented'] = '%.2f' % percent
            except KeyError:
                nice_stats[node_type]['percent_documented'] = 'NC'
            try:
                percent = (stats['badname_'+node_type] * 100.) / total
                nice_stats[node_type]['percent_badname'] = '%.2f' % percent
            except KeyError:
                nice_stats[node_type]['percent_badname'] = 'NC'
    # Build a flat tuple: one header row then one row per node type,
    # rendered as a 6-column table below.
    lines = ('type', 'number', 'old number', 'difference',
             '%documented', '%badname')
    for node_type in ('module', 'class', 'method', 'function'):
        new = stats[node_type]
        old = old_stats.get(node_type, None)
        if old is not None:
            diff_str = reporters.diff_string(old, new)
        else:
            old, diff_str = 'NC', 'NC'
        lines += (node_type, str(new), str(old), diff_str,
                  nice_stats[node_type].get('percent_documented', '0'),
                  nice_stats[node_type].get('percent_badname', '0'))
    sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
    """return True if the object is a method redefined via decorator.

    For example:
        @property
        def x(self): return self._x
        @x.setter
        def x(self, value): self._x = value
    """
    if not node.decorators:
        return False
    # A decorator of the form <same_name>.something marks a redefinition.
    return any(
        isinstance(decorator, astroid.Attribute) and
        getattr(decorator.expr, 'name', None) == node.name
        for decorator in node.decorators.nodes
    )
class _BasicChecker(checkers.BaseChecker):
    """Shared base for the checkers in this module; they all register
    under the common 'basic' checker name."""
    __implements__ = interfaces.IAstroidChecker
    name = 'basic'
class BasicErrorChecker(_BasicChecker):
    """Checker for constructs that are outright errors (E01xx / W0120):
    generator ``__init__``, redefinitions, misplaced keywords, starred
    expression misuse, abstract-class instantiation and the like."""
    msgs = {
        'E0100': ('__init__ method is a generator',
                  'init-is-generator',
                  'Used when the special class method __init__ is turned into a '
                  'generator by a yield in its body.'),
        'E0101': ('Explicit return in __init__',
                  'return-in-init',
                  'Used when the special class method __init__ has an explicit '
                  'return value.'),
        'E0102': ('%s already defined line %s',
                  'function-redefined',
                  'Used when a function / class / method is redefined.'),
        'E0103': ('%r not properly in loop',
                  'not-in-loop',
                  'Used when break or continue keywords are used outside a loop.'),
        'E0104': ('Return outside function',
                  'return-outside-function',
                  'Used when a "return" statement is found outside a function or '
                  'method.'),
        'E0105': ('Yield outside function',
                  'yield-outside-function',
                  'Used when a "yield" statement is found outside a function or '
                  'method.'),
        'E0106': ('Return with argument inside generator',
                  'return-arg-in-generator',
                  'Used when a "return" statement with an argument is found '
                  'outside in a generator function or method (e.g. with some '
                  '"yield" statements).',
                  {'maxversion': (3, 3)}),
        'E0107': ("Use of the non-existent %s operator",
                  'nonexistent-operator',
                  "Used when you attempt to use the C-style pre-increment or"
                  "pre-decrement operator -- and ++, which doesn't exist in Python."),
        'E0108': ('Duplicate argument name %s in function definition',
                  'duplicate-argument-name',
                  'Duplicate argument names in function definitions are syntax'
                  ' errors.'),
        'E0110': ('Abstract class %r with abstract methods instantiated',
                  'abstract-class-instantiated',
                  'Used when an abstract class with `abc.ABCMeta` as metaclass '
                  'has abstract methods and is instantiated.'),
        'W0120': ('Else clause on loop without a break statement',
                  'useless-else-on-loop',
                  'Loops should only have an else clause if they can exit early '
                  'with a break statement, otherwise the statements under else '
                  'should be on the same scope as the loop itself.'),
        'E0112': ('More than one starred expression in assignment',
                  'too-many-star-expressions',
                  'Emitted when there are more than one starred '
                  'expressions (`*x`) in an assignment. This is a SyntaxError.',
                  {'minversion': (3, 0)}),
        'E0113': ('Starred assignment target must be in a list or tuple',
                  'invalid-star-assignment-target',
                  'Emitted when a star expression is used as a starred '
                  'assignment target.',
                  {'minversion': (3, 0)}),
        'E0114': ('Can use starred expression only in assignment target',
                  'star-needs-assignment-target',
                  'Emitted when a star expression is not used in an '
                  'assignment target.',
                  {'minversion': (3, 0)}),
        'E0115': ('Name %r is nonlocal and global',
                  'nonlocal-and-global',
                  'Emitted when a name is both nonlocal and global.',
                  {'minversion': (3, 0)}),
        'E0116': ("'continue' not supported inside 'finally' clause",
                  'continue-in-finally',
                  'Emitted when the `continue` keyword is found '
                  'inside a finally clause, which is a SyntaxError.'),
        'E0117': ("nonlocal name %s found without binding",
                  'nonlocal-without-binding',
                  'Emitted when a nonlocal variable does not have an attached '
                  'name somewhere in the parent scopes',
                  {'minversion': (3, 0)}),
        'E0118': ("Name %r is used prior to global declaration",
                  'used-prior-global-declaration',
                  'Emitted when a name is used prior a global declaration, '
                  'which results in an error since Python 3.6.',
                  {'minversion': (3, 6)}),
        }
    @utils.check_messages('function-redefined')
    def visit_classdef(self, node):
        """Check that the class does not redefine an earlier definition."""
        self._check_redefinition('class', node)
    @utils.check_messages('too-many-star-expressions',
                          'invalid-star-assignment-target')
    def visit_assign(self, node):
        """Check starred-expression misuse in assignment targets."""
        starred = list(node.targets[0].nodes_of_class(astroid.Starred))
        if len(starred) > 1:
            self.add_message('too-many-star-expressions', node=node)
        # Check *a = b
        if isinstance(node.targets[0], astroid.Starred):
            self.add_message('invalid-star-assignment-target', node=node)
    @utils.check_messages('star-needs-assignment-target')
    def visit_starred(self, node):
        """Check that a Starred expression is used in an assignment target."""
        if isinstance(node.parent, astroid.Call):
            # f(*args) is converted to Call(args=[Starred]), so ignore
            # them for this check.
            return
        if PY35 and isinstance(node.parent,
                               (astroid.List, astroid.Tuple,
                                astroid.Set, astroid.Dict)):
            # PEP 448 unpacking.
            return
        stmt = node.statement()
        if not isinstance(stmt, astroid.Assign):
            return
        if stmt.value is node or stmt.value.parent_of(node):
            self.add_message('star-needs-assignment-target', node=node)
    @utils.check_messages('init-is-generator', 'return-in-init',
                          'function-redefined', 'return-arg-in-generator',
                          'duplicate-argument-name', 'nonlocal-and-global',
                          'used-prior-global-declaration')
    def visit_functiondef(self, node):
        """Run the per-function error checks: redefinition, __init__
        misuse, duplicate argument names and scoping conflicts."""
        self._check_nonlocal_and_global(node)
        self._check_name_used_prior_global(node)
        if (not redefined_by_decorator(node) and
                not utils.is_registered_in_singledispatch_function(node)):
            self._check_redefinition(node.is_method() and 'method' or 'function', node)
        # checks for max returns, branch, return in __init__
        returns = node.nodes_of_class(astroid.Return,
                                      skip_klass=(astroid.FunctionDef,
                                                  astroid.ClassDef))
        if node.is_method() and node.name == '__init__':
            if node.is_generator():
                self.add_message('init-is-generator', node=node)
            else:
                values = [r.value for r in returns]
                # Are we returning anything but None from constructors
                if any(v for v in values if not utils.is_none(v)):
                    self.add_message('return-in-init', node=node)
        elif node.is_generator():
            # make sure we don't mix non-None returns and yields
            if not PY33:
                for retnode in returns:
                    if isinstance(retnode.value, astroid.Const) and \
                            retnode.value.value is not None:
                        self.add_message('return-arg-in-generator', node=node,
                                         line=retnode.fromlineno)
        # Check for duplicate names
        args = set()
        for name in node.argnames():
            if name in args:
                self.add_message('duplicate-argument-name', node=node, args=(name,))
            else:
                args.add(name)
    visit_asyncfunctiondef = visit_functiondef
    def _check_name_used_prior_global(self, node):
        """Emit used-prior-global-declaration when a name is referenced
        before its ``global`` statement in the same scope."""
        scope_globals = {
            name: child
            for child in node.nodes_of_class(astroid.Global)
            for name in child.names
            if child.scope() is node
        }
        for node_name in node.nodes_of_class(astroid.Name):
            if node_name.scope() is not node:
                continue
            name = node_name.name
            corresponding_global = scope_globals.get(name)
            if not corresponding_global:
                continue
            global_lineno = corresponding_global.fromlineno
            if global_lineno and global_lineno > node_name.fromlineno:
                self.add_message('used-prior-global-declaration',
                                 node=node_name, args=(name, ))
    def _check_nonlocal_and_global(self, node):
        """Check that a name is both nonlocal and global."""
        def same_scope(current):
            return current.scope() is node
        from_iter = itertools.chain.from_iterable
        nonlocals = set(from_iter(
            child.names for child in node.nodes_of_class(astroid.Nonlocal)
            if same_scope(child)))
        global_vars = set(from_iter(
            child.names for child in node.nodes_of_class(astroid.Global)
            if same_scope(child)))
        for name in nonlocals.intersection(global_vars):
            self.add_message('nonlocal-and-global',
                             args=(name, ), node=node)
    @utils.check_messages('return-outside-function')
    def visit_return(self, node):
        """Flag a ``return`` whose enclosing frame is not a function."""
        if not isinstance(node.frame(), astroid.FunctionDef):
            self.add_message('return-outside-function', node=node)
    @utils.check_messages('yield-outside-function')
    def visit_yield(self, node):
        """Flag a ``yield`` outside a function or lambda."""
        self._check_yield_outside_func(node)
    @utils.check_messages('yield-outside-function')
    def visit_yieldfrom(self, node):
        """Flag a ``yield from`` outside a function or lambda."""
        self._check_yield_outside_func(node)
    @utils.check_messages('not-in-loop', 'continue-in-finally')
    def visit_continue(self, node):
        """Flag ``continue`` used outside a loop (or inside finally)."""
        self._check_in_loop(node, 'continue')
    @utils.check_messages('not-in-loop')
    def visit_break(self, node):
        """Flag ``break`` used outside a loop."""
        self._check_in_loop(node, 'break')
    @utils.check_messages('useless-else-on-loop')
    def visit_for(self, node):
        """Flag a for/else whose body cannot break."""
        self._check_else_on_loop(node)
    @utils.check_messages('useless-else-on-loop')
    def visit_while(self, node):
        """Flag a while/else whose body cannot break."""
        self._check_else_on_loop(node)
    @utils.check_messages('nonexistent-operator')
    def visit_unaryop(self, node):
        """check use of the non-existent ++ and -- operator operator"""
        if ((node.op in '+-') and
                isinstance(node.operand, astroid.UnaryOp) and
                (node.operand.op == node.op)):
            self.add_message('nonexistent-operator', node=node, args=node.op*2)
    def _check_nonlocal_without_binding(self, node, name):
        """Emit nonlocal-without-binding when *name* has no binding in any
        enclosing function scope."""
        current_scope = node.scope()
        while True:
            if current_scope.parent is None:
                break
            if not isinstance(current_scope, (astroid.ClassDef, astroid.FunctionDef)):
                self.add_message('nonlocal-without-binding', args=(name, ),
                                 node=node)
                return
            if name not in current_scope.locals:
                current_scope = current_scope.parent.scope()
                continue
            # Okay, found it.
            return
        if not isinstance(current_scope, astroid.FunctionDef):
            self.add_message('nonlocal-without-binding', args=(name, ), node=node)
    @utils.check_messages('nonlocal-without-binding')
    def visit_nonlocal(self, node):
        """Check each name of a ``nonlocal`` statement for a binding."""
        for name in node.names:
            self._check_nonlocal_without_binding(node, name)
    @utils.check_messages('abstract-class-instantiated')
    def visit_call(self, node):
        """ Check instantiating abstract class with
        abc.ABCMeta as metaclass.
        """
        try:
            infered = next(node.func.infer())
        except astroid.InferenceError:
            return
        if not isinstance(infered, astroid.ClassDef):
            return
        klass = utils.node_frame_class(node)
        if klass is infered:
            # Don't emit the warning if the class is instantiated
            # in its own body or if the call is not an instance
            # creation. If the class is instantiated into its own
            # body, we're expecting that it knows what it is doing.
            return
        # __init__ was called
        metaclass = infered.metaclass()
        abstract_methods = _has_abstract_methods(infered)
        if metaclass is None:
            # Python 3.4 has `abc.ABC`, which won't be detected
            # by ClassNode.metaclass()
            for ancestor in infered.ancestors():
                if ancestor.qname() == 'abc.ABC' and abstract_methods:
                    self.add_message('abstract-class-instantiated',
                                     args=(infered.name, ),
                                     node=node)
                    break
            return
        if metaclass.qname() == 'abc.ABCMeta' and abstract_methods:
            self.add_message('abstract-class-instantiated',
                             args=(infered.name, ),
                             node=node)
    def _check_yield_outside_func(self, node):
        # Yields are only legal directly inside a function or lambda frame.
        if not isinstance(node.frame(), (astroid.FunctionDef, astroid.Lambda)):
            self.add_message('yield-outside-function', node=node)
    def _check_else_on_loop(self, node):
        """Check that any loop with an else clause has a break statement."""
        if node.orelse and not _loop_exits_early(node):
            self.add_message('useless-else-on-loop', node=node,
                             # This is not optimal, but the line previous
                             # to the first statement in the else clause
                             # will usually be the one that contains the else:.
                             line=node.orelse[0].lineno - 1)
    def _check_in_loop(self, node, node_name):
        """check that a node is inside a for or while loop"""
        _node = node.parent
        while _node:
            if isinstance(_node, (astroid.For, astroid.While)):
                if node not in _node.orelse:
                    return
            if isinstance(_node, (astroid.ClassDef, astroid.FunctionDef)):
                break
            if (isinstance(_node, astroid.TryFinally)
                    and node in _node.finalbody
                    and isinstance(node, astroid.Continue)):
                self.add_message('continue-in-finally', node=node)
            _node = _node.parent
        self.add_message('not-in-loop', node=node, args=node_name)
    def _check_redefinition(self, redeftype, node):
        """check for redefinition of a function / method / class name"""
        defined_self = node.parent.frame()[node.name]
        if defined_self is not node and not astroid.are_exclusive(node, defined_self):
            self.add_message('function-redefined', node=node,
                             args=(redeftype, defined_self.fromlineno))
class BasicChecker(_BasicChecker):
"""checks for :
* doc strings
* number of arguments, local variables, branches, returns and statements in
functions, methods
* required module attributes
* dangerous default values as arguments
* redefinition of function / method / class
* uses of the global statement
"""
__implements__ = interfaces.IAstroidChecker
name = 'basic'
msgs = {
'W0101': ('Unreachable code',
'unreachable',
'Used when there is some code behind a "return" or "raise" '
'statement, which will never be accessed.'),
'W0102': ('Dangerous default value %s as argument',
'dangerous-default-value',
'Used when a mutable value as list or dictionary is detected in '
'a default value for an argument.'),
'W0104': ('Statement seems to have no effect',
'pointless-statement',
'Used when a statement doesn\'t have (or at least seems to) '
'any effect.'),
'W0105': ('String statement has no effect',
'pointless-string-statement',
'Used when a string is used as a statement (which of course '
'has no effect). This is a particular case of W0104 with its '
'own message so you can easily disable it if you\'re using '
'those strings as documentation, instead of comments.'),
'W0106': ('Expression "%s" is assigned to nothing',
'expression-not-assigned',
'Used when an expression that is not a function call is assigned '
'to nothing. Probably something else was intended.'),
'W0108': ('Lambda may not be necessary',
'unnecessary-lambda',
'Used when the body of a lambda expression is a function call '
'on the same argument list as the lambda itself; such lambda '
'expressions are in all but a few cases replaceable with the '
'function being called in the body of the lambda.'),
'W0109': ("Duplicate key %r in dictionary",
'duplicate-key',
'Used when a dictionary expression binds the same key multiple '
'times.'),
'W0122': ('Use of exec',
'exec-used',
'Used when you use the "exec" statement (function for Python '
'3), to discourage its usage. That doesn\'t '
'mean you cannot use it !'),
'W0123': ('Use of eval',
'eval-used',
'Used when you use the "eval" function, to discourage its '
'usage. Consider using `ast.literal_eval` for safely evaluating '
'strings containing Python expressions '
'from untrusted sources. '),
'W0150': ("%s statement in finally block may swallow exception",
'lost-exception',
'Used when a break or a return statement is found inside the '
'finally clause of a try...finally block: the exceptions raised '
'in the try clause will be silently swallowed instead of being '
're-raised.'),
'W0199': ('Assert called on a 2-uple. Did you mean \'assert x,y\'?',
'assert-on-tuple',
'A call of assert on a tuple will always evaluate to true if '
'the tuple is not empty, and will always evaluate to false if '
'it is.'),
'W0124': ('Following "as" with another context manager looks like a tuple.',
'confusing-with-statement',
'Emitted when a `with` statement component returns multiple values '
'and uses name binding with `as` only for a part of those values, '
'as in with ctx() as a, b. This can be misleading, since it\'s not '
'clear if the context manager returns a tuple or if the node without '
'a name binding is another context manager.'),
'W0125': ('Using a conditional statement with a constant value',
'using-constant-test',
'Emitted when a conditional statement (If or ternary if) '
'uses a constant value for its test. This might not be what '
'the user intended to do.'),
'E0111': ('The first reversed() argument is not a sequence',
'bad-reversed-sequence',
'Used when the first argument to reversed() builtin '
'isn\'t a sequence (does not implement __reversed__, '
'nor __getitem__ and __len__'),
}
reports = (('RP0101', 'Statistics by type', report_by_type_stats),)
    def __init__(self, linter):
        # stats and the try/finally tracking list are initialised lazily
        # in open(), once the linter run actually starts.
        _BasicChecker.__init__(self, linter)
        self.stats = None
        self._tryfinallys = None
    def open(self):
        """initialize visit variables and statistics
        """
        self._tryfinallys = []
        # counters feeding the by-type statistics report
        self.stats = self.linter.add_stats(module=0, function=0,
                                           method=0, class_=0)
    @utils.check_messages('using-constant-test')
    def visit_if(self, node):
        """Flag ``if`` statements whose test is a constant value."""
        self._check_using_constant_test(node, node.test)
    @utils.check_messages('using-constant-test')
    def visit_ifexp(self, node):
        """Flag ternary expressions whose test is a constant value."""
        self._check_using_constant_test(node, node.test)
@utils.check_messages('using-constant-test')
def visit_comprehension(self, node):
if node.ifs:
for if_test in node.ifs:
self._check_using_constant_test(node, if_test)
    def _check_using_constant_test(self, node, test):
        """Emit using-constant-test when *test* is, or infers to, a value
        that can never vary at runtime (literal, function, class, ...)."""
        const_nodes = (
            astroid.Module,
            astroid.scoped_nodes.GeneratorExp,
            astroid.Lambda, astroid.FunctionDef, astroid.ClassDef,
            astroid.bases.Generator, astroid.UnboundMethod,
            astroid.BoundMethod, astroid.Module)
        structs = (astroid.Dict, astroid.Tuple, astroid.Set)
        # These nodes are excepted, since they are not constant
        # values, requiring a computation to happen. The only type
        # of node in this list which doesn't have this property is
        # Attribute, which is excepted because the conditional statement
        # can be used to verify that the attribute was set inside a class,
        # which is definitely a valid use case.
        except_nodes = (astroid.Attribute, astroid.Call,
                        astroid.BinOp, astroid.BoolOp, astroid.UnaryOp,
                        astroid.Subscript)
        inferred = None
        emit = isinstance(test, (astroid.Const, ) + structs + const_nodes)
        if not isinstance(test, except_nodes):
            inferred = utils.safe_infer(test)
        if emit or isinstance(inferred, const_nodes):
            self.add_message('using-constant-test', node=node)
    def visit_module(self, _):
        """check module name, docstring and required arguments
        """
        # Only the statistics counter is updated here.
        self.stats['module'] += 1
    def visit_classdef(self, node): # pylint: disable=unused-argument
        """check module name, docstring and redefinition
        increment branch counter
        """
        # Only the statistics counter is updated here.
        self.stats['class'] += 1
    @utils.check_messages('pointless-statement', 'pointless-string-statement',
                          'expression-not-assigned')
    def visit_expr(self, node):
        """check for various kind of statements without effect"""
        expr = node.value
        if isinstance(expr, astroid.Const) and isinstance(expr.value,
                                                          six.string_types):
            # treat string statement in a separated message
            # Handle PEP-257 attribute docstrings.
            # An attribute docstring is defined as being a string right after
            # an assignment at the module level, class level or __init__ level.
            scope = expr.scope()
            if isinstance(scope, (astroid.ClassDef, astroid.Module, astroid.FunctionDef)):
                if isinstance(scope, astroid.FunctionDef) and scope.name != '__init__':
                    pass
                else:
                    # A string directly following an assignment in the same
                    # scope is accepted as an attribute docstring.
                    sibling = expr.previous_sibling()
                    if (sibling is not None and sibling.scope() is scope and
                            isinstance(sibling, astroid.Assign)):
                        return
                self.add_message('pointless-string-statement', node=node)
            return
        # ignore if this is :
        # * a direct function call
        # * the unique child of a try/except body
        # * a yield (which are wrapped by a discard node in _ast XXX)
        # warn W0106 if we have any underlying function call (we can't predict
        # side effects), else pointless-statement
        if (isinstance(expr, (astroid.Yield, astroid.Await, astroid.Call)) or
                (isinstance(node.parent, astroid.TryExcept) and
                 node.parent.body == [node])):
            return
        if any(expr.nodes_of_class(astroid.Call)):
            self.add_message('expression-not-assigned', node=node,
                             args=expr.as_string())
        else:
            self.add_message('pointless-statement', node=node)
@staticmethod
def _filter_vararg(node, call_args):
# Return the arguments for the given call which are
# not passed as vararg.
for arg in call_args:
if isinstance(arg, astroid.Starred):
if (isinstance(arg.value, astroid.Name)
and arg.value.name != node.args.vararg):
yield arg
else:
yield arg
@staticmethod
def _has_variadic_argument(args, variadic_name):
if not args:
return True
for arg in args:
if isinstance(arg.value, astroid.Name):
if arg.value.name != variadic_name:
return True
else:
return True
return False
    @utils.check_messages('unnecessary-lambda')
    def visit_lambda(self, node):
        """check whether or not the lambda is suspicious
        """
        # if the body of the lambda is a call expression with the same
        # argument list as the lambda itself, then the lambda is
        # possibly unnecessary and at least suspicious.
        if node.args.defaults:
            # If the arguments of the lambda include defaults, then a
            # judgment cannot be made because there is no way to check
            # that the defaults defined by the lambda are the same as
            # the defaults defined by the function called in the body
            # of the lambda.
            return
        call = node.body
        if not isinstance(call, astroid.Call):
            # The body of the lambda must be a function call expression
            # for the lambda to be unnecessary.
            return
        if (isinstance(node.body.func, astroid.Attribute) and
                isinstance(node.body.func.expr, astroid.Call)):
            # Chained call, the intermediate call might
            # return something else (but we don't check that, yet).
            return
        ordinary_args = list(node.args.args)
        new_call_args = list(self._filter_vararg(node, call.args))
        # The lambda's **kwargs / *args must be forwarded untouched,
        # otherwise the lambda adds or drops arguments and is needed.
        if node.args.kwarg:
            if self._has_variadic_argument(call.kwargs, node.args.kwarg):
                return
        elif call.kwargs or call.keywords:
            return
        if node.args.vararg:
            if self._has_variadic_argument(call.starargs, node.args.vararg):
                return
        elif call.starargs:
            return
        # The "ordinary" arguments must be in a correspondence such that:
        # ordinary_args[i].name == call.args[i].name.
        if len(ordinary_args) != len(new_call_args):
            return
        for arg, passed_arg in zip(ordinary_args, new_call_args):
            if not isinstance(passed_arg, astroid.Name):
                return
            if arg.name != passed_arg.name:
                return
        self.add_message('unnecessary-lambda', line=node.fromlineno,
                         node=node)
@utils.check_messages('dangerous-default-value')
def visit_functiondef(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self.stats[node.is_method() and 'method' or 'function'] += 1
self._check_dangerous_default(node)
visit_asyncfunctiondef = visit_functiondef
def _check_dangerous_default(self, node):
# check for dangerous default values as arguments
is_iterable = lambda n: isinstance(n, (astroid.List,
astroid.Set,
astroid.Dict))
for default in node.args.defaults:
try:
value = next(default.infer())
except astroid.InferenceError:
continue
if (isinstance(value, astroid.Instance) and
value.qname() in DEFAULT_ARGUMENT_SYMBOLS):
if value is default:
msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
elif isinstance(value, astroid.Instance) or is_iterable(value):
# We are here in the following situation(s):
# * a dict/set/list/tuple call which wasn't inferred
# to a syntax node ({}, () etc.). This can happen
# when the arguments are invalid or unknown to
# the inference.
# * a variable from somewhere else, which turns out to be a list
# or a dict.
if is_iterable(default):
msg = value.pytype()
elif isinstance(default, astroid.Call):
msg = '%s() (%s)' % (value.name, value.qname())
else:
msg = '%s (%s)' % (default.as_string(), value.qname())
else:
# this argument is a name
msg = '%s (%s)' % (default.as_string(),
DEFAULT_ARGUMENT_SYMBOLS[value.qname()])
self.add_message('dangerous-default-value',
node=node,
args=(msg, ))
    @utils.check_messages('unreachable', 'lost-exception')
    def visit_return(self, node):
        """1 - check is the node has a right sibling (if so, that's some
        unreachable code)
        2 - check is the node is inside the finally clause of a try...finally
        block
        """
        self._check_unreachable(node)
        # Is it inside final body of a try...finally bloc ?
        # A return there silently discards any in-flight exception.
        self._check_not_in_finally(node, 'return', (astroid.FunctionDef,))
    @utils.check_messages('unreachable')
    def visit_continue(self, node):
        """check is the node has a right sibling (if so, that's some unreachable
        code)
        """
        # ``continue`` unconditionally jumps, so a following sibling is dead.
        self._check_unreachable(node)
    @utils.check_messages('unreachable', 'lost-exception')
    def visit_break(self, node):
        """1 - check is the node has a right sibling (if so, that's some
        unreachable code)
        2 - check is the node is inside the finally clause of a try...finally
        block
        """
        # 1 - Is it right sibling ?
        self._check_unreachable(node)
        # 2 - Is it inside final body of a try...finally bloc ?
        # Only loop frames are legal breakers here.
        self._check_not_in_finally(node, 'break', (astroid.For, astroid.While,))
    @utils.check_messages('unreachable')
    def visit_raise(self, node):
        """check if the node has a right sibling (if so, that's some unreachable
        code)
        """
        # ``raise`` never falls through, so any following sibling is dead.
        self._check_unreachable(node)
    @utils.check_messages('exec-used')
    def visit_exec(self, node):
        """just print a warning on exec statements"""
        # Python 2 ``exec`` statement; the ``exec()`` builtin call is
        # handled separately in visit_call.
        self.add_message('exec-used', node=node)
    @utils.check_messages('eval-used', 'exec-used', 'bad-reversed-sequence')
    def visit_call(self, node):
        """visit a Call node -> check if this is not a blacklisted builtin
        call and check for * or ** use
        """
        # Only direct calls by name are interesting; attribute calls such as
        # ``obj.eval()`` are unrelated to the builtins.
        if isinstance(node.func, astroid.Name):
            name = node.func.name
            # ignore the name if it's not a builtin (i.e. not defined in the
            # locals nor globals scope)
            if not (name in node.frame() or
                    name in node.root()):
                if name == 'exec':
                    self.add_message('exec-used', node=node)
                elif name == 'reversed':
                    self._check_reversed(node)
                elif name == 'eval':
                    self.add_message('eval-used', node=node)
    @utils.check_messages('assert-on-tuple')
    def visit_assert(self, node):
        """check the use of an assert statement on a tuple."""
        # ``assert (x, msg)`` tests the truth of a non-empty tuple, which is
        # always True; the author almost certainly meant ``assert x, msg``.
        if node.fail is None and isinstance(node.test, astroid.Tuple) and \
                len(node.test.elts) == 2:
            self.add_message('assert-on-tuple', node=node)
@utils.check_messages('duplicate-key')
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, _ in node.items:
if isinstance(k, astroid.Const):
key = k.value
if key in keys:
self.add_message('duplicate-key', node=node, args=key)
keys.add(key)
    def visit_tryfinally(self, node):
        """update try...finally flag"""
        # Push on entry; _check_not_in_finally walks this stack.
        self._tryfinallys.append(node)
    def leave_tryfinally(self, node):  # pylint: disable=unused-argument
        """update try...finally flag"""
        # Pop on exit, mirroring visit_tryfinally.
        self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message('unreachable', node=unreach_stmt)
    def _check_not_in_finally(self, node, node_name, breaker_classes=()):
        """check that a node is not inside a finally clause of a
        try...finally statement.

        If we found before a try...finally bloc a parent which its type is
        in breaker_classes, we skip the whole check."""
        # if self._tryfinallys is empty, we're not a in try...finally bloc
        if not self._tryfinallys:
            return
        # the node could be a grand-grand...-children of the try...finally
        # Walk up until a breaker frame (e.g. an enclosing def for 'return')
        # is hit, flagging the node if any ancestor's finalbody contains it.
        _parent = node.parent
        _node = node
        while _parent and not isinstance(_parent, breaker_classes):
            if hasattr(_parent, 'finalbody') and _node in _parent.finalbody:
                self.add_message('lost-exception', node=node, args=node_name)
                return
            _node = _parent
            _parent = _node.parent
    def _check_reversed(self, node):
        """ check that the argument to `reversed` is a sequence """
        try:
            argument = utils.safe_infer(utils.get_argument_from_call(node, position=0))
        except utils.NoSuchArgumentError:
            pass
        else:
            if argument is astroid.YES:
                # Inference was ambiguous; avoid a false positive.
                return
            if argument is None:
                # Nothing was infered.
                # Try to see if we have iter().
                if isinstance(node.args[0], astroid.Call):
                    try:
                        func = next(node.args[0].func.infer())
                    except astroid.InferenceError:
                        return
                    if (getattr(func, 'name', None) == 'iter' and
                            utils.is_builtin_object(func)):
                        # reversed(iter(...)) never works: iterators do not
                        # support the sequence protocol.
                        self.add_message('bad-reversed-sequence', node=node)
                return
            if isinstance(argument, astroid.Instance):
                if (argument._proxied.name == 'dict' and
                        utils.is_builtin_object(argument._proxied)):
                    self.add_message('bad-reversed-sequence', node=node)
                    return
                elif any(ancestor.name == 'dict' and utils.is_builtin_object(ancestor)
                         for ancestor in argument._proxied.ancestors()):
                    # Mappings aren't accepted by reversed(), unless
                    # they provide explicitly a __reversed__ method.
                    try:
                        argument.locals[REVERSED_PROTOCOL_METHOD]
                    except KeyError:
                        self.add_message('bad-reversed-sequence', node=node)
                    return
                # Accept the instance if any complete protocol group from
                # REVERSED_METHODS is implemented (for/else on both loops).
                for methods in REVERSED_METHODS:
                    for meth in methods:
                        try:
                            argument.getattr(meth)
                        except astroid.NotFoundError:
                            break
                    else:
                        break
                else:
                    self.add_message('bad-reversed-sequence', node=node)
            elif not isinstance(argument, (astroid.List, astroid.Tuple)):
                # everything else is not a proper sequence for reversed()
                self.add_message('bad-reversed-sequence', node=node)
    @utils.check_messages('confusing-with-statement')
    def visit_with(self, node):
        # Warn when ``with a, b:`` looks like a mistyped ``with a as b:``.
        if not PY3K:
            # in Python 2 a "with" statement with multiple managers coresponds
            # to multiple nested AST "With" nodes
            pairs = []
            parent_node = node.parent
            if isinstance(parent_node, astroid.With):
                # we only care about the direct parent, since this method
                # gets called for each with node anyway
                pairs.extend(parent_node.items)
            pairs.extend(node.items)
        else:
            # in PY3K a "with" statement with multiple managers coresponds
            # to one AST "With" node with multiple items
            pairs = node.items
        if pairs:
            for prev_pair, pair in zip(pairs, pairs[1:]):
                if (isinstance(prev_pair[1], astroid.AssignName) and
                        (pair[1] is None and not isinstance(pair[0], astroid.Call))):
                    # don't emit a message if the second is a function call
                    # there's no way that can be mistaken for a name assignment
                    if PY3K or node.lineno == node.parent.lineno:
                        # if the line number doesn't match
                        # we assume it's a nested "with"
                        self.add_message('confusing-with-statement', node=node)
# Maps each name "type" to (validation regex, human-readable label).  Used by
# NameChecker to select the right regex and by _create_naming_options to
# generate the matching command-line options.
_NAME_TYPES = {
    'module': (MOD_NAME_RGX, 'module'),
    'const': (CONST_NAME_RGX, 'constant'),
    'class': (CLASS_NAME_RGX, 'class'),
    'function': (DEFAULT_NAME_RGX, 'function'),
    'method': (DEFAULT_NAME_RGX, 'method'),
    'attr': (DEFAULT_NAME_RGX, 'attribute'),
    'argument': (DEFAULT_NAME_RGX, 'argument'),
    'variable': (DEFAULT_NAME_RGX, 'variable'),
    'class_attribute': (CLASS_ATTRIBUTE_RGX, 'class attribute'),
    'inlinevar': (COMP_VAR_RGX, 'inline iteration'),
}
def _create_naming_options():
    """Generate the ``<type>-rgx`` / ``<type>-name-hint`` option pairs
    for every entry in _NAME_TYPES."""
    options = []
    for name_type, (rgx, human_readable_name) in six.iteritems(_NAME_TYPES):
        opt_prefix = name_type.replace('_', '-')
        options.extend([
            ('%s-rgx' % (opt_prefix,),
             {'default': rgx, 'type': 'regexp', 'metavar': '<regexp>',
              'help': 'Regular expression matching correct %s names' % (human_readable_name,)}),
            ('%s-name-hint' % (opt_prefix,),
             {'default': rgx.pattern, 'type': 'string', 'metavar': '<string>',
              'help': 'Naming hint for %s names' % (human_readable_name,)}),
        ])
    return tuple(options)
class NameChecker(_BasicChecker):
msgs = {
'C0102': ('Black listed name "%s"',
'blacklisted-name',
'Used when the name is listed in the black list (unauthorized '
'names).'),
'C0103': ('%s name "%s" doesn\'t conform to %s',
'invalid-name',
'Used when the name doesn\'t match the regular expression '
'associated to its type (constant, variable, class...).'),
'W0111': ('Name %s will become a keyword in Python %s',
'assign-to-new-keyword',
'Used when assignment will become invalid in future '
'Python release due to introducing new keyword'),
}
options = (('good-names',
{'default' : ('i', 'j', 'k', 'ex', 'Run', '_'),
'type' :'csv', 'metavar' : '<names>',
'help' : 'Good variable names which should always be accepted,'
' separated by a comma'}
),
('bad-names',
{'default' : ('foo', 'bar', 'baz', 'toto', 'tutu', 'tata'),
'type' :'csv', 'metavar' : '<names>',
'help' : 'Bad variable names which should always be refused, '
'separated by a comma'}
),
('name-group',
{'default' : (),
'type' :'csv', 'metavar' : '<name1:name2>',
'help' : ('Colon-delimited sets of names that determine each'
' other\'s naming style when the name regexes'
' allow several styles.')}
),
('include-naming-hint',
{'default': False, 'type' : 'yn', 'metavar' : '<y_or_n>',
'help': 'Include a hint for the correct naming format with invalid-name'}
),
('property-classes',
{'default': ('abc.abstractproperty',),
'type': 'csv',
'metavar': '<decorator names>',
'help': 'List of decorators that produce properties, such as '
'abc.abstractproperty. Add to this list to register '
'other decorators that produce valid properties.'}
),
) + _create_naming_options()
KEYWORD_ONSET = {
(3, 0): {'True', 'False'},
(3, 7): {'async', 'await'}
}
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self._name_category = {}
self._name_group = {}
self._bad_names = {}
def open(self):
self.stats = self.linter.add_stats(badname_module=0,
badname_class=0, badname_function=0,
badname_method=0, badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0,
badname_class_attribute=0)
for group in self.config.name_group:
for name_type in group.split(':'):
self._name_group[name_type] = 'group_%s' % (group,)
@utils.check_messages('blacklisted-name', 'invalid-name')
def visit_module(self, node):
self._check_name('module', node.name.split('.')[-1], node)
self._bad_names = {}
def leave_module(self, node): # pylint: disable=unused-argument
for all_groups in six.itervalues(self._bad_names):
if len(all_groups) < 2:
continue
groups = collections.defaultdict(list)
min_warnings = sys.maxsize
for group in six.itervalues(all_groups):
groups[len(group)].append(group)
min_warnings = min(len(group), min_warnings)
if len(groups[min_warnings]) > 1:
by_line = sorted(groups[min_warnings],
key=lambda group: min(warning[0].lineno for warning in group))
warnings = itertools.chain(*by_line[1:])
else:
warnings = groups[min_warnings][0]
for args in warnings:
self._raise_name_warning(*args)
@utils.check_messages('blacklisted-name', 'invalid-name', 'assign-to-new-keyword')
def visit_classdef(self, node):
self._check_assign_to_new_keyword_violation(node.name, node)
self._check_name('class', node.name, node)
for attr, anodes in six.iteritems(node.instance_attrs):
if not any(node.instance_attr_ancestors(attr)):
self._check_name('attr', attr, anodes[0])
@utils.check_messages('blacklisted-name', 'invalid-name', 'assign-to-new-keyword')
def visit_functiondef(self, node):
# Do not emit any warnings if the method is just an implementation
# of a base class method.
self._check_assign_to_new_keyword_violation(node.name, node)
confidence = interfaces.HIGH
if node.is_method():
if utils.overrides_a_method(node.parent.frame(), node.name):
return
confidence = (interfaces.INFERENCE if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE)
self._check_name(_determine_function_name_type(node,
config=self.config),
node.name, node, confidence)
# Check argument names
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
visit_asyncfunctiondef = visit_functiondef
@utils.check_messages('blacklisted-name', 'invalid-name')
def visit_global(self, node):
for name in node.names:
self._check_name('const', name, node)
@utils.check_messages('blacklisted-name', 'invalid-name', 'assign-to-new-keyword')
def visit_assignname(self, node):
"""check module level assigned names"""
self._check_assign_to_new_keyword_violation(node.name, node)
frame = node.frame()
ass_type = node.assign_type()
if isinstance(ass_type, astroid.Comprehension):
self._check_name('inlinevar', node.name, node)
elif isinstance(frame, astroid.Module):
if isinstance(ass_type, astroid.Assign) and not in_loop(ass_type):
if isinstance(utils.safe_infer(ass_type.value), astroid.ClassDef):
self._check_name('class', node.name, node)
else:
if not _redefines_import(node):
# Don't emit if the name redefines an import
# in an ImportError except handler.
self._check_name('const', node.name, node)
elif isinstance(ass_type, astroid.ExceptHandler):
self._check_name('variable', node.name, node)
elif isinstance(frame, astroid.FunctionDef):
# global introduced variable aren't in the function locals
if node.name in frame and node.name not in frame.argnames():
if not _redefines_import(node):
self._check_name('variable', node.name, node)
elif isinstance(frame, astroid.ClassDef):
if not list(frame.local_attr_ancestors(node.name)):
self._check_name('class_attribute', node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astroid.AssignName):
self._check_name('argument', arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _find_name_group(self, node_type):
return self._name_group.get(node_type, node_type)
def _raise_name_warning(self, node, node_type, name, confidence):
type_label = _NAME_TYPES[node_type][1]
hint_name = node_type.replace('_', '-') + '-name-hint'
hint_part = '%r template' % hint_name
if self.config.include_naming_hint:
hint_rgx = getattr(self.config, hint_name.replace('-', '_'))
hint_part += ' (hint: %r)' % hint_rgx
args = (
type_label.capitalize(),
name,
hint_part
)
self.add_message('invalid-name', node=node, args=args,
confidence=confidence)
self.stats['badname_' + node_type] += 1
def _check_name(self, node_type, name, node, confidence=interfaces.HIGH):
"""check for a name using the type's regexp"""
if utils.is_inside_except(node):
clobbering, _ = utils.clobber_in_except(node)
if clobbering:
return
if name in self.config.good_names:
return
if name in self.config.bad_names:
self.stats['badname_' + node_type] += 1
self.add_message('blacklisted-name', node=node, args=name)
return
regexp = getattr(self.config, node_type + '_rgx')
match = regexp.match(name)
if _is_multi_naming_match(match, node_type, confidence):
name_group = self._find_name_group(node_type)
bad_name_group = self._bad_names.setdefault(name_group, {})
warnings = bad_name_group.setdefault(match.lastgroup, [])
warnings.append((node, node_type, name, confidence))
if match is None:
self._raise_name_warning(node, node_type, name, confidence)
def _check_assign_to_new_keyword_violation(self, name, node):
keyword_first_version = self._name_became_keyword_in_version(
name, self.KEYWORD_ONSET
)
if keyword_first_version is not None:
self.add_message('assign-to-new-keyword',
node=node, args=(name, keyword_first_version),
confidence=interfaces.HIGH)
@staticmethod
def _name_became_keyword_in_version(name, rules):
for version, keywords in rules.items():
if name in keywords and sys.version_info < version:
return '.'.join(map(str, version))
return None
class DocStringChecker(_BasicChecker):
msgs = {
'C0111': ('Missing %s docstring', # W0131
'missing-docstring',
'Used when a module, function, class or method has no docstring.'
'Some special methods like __init__ doesn\'t necessary require a '
'docstring.'),
'C0112': ('Empty %s docstring', # W0132
'empty-docstring',
'Used when a module, function, class or method has an empty '
'docstring (it would be too easy ;).'),
}
options = (('no-docstring-rgx',
{'default' : NO_REQUIRED_DOC_RGX,
'type' : 'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match '
'function or class names that do not require a '
'docstring.'}
),
('docstring-min-length',
{'default' : -1,
'type' : 'int', 'metavar' : '<int>',
'help': ('Minimum line length for functions/classes that'
' require docstrings, shorter ones are exempt.')}
),
)
def open(self):
self.stats = self.linter.add_stats(undocumented_module=0,
undocumented_function=0,
undocumented_method=0,
undocumented_class=0)
@utils.check_messages('missing-docstring', 'empty-docstring')
def visit_module(self, node):
self._check_docstring('module', node)
@utils.check_messages('missing-docstring', 'empty-docstring')
def visit_classdef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring('class', node)
@staticmethod
def _is_setter_or_deleter(node):
names = {'setter', 'deleter'}
for decorator in node.decorators.nodes:
if (isinstance(decorator, astroid.Attribute)
and decorator.attrname in names):
return True
return False
@utils.check_messages('missing-docstring', 'empty-docstring')
def visit_functiondef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
ftype = 'method' if node.is_method() else 'function'
if node.decorators and self._is_setter_or_deleter(node):
return
if isinstance(node.parent.frame(), astroid.ClassDef):
overridden = False
confidence = (interfaces.INFERENCE if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE)
# check if node is from a method overridden by its ancestor
for ancestor in node.parent.frame().ancestors():
if node.name in ancestor and \
isinstance(ancestor[node.name], astroid.FunctionDef):
overridden = True
break
self._check_docstring(ftype, node,
report_missing=not overridden,
confidence=confidence)
else:
self._check_docstring(ftype, node)
visit_asyncfunctiondef = visit_functiondef
def _check_docstring(self, node_type, node, report_missing=True,
confidence=interfaces.HIGH):
"""check the node has a non empty docstring"""
docstring = node.doc
if docstring is None:
if not report_missing:
return
if node.body:
lines = node.body[-1].lineno - node.body[0].lineno + 1
else:
lines = 0
if node_type == 'module' and not lines:
# If the module has no body, there's no reason
# to require a docstring.
return
max_lines = self.config.docstring_min_length
if node_type != 'module' and max_lines > -1 and lines < max_lines:
return
self.stats['undocumented_'+node_type] += 1
if (node.body and isinstance(node.body[0], astroid.Expr) and
isinstance(node.body[0].value, astroid.Call)):
# Most likely a string with a format call. Let's see.
func = utils.safe_infer(node.body[0].value.func)
if (isinstance(func, astroid.BoundMethod)
and isinstance(func.bound, astroid.Instance)):
# Strings in Python 3, others in Python 2.
if PY3K and func.bound.name == 'str':
return
elif func.bound.name in ('str', 'unicode', 'bytes'):
return
self.add_message('missing-docstring', node=node, args=(node_type,),
confidence=confidence)
elif not docstring.strip():
self.stats['undocumented_'+node_type] += 1
self.add_message('empty-docstring', node=node, args=(node_type,),
confidence=confidence)
class PassChecker(_BasicChecker):
    """check if the pass statement is really necessary"""
    msgs = {'W0107': ('Unnecessary pass statement',
                      'unnecessary-pass',
                      'Used when a "pass" statement that can be avoided is '
                      'encountered.'),
           }

    @utils.check_messages('unnecessary-pass')
    def visit_pass(self, node):
        # ``pass`` is only required when it is the sole statement in its
        # suite; with any sibling present it is dead weight.
        if len(node.parent.child_sequence(node)) > 1:
            self.add_message('unnecessary-pass', node=node)
class LambdaForComprehensionChecker(_BasicChecker):
    """check for using a lambda where a comprehension would do.

    See <http://www.artima.com/weblogs/viewpost.jsp?thread=98196>
    where GvR says comprehensions would be clearer.
    """

    msgs = {'W0110': ('map/filter on lambda could be replaced by comprehension',
                      'deprecated-lambda',
                      'Used when a lambda is the first argument to "map" or '
                      '"filter". It could be clearer as a list '
                      'comprehension or generator expression.',
                      {'maxversion': (3, 0)}),
           }

    @utils.check_messages('deprecated-lambda')
    def visit_call(self, node):
        """visit a Call node, check if map or filter are called with a
        lambda
        """
        if not node.args:
            return
        if not isinstance(node.args[0], astroid.Lambda):
            return
        # Only warn when the callee really is the builtin map/filter,
        # not a local redefinition of those names.
        infered = utils.safe_infer(node.func)
        if (utils.is_builtin_object(infered)
                and infered.name in ['map', 'filter']):
            self.add_message('deprecated-lambda', node=node)
def _is_one_arg_pos_call(call):
    """Is this a call with exactly 1 argument,
    where that argument is positional?
    """
    # Guard clause first, then the positional/keyword shape test.
    if not isinstance(call, astroid.Call):
        return False
    return len(call.args) == 1 and not call.keywords
class ComparisonChecker(_BasicChecker):
"""Checks for comparisons
- singleton comparison: 'expr == True', 'expr == False' and 'expr == None'
- yoda condition: 'const "comp" right' where comp can be '==', '!=', '<',
'<=', '>' or '>=', and right can be a variable, an attribute, a method or
a function
"""
msgs = {'C0121': ('Comparison to %s should be %s',
'singleton-comparison',
'Used when an expression is compared to singleton '
'values like True, False or None.'),
'C0122': ('Comparison should be %s',
'misplaced-comparison-constant',
'Used when the constant is placed on the left side '
'of a comparison. It is usually clearer in intent to '
'place it in the right hand side of the comparison.'),
'C0123': ('Using type() instead of isinstance() for a typecheck.',
'unidiomatic-typecheck',
'The idiomatic way to perform an explicit typecheck in '
'Python is to use isinstance(x, Y) rather than '
'type(x) == Y, type(x) is Y. Though there are unusual '
'situations where these give different results.',
{'old_names': [('W0154', 'unidiomatic-typecheck')]}),
'R0123': ('Comparison to literal',
'literal-comparison',
'Used when comparing an object to a literal, which is usually '
'what you do not want to do, since you can compare to a different '
'literal than what was expected altogether.'),
}
def _check_singleton_comparison(self, singleton, root_node):
if singleton.value is True:
suggestion = "just 'expr' or 'expr is True'"
self.add_message('singleton-comparison',
node=root_node,
args=(True, suggestion))
elif singleton.value is False:
suggestion = "'not expr' or 'expr is False'"
self.add_message('singleton-comparison',
node=root_node,
args=(False, suggestion))
elif singleton.value is None:
self.add_message('singleton-comparison',
node=root_node,
args=(None, "'expr is None'"))
def _check_literal_comparison(self, literal, node):
"""Check if we compare to a literal, which is usually what we do not want to do."""
nodes = (astroid.List,
astroid.Tuple,
astroid.Dict,
astroid.Set)
is_other_literal = isinstance(literal, nodes)
is_const = False
if isinstance(literal, astroid.Const):
if literal.value in (True, False, None):
# Not interested in this values.
return
is_const = isinstance(literal.value, (bytes, str, int, float))
if is_const or is_other_literal:
self.add_message('literal-comparison', node=node)
def _check_misplaced_constant(self, node, left, right, operator):
if isinstance(right, astroid.Const):
return
operator = REVERSED_COMPS.get(operator, operator)
suggestion = '%s %s %r' % (right.as_string(), operator, left.value)
self.add_message('misplaced-comparison-constant', node=node,
args=(suggestion,))
@utils.check_messages('singleton-comparison', 'misplaced-comparison-constant',
'unidiomatic-typecheck', 'literal-comparison')
def visit_compare(self, node):
self._check_unidiomatic_typecheck(node)
# NOTE: this checker only works with binary comparisons like 'x == 42'
# but not 'x == y == 42'
if len(node.ops) != 1:
return
left = node.left
operator, right = node.ops[0]
if (operator in ('<', '<=', '>', '>=', '!=', '==')
and isinstance(left, astroid.Const)):
self._check_misplaced_constant(node, left, right, operator)
if operator == '==':
if isinstance(left, astroid.Const):
self._check_singleton_comparison(left, node)
elif isinstance(right, astroid.Const):
self._check_singleton_comparison(right, node)
if operator in ('is', 'is not'):
self._check_literal_comparison(right, node)
def _check_unidiomatic_typecheck(self, node):
operator, right = node.ops[0]
if operator in TYPECHECK_COMPARISON_OPERATORS:
left = node.left
if _is_one_arg_pos_call(left):
self._check_type_x_is_y(node, left, operator, right)
def _check_type_x_is_y(self, node, left, operator, right):
"""Check for expressions like type(x) == Y."""
left_func = utils.safe_infer(left.func)
if not (isinstance(left_func, astroid.ClassDef)
and left_func.qname() == TYPE_QNAME):
return
if operator in ('is', 'is not') and _is_one_arg_pos_call(right):
right_func = utils.safe_infer(right.func)
if (isinstance(right_func, astroid.ClassDef)
and right_func.qname() == TYPE_QNAME):
# type(x) == type(a)
right_arg = utils.safe_infer(right.args[0])
if not isinstance(right_arg, LITERAL_NODE_TYPES):
# not e.g. type(x) == type([])
return
self.add_message('unidiomatic-typecheck', node=node)
def register(linter):
    """required method to auto register this checker"""
    # Registration order is preserved from the original explicit calls.
    checker_classes = (BasicErrorChecker,
                       BasicChecker,
                       NameChecker,
                       DocStringChecker,
                       PassChecker,
                       LambdaForComprehensionChecker,
                       ComparisonChecker)
    for checker_class in checker_classes:
        linter.register_checker(checker_class(linter))
| rogalski/pylint | pylint/checkers/base.py | Python | gpl-2.0 | 73,250 | [
"VisIt"
] | 055b602229d2c5a0f6f5d37e2ab1976da2af3a46a6e594041675984625989a5d |
# Copyright 2001 by Gavin E. Crooks. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Unit test for Astral"""
import unittest
from Bio.SCOP import *
class AstralTests(unittest.TestCase):
    """Tests for Bio.SCOP.Astral against the miniature 'test' SCOP data set
    shipped under the SCOP/ test directory."""

    def setUp(self):
        # Load the small SCOP hierarchy and its ASTRAL sequences once per test.
        self.scop = Scop(dir_path="SCOP", version="test")
        self.astral = Astral(scop=self.scop, dir_path="SCOP", version="test")

    def testGetSeq(self):
        # Sequences are retrievable both by sid string and by Domain object.
        self.assertEqual(str(self.astral.getSeqBySid('d3sdha_')), "AAAAA")
        self.assertEqual(str(self.astral.getSeqBySid('d4hbib_')), "KKKKK")
        dom = self.scop.getDomainBySid('d3sdha_')
        self.assertEqual(str(self.astral.getSeq(dom)), "AAAAA")

    def testConstructWithCustomFile(self):
        # Astral can also be built from an explicit FASTA file path.
        scop = Scop(dir_path="SCOP", version="test")
        astral = Astral(scop=scop, astral_file="SCOP/scopseq-test/astral-scopdom-seqres-all-test.fa")
        self.assertEqual(str(astral.getSeqBySid('d3sdha_')), "AAAAA")
        self.assertEqual(str(astral.getSeqBySid('d4hbib_')), "KKKKK")

    def testGetDomainsFromFile(self):
        # A .id file yields Domain objects in file order.
        filename = "SCOP/scopseq-test/astral-scopdom-seqres-sel-gs-bib-20-test.id"
        domains = self.astral.getAstralDomainsFromFile(filename)
        self.assertEqual(len(domains), 3)
        self.assertEqual(domains[0].sid, "d3sdha_")
        self.assertEqual(domains[1].sid, "d4hbib_")
        self.assertEqual(domains[2].sid, "d5hbia_")

    def testGetDomainsClustered(self):
        # Clustering by percent identity and by E-value thresholds.
        domains1 = self.astral.domainsClusteredById(20)
        self.assertEqual(len(domains1), 3)
        self.assertEqual(domains1[0].sid, "d3sdha_")
        self.assertEqual(domains1[1].sid, "d4hbib_")
        self.assertEqual(domains1[2].sid, "d5hbia_")
        domains2 = self.astral.domainsClusteredByEv(1e-15)
        self.assertEqual(len(domains2), 1)
        # d1 = scop.getDomainBySid("d3sdha_")
        # self.assertEqual(d1.isIn(astral.getHashedDomainsClusteredByPercentId(20))
        # self.assertEqual(d1.isIn(astral.getHashedDomainsClusteredByEv(-15))
if __name__ == '__main__':
    # Run the suite with a verbose text runner (same runner as before,
    # constructed inline).
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| updownlife/multipleK | dependencies/biopython-1.65/Tests/test_SCOP_Astral.py | Python | gpl-2.0 | 2,234 | [
"Biopython"
] | a7951410eacdc931d8d0cea578607f23b44b718b7d0ad372f22758f97f3affa1 |
from simtk.openmm import app
import simtk.openmm as mm
from simtk import unit as u
import mdtraj.reporters
# --- Simulation parameters -------------------------------------------------
cutoff = 0.95 * u.nanometers  # nonbonded (PME) real-space cutoff
output_frequency = 25000  # steps between saved DCD frames
n_steps = 500000000  # total MD steps (1 us at the 2 fs timestep below)
temperature = 293.  # kelvin
pressure = 1.0 * u.atmospheres

# --- Input / output paths --------------------------------------------------
pdb_filename = "./1am7_equil.pdb"
dcd_filename = "./1am7.dcd"
log_filename = "./1am7.log"

# Select chain 0 (presumably the protein; verify against the PDB) so only
# those atoms are written to the trajectory.
traj = mdtraj.load(pdb_filename)
top, bonds = traj.top.to_dataframe()
atom_indices = top.index[top.chainID == 0].values

# --- System setup ----------------------------------------------------------
pdb = app.PDBFile(pdb_filename)
topology = pdb.topology
positions = pdb.positions

ff = app.ForceField('amber99sbnmr.xml', 'tip3p-fb.xml')

platform = mm.Platform.getPlatformByName("CUDA")
system = ff.createSystem(topology, nonbondedMethod=app.PME, nonbondedCutoff=cutoff, constraints=app.HBonds)
# Langevin thermostat (1/ps friction) with a 2 fs timestep; constant
# pressure via a Monte Carlo barostat attempted every 25 steps.
integrator = mm.LangevinIntegrator(temperature, 1.0 / u.picoseconds, 2.0 * u.femtoseconds)
system.addForce(mm.MonteCarloBarostat(pressure, temperature, 25))
simulation = app.Simulation(topology, system, integrator, platform=platform)
simulation.context.setPositions(positions)
simulation.context.setVelocitiesToTemperature(temperature)
print("Using platform %s" % simulation.context.getPlatform().getName())

# --- Reporters and production run -----------------------------------------
simulation.reporters.append(mdtraj.reporters.DCDReporter(dcd_filename, output_frequency, atomSubset=atom_indices))
simulation.reporters.append(app.StateDataReporter(open(log_filename, 'w'), 5000, step=True, time=True, speed=True))
simulation.step(n_steps)
| choderalab/open-forcefield-group | nmr/code/simulate_T4.py | Python | gpl-2.0 | 1,413 | [
"MDTraj",
"OpenMM"
] | 5c3c46c166ae8823c4b9fcc6f7a9aa60a6ba93d95522f450b07524afc0bb6206 |
#!/usr/bin/python -O
#PBS -N myjob
#PBS -l select=1:ncpus=9:mem=12GB
#PBS -l walltime=70:00:00
import subprocess
import os
import sys
def module_add(modulename):
    # Load an environment module into this process by asking modulecmd for
    # the python code that applies the environment changes, then exec-ing it.
    # NOTE(review): Python 2 `exec` statement and shell=True with an
    # interpolated module name -- only safe for trusted input.
    p = subprocess.Popen("/usr/bin/modulecmd python add "+modulename, stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True)
    stdout,stderr = p.communicate()
    exec stdout
module_add("bio-bwa/0.7.5a")
genomeFastaFiles = {"mm9":"/csc/rawdata/Cscbioinf/bioinfResources/mm9/mm9.fa"}
#genome = "mm9"
#fastq = "/csc/rawdata/Merkenshlager/131209_SN172_0451_BD26VHACXX_Merkenschlager/Unaligned/Sample_3InpVS/3InpVS_ACAGTG_L003_R1_001.fastq.gz"
#baseName = "/csc/rawdata/Dillon/DillonTest/test"
fastq = sys.argv[1]
baseName = sys.argv[2]
genome = sys.argv[3]
outputPath = sys.argv[4]
saiOut = os.path.join(outputPath,baseName+".sai")
trimmedFQOut = os.path.join(outputPath,baseName+"trimmed.fq.gz")
print os.environ["LOADEDMODULES"]
if not os.path.isfile(genomeFastaFiles[genome]) or not os.path.isfile(genomeFastaFiles[genome]+".bwt") or not os.path.isfile(genomeFastaFiles[genome]+".sa"):
if os.path.isfile(genomeFastaFiles[genome]):
print "Not all necessary index files found..indexing"
#p = subprocess.Popen(["/bin/bash",'-i',"-c","bwa index -a bwtsw "+genome], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#stdout,stderr = p.communicate()
else:
"Fasta file is not found"
elif os.path.isfile(genomeFastaFiles[genome]) and os.path.isfile(genomeFastaFiles[genome]+".bwt") and os.path.isfile(genomeFastaFiles[genome]+".sa"):
if not os.path.isfile(saiOut):
pairedAlignCMD1 = "zcat "+fastq+" | python /csc/rawdata/Cscbioinf/bioinfResources/trimFQ.py 50 | gzip - > "+trimmedFQOut
print pairedAlignCMD1
p = subprocess.Popen(["/bin/bash",'-i',"-c",pairedAlignCMD1], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = p.communicate()
pairedAlignCMD2 = "bwa aln -t 8 "+genomeFastaFiles[genome]+" "+trimmedFQOut+" > "+saiOut
print pairedAlignCMD2
p = subprocess.Popen(["/bin/bash",'-i',"-c",pairedAlignCMD2], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = p.communicate()
| ThomasCarroll/mrcchip | pairedAlign.py | Python | gpl-2.0 | 2,281 | [
"BWA"
] | f17afbed52a57995e51cf9c061465b90b8d8e1134ae9114b18a263d1dd8b975b |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.plotting.preparation Contains the PreparationPlotter class
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from .component import PlottingComponent
from ..preparation.component import PreparationComponent
from ...core.tools import filesystem as fs
from ...core.basics.log import log
from ...magic.core.frame import Frame
from ...magic.core.fits import get_frame_names
from ...magic.basics.mask import Mask, get_mask_names
from ...magic.region.list import PixelRegionList
from ...magic.plot.imagegrid import StandardImageGridPlotter
from ...core.plot.distribution import DistributionGridPlotter, DistributionPlotter
from ...core.basics.distribution import Distribution
from ...magic.plot.error import ErrorPlotter
from pts.core.tools.utils import lazyproperty
# -----------------------------------------------------------------
class PreparationPlotter(PlottingComponent, PreparationComponent):
"""
This class...
"""
# The load functions
load_functions = dict()
# The plot functions
plot_functions = dict()
# -----------------------------------------------------------------
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param kwargs:
:return:
"""
# Call the constructors of the base classes
PlottingComponent.__init__(self, no_config=True)
PreparationComponent.__init__(self, *args, **kwargs)
# -- Attributes --
# Features to plot
self.features = None
# The paths to the resulting FITS files
self.result_paths = dict()
# The paths to the sky directories
self.sky_paths = dict()
# The dictionary of prepared image frames
self.images = dict()
# The dictionary of error frames
self.errors = dict()
self.poisson_errors = dict()
self.calibration_errors = dict()
self.sky_errors = dict()
# The dictionary of sources masks
self.sources_masks = dict()
# The dictionary of sky masks
self.sky_masks = dict()
# The dictionary of sky values
self.sky_values = dict()
# The dictionary of sky annuli
self.annuli = dict()
# The dictionary of sky apertures
self.apertures = dict()
# -----------------------------------------------------------------
def run(self, features=None):
"""
This function ...
:return:
"""
# 1. Call the setup function
self.setup(features)
# 2. Load the prepared images
self.load_images()
# 3. Load the error frame
self.load_errors()
# 4. Load the source and sky masks
self.load_masks()
# 5. Load the sky values
self.load_sky()
# 6. Load the galaxy and sky annuli
self.load_annuli()
# 7. Load the sky apertures
self.load_apertures()
# 8. Plot
self.plot()
# -----------------------------------------------------------------
def setup(self, features=None):
"""
This function ...
:return:
"""
# Call the setup function of the base class
super(PreparationPlotter, self).setup()
# Set features to plot
self.features = features
# Loop over all directories in the preparation directory
for directory_path, directory_name in fs.directories_in_path(self.prep_path, returns=["path", "name"]):
# Look for a file called 'result.fits'
image_path = fs.join(directory_path, "result.fits")
if not fs.is_file(image_path):
log.warning("Prepared image could not be found for " + directory_name)
continue
# Add the image path to the dictionary
self.result_paths[directory_name] = image_path
# Look for the 'sky' directory
sky_path = fs.join(directory_path, "sky")
if not fs.is_directory(sky_path):
log.warning("Sky directory is not present for " + directory_name)
continue
# Add the sky directory path to the dictionary
self.sky_paths[directory_name] = sky_path
# -----------------------------------------------------------------
def load_images(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the prepared images ...")
# Loop over the image paths
for label in self.result_paths:
# Open the prepared image frame
frame = Frame.from_file(self.result_paths[label])
# Set the image name
frame.name = label
# Add the image to the dictionary
self.images[label] = frame
# -----------------------------------------------------------------
def load_masks(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the masks ...")
# Load sources masks
self.load_sources_masks()
# Load sky masks
self.load_sky_masks()
# -----------------------------------------------------------------
def load_sources_masks(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the sources masks ...")
# Loop over the image paths
for label in self.result_paths:
# Check whether the sources mask is present in the FITS file
if not "sources" in get_mask_names(self.result_paths[label]):
log.warning("The sources mask is not present in the " + label + " image")
# Open the sources mask
mask = Mask.from_file(self.result_paths[label], plane="sources")
# Add the mask to the dictionary
self.sources_masks[label] = mask
# -----------------------------------------------------------------
def load_sky_masks(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the sky masks ...")
# Loop over the image paths
for label in self.result_paths:
# Check whether the sky mask is present in the FITS file
if not "sky" in get_mask_names(self.result_paths[label]):
log.warning("The sky mask is not present in the " + label + " image")
continue
# Open the sky mask
mask = Mask.from_file(self.result_paths[label], plane="sky")
# Add the sky mask to the dictionary
self.sky_masks[label] = mask
# -----------------------------------------------------------------
def load_sky(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the sky values ...")
# Loop over the image paths
for label in self.result_paths:
# Open the sky frame
sky = Frame.from_file(self.result_paths[label], plane="sky")
# Get the sky value (assuming the sky frame is constant)
value = sky[0,0]
# Add the sky value to the dictionary
self.sky_values[label] = value
# -----------------------------------------------------------------
def load_annuli(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the galaxy and sky annuli ...")
# Loop over the sky paths
for label in self.sky_paths:
# Look for the annulus region file
region_path = fs.join(self.sky_paths[label], "annulus.reg")
if not fs.is_file(region_path):
log.warning("The annulus region could not be found for " + label)
continue
# Open the annulus region
region = PixelRegionList.from_file(region_path).homogenized()
# Add the region to the dictionary
self.annuli[label] = region
# -----------------------------------------------------------------
def load_apertures(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the sky apertures ...")
# Loop over the sky paths
for label in self.sky_paths:
# Look for the apertures FITS file
apertures_path = fs.join(self.sky_paths[label], "apertures.fits")
if not fs.is_file(apertures_path):
log.warning("The apertures image could not be found for " + label)
continue
# Open the apertures image
apertures = Frame.from_file(apertures_path)
# Add the apertures image to the dictionary
self.apertures[label] = apertures
# -----------------------------------------------------------------
def load_errors(self):
"""
This function ...
:param self:
:return:
"""
# Inform the user
log.info("Loading the error frames ...")
# Load the total errors
self.load_total_errors()
# Load the poisson errors
self.load_poisson_errors()
# Load the sky errors
self.load_sky_errors()
# Load the calibration errors
self.load_calibration_errors()
# -----------------------------------------------------------------
def load_total_errors(self):
"""
This function ...
:return:
"""
# Loop over the image paths
for label in self.result_paths:
# Open the errors frame
frame = Frame.from_file(self.result_paths[label], plane="errors")
# Set the image name
frame.name = label
# Add the error frame to the dictionary
self.errors[label] = frame
# -----------------------------------------------------------------
def load_poisson_errors(self):
"""
This function ...
:return:
"""
# Loop over the image paths
for label in self.result_paths:
# Check if the poisson_errors frame is present in the FITS file
if not "poisson_errors" in get_frame_names(self.result_paths[label]): continue
# Open the poisson errors frame
errors = Frame.from_file(self.result_paths[label], plane="poisson_errors")
# Add the error frame to the dictionary
self.poisson_errors[label] = errors
# -----------------------------------------------------------------
def load_sky_errors(self):
"""
This function ...
:return:
"""
# Loop over the image paths
for label in self.result_paths:
# Check if the sky_errors frame is present in the FITS file
if not "sky_errors" in get_frame_names(self.result_paths[label]):
log.warning("The sky_errors frame is not present in the " + label + " image")
continue
# Open the sky error frame
errors = Frame.from_file(self.result_paths[label], plane="sky_errors")
# Add the error frame to the dictionary
self.sky_errors[label] = errors
# -----------------------------------------------------------------
def load_calibration_errors(self):
"""
This function ...
:return:
"""
# Loop over the image paths
for label in self.result_paths:
# Check if the calibration_errors frame is present in the FITS file
if not "calibration_errors" in get_frame_names(self.result_paths[label]):
log.warning("The calibration_errors frame is not present in the " + label + " image")
continue
# Open the calibration error frame
errors = Frame.from_file(self.result_paths[label], plane="calibration_errors")
# Add the error frame to the dictionary
self.calibration_errors[label] = errors
# -----------------------------------------------------------------
def plot(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting ...")
# Plot a grid of the prepared images
if self.features is None or "images" in self.features: self.plot_images()
# Plot the grid of images with the sources masks and sky annuli overlayed
if self.features is None or "masks_annuli" in self.features: self.plot_masks_and_annuli()
# Plot a grid of the apertures
if self.features is None or "apertures" in self.features: self.plot_apertures()
# Plot the sky values
if self.features is None or "sky" in self.features: self.plot_sky()
# Plot the distributions of the relative errors
if self.features is None or "errors" in self.features: self.plot_errors()
# -----------------------------------------------------------------
def plot_images(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the images ...")
# Create the image plotter
plotter = StandardImageGridPlotter()
# Add the images
for label in self.sorted_labels: plotter.add_image(self.images[label], label)
# Determine the path to the plot file
path = fs.join(self.plot_preparation_path, "preparation.pdf")
# plotter.colormap = "hot"
plotter.vmin = 0.0
plotter.set_title("Prepared images")
# Make the plot
plotter.run(path)
# -----------------------------------------------------------------
def plot_sky(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the sky values ...")
# Create the distribution grid plotter
plotter = DistributionGridPlotter()
sky_path = fs.join(self.plot_preparation_path, "sky")
if not fs.is_directory(sky_path): fs.create_directory(sky_path)
# Loop over the different images
for label in self.sorted_labels:
not_nan = Mask.is_nan(self.images[label]).inverse()
# Create the distribution from the image pixel values
distribution = Distribution.from_values("Pixel value", self.images[label][not_nan].flatten() + self.sky_values[label])
# Create an array of all the pixels used for estimating the sky
#notnan = np.logical_not(np.isnan(self.apertures))
#print(self.apertures.dtype)
notnan = Mask.is_nan(self.apertures[label]).inverse()
sky_values = self.apertures[label][notnan]
# Create the distribution of pixel values used for the sky estimation
sky_distribution = Distribution.from_values("Pixel value", sky_values)
# Add the distributions
plotter.add_distribution(distribution, label)
plotter.add_distribution(sky_distribution, label)
# Plot seperately
distr_plotter = DistributionPlotter()
distr_plotter.add_distribution(distribution, "image")
distr_plotter.add_distribution(sky_distribution, "sky")
distr_plotter.run(fs.join(sky_path, label + ".pdf"))
# Determine the path to the plot file
path = fs.join(self.plot_preparation_path, "sky_distribution.pdf")
# Run the plotter
plotter.run(path)
# -----------------------------------------------------------------
def plot_errors(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the errors ...")
# Plot histograms of the absolute error values
#self.plot_error_histograms_absolute()
# Plot histograms of the relative error values
#self.plot_error_histograms_relative()
# Plot the relative errors of each pixel
self.plot_errors_pixels()
# -----------------------------------------------------------------
def plot_error_histograms_absolute(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the absolute error values in a histogram for each prepared image compared to the histogram of the actual image values ...")
# Create the distribution grid plotter
plotter = DistributionGridPlotter()
absolute_errors_path = fs.join(self.plot_preparation_path, "absolute_errors")
if not fs.is_directory(absolute_errors_path): fs.create_directory(absolute_errors_path)
# Loop over the different images
for label in self.sorted_labels:
not_nan = Mask.is_nan(self.images[label]).inverse()
# Create the distribution from the image pixel values
distribution = Distribution.from_values("Pixel value", self.images[label][not_nan].flatten())
not_nan = Mask.is_nan(self.errors[label]).inverse()
# Create the distribution from the error values
error_distribution = Distribution.from_values("Error", self.errors[label][not_nan].flatten())
# Add an entry to the distribution grid plotter
plotter.add_distribution(distribution, label)
plotter.add_distribution(error_distribution, label)
# Plot seperately
distr_plotter = DistributionPlotter()
distr_plotter.add_distribution(distribution, "image")
distr_plotter.add_distribution(error_distribution, "absolute errors")
distr_plotter.run(fs.join(absolute_errors_path, label + ".pdf"))
# Determine the path to the plot file
path = fs.join(self.plot_preparation_path, "absolute_errors.pdf")
# Run the plotter
plotter.run(path)
# -----------------------------------------------------------------
def plot_error_histograms_relative(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the relative error values in a histogram for each prepared image ...")
# Create the distribution grid plotter
plotter = DistributionGridPlotter()
relative_errors_path = fs.join(self.plot_preparation_path, "relative_errors")
if not fs.is_directory(relative_errors_path): fs.create_directory(relative_errors_path)
# Loop over the different images
for label in self.sorted_labels:
# Calculate the relative errors
rel_errors = self.errors[label] / self.images[label]
# Create a distribution from the relative errors
rel_error_distribution = Distribution.from_values("Relative error", rel_errors)
# Add the distribution to the plotter
plotter.add_distribution(rel_error_distribution, label)
# Plot seperately
rel_error_distribution.plot(title="relative errors", path=fs.join(relative_errors_path, label + ".pdf"))
# Determine the path to the plot file
path = fs.join(self.plot_preparation_path, "relative_errors.pdf")
# Run the plotter
plotter.run(path)
# -----------------------------------------------------------------
def plot_errors_pixels(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the relative error for each pixel in each prepared image ...")
# Create the ErrorPlotter instance
plotter = ErrorPlotter()
# Determine the path to the plot file
path = fs.join(self.plot_preparation_path, "errors_pixels.png")
# Run the plotter
plotter.run(path)
# -----------------------------------------------------------------
def plot_masks_and_annuli(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the images with the sources masks and sky annuli overlayed ...")
# Create the image plotter
plotter = StandardImageGridPlotter()
# Add the images
for label in self.sorted_labels: plotter.add_image(self.images[label], label, mask=self.sources_masks[label], region=self.annuli[label])
# Determine the path to the plot file
path = fs.join(self.plot_preparation_path, "preparation_masks_annuli.pdf")
plotter.vmin = 0.0
plotter.set_title("Prepared images with sources masks and sky annuli")
# Make the plot
plotter.run(path)
# -----------------------------------------------------------------
def plot_apertures(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the aperture frames with the sky annuli overlayed ...")
# Create the image plotter
plotter = StandardImageGridPlotter()
# Add the images
for label in self.sorted_labels: plotter.add_image(self.apertures[label], label, region=self.annuli[label])
# Determine the path to the plot file
path = fs.join(self.plot_preparation_path, "preparation_apertures.pdf")
plotter.vmin = 0.0
plotter.set_title("Aperture frames with sky annuli")
# Make the plot
plotter.run(path)
# -----------------------------------------------------------------
@lazyproperty
def sorted_labels(self):
"""
This function ...
:return:
"""
sorted_labels = sorted(self.images.keys(), key=lambda key: self.images[key].filter.pivotwavelength())
return sorted_labels
# -----------------------------------------------------------------
| SKIRT/PTS | modeling/plotting/preparation.py | Python | agpl-3.0 | 22,533 | [
"Galaxy"
] | 57fa3d249a1fac2fcb3c342cd26928405d73d6723f210502565a53fced8175a7 |
"""
ARD gplvm with one covaraince structure per dimension (at least implicitly)
"""
import sys
sys.path.append('./../..')
from pygp.gp import GP
import pygp.gp.gplvm as GPLVM
import pdb
from pygp.optimize.optimize_base import opt_hyper
import scipy as SP
import scipy.linalg as linalg
import copy
#Verbose implements all gradients and evaluations using naive methods and inlcudes debug asserts
VERBOSE=False
def PCA(Y, components):
"""run PCA, retrieving the first (components) principle components
return [s0,w0]
s0: factors
w0: weights
"""
sv = linalg.svd(Y, full_matrices=0);
[s0, w0] = [sv[0][:, 0:components], SP.dot(SP.diag(sv[1]), sv[2]).T[:, 0:components]]
v = s0.std(axis=0)
s0 /= v;
w0 *= v;
return [s0, w0]
class GPLVMARD(GPLVM.GPLVM):
"""
derived class form GP offering GPLVM specific functionality
"""
def __init__(self, *args, **kw_args):
"""gplvm_dimensions: dimensions to learn using gplvm, default -1; i.e. all"""
super(GPLVM.GPLVM, self).__init__(*args,**kw_args)
def get_covariances(self,hyperparams):
if not self._is_cached(hyperparams) or self._active_set_indices_changed:
#update covariance structure
K = self.covar.K(hyperparams['covar'],self.x)
#calc eigenvalue decomposition
[S,U] = SP.linalg.eigh(K)
#noise diagonal
#depending on noise model this may be either a vector or a matrix
Knoise = self.likelihood.Kdiag(hyperparams['lik'],self.x)
#noise version of S
Sn = Knoise + SP.tile(S[:,SP.newaxis],[1,self.d])
#inverse
Si = 1./Sn
#rotate data
y_rot = SP.dot(U.T,self.y)
#also store version of data rotated and Si applied
y_roti = (y_rot*Si)
self._covar_cache = {'S':S,'U':U,'K':K,'Knoise':Knoise,'Sn':Sn,'Si':Si,'y_rot':y_rot,'y_roti':y_roti}
self._covar_cache['hyperparams'] = copy.deepcopy(hyperparams)
pass
#return update covar cache
return self._covar_cache
####PRIVATE####
def _LML_covar(self, hyperparams):
"""
log marginal likelihood contributions from covariance hyperparameters
"""
try:
KV = self.get_covariances(hyperparams)
except linalg.LinAlgError:
LG.error("exception caught (%s)" % (str(hyperparams)))
return 1E6
#all in one go
#negative log marginal likelihood, see derivations
lquad = 0.5* (KV['y_rot']*KV['Si']*KV['y_rot']).sum()
ldet = 0.5*-SP.log(KV['Si'][:,:]).sum()
LML = 0.5*self.n*self.d * SP.log(2*SP.pi) + lquad + ldet
if VERBOSE:
#1. slow and explicit way
lmls_ = SP.zeros([self.d])
for i in xrange(self.d):
_y = self.y[:,i]
sigma2 = SP.exp(2*hyperparams['lik'])
_K = KV['K'] + SP.diag(KV['Knoise'][:,i])
_Ki = SP.linalg.inv(_K)
lquad_ = 0.5 * SP.dot(_y,SP.dot(_Ki,_y))
ldet_ = 0.5 * SP.log(SP.linalg.det(_K))
lmls_[i] = 0.5 * self.n* SP.log(2*SP.pi) + lquad_ + ldet_
assert SP.absolute(lmls_.sum()-LML)<1E-3, 'outch'
return LML
def _LMLgrad_covar(self, hyperparams):
logtheta = hyperparams['covar']
try:
KV = self.get_covariances(hyperparams)
except linalg.LinAlgError:
LG.error("exception caught (%s)" % (str(hyperparams)))
return 1E6
LMLgrad = SP.zeros(len(logtheta))
for i in xrange(len(logtheta)):
#1. derivative of the log det term
Kd = self.covar.Kgrad_theta(hyperparams['covar'], self._get_x(), i)
#rotate Kd with U, U.T
Kd_rot = SP.dot(SP.dot(KV['U'].T,Kd),KV['U'])
#now loop over the various different noise levels which is efficient at this point
dldet = 0.5*(Kd_rot.diagonal()[:,SP.newaxis]*KV['Si']).sum()
#2. deriative of the quadratic term
y_roti = KV['y_roti']
DKy_roti = SP.dot(Kd_rot,KV['y_roti'])
dlquad = -0.5*(y_roti*DKy_roti).sum()
if VERBOSE:
dldet_ = SP.zeros([self.d])
dlquad_ = SP.zeros([self.d])
for d in xrange(self.d):
_K = KV['K'] + SP.diag(KV['Knoise'][:,d])
_Ki = SP.linalg.inv(_K)
dldet_[d] = 0.5*SP.dot(_Ki,Kd).trace()
dKq = SP.dot(SP.dot(_Ki,Kd),_Ki)
dlquad_[d] = -0.5*SP.dot(SP.dot(self.y[:,d],dKq),self.y[:,d])
assert SP.absolute(dldet-dldet_.sum())<1E-3, 'outch'
assert SP.absolute(dlquad-dlquad_.sum())<1E-3, 'outch'
#set results
LMLgrad[i] = dldet + dlquad
RV = {'covar': LMLgrad}
return RV
def _LMLgrad_lik(self,hyperparams):
"""derivative of the likelihood parameters"""
logtheta = hyperparams['covar']
try:
KV = self.get_covariances(hyperparams)
except linalg.LinAlgError:
LG.error("exception caught (%s)" % (str(hyperparams)))
return 1E6
#loop through all dimensions
#logdet term:
Kd = 2*KV['Knoise']
dldet = 0.5*(Kd*KV['Si']).sum(axis=0)
#quadratic term
y_roti = KV['y_roti']
dlquad = -0.5 * (y_roti * Kd * y_roti).sum(axis=0)
if VERBOSE:
dldet_ = SP.zeros([self.d])
dlquad_ = SP.zeros([self.d])
for d in xrange(self.d):
_K = KV['K'] + SP.diag(KV['Knoise'][:,d])
_Ki = SP.linalg.inv(_K)
dldet_[d] = 0.5* SP.dot(_Ki,SP.diag(Kd[:,d])).trace()
dlquad_[d] = -0.5*SP.dot(self.y[:,d],SP.dot(_Ki,SP.dot(SP.diag(Kd[:,d]),SP.dot(_Ki,self.y[:,d]))))
assert (SP.absolute(dldet-dldet_)<1E-3).all(), 'outch'
assert (SP.absolute(dlquad-dlquad_)<1E-3).all(), 'outch'
LMLgrad = dldet + dlquad
RV = {'lik': LMLgrad}
return RV
def _LMLgrad_x(self, hyperparams):
"""GPLVM derivative w.r.t. to latent variables
"""
if not 'x' in hyperparams:
return {}
try:
KV = self.get_covariances(hyperparams)
except linalg.LinAlgError:
LG.error("exception caught (%s)" % (str(hyperparams)))
return 1E6
pass
dlMl = SP.zeros([self.n,len(self.gplvm_dimensions)])
#dlMl_det = SP.zeros([self.n,len(self.gplvm_dimensions)])
#dlMl_quad = SP.zeros([self.n,len(self.gplvm_dimensions)])
#U*Si*y
UYi=SP.dot(KV['U'],KV['y_roti'])
for i in xrange(len(self.gplvm_dimensions)):
d = self.gplvm_dimensions[i]
#dKx is general, not knowing that we are computing the diagonal:
dKx = self.covar.Kgrad_x(hyperparams['covar'], self.x, self.x, d)
#vector with all diagonals of SP.dot(SP.dot(KV['U'].T,dKxn),KV['U']) for n=1..N
dKx_rot = 2*KV['U']*SP.dot(dKx,KV['U'])
#caching for easier construction below
dKx_U = SP.dot(dKx,UYi)
if 0:
# an attept to vectorize this but I think we should use pyrex and dont make this code completely unreadable.
#log det
dKx_rot_tile = SP.tile(dKx_rot[:,:,SP.newaxis],[1,1,self.d])
Si_tile = SP.tile(KV['Si'][SP.newaxis,:,:],[self.n,1,1])
dlMl_det[:,i] = 0.5*( dKx_rot_tile*Si_tile).sum(axis=2).sum(axis=1)
#quad
UYi_tile = SP.tile(UYi[SP.newaxis,:,:],[self.n,1,1])
dxU_tile = SP.zeros([self.n,self.n,self.d])
dxU_tile[:,:,:] = SP.tile(dKx[:,:,SP.newaxis],[1,1,self.d])
dxU_tile[:,:,:] *= SP.tile(UYi[:,SP.newaxis,:],[1,self.n,1])
#dxU_tile[:,:,:] += SP.tile(dKx_U[:,SP.newaxis,:],[1,self.n,1])
dlMl_quad[:,i] = -0.5*dxU_tile.sum(axis=2).sum(axis=1)
for n in xrange(self.n):
dldet = 0.5* (dKx_rot[n,:][:,SP.newaxis]*KV['Si']).sum()
#create SP.dot(dKxn,Uyi) using precaclulated dKx_U vectors
dxU = SP.zeros([self.n, self.d])
dxU[n,:] = dKx_U[n,:]
dxU[:,:] += SP.outer(dKx[n,:],UYi[n,:])
#the res ist now UYi[:,d] * dxU[:,i], pointwise multiplication to do this for all d at the same time and some over them:
dlquad = -0.5*(UYi*dxU).sum()
dlMl[n,i] = dldet + dlquad
if VERBOSE:
#naive way
dKxn = SP.zeros([self.n, self.n])
dKxn[n, :] = dKx[n, :]
dKxn[:, n] += dKx[n, :]
Kd_rot = SP.dot(SP.dot(KV['U'].T,dKxn),KV['U'])
y_roti = KV['y_roti']
DKy_roti = SP.dot(Kd_rot,KV['y_roti'])
dldet = 0.5*(Kd_rot.diagonal()[:,SP.newaxis]*KV['Si']).sum()
dlquad = -0.5*(y_roti*DKy_roti).sum()
assert SP.absolute(dlMl[n,i]-(dldet+dlquad)).max()<1E-5 , 'outch'
pass
RV = {'x':dlMl}
return RV
if __name__ == '__main__':
from pygp.gp import gplvm
from pygp.covar import linear,se, noise, combinators
import pygp.optimize as opt
import pygp.plot.gpr_plot as gpr_plot
import pygp.priors.lnpriors as lnpriors
import pygp.likelihood as lik
import optimize_test
import copy
import logging as LG
LG.basicConfig(level=LG.INFO)
#1. simulate data from a linear PCA model
N = 200
K = 3
D = 10
SP.random.seed(1)
S = SP.random.randn(N,K)
W = SP.random.randn(D,K)
Y = SP.dot(W,S.T).T
sim_fa_noise = False
if sim_fa_noise:
#inerpolate noise levels
noise_levels = SP.linspace(0.1,1.0,Y.shape[1])
Ynoise =noise_levels*random.randn(N,D)
Y+=Ynoise
else:
Y+= 0.1*SP.random.randn(N,D)
#use "standard PCA"
[Spca,Wpca] = gplvm.PCA(Y,K)
#reconstruction
Y_ = SP.dot(Spca,Wpca.T)
covariance = linear.LinearCFISO(n_dimensions=K)
hyperparams = {'covar': SP.log([1.2])}
hyperparams_fa = {'covar': SP.log([1.2])}
#factor analysis noise
likelihood_fa = lik.GaussLikARD(n_dimensions=D)
hyperparams_fa['lik'] = SP.log(0.1*SP.ones(Y.shape[1]))
#standard Gaussian noise
likelihood = lik.GaussLikISO()
hyperparams['lik'] = SP.log([0.1])
#initialization of X at arandom
X0 = SP.random.randn(N,K)
X0 = Spca
hyperparams['x'] = X0
hyperparams_fa['x'] = X0
#try evaluating marginal likelihood first
#del(hyperparams['x'])
#del(hyperparams_fa['x'])
g_fa = GPLVMARD(covar_func=covariance,likelihood=likelihood_fa,x=X0,y=Y)
g = gplvm.GPLVM(covar_func=covariance,likelihood=likelihood,x=X0,y=Y)
if 1:
lml=g.LML(hyperparams)
lml_fa = g_fa.LML(hyperparams_fa)
dg = g.LMLgrad(hyperparams)
dg_fa = g_fa.LMLgrad(hyperparams_fa)
#hyperparams['covar'] = SP.array([-0.02438411])
if 0:
#manual gradcheck
relchange = 1E-5;
change = hyperparams['covar'][0]*relchange
hyperparams_ = copy.deepcopy(hyperparams)
xp = hyperparams['covar'][0] + change
pdb.set_trace()
hyperparams_['covar'][0] = xp
Lp = g.LML(hyperparams_)
xm = hyperparams['covar'][0] - change
hyperparams_['covar'][0] = xm
Lm = g.LML(hyperparams_)
diff = (Lp-Lm)/(2.*change)
anal = g.LMLgrad(hyperparams)
Ifilter_fa = {}
for key in hyperparams_fa:
Ifilter_fa[key] = SP.ones(hyperparams_fa[key].shape,dtype='bool')
#Ifilter_fa['lik'][:] = False
Ifilter = {}
for key in hyperparams:
Ifilter[key] = SP.ones(hyperparams[key].shape,dtype='bool')
#Ifilter['lik'][:] = False
#[opt_hyperparams,opt_lml] = opt.opt_hyper(g,hyperparams,gradcheck=True,Ifilter=Ifilter)
# hyperparams['covar'] = opt_hyperparams['covar']
# hyperparams['x'] = opt_hyperparams['x']
# hyperparams_fa['covar'] = opt_hyperparams['covar']
# hyperparams_fa['x'] = opt_hyperparams['x']
[opt_hyperparams_fa,opt_lml_fa] = opt.opt_hyper(g_fa,hyperparams_fa,gradcheck=True,Ifilter=Ifilter)
#[opt_hyperparams_fa,opt_lml_fa] = optimize_test.opt_hyper(g_fa,g,hyperparams_fa,hyperparams,Ifilter=Ifilter_fa,Ifilter2=Ifilter,gradcheck=True)
if 0:
lml=g.LML(opt_hyperparams)
lml_fa = g_fa.LML(hyperparams_fa)
dg = g.LMLgrad(opt_hyperparams)
dg_fa = g_fa.LMLgrad(hyperparams_fa)
#[opt_hyperparams,opt_lml] = opt.opt_hyper(g_fa,hyperparams_fa,gradcheck=True,Ifilter=Ifilter_fa)
| PMBio/pygp | pygp/gp/gplvm_ard.py | Python | gpl-2.0 | 13,046 | [
"Gaussian"
] | f4b632fb8c245477e14995fcc8a07429a9c888b45c88e5f4b1af0c7f46794d24 |
"""atom.py: ...
"""
import numpy as np
from numpy import nan
import scipy.linalg as spl
import sympy.physics.units as u
import piratechem as pc
class Atom(pc.atom.Atom):
"""Allow each atom to contain more specific quantum chemical
properties than piratechem can currently handle.
"""
def __init__(self, index, name, r):
pc.atom.Atom.__init__(self, name, r)
self.index = index
self.nmr = NMR()
self.hyperfine = Hyperfine()
self.efg = EFG()
self.euler = Euler()
def __str__(self):
s = "Atom(%d, %s, [%6.3f, %6.3f, %6.3f])"
return s % (self.index, self.name, self.posx, self.posy, self.posz)
class Euler(object):
"""Store all possible Euler angle information for a single atom.
"""
def __init__(self):
self.hyperfine = self.Hyperfine()
self.efg = self.EFG()
class Hyperfine:
"""Store the Euler angle information for the atom's hyperfine tensor.
"""
def __init__(self):
self.alpha = self.beta = self.gamma = nan
self.ax = self.ay = self.az = nan
def __str__(self):
s = "EulerHyperfine([{0}, {1}, {2}]; [{3} {4} {5}])"
return s.format(self.alpha, self.beta, self.gamma,
self.ax, self.ay, self.az)
def return_angles(self):
"""Return the three Euler angles as a NumPy row vector.
"""
return np.array([self.alpha, self.beta, self.gamma])
class EFG:
"""Store the Euler angle information for the atom's electric field
gradient (EFG) tensor.
"""
def __init__(self):
self.alpha = self.beta = self.gamma = nan
self.efgx = self.efgy = self.efgz = nan
def __str__(self):
s = "EulerEFG([{0}, {1}, {2}]; [{3} {4} {5}])"
return s.format(self.alpha, self.beta, self.gamma,
self.efgx, self.efgy, self.efgz)
def return_angles(self):
"""Return the three Euler angles as a NumPy row vector.
"""
return np.array([self.alpha, self.beta, self.gamma])
class NMR:
"""Hold all of the fields that may be present in the output file from
an NMR shift calculation.
"""
def __init__(self):
self.shiftmat = np.array([[nan, nan, nan],
[nan, nan, nan],
[nan, nan, nan]])
self.sdso = np.array([nan, nan, nan])
self.spso = np.array([nan, nan, nan])
self.shiftpri = np.array([nan, nan, nan])
self.sdso_iso = nan
self.spso_iso = nan
self.shiftiso = nan
self.shiftori = np.array([[nan, nan, nan],
[nan, nan, nan],
[nan, nan, nan]])
self.eigvals = np.array([nan, nan, nan])
self.iso = nan
def __str__(self):
s = "NMR([{0} {1} {2}]; {3})"
return s.format(self.shiftpri[0],
self.shiftpri[1],
self.shiftpri[2],
self.shiftiso)
def _scale(self):
"""Convert the absolute values given by ORCA to ppm (mutate in
place).
"""
abs_to_ppm = 1e6
self.shiftmat *= abs_to_ppm
self.sdso *= abs_to_ppm
self.spso *= abs_to_ppm
self.shiftpri *= abs_to_ppm
self.sdso_iso *= abs_to_ppm
self.spso_iso *= abs_to_ppm
self.shiftiso *= abs_to_ppm
def _diag(self):
"""Diagonalize the raw shift matrix to get the three principal shift
values, and use them to calculate an isotropic result.
"""
self.eigvals = np.sqrt(spl.eigvals(
np.dot(self.shiftmat.T, self.shiftmat)).real)
self.iso = np.sum(self.eigvals) / 3.0
class Hyperfine:
"""Hold all of the fields that may be present in the output file from
an electron-nuclear hyperfine interaction calculation.
"""
def __init__(self):
self.aiso = nan
self.atensor = np.array([nan, nan, nan])
self.amatrix = np.array([[nan, nan, nan],
[nan, nan, nan],
[nan, nan, nan]])
self.afc = np.array([nan, nan, nan])
self.asd = np.array([nan, nan, nan])
self.aso = np.array([nan, nan, nan])
self.apc = nan
self.aori = np.array([[nan, nan, nan],
[nan, nan, nan],
[nan, nan, nan]])
self.rho = nan
self.tdip = nan
def __str__(self):
s = "Hyperfine([{0} {1} {2}]; {3})"
return s.format(self.atensor[0],
self.atensor[1],
self.atensor[2],
self.aiso)
def _calc_eff_spin_params(self):
"""Calculate the rho and T_dip terms that appear [...]
"""
Axx, Ayy, Azz = self.atensor[0], self.atensor[1], self.atensor[2]
Aiso = self.aiso
rho = (3*Aiso - 2*Axx - Azz)/(Aiso - Azz)
# rho = (-3*Aiso + 2*Ayy + Azz)/(Aiso - Azz)
# need to add an assertion that both of these are equal
# tdip = (-Aiso + Axx)/(rho - 1)
# tdip = (Aiso - Ayy)/(rho + 1)
tdip = (Azz - Aiso)/2
# need to add an assertion that these three are equal
self.rho = rho
self.tdip = tdip
class EFG:
"""Hold all of the fields that may be present in the output file from
an electric field gradient calculation.
"""
def __init__(self):
self.vmatrix = np.array([[nan, nan, nan],
[nan, nan, nan],
[nan, nan, nan]])
self.vel = np.array([nan, nan, nan])
self.vnuc = np.array([nan, nan, nan])
self.vtot = np.array([nan, nan, nan])
self.vori = np.array([[nan, nan, nan],
[nan, nan, nan],
[nan, nan, nan]])
self.nqcc = nan
self.k = nan
self.eta = nan
self.px = nan
self.py = nan
self.pz = nan
self.p = np.array([nan, nan, nan])
def __str__(self):
s = "EFG([{0} {1} {2}]; {3})"
return s.format(self.vtot[0],
self.vtot[1],
self.vtot[2],
self.nqcc)
def _calc_nqi_tensor(self):
"""Calculate the diagonal representation of the NQI tensor as
I*Q*I = e**2qQ/(4I(2I-1))*[-(1-eta),-(1+eta),2].
"""
self.px = self.k * (-(1-self.eta))
self.py = self.k * (-(1+self.eta))
self.pz = self.k * 2
self.p = np.array([self.px, self.py, self.pz])
# eta = (self.px - self.py)/self.pz
    def _diag(self):
        """Diagonalize the field-gradient matrix and recompute the asymmetry
        parameter from the magnitude-sorted principal values.

        NOTE(review): this method looks unfinished -- 'eta', 'e', 'planck',
        and 'barn' are computed but never stored on self or returned here.
        """
        # NOTE(review): 'self.vmaxtrix' is never assigned anywhere in this
        # class; almost certainly a typo for 'self.vmatrix' and would raise
        # AttributeError when called. Confirm and fix.
        eigvals = spl.eigvalsh(self.vmaxtrix)
        # needs an assertion against self.vtot
        # Order principal values by increasing magnitude: |Vxx|<=|Vyy|<=|Vzz|.
        V_xx, V_yy, V_zz = sorted(eigvals, key = lambda x: abs(x))
        # needs an assertion against self.eta
        eta = (V_xx - V_yy) / V_zz
        # 'u' is presumably a units registry imported at module level --
        # TODO confirm.
        e = float(u.eV / u.J)
        planck = float(u.planck / (u.J * u.s))
        # NOTE(review): 1 barn = 1e-28 m**2, but 10e-28 == 1e-27 -- verify
        # the intended value.
        barn = 10e-28
| berquist/orcaparse | orcaparse/atom.py | Python | mpl-2.0 | 7,226 | [
"ORCA"
] | 3bf3846332074f6bafadefefc9c150e5d0b889d80c89946d50a6613dd8210358 |
import itertools
from math import sqrt
import os
import re
from warnings import warn
# Isotopic abundances from Meija J, Coplen T B, et al, "Isotopic compositions
# of the elements 2013 (IUPAC Technical Report)", Pure. Appl. Chem. 88 (3),
# pp. 293-306 (2013). The "representative isotopic abundance" values from
# column 9 are used except where an interval is given, in which case the
# "best measurement" is used.
# Note that the abundances are given as atomic fractions!
NATURAL_ABUNDANCE = {
'H1': 0.99984426, 'H2': 0.00015574, 'He3': 0.000002,
'He4': 0.999998, 'Li6': 0.07589, 'Li7': 0.92411,
'Be9': 1.0, 'B10': 0.1982, 'B11': 0.8018,
'C12': 0.988922, 'C13': 0.011078, 'N14': 0.996337,
'N15': 0.003663, 'O16': 0.9976206, 'O17': 0.000379,
'O18': 0.0020004, 'F19': 1.0, 'Ne20': 0.9048,
'Ne21': 0.0027, 'Ne22': 0.0925, 'Na23': 1.0,
'Mg24': 0.78951, 'Mg25': 0.1002, 'Mg26': 0.11029,
'Al27': 1.0, 'Si28': 0.9222968, 'Si29': 0.0468316,
'Si30': 0.0308716, 'P31': 1.0, 'S32': 0.9504074,
'S33': 0.0074869, 'S34': 0.0419599, 'S36': 0.0001458,
'Cl35': 0.757647, 'Cl37': 0.242353, 'Ar36': 0.003336,
'Ar38': 0.000629, 'Ar40': 0.996035, 'K39': 0.932581,
'K40': 0.000117, 'K41': 0.067302, 'Ca40': 0.96941,
'Ca42': 0.00647, 'Ca43': 0.00135, 'Ca44': 0.02086,
'Ca46': 0.00004, 'Ca48': 0.00187, 'Sc45': 1.0,
'Ti46': 0.0825, 'Ti47': 0.0744, 'Ti48': 0.7372,
'Ti49': 0.0541, 'Ti50': 0.0518, 'V50': 0.0025,
'V51': 0.9975, 'Cr50': 0.04345, 'Cr52': 0.83789,
'Cr53': 0.09501, 'Cr54': 0.02365, 'Mn55': 1.0,
'Fe54': 0.05845, 'Fe56': 0.91754, 'Fe57': 0.02119,
'Fe58': 0.00282, 'Co59': 1.0, 'Ni58': 0.680769,
'Ni60': 0.262231, 'Ni61': 0.011399, 'Ni62': 0.036345,
'Ni64': 0.009256, 'Cu63': 0.6915, 'Cu65': 0.3085,
'Zn64': 0.4917, 'Zn66': 0.2773, 'Zn67': 0.0404,
'Zn68': 0.1845, 'Zn70': 0.0061, 'Ga69': 0.60108,
'Ga71': 0.39892, 'Ge70': 0.2052, 'Ge72': 0.2745,
'Ge73': 0.0776, 'Ge74': 0.3652, 'Ge76': 0.0775,
'As75': 1.0, 'Se74': 0.0086, 'Se76': 0.0923,
'Se77': 0.076, 'Se78': 0.2369, 'Se80': 0.498,
'Se82': 0.0882, 'Br79': 0.50686, 'Br81': 0.49314,
'Kr78': 0.00355, 'Kr80': 0.02286, 'Kr82': 0.11593,
'Kr83': 0.115, 'Kr84': 0.56987, 'Kr86': 0.17279,
'Rb85': 0.7217, 'Rb87': 0.2783, 'Sr84': 0.0056,
'Sr86': 0.0986, 'Sr87': 0.07, 'Sr88': 0.8258,
'Y89': 1.0, 'Zr90': 0.5145, 'Zr91': 0.1122,
'Zr92': 0.1715, 'Zr94': 0.1738, 'Zr96': 0.028,
'Nb93': 1.0, 'Mo92': 0.14649, 'Mo94': 0.09187,
'Mo95': 0.15873, 'Mo96': 0.16673, 'Mo97': 0.09582,
'Mo98': 0.24292, 'Mo100': 0.09744, 'Ru96': 0.0554,
'Ru98': 0.0187, 'Ru99': 0.1276, 'Ru100': 0.126,
'Ru101': 0.1706, 'Ru102': 0.3155, 'Ru104': 0.1862,
'Rh103': 1.0, 'Pd102': 0.0102, 'Pd104': 0.1114,
'Pd105': 0.2233, 'Pd106': 0.2733, 'Pd108': 0.2646,
'Pd110': 0.1172, 'Ag107': 0.51839, 'Ag109': 0.48161,
'Cd106': 0.01245, 'Cd108': 0.00888, 'Cd110': 0.1247,
'Cd111': 0.12795, 'Cd112': 0.24109, 'Cd113': 0.12227,
'Cd114': 0.28754, 'Cd116': 0.07512, 'In113': 0.04281,
'In115': 0.95719, 'Sn112': 0.0097, 'Sn114': 0.0066,
'Sn115': 0.0034, 'Sn116': 0.1454, 'Sn117': 0.0768,
'Sn118': 0.2422, 'Sn119': 0.0859, 'Sn120': 0.3258,
'Sn122': 0.0463, 'Sn124': 0.0579, 'Sb121': 0.5721,
'Sb123': 0.4279, 'Te120': 0.0009, 'Te122': 0.0255,
'Te123': 0.0089, 'Te124': 0.0474, 'Te125': 0.0707,
'Te126': 0.1884, 'Te128': 0.3174, 'Te130': 0.3408,
'I127': 1.0, 'Xe124': 0.00095, 'Xe126': 0.00089,
'Xe128': 0.0191, 'Xe129': 0.26401, 'Xe130': 0.04071,
'Xe131': 0.21232, 'Xe132': 0.26909, 'Xe134': 0.10436,
'Xe136': 0.08857, 'Cs133': 1.0, 'Ba130': 0.0011,
'Ba132': 0.001, 'Ba134': 0.0242, 'Ba135': 0.0659,
'Ba136': 0.0785, 'Ba137': 0.1123, 'Ba138': 0.717,
'La138': 0.0008881, 'La139': 0.9991119, 'Ce136': 0.00186,
'Ce138': 0.00251, 'Ce140': 0.88449, 'Ce142': 0.11114,
'Pr141': 1.0, 'Nd142': 0.27153, 'Nd143': 0.12173,
'Nd144': 0.23798, 'Nd145': 0.08293, 'Nd146': 0.17189,
'Nd148': 0.05756, 'Nd150': 0.05638, 'Sm144': 0.0308,
'Sm147': 0.15, 'Sm148': 0.1125, 'Sm149': 0.1382,
'Sm150': 0.0737, 'Sm152': 0.2674, 'Sm154': 0.2274,
'Eu151': 0.4781, 'Eu153': 0.5219, 'Gd152': 0.002,
'Gd154': 0.0218, 'Gd155': 0.148, 'Gd156': 0.2047,
'Gd157': 0.1565, 'Gd158': 0.2484, 'Gd160': 0.2186,
'Tb159': 1.0, 'Dy156': 0.00056, 'Dy158': 0.00095,
'Dy160': 0.02329, 'Dy161': 0.18889, 'Dy162': 0.25475,
'Dy163': 0.24896, 'Dy164': 0.2826, 'Ho165': 1.0,
'Er162': 0.00139, 'Er164': 0.01601, 'Er166': 0.33503,
'Er167': 0.22869, 'Er168': 0.26978, 'Er170': 0.1491,
'Tm169': 1.0, 'Yb168': 0.00123, 'Yb170': 0.02982,
'Yb171': 0.14086, 'Yb172': 0.21686, 'Yb173': 0.16103,
'Yb174': 0.32025, 'Yb176': 0.12995, 'Lu175': 0.97401,
'Lu176': 0.02599, 'Hf174': 0.0016, 'Hf176': 0.0526,
'Hf177': 0.186, 'Hf178': 0.2728, 'Hf179': 0.1362,
'Hf180': 0.3508, 'Ta180': 0.0001201, 'Ta181': 0.9998799,
'W180': 0.0012, 'W182': 0.265, 'W183': 0.1431,
'W184': 0.3064, 'W186': 0.2843, 'Re185': 0.374,
'Re187': 0.626, 'Os184': 0.0002, 'Os186': 0.0159,
'Os187': 0.0196, 'Os188': 0.1324, 'Os189': 0.1615,
'Os190': 0.2626, 'Os192': 0.4078, 'Ir191': 0.373,
'Ir193': 0.627, 'Pt190': 0.00012, 'Pt192': 0.00782,
'Pt194': 0.32864, 'Pt195': 0.33775, 'Pt196': 0.25211,
'Pt198': 0.07356, 'Au197': 1.0, 'Hg196': 0.0015,
'Hg198': 0.1004, 'Hg199': 0.1694, 'Hg200': 0.2314,
'Hg201': 0.1317, 'Hg202': 0.2974, 'Hg204': 0.0682,
'Tl203': 0.29524, 'Tl205': 0.70476, 'Pb204': 0.014,
'Pb206': 0.241, 'Pb207': 0.221, 'Pb208': 0.524,
'Bi209': 1.0, 'Th230': 0.0002, 'Th232': 0.9998,
'Pa231': 1.0, 'U234': 0.000054, 'U235': 0.007204,
'U238': 0.992742
}
# Dictionary to give element symbols from IUPAC names
# (and some common mispellings)
ELEMENT_SYMBOL = {'neutron': 'n', 'hydrogen': 'H', 'helium': 'He',
'lithium': 'Li', 'beryllium': 'Be', 'boron': 'B',
'carbon': 'C', 'nitrogen': 'N', 'oxygen': 'O', 'fluorine': 'F',
'neon': 'Ne', 'sodium': 'Na', 'magnesium': 'Mg',
'aluminium': 'Al', 'aluminum': 'Al', 'silicon': 'Si',
'phosphorus': 'P', 'sulfur': 'S', 'sulphur': 'S',
'chlorine': 'Cl', 'argon': 'Ar', 'potassium': 'K',
'calcium': 'Ca', 'scandium': 'Sc', 'titanium': 'Ti',
'vanadium': 'V', 'chromium': 'Cr', 'manganese': 'Mn',
'iron': 'Fe', 'cobalt': 'Co', 'nickel': 'Ni', 'copper': 'Cu',
'zinc': 'Zn', 'gallium': 'Ga', 'germanium': 'Ge',
'arsenic': 'As', 'selenium': 'Se', 'bromine': 'Br',
'krypton': 'Kr', 'rubidium': 'Rb', 'strontium': 'Sr',
'yttrium': 'Y', 'zirconium': 'Zr', 'niobium': 'Nb',
'molybdenum': 'Mo', 'technetium': 'Tc', 'ruthenium': 'Ru',
'rhodium': 'Rh', 'palladium': 'Pd', 'silver': 'Ag',
'cadmium': 'Cd', 'indium': 'In', 'tin': 'Sn', 'antimony': 'Sb',
'tellurium': 'Te', 'iodine': 'I', 'xenon': 'Xe',
'caesium': 'Cs', 'cesium': 'Cs', 'barium': 'Ba',
'lanthanum': 'La', 'cerium': 'Ce', 'praseodymium': 'Pr',
'neodymium': 'Nd', 'promethium': 'Pm', 'samarium': 'Sm',
'europium': 'Eu', 'gadolinium': 'Gd', 'terbium': 'Tb',
'dysprosium': 'Dy', 'holmium': 'Ho', 'erbium': 'Er',
'thulium': 'Tm', 'ytterbium': 'Yb', 'lutetium': 'Lu',
'hafnium': 'Hf', 'tantalum': 'Ta', 'tungsten': 'W',
'wolfram': 'W', 'rhenium': 'Re', 'osmium': 'Os',
'iridium': 'Ir', 'platinum': 'Pt', 'gold': 'Au',
'mercury': 'Hg', 'thallium': 'Tl', 'lead': 'Pb',
'bismuth': 'Bi', 'polonium': 'Po', 'astatine': 'At',
'radon': 'Rn', 'francium': 'Fr', 'radium': 'Ra',
'actinium': 'Ac', 'thorium': 'Th', 'protactinium': 'Pa',
'uranium': 'U', 'neptunium': 'Np', 'plutonium': 'Pu',
'americium': 'Am', 'curium': 'Cm', 'berkelium': 'Bk',
'californium': 'Cf', 'einsteinium': 'Es', 'fermium': 'Fm',
'mendelevium': 'Md', 'nobelium': 'No', 'lawrencium': 'Lr',
'rutherfordium': 'Rf', 'dubnium': 'Db', 'seaborgium': 'Sg',
'bohrium': 'Bh', 'hassium': 'Hs', 'meitnerium': 'Mt',
'darmstadtium': 'Ds', 'roentgenium': 'Rg', 'copernicium': 'Cn',
'nihonium': 'Nh', 'flerovium': 'Fl', 'moscovium': 'Mc',
'livermorium': 'Lv', 'tennessine': 'Ts', 'oganesson': 'Og'}
ATOMIC_SYMBOL = {0: 'n', 1: 'H', 2: 'He', 3: 'Li', 4: 'Be', 5: 'B', 6: 'C',
7: 'N', 8: 'O', 9: 'F', 10: 'Ne', 11: 'Na', 12: 'Mg', 13: 'Al',
14: 'Si', 15: 'P', 16: 'S', 17: 'Cl', 18: 'Ar', 19: 'K',
20: 'Ca', 21: 'Sc', 22: 'Ti', 23: 'V', 24: 'Cr', 25: 'Mn',
26: 'Fe', 27: 'Co', 28: 'Ni', 29: 'Cu', 30: 'Zn', 31: 'Ga',
32: 'Ge', 33: 'As', 34: 'Se', 35: 'Br', 36: 'Kr', 37: 'Rb',
38: 'Sr', 39: 'Y', 40: 'Zr', 41: 'Nb', 42: 'Mo', 43: 'Tc',
44: 'Ru', 45: 'Rh', 46: 'Pd', 47: 'Ag', 48: 'Cd', 49: 'In',
50: 'Sn', 51: 'Sb', 52: 'Te', 53: 'I', 54: 'Xe', 55: 'Cs',
56: 'Ba', 57: 'La', 58: 'Ce', 59: 'Pr', 60: 'Nd', 61: 'Pm',
62: 'Sm', 63: 'Eu', 64: 'Gd', 65: 'Tb', 66: 'Dy', 67: 'Ho',
68: 'Er', 69: 'Tm', 70: 'Yb', 71: 'Lu', 72: 'Hf', 73: 'Ta',
74: 'W', 75: 'Re', 76: 'Os', 77: 'Ir', 78: 'Pt', 79: 'Au',
80: 'Hg', 81: 'Tl', 82: 'Pb', 83: 'Bi', 84: 'Po', 85: 'At',
86: 'Rn', 87: 'Fr', 88: 'Ra', 89: 'Ac', 90: 'Th', 91: 'Pa',
92: 'U', 93: 'Np', 94: 'Pu', 95: 'Am', 96: 'Cm', 97: 'Bk',
98: 'Cf', 99: 'Es', 100: 'Fm', 101: 'Md', 102: 'No',
103: 'Lr', 104: 'Rf', 105: 'Db', 106: 'Sg', 107: 'Bh',
108: 'Hs', 109: 'Mt', 110: 'Ds', 111: 'Rg', 112: 'Cn',
113: 'Nh', 114: 'Fl', 115: 'Mc', 116: 'Lv', 117: 'Ts',
118: 'Og'}
ATOMIC_NUMBER = {value: key for key, value in ATOMIC_SYMBOL.items()}
# Values here are from the Committee on Data for Science and Technology
# (CODATA) 2014 recommendation (doi:10.1103/RevModPhys.88.035009).
# The value of the Boltzman constant in units of eV / K
K_BOLTZMANN = 8.6173303e-5
# Unit conversions
EV_PER_MEV = 1.0e6
JOULE_PER_EV = 1.6021766208e-19
# Avogadro's constant
AVOGADRO = 6.022140857e23
# Neutron mass in units of amu
NEUTRON_MASS = 1.00866491588
# Used in atomic_mass function as a cache
_ATOMIC_MASS = {}
# Regex for GND nuclide names (used in zam function)
_GND_NAME_RE = re.compile(r'([A-Zn][a-z]*)(\d+)((?:_[em]\d+)?)')
def atomic_mass(isotope):
    """Return atomic mass of isotope in atomic mass units.
    Atomic mass data comes from the `Atomic Mass Evaluation 2016
    <https://www-nds.iaea.org/amdc/ame2016/AME2016-a.pdf>`_.
    Parameters
    ----------
    isotope : str
        Name of isotope, e.g., 'Pu239'
    Returns
    -------
    float
        Atomic mass of isotope in [amu]
    """
    if not _ATOMIC_MASS:
        # Lazily populate the module-level cache on the first call.
        # Load data from AME2016 file
        mass_file = os.path.join(os.path.dirname(__file__), 'mass16.txt')
        with open(mass_file, 'r') as ame:
            # Read lines in file starting at line 40
            for line in itertools.islice(ame, 39, None):
                # Fixed-column AME format: element symbol (cols 20-22) plus
                # mass number (cols 16-19) form the key, e.g. 'pu239'.
                name = '{}{}'.format(line[20:22].strip(), int(line[16:19]))
                # Mass is split across two fixed-width fields: an integer
                # amu part plus a micro-amu fractional part.
                mass = float(line[96:99]) + 1e-6*float(
                    line[100:106] + '.' + line[107:112])
                _ATOMIC_MASS[name.lower()] = mass
        # For isotopes found in some libraries that represent all natural
        # isotopes of their element (e.g. C0), calculate the atomic mass as
        # the sum of the atomic mass times the natural abudance of the isotopes
        # that make up the element.
        for element in ['C', 'Zn', 'Pt', 'Os', 'Tl']:
            isotope_zero = element.lower() + '0'
            _ATOMIC_MASS[isotope_zero] = 0.
            for iso, abundance in isotopes(element):
                _ATOMIC_MASS[isotope_zero] += abundance * _ATOMIC_MASS[iso.lower()]
    # Get rid of metastable information
    if '_' in isotope:
        isotope = isotope[:isotope.find('_')]
    # Raises KeyError if the isotope is not present in the AME2016 table.
    return _ATOMIC_MASS[isotope.lower()]
def atomic_weight(element):
    """Return atomic weight of an element in atomic mass units.
    Computes an average of the atomic mass of each of element's naturally
    occurring isotopes weighted by their relative abundance.
    Parameters
    ----------
    element : str
        Element symbol (e.g., 'H') or name (e.g., 'helium')
    Returns
    -------
    float
        Atomic weight of element in [amu]
    """
    # Abundance-weighted average over the naturally occurring isotopes.
    weight = sum(atomic_mass(nuclide) * abundance
                 for nuclide, abundance in isotopes(element))
    if weight > 0.:
        return weight
    raise ValueError("No naturally-occurring isotopes for element '{}'."
                     .format(element))
def water_density(temperature, pressure=0.1013):
    """Return the density of liquid water at a given temperature and pressure.
    The density is calculated from a polynomial fit using equations and values
    from the 2012 version of the IAPWS-IF97 formulation. Only the equations
    for region 1 are implemented here. Region 1 is limited to liquid water
    below 100 [MPa] with a temperature above 273.15 [K], below 623.15 [K], and
    below saturation.
    Reference: International Association for the Properties of Water and Steam,
    "Revised Release on the IAPWS Industrial Formulation 1997 for the
    Thermodynamic Properties of Water and Steam", IAPWS R7-97(2012).
    Parameters
    ----------
    temperature : float
        Water temperature in units of [K]
    pressure : float
        Water pressure in units of [MPa]
    Returns
    -------
    float
        Water density in units of [g/cm^3]
    Raises
    ------
    ValueError
        If the pressure is negative or the temperature is not positive.
    """
    # Make sure the temperature and pressure are inside the min/max region 1
    # bounds. (Relax the 273.15 bound to 273 in case a user wants 0 deg C data
    # but they only use 3 digits for their conversion to K.)
    if pressure > 100.0:
        warn("Results are not valid for pressures above 100 MPa.")
    elif pressure < 0.0:
        raise ValueError("Pressure must be positive.")
    # BUG FIX: the non-positive-temperature check used to be the final branch
    # of the if/elif chain below, after 'temperature < 273'.  Any T <= 0 also
    # satisfies T < 273, so the ValueError was unreachable and invalid
    # temperatures were silently accepted with only a warning.  Check it
    # first instead.
    if temperature <= 0.0:
        raise ValueError('Temperature must be positive.')
    if temperature < 273:
        warn("Results are not valid for temperatures below 273.15 K.")
    elif temperature > 623.15:
        warn("Results are not valid for temperatures above 623.15 K.")
    # IAPWS region 4 parameters
    n4 = [0.11670521452767e4, -0.72421316703206e6, -0.17073846940092e2,
          0.12020824702470e5, -0.32325550322333e7, 0.14915108613530e2,
          -0.48232657361591e4, 0.40511340542057e6, -0.23855557567849,
          0.65017534844798e3]
    # Compute the saturation temperature at the given pressure.
    beta = pressure**(0.25)
    E = beta**2 + n4[2] * beta + n4[5]
    F = n4[0] * beta**2 + n4[3] * beta + n4[6]
    G = n4[1] * beta**2 + n4[4] * beta + n4[7]
    D = 2.0 * G / (-F - sqrt(F**2 - 4 * E * G))
    T_sat = 0.5 * (n4[9] + D
                   - sqrt((n4[9] + D)**2 - 4.0 * (n4[8] + n4[9] * D)))
    # Make sure we aren't above saturation. (Relax this bound by .2 degrees
    # for deg C to K conversions.)
    if temperature > T_sat + 0.2:
        warn("Results are not valid for temperatures above saturation "
             "(above the boiling point).")
    # IAPWS region 1 parameters
    R_GAS_CONSTANT = 0.461526  # kJ / kg / K
    ref_p = 16.53  # MPa
    ref_T = 1386  # K
    n1f = [0.14632971213167, -0.84548187169114, -0.37563603672040e1,
           0.33855169168385e1, -0.95791963387872, 0.15772038513228,
           -0.16616417199501e-1, 0.81214629983568e-3, 0.28319080123804e-3,
           -0.60706301565874e-3, -0.18990068218419e-1, -0.32529748770505e-1,
           -0.21841717175414e-1, -0.52838357969930e-4, -0.47184321073267e-3,
           -0.30001780793026e-3, 0.47661393906987e-4, -0.44141845330846e-5,
           -0.72694996297594e-15, -0.31679644845054e-4, -0.28270797985312e-5,
           -0.85205128120103e-9, -0.22425281908000e-5, -0.65171222895601e-6,
           -0.14341729937924e-12, -0.40516996860117e-6, -0.12734301741641e-8,
           -0.17424871230634e-9, -0.68762131295531e-18, 0.14478307828521e-19,
           0.26335781662795e-22, -0.11947622640071e-22, 0.18228094581404e-23,
           -0.93537087292458e-25]
    I1f = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 4,
           4, 4, 5, 8, 8, 21, 23, 29, 30, 31, 32]
    J1f = [-2, -1, 0, 1, 2, 3, 4, 5, -9, -7, -1, 0, 1, 3, -3, 0, 1, 3, 17, -4,
           0, 6, -5, -2, 10, -8, -11, -6, -29, -31, -38, -39, -40, -41]
    # Nondimensionalize the pressure and temperature.
    pi = pressure / ref_p
    tau = ref_T / temperature
    # Compute the derivative of gamma (dimensionless Gibbs free energy) with
    # respect to pi.
    gamma1_pi = 0.0
    for n, I, J in zip(n1f, I1f, J1f):
        gamma1_pi -= n * I * (7.1 - pi)**(I - 1) * (tau - 1.222)**J
    # Compute the leading coefficient.  This sets the units at
    #   1 [MPa] * [kg K / kJ] * [1 / K]
    #   = 1e6 [N / m^2] * 1e-3 [kg K / N / m] * [1 / K]
    #   = 1e3 [kg / m^3]
    #   = 1 [g / cm^3]
    coeff = pressure / R_GAS_CONSTANT / temperature
    # Compute and return the density (specific volume is d(gamma)/d(pi)).
    return coeff / pi / gamma1_pi
def gnd_name(Z, A, m=0):
    """Return nuclide name using GND convention
    Parameters
    ----------
    Z : int
        Atomic number
    A : int
        Mass number
    m : int, optional
        Metastable state
    Returns
    -------
    str
        Nuclide name in GND convention, e.g., 'Am242_m1'
    """
    base = '{}{}'.format(ATOMIC_SYMBOL[Z], A)
    # Ground states carry no suffix; metastable levels get '_m<level>'.
    if m > 0:
        return '{}_m{}'.format(base, m)
    return base
def isotopes(element):
    """Return naturally occurring isotopes and their abundances
    .. versionadded:: 0.12.1
    Parameters
    ----------
    element : str
        Element symbol (e.g., 'H') or name (e.g., 'helium')
    Returns
    -------
    list
        A list of tuples of (isotope, abundance)
    Raises
    ------
    ValueError
        If the element name is not recognized
    """
    # Anything longer than a two-letter symbol is treated as a full element
    # name and converted to its symbol.
    if len(element) > 2:
        try:
            element = ELEMENT_SYMBOL[element.lower()]
        except KeyError:
            raise ValueError('Element name "{}" not recognised'.format(element))
    # Collect this element's naturally occurring nuclides in sorted order.
    # Keys are exactly <symbol><mass number>, so requiring a digit right
    # after the symbol prevents e.g. 'H' from matching 'He3'.
    pattern = re.compile(r'{}\d+'.format(element))
    return [kv for kv in sorted(NATURAL_ABUNDANCE.items())
            if pattern.match(kv[0])]
def zam(name):
    """Return tuple of (atomic number, mass number, metastable state)
    Parameters
    ----------
    name : str
        Name of nuclide using GND convention, e.g., 'Am242_m1'
    Returns
    -------
    3-tuple of int
        Atomic number, mass number, and metastable state
    """
    match = _GND_NAME_RE.match(name)
    if match is None:
        raise ValueError("'{}' does not appear to be a nuclide name in GND "
                         "format".format(name))
    symbol, A, state = match.groups()
    if symbol not in ATOMIC_NUMBER:
        raise ValueError("'{}' is not a recognized element symbol"
                         .format(symbol))
    # The optional suffix looks like '_m1'/'_e2'; strip the first two
    # characters to recover the level number.
    metastable = int(state[2:]) if state else 0
    return (ATOMIC_NUMBER[symbol], int(A), metastable)
| shikhar413/openmc | openmc/data/data.py | Python | mit | 19,751 | [
"Avogadro"
] | 6395c5d2efbd65dbd0cf1120833c1f1d3ce83e1798f8908769e720b423d44999 |
import unittest
import scipy
import pysal
import numpy as np
from pysal.spreg import error_sp_regimes as SP
from pysal.spreg.error_sp import GM_Error, GM_Endog_Error, GM_Combo
from pysal.common import RTOL
# NOTE(review): this gate parses only the SECOND component of
# scipy.__version__, so e.g. SciPy '1.4' (minor 4) would be skipped even
# though it is far newer than 0.11 -- consider a proper version comparison.
@unittest.skipIf(int(scipy.__version__.split(".")[1]) < 11,
        "Maximum Likelihood requires SciPy version 11 or newer.")
class TestGM_Error_Regimes(unittest.TestCase):
    """Regression tests for the spatial-error models with regimes
    (GM_Error_Regimes, GM_Endog_Error_Regimes, GM_Combo_Regimes) against
    stored reference values on the Columbus sample data, plus internal
    consistency checks on random artificial data.
    """
    def setUp(self):
        """Load the Columbus sample data and build unseeded random
        artificial data shared by all tests."""
        db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("CRIME"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("HOVAL"))
        X.append(db.by_col("INC"))
        self.X = np.array(X).T
        self.w = pysal.queen_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'
        self.r_var = 'NSA'
        self.regimes = db.by_col(self.r_var)
        X1 = []
        X1.append(db.by_col("INC"))
        self.X1 = np.array(X1).T
        yd = []
        yd.append(db.by_col("HOVAL"))
        self.yd = np.array(yd).T
        q = []
        q.append(db.by_col("DISCBD"))
        self.q = np.array(q).T
        #Artficial:
        # Random data is unseeded; it is only used for comparisons of two
        # fits on the same draw, so determinism is not required.
        n = 256
        self.n2 = n//2
        self.x_a1 = np.random.uniform(-10,10,(n,1))
        self.x_a2 = np.random.uniform(1,5,(n,1))
        self.q_a = self.x_a2 + np.random.normal(0,1,(n,1))
        self.x_a = np.hstack((self.x_a1,self.x_a2))
        self.y_a = np.dot(np.hstack((np.ones((n,1)),self.x_a)),np.array([[1],[0.5],[2]])) + np.random.normal(0,1,(n,1))
        latt = int(np.sqrt(n))
        self.w_a = pysal.lat2W(latt,latt)
        self.w_a.transform='r'
        self.regi_a = [0]*(n//2) + [1]*(n//2) ##must be floors!
        self.w_a1 = pysal.lat2W(latt//2,latt)
        self.w_a1.transform='r'
    def test_model(self):
        """GM_Error_Regimes on Columbus against stored reference values."""
        reg = SP.GM_Error_Regimes(self.y, self.X, self.regimes, self.w)
        betas = np.array([[ 63.3443073 ],
               [ -0.15468 ],
               [ -1.52186509],
               [ 61.40071412],
               [ -0.33550084],
               [ -0.85076108],
               [ 0.38671608]])
        np.testing.assert_allclose(reg.betas,betas,RTOL)
        u = np.array([-2.06177251])
        np.testing.assert_allclose(reg.u[0],u,RTOL)
        predy = np.array([ 17.78775251])
        np.testing.assert_allclose(reg.predy[0],predy,RTOL)
        n = 49
        np.testing.assert_allclose(reg.n,n,RTOL)
        k = 6
        np.testing.assert_allclose(reg.k,k,RTOL)
        y = np.array([ 15.72598])
        np.testing.assert_allclose(reg.y[0],y,RTOL)
        x = np.array([[ 0. , 0. , 0. , 1. , 80.467003, 19.531 ]])
        np.testing.assert_allclose(reg.x[0].toarray(),x,RTOL)
        e = np.array([ 1.40747232])
        np.testing.assert_allclose(reg.e_filtered[0],e,RTOL)
        my = 35.128823897959187
        np.testing.assert_allclose(reg.mean_y,my,RTOL)
        sy = 16.732092091229699
        np.testing.assert_allclose(reg.std_y,sy,RTOL)
        vm = np.array([ 50.55875289, -0.14444487, -2.05735489, 0. ,
         0. , 0. ])
        np.testing.assert_allclose(reg.vm[0],vm,RTOL)
        sig2 = 102.13050615267227
        np.testing.assert_allclose(reg.sig2,sig2,RTOL)
        pr2 = 0.5525102200608539
        np.testing.assert_allclose(reg.pr2,pr2,RTOL)
        std_err = np.array([ 7.11046784, 0.21879293, 0.58477864, 7.50596504, 0.10800686,
        0.57365981])
        np.testing.assert_allclose(reg.std_err,std_err,RTOL)
        chow_r = np.array([[ 0.03533785, 0.85088948],
       [ 0.54918491, 0.45865093],
       [ 0.67115641, 0.41264872]])
        np.testing.assert_allclose(reg.chow.regi,chow_r,RTOL)
        chow_j = 0.81985446000130979
        np.testing.assert_allclose(reg.chow.joint[0],chow_j,RTOL)
    # The string below is a deliberately disabled test kept for reference
    # (regime-specific error variant); it is a bare string expression, not
    # executed code.
    """
    def test_model_regi_error(self):
        #Columbus:
        reg = SP.GM_Error_Regimes(self.y, self.X, self.regimes, self.w, regime_err_sep=True)
        betas = np.array([[ 60.45730439],
       [ -0.17732134],
       [ -1.30936328],
       [ 0.51314713],
       [ 66.5487126 ],
       [ -0.31845995],
       [ -1.29047149],
       [ 0.08092997]])
        np.testing.assert_allclose(reg.betas,betas,RTOL)
        vm = np.array([ 39.33656288, -0.08420799, -1.50350999, 0. ,
         0. , 0. ])
        np.testing.assert_allclose(reg.vm[0],vm,RTOL)
        u = np.array([ 0.00698341])
        np.testing.assert_allclose(reg.u[0],u,RTOL)
        predy = np.array([ 15.71899659])
        np.testing.assert_allclose(reg.predy[0],predy,RTOL)
        e = np.array([ 0.53685671])
        np.testing.assert_allclose(reg.e_filtered[0],e,RTOL)
        chow_r = np.array([[ 3.63674458e-01, 5.46472584e-01],
       [ 4.29607250e-01, 5.12181727e-01],
       [ 5.44739543e-04, 9.81379339e-01]])
        np.testing.assert_allclose(reg.chow.regi,chow_r,RTOL)
        chow_j = 0.70119418251625387
        np.testing.assert_allclose(reg.chow.joint[0],chow_j,RTOL)
        #Artficial:
        model = SP.GM_Error_Regimes(self.y_a, self.x_a, self.regi_a, w=self.w_a, regime_err_sep=True)
        model1 = GM_Error(self.y_a[0:(self.n2)].reshape((self.n2),1), self.x_a[0:(self.n2)], w=self.w_a1)
        model2 = GM_Error(self.y_a[(self.n2):].reshape((self.n2),1), self.x_a[(self.n2):], w=self.w_a1)
        tbetas = np.vstack((model1.betas, model2.betas))
        np.testing.assert_allclose(model.betas,tbetas,RTOL)
        vm = np.hstack((model1.vm.diagonal(),model2.vm.diagonal()))
        np.testing.assert_allclose(model.vm.diagonal(), vm,RTOL)
    """
    def test_model_endog(self):
        """GM_Endog_Error_Regimes on Columbus against stored reference
        values (some checks relaxed or disabled -- see notes inline)."""
        reg = SP.GM_Endog_Error_Regimes(self.y, self.X1, self.yd, self.q, self.regimes, self.w)
        betas = np.array([[ 77.48385551, 4.52986622, 78.93209405, 0.42186261,
        -3.23823854, -1.1475775 , 0.20222108]])
        print('Runining higher-tolerance test on L133 of test_error_sp_regimes.py')
        np.testing.assert_allclose(reg.betas.T,betas,RTOL + .0001)
        u = np.array([ 20.89660904])
        #np.testing.assert_allclose(reg.u[0],u,RTOL)
        np.testing.assert_allclose(reg.u[0],u,rtol=1e-05)
        e = np.array([ 25.21818724])
        np.testing.assert_allclose(reg.e_filtered[0],e,RTOL)
        predy = np.array([-5.17062904])
        #np.testing.assert_allclose(reg.predy[0],predy,RTOL)
        np.testing.assert_allclose(reg.predy[0],predy,rtol=1e-03)
        n = 49
        np.testing.assert_allclose(reg.n,n)
        k = 6
        np.testing.assert_allclose(reg.k,k)
        y = np.array([ 15.72598])
        np.testing.assert_allclose(reg.y[0],y,RTOL)
        x = np.array([[ 0. , 0. , 1. , 19.531]])
        np.testing.assert_allclose(reg.x[0].toarray(),x,RTOL)
        yend = np.array([[ 0. , 80.467003]])
        np.testing.assert_allclose(reg.yend[0].toarray(),yend,RTOL)
        z = np.array([[ 0. , 0. , 1. , 19.531 , 0. ,
         80.467003]])
        np.testing.assert_allclose(reg.z[0].toarray(),z,RTOL)
        my = 35.128823897959187
        np.testing.assert_allclose(reg.mean_y,my)
        sy = 16.732092091229699
        np.testing.assert_allclose(reg.std_y,sy)
        vm = np.array([ 390.88250241, 52.25924084, 0. , 0. ,
        -32.64274729, 0. ])
        #np.testing.assert_allclose(reg.vm[0],vm,RTOL)
        # NOTE(review): np.allclose returns a bool that is discarded here,
        # so this line asserts nothing; presumably a deliberately disabled
        # check -- confirm, or wrap it in an assert.
        np.allclose(reg.vm, vm)
        pr2 = 0.19623994206233333
        np.testing.assert_allclose(reg.pr2,pr2,RTOL)
        sig2 = 649.4011
        #np.testing.assert_allclose(round(reg.sig2,RTOL),round(sig2,RTOL),RTOL)
        np.testing.assert_allclose(sig2, reg.sig2, rtol=1e-05)
        std_err = np.array([ 19.77074866, 6.07667394, 24.32254786, 2.17776972,
         2.97078606, 0.94392418])
        #np.testing.assert_allclose(reg.std_err,std_err,RTOL)
        np.testing.assert_allclose(reg.std_err, std_err, rtol=1e-05)
        chow_r = np.array([[ 0.0021348 , 0.96314775],
       [ 0.40499741, 0.5245196 ],
       [ 0.4498365 , 0.50241261]])
        print('Running higher-tolerance tests on L176 of test_error_sp_regimes.py')
        np.testing.assert_allclose(reg.chow.regi,chow_r,RTOL+.0001)
        chow_j = 1.2885590185243503
        #np.testing.assert_allclose(reg.chow.joint[0],chow_j)
        np.testing.assert_allclose(reg.chow.joint[0], chow_j, rtol=1e-05)
    def test_model_endog_regi_error(self):
        """GM_Endog_Error_Regimes with regime-specific error on Columbus,
        plus equivalence with two separate GM_Endog_Error fits on the
        artificial data."""
        #Columbus:
        reg = SP.GM_Endog_Error_Regimes(self.y, self.X1, self.yd, self.q, self.regimes, self.w, regime_err_sep=True)
        betas = np.array([[ 7.91729500e+01],
       [ 5.80693176e+00],
       [ -3.84036576e+00],
       [ 1.46462983e-01],
       [ 8.24723791e+01],
       [ 5.68908920e-01],
       [ -1.28824699e+00],
       [ 6.70387351e-02]])
        np.testing.assert_allclose(reg.betas,betas,RTOL)
        vm = np.array([ 791.86679123, 140.12967794, -81.37581255, 0. ,
         0. , 0. ])
        np.testing.assert_allclose(reg.vm[0],vm,RTOL)
        u = np.array([ 25.80361497])
        np.testing.assert_allclose(reg.u[0],u,RTOL)
        predy = np.array([-10.07763497])
        np.testing.assert_allclose(reg.predy[0],predy,RTOL)
        e = np.array([ 27.32251813])
        np.testing.assert_allclose(reg.e_filtered[0],e,RTOL)
        chow_r = np.array([[ 0.00926459, 0.92331985],
       [ 0.26102777, 0.60941494],
       [ 0.26664581, 0.60559072]])
        np.testing.assert_allclose(reg.chow.regi,chow_r,RTOL)
        chow_j = 1.1184631131987004
        #np.testing.assert_allclose(reg.chow.joint[0],chow_j)
        np.testing.assert_allclose(reg.chow.joint[0], chow_j,RTOL)
        #Artficial:
        model = SP.GM_Endog_Error_Regimes(self.y_a, self.x_a1, yend=self.x_a2, q=self.q_a, regimes=self.regi_a, w=self.w_a, regime_err_sep=True)
        model1 = GM_Endog_Error(self.y_a[0:(self.n2)].reshape((self.n2),1), self.x_a1[0:(self.n2)], yend=self.x_a2[0:(self.n2)], q=self.q_a[0:(self.n2)], w=self.w_a1)
        model2 = GM_Endog_Error(self.y_a[(self.n2):].reshape((self.n2),1), self.x_a1[(self.n2):], yend=self.x_a2[(self.n2):], q=self.q_a[(self.n2):], w=self.w_a1)
        tbetas = np.vstack((model1.betas, model2.betas))
        np.testing.assert_allclose(model.betas,tbetas,RTOL)
        vm = np.hstack((model1.vm.diagonal(),model2.vm.diagonal()))
        np.testing.assert_allclose(model.vm.diagonal(), vm,RTOL)
    def test_model_combo(self):
        """GM_Combo_Regimes on Columbus against stored reference values."""
        reg = SP.GM_Combo_Regimes(self.y, self.X1, self.regimes, self.yd, self.q, w=self.w)
        predy_e = np.array([ 18.82774339])
        np.testing.assert_allclose(reg.predy_e[0],predy_e,RTOL)
        betas = np.array([[ 36.44798052],
       [ -0.7974482 ],
       [ 30.53782661],
       [ -0.72602806],
       [ -0.30953121],
       [ -0.21736652],
       [ 0.64801059],
       [ -0.16601265]])
        np.testing.assert_allclose(reg.betas,betas,RTOL)
        u = np.array([ 0.84393304])
        np.testing.assert_allclose(reg.u[0],u,RTOL)
        e_filtered = np.array([ 0.4040027])
        np.testing.assert_allclose(reg.e_filtered[0],e_filtered,RTOL)
        predy = np.array([ 14.88204696])
        np.testing.assert_allclose(reg.predy[0],predy,RTOL)
        n = 49
        np.testing.assert_allclose(reg.n,n,RTOL)
        k = 7
        np.testing.assert_allclose(reg.k,k,RTOL)
        y = np.array([ 15.72598])
        np.testing.assert_allclose(reg.y[0],y,RTOL)
        x = np.array([[ 0. , 0. , 1. , 19.531]])
        np.testing.assert_allclose(reg.x[0].toarray(),x,RTOL)
        yend = np.array([[ 0. , 80.467003 , 24.7142675]])
        np.testing.assert_allclose(reg.yend[0].toarray(),yend,RTOL)
        z = np.array([[ 0. , 0. , 1. , 19.531 , 0. ,
         80.467003 , 24.7142675]])
        np.testing.assert_allclose(reg.z[0].toarray(),z,RTOL)
        my = 35.128823897959187
        np.testing.assert_allclose(reg.mean_y,my,RTOL)
        sy = 16.732092091229699
        np.testing.assert_allclose(reg.std_y,sy,RTOL)
        vm = np.array([ 109.23549239, -0.19754121, 84.29574673, -1.99317178,
        -1.60123994, -0.1252719 , -1.3930344 ])
        np.testing.assert_allclose(reg.vm[0],vm,RTOL)
        sig2 = 94.98610921110007
        np.testing.assert_allclose(reg.sig2,sig2,RTOL)
        pr2 = 0.6493586702255537
        np.testing.assert_allclose(reg.pr2,pr2,RTOL)
        pr2_e = 0.5255332447240576
        np.testing.assert_allclose(reg.pr2_e,pr2_e,RTOL)
        std_err = np.array([ 10.45157846, 0.93942923, 11.38484969, 0.60774708,
        0.44461334, 0.15871227, 0.15738141])
        np.testing.assert_allclose(reg.std_err,std_err,RTOL)
        chow_r = np.array([[ 0.49716076, 0.48075032],
       [ 0.00405377, 0.94923363],
       [ 0.03866684, 0.84411016]])
        np.testing.assert_allclose(reg.chow.regi,chow_r,RTOL)
        chow_j = 0.64531386285872072
        np.testing.assert_allclose(reg.chow.joint[0],chow_j,RTOL)
    def test_model_combo_regi_error(self):
        """GM_Combo_Regimes with regime-specific lag and error on Columbus,
        plus equivalence with two separate GM_Combo fits on the artificial
        data."""
        #Columbus:
        reg = SP.GM_Combo_Regimes(self.y, self.X1, self.regimes, self.yd, self.q, w=self.w, regime_lag_sep=True, regime_err_sep=True)
        betas = np.array([[ 42.01035248],
       [ -0.13938772],
       [ -0.6528306 ],
       [ 0.54737621],
       [ 0.2684419 ],
       [ 34.02473255],
       [ -0.14920259],
       [ -0.48972903],
       [ 0.65883658],
       [ -0.17174845]])
        np.testing.assert_allclose(reg.betas,betas,RTOL)
        vm = np.array([ 153.58614432, 2.96302131, -3.26211855, -2.46914703,
         0. , 0. , 0. , 0. ])
        np.testing.assert_allclose(reg.vm[0],vm,RTOL)
        u = np.array([ 7.73968703])
        np.testing.assert_allclose(reg.u[0],u,RTOL)
        predy = np.array([ 7.98629297])
        np.testing.assert_allclose(reg.predy[0],predy,RTOL)
        e = np.array([ 6.45052714])
        np.testing.assert_allclose(reg.e_filtered[0],e,RTOL)
        chow_r = np.array([[ 1.00886404e-01, 7.50768497e-01],
       [ 3.61843271e-05, 9.95200481e-01],
       [ 4.69585772e-02, 8.28442711e-01],
       [ 8.13275259e-02, 7.75506385e-01]])
        np.testing.assert_allclose(reg.chow.regi,chow_r,RTOL)
        chow_j = 0.28479988992843119
        np.testing.assert_allclose(reg.chow.joint[0],chow_j, RTOL)
        #Artficial:
        model = SP.GM_Combo_Regimes(self.y_a, self.x_a1, yend=self.x_a2, q=self.q_a, regimes=self.regi_a, w=self.w_a, regime_err_sep=True, regime_lag_sep=True)
        model1 = GM_Combo(self.y_a[0:(self.n2)].reshape((self.n2),1), self.x_a1[0:(self.n2)], yend=self.x_a2[0:(self.n2)], q=self.q_a[0:(self.n2)], w=self.w_a1)
        model2 = GM_Combo(self.y_a[(self.n2):].reshape((self.n2),1), self.x_a1[(self.n2):], yend=self.x_a2[(self.n2):], q=self.q_a[(self.n2):], w=self.w_a1)
        tbetas = np.vstack((model1.betas, model2.betas))
        np.testing.assert_allclose(model.betas,tbetas)
        vm = np.hstack((model1.vm.diagonal(),model2.vm.diagonal()))
        # NOTE(review): unlike the sibling tests above, 'vm' is computed here
        # but never asserted against model.vm.diagonal() -- this looks like a
        # missing assertion. Confirm and add it.
if __name__ == '__main__':
unittest.main()
| ljwolf/pysal | pysal/spreg/tests/test_error_sp_regimes.py | Python | bsd-3-clause | 14,966 | [
"COLUMBUS"
] | 712b7ca3f172d5642ead6547353d6f81cf4ec599c3bf557c88f32148f6ba3576 |
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph.parametertree as pt
import numpy as np
import scipy.ndimage as ndi
from ..event_detection import threshold_events, exp_deconvolve
from ..baseline import float_mode
from .filter import SignalFilter
class EventDetector(QtCore.QObject):
    """Analyzer to detect transient events from signals such as calcium indicator
    and current clamp recordings.

    The basic algorithm (see :meth:`process`) is:

    1. Exponential deconvolution to isolate spikes
       (Richardson & Silberberg, J. Neurophysiol 2008)
    2. Baseline subtraction and lowpass filtering of the deconvolved trace
    3. Threshold detection of events

    Signals
    -------
    parameters_changed(self):
        Emitted whenever a parameter has changed that would affect the output
        of the analyzer.
    """
    parameters_changed = QtCore.Signal(object)  # self

    def __init__(self):
        QtCore.QObject.__init__(self)

        self.filter = SignalFilter()
        self.params = pt.Parameter(name='Spike detection', type='group', children=[
            {'name': 'deconv const', 'type': 'float', 'value': 0.01, 'suffix': 's', 'siPrefix': True, 'dec': True, 'minStep': 1e-4},
            {'name': 'threshold', 'type': 'float', 'value': 0.05, 'dec': True, 'minStep': 1e-12},
            self.filter.params,
        ])

        # Plot items are created lazily in set_plots(); until then they are
        # None, and process() must not rely on them (headless use).
        self.sig_plot = None
        self.deconv_plot = None
        self.sig_trace = None
        self.vticks = None
        self.deconv_trace = None
        self.threshold_line = None

        self.params.sigTreeStateChanged.connect(self._parameters_changed)
        self.params.child('threshold').sigValueChanged.connect(self._threshold_param_changed)

    def set_plots(self, plt1=None, plt2=None):
        """Connect this detector to two PlotWidgets where data should be displayed.

        The first plot will contain the lowpass-filtered trace and tick marks
        for detected events. The second plot will contain the deconvolved signal
        and a draggable threshold line.
        """
        self.sig_plot = plt1
        if plt1 is not None:
            if self.sig_trace is None:
                self.sig_trace = pg.PlotDataItem()
                self.vticks = pg.VTickGroup(yrange=[0.0, 0.05])
            plt1.addItem(self.sig_trace)
            plt1.addItem(self.vticks)

        self.deconv_plot = plt2
        if plt2 is not None:
            if self.deconv_trace is None:
                self.deconv_trace = pg.PlotDataItem()
                self.threshold_line = pg.InfiniteLine(angle=0, movable=True, pen='g')
                self.threshold_line.setValue(self.params['threshold'])
                self.threshold_line.sigPositionChanged.connect(self._threshold_line_moved)
            plt2.addItem(self.deconv_trace)
            plt2.addItem(self.threshold_line)

    def process(self, trace, show=True):
        """Return a table (numpy record array) of events detected in a time series.

        Parameters
        ----------
        trace : data.TSeries instance
            Signal values to process for events (for example, a single calcium
            signal trace or a single electrode recording).
        show : bool
            If True, then processed data will be displayed in the connected
            plots (see `set_plots()`).

        Returns
        -------
        events : numpy record array
            The returned table has several fields:

            * index: the index in *data* at which an event began
            * len: the length of the deconvolved event in samples
            * sum: the integral of *data* under the deconvolved event curve
            * peak: the peak value of the deconvolved event
        """
        # Exponential deconvolution; see Richardson & Silberberg, J. Neurophysiol 2008
        tau = self.params['deconv const']
        diff = exp_deconvolve(trace, tau)

        # remove baseline (estimated as the most common value of the trace)
        bsub = diff.copy(data=diff.data - float_mode(diff.data, bins=200))

        # lowpass filter
        filt = self.filter.process(bsub)

        # Threshold detection. Use the parameter value rather than reading
        # self.threshold_line directly: the line only exists after set_plots()
        # was called with a second plot (previously this raised AttributeError
        # when used headless), and it is kept in sync with the parameter by
        # _threshold_line_moved / _threshold_param_changed anyway.
        self.events = threshold_events(filt.data, self.params['threshold'])
        #self.events = self.events[self.events['sum'] > 0]

        if show:
            if self.sig_plot is not None:
                t = trace.time_values
                self.sig_trace.setData(t, trace.data)
                self.vticks.setXVals(t[self.events['index']])
                self.vticks.update()  # this should not be needed..
            if self.deconv_plot is not None:
                self.deconv_trace.setData(filt.time_values, filt.data)

        return self.events

    def _parameters_changed(self):
        self.parameters_changed.emit(self)

    def _threshold_line_moved(self):
        # link line position to threshold parameter
        self.params.child('threshold').setValue(self.threshold_line.value(), blockSignal=self._threshold_param_changed)

    def _threshold_param_changed(self):
        # link threshold parameter to line position
        if self.threshold_line is not None:
            self.threshold_line.setValue(self.params['threshold'])
| campagnola/neuroanalysis | neuroanalysis/ui/event_detection.py | Python | mit | 5,335 | [
"Gaussian"
] | 32c58a3aef1e8e0432cebe62e0f11edde7b6fc1a65a7968fdb50cace5b6ef8c0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015, Adrián Gómez Pueyo and Alberto Castro
# This file is part of maxdft.
# maxdft is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# maxdft is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
# You should have received a copy of the GNU General Public License
# along with maxdft. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import signal
def run_octopus(octopus_command, output):
    """Run octopus via the shell.

    *octopus_command* is the command used to invoke octopus; *output*, when
    not None, is a file path that receives both stdout and stderr of the run.
    Octopus reads whatever input file ``inp`` is in the current working
    directory.  If the child was killed by SIGINT or SIGQUIT (typically
    Control-C), this process exits with status 0 so runs can be interrupted.
    """
    if output is None:
        system_command = octopus_command
    else:
        # Redirect both stdout and stderr of the run into the output file.
        system_command = octopus_command + " > " + output + " 2>&1 "
        #print " Running octopus. Output to " + output + "."

    sys.stdout.flush()
    status = os.system(system_command)

    # Propagate an interactive interrupt of the child to this script.
    if os.WIFSIGNALED(status):
        sig = os.WTERMSIG(status)
        if sig == signal.SIGINT or sig == signal.SIGQUIT:
            sys.exit(0)
| albertocbarrigon/maxdft | src/octopus.py | Python | gpl-3.0 | 1,657 | [
"Octopus"
] | be8d58d3b717d3903d19b465cf4403694f992a603c40a5ef25fabaed6373dc1e |
"""Convert to and from Roman numerals
This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
"""
__author__ = "Mark Pilgrim (mark@diveintopython.org)"
__version__ = "$Revision: 1.2 $"
__date__ = "$Date: 2004/05/05 21:57:20 $"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
__license__ = "Python"
#Define exceptions
class RomanError(Exception):
    """Base class for all Roman numeral conversion errors."""
    pass

class OutOfRangeError(RomanError):
    """Raised when a number outside 1..3999 is passed to toRoman."""
    pass

class NotIntegerError(RomanError):
    """Raised when a non-whole number is passed to toRoman."""
    pass

class InvalidRomanNumeralError(RomanError):
    """Raised when fromRoman is given a malformed Roman numeral."""
    pass

#Define digit mapping, ordered from largest to smallest value so the greedy
#conversion loops below work correctly.
romanNumeralMap = (('M', 1000),
                   ('CM', 900),
                   ('D', 500),
                   ('CD', 400),
                   ('C', 100),
                   ('XC', 90),
                   ('L', 50),
                   ('XL', 40),
                   ('X', 10),
                   ('IX', 9),
                   ('V', 5),
                   ('IV', 4),
                   ('I', 1))

def toRoman(n):
    """convert integer to Roman numeral

    Raises OutOfRangeError if n is not in 1..3999 (the representable range)
    and NotIntegerError if n is not a whole number.
    """
    if not (0 < n < 4000):
        raise OutOfRangeError("number out of range (must be 1..3999)")
    if int(n) != n:
        raise NotIntegerError("non-integers can not be converted")
    result = ""
    # Greedy conversion: repeatedly emit the largest numeral that still fits.
    for numeral, integer in romanNumeralMap:
        while n >= integer:
            result += numeral
            n -= integer
    return result

def fromRoman(s):
    """convert Roman numeral to integer

    Raises InvalidRomanNumeralError for blank or malformed input.
    """
    if not s:
        raise InvalidRomanNumeralError("Input can not be blank")
    result = 0
    index = 0
    # Greedy parse mirroring toRoman: consume numerals from largest to smallest.
    for numeral, integer in romanNumeralMap:
        while s[index:index + len(numeral)] == numeral:
            result += integer
            index += len(numeral)
    if index < len(s):
        # leftover characters that match no numeral in sequence
        raise InvalidRomanNumeralError("Invalid Roman numeral: %s" % s)
    # Round-trip check rejects non-canonical forms such as 'IIII' or 'VX'.
    if toRoman(result) != s:
        raise InvalidRomanNumeralError("Invalid Roman numeral: %s" % s)
    return result
| tapomayukh/projects_in_python | sandbox_tapo/src/refs/diveintopython-pdf-5.4/diveintopython-5.4/py/roman/stage2/roman2.py | Python | mit | 1,300 | [
"VisIt"
] | 9900dd768b3752d1df3a567b987d1b075eba5b9469367c04b623ea25ca0a2434 |
#!/usr/bin/python
import sys,tempfile,os,hashlib,cPickle
sys.path.append(os.path.join(__file__,'..'))
import xmlstore.datatypes,core.scenario,core.simulator
def run(scenariopath):
    """Run GOTM once for the scenario at *scenariopath*.

    The path may be a single .xml file (loaded with load()) or anything else
    loadable with loadAll().  Exits the process with status 1 if the
    simulation fails.  Returns a tuple (stderr, stdout, md5hex, size) where
    md5hex/size describe the NetCDF result file.
    """
    # Load scenario.
    scen = core.scenario.Scenario.fromSchemaName(core.simulator.gotmscenarioversion)
    if scenariopath.endswith('.xml'):
        scen.load(scenariopath)
    else:
        scen.loadAll(scenariopath)
    # Simulate
    res = core.simulator.simulate(scen,progresscallback=None,redirect=True)
    if res.returncode==1:
        print 'Simulation failed. Error: %s.\n\nGOTM output:\n%s' % (res.errormessage,res.stderr)
        res.unlink()
        sys.exit(1)
    # Get text output
    curerr = unicode(res.stderr)
    curout = unicode(res.stdout)
    # Get MD5 hash of NetCDF file (read in block-sized chunks to keep memory flat)
    f = open(res.datafile,'rb')
    m = hashlib.md5()
    cursize = 0
    while 1:
        dat = f.read(m.block_size)
        if not dat: break
        cursize += len(dat)
        m.update(dat)
    f.close()
    curhash = m.hexdigest()
    # Clean up result (removes temporary simulation/scenario files)
    res.unlink()
    scen.unlink()
    return curerr,curout,curhash,cursize
# Module-level test state: maps scenario path -> (stderr, stdout, md5, size)
# of the first run, used as the reference for later runs.
results = {}
# Running counter of executed tests (for progress output only).
itest = 0
def test(scenariopath):
    """Run one scenario and compare against its cached first-run results.

    The first run of a path just records its output in *results*.  Later
    runs compare stderr, stdout and the NetCDF MD5 hash; on mismatch the
    old/new logs are written to log1.txt/log2.txt.  Returns True when the
    run matched (or was the first run), False otherwise.
    """
    global results,itest
    itest += 1
    print 'Test %i with "%s"...' % (itest,scenariopath),
    curerr,curout,curhash,cursize = run(scenariopath)
    if scenariopath not in results:
        print 'first run'
        results[scenariopath] = (curerr,curout,curhash,cursize)
    else:
        olderr,oldout,oldhash,oldsize = results[scenariopath]
        def writelogs():
            # Dump reference output (log1) and current output (log2) for diffing.
            log1 = open('log1.txt','w')
            log1.write(olderr)
            log1.write(oldout)
            log1.close()
            log2 = open('log2.txt','w')
            log2.write(curerr)
            log2.write(curout)
            log2.close()
        match = True
        # 'FAILED' is printed once, on the first detected mismatch.
        if curerr!=olderr:
            if match: print 'FAILED'
            print 'ERROR standard error did not match original run.'
            match = False
        if curout!=oldout:
            if match: print 'FAILED'
            print 'ERROR standard out did not match original run.'
            match = False
        if curhash!=oldhash:
            if match: print 'FAILED'
            print 'ERROR NetCDF did not match original run (old size = %i, new size: %i).' % (oldsize,cursize)
            match = False
        if not match:
            writelogs()
            print '%s result differs.' % (os.path.basename(scenariopath),)
            print 'Output written to log1.txt (original) and log2.txt (new).'
            return False
        else:
            print 'success'
    return True
def linearrun(scenariopaths,nsim=1):
    """Run every scenario *nsim* times in order; stop at the first failure.

    Returns True when all runs matched, False as soon as one did not.
    """
    for path in scenariopaths:
        for _ in range(nsim):
            if not test(path):
                return False
    return True
def stresstest(scenariopaths):
    """Keep testing randomly chosen scenarios until one of them fails.

    Only returns (False) on a failure; otherwise loops forever.
    """
    import random
    while True:
        if not test(random.choice(scenariopaths)):
            return False
    return True  # unreachable; kept for symmetry with linearrun
# Command-line driver: parse options, expand scenario paths, then run either
# a pristine run, a stress test, or one/more iterative passes over the paths.
if __name__=='__main__':
    import optparse,glob
    parser = optparse.OptionParser()
    parser.add_option('-r','--repeat',type='int',help='number of times each individual scenario must be run.')
    parser.add_option('-l','--loop',  action='store_true',help='loop forever testing all provided scenarios.')
    parser.add_option('-s','--stress',action='store_true',help='stress test: continuously run all scenarios in random order. All other arguments except -c/--cache are ignored.')
    parser.add_option('-c','--cache', type='string',help='cache file for results.')
    parser.add_option('-p','--pristine', action='store_true',help='create pristine results by running each scenario once with a clean GOTM library. All other arguments except -c/--cache are ignored.')
    parser.set_defaults(stress=False,loop=False,repeat=1,cache=None,pristine=False)
    (options, args) = parser.parse_args()
    if not args:
        print '%s must be called with one or more paths to .gotmscenario files. These paths may contain wildcards.' % os.path.basename(__file__)
        sys.exit(2)
    # Expand wildcards; every pattern must match at least one file.
    paths = []
    for p in args:
        curpaths = glob.glob(p)
        if not curpaths:
            print '"%s" was not found.' % p
            sys.exit(2)
        paths += curpaths
    # Pristine mode: re-invoke this script once per scenario so each run gets
    # a freshly loaded GOTM library; results accumulate via the shared cache.
    if options.pristine:
        import subprocess
        for path in paths:
            arg = [__file__,path]
            if options.cache:
                arg += ['--cache',options.cache]
            ret = subprocess.call(arg,shell=True)
            if ret!=0:
                print 'Error occured during pristine runs. Exiting...'
                sys.exit(1)
        sys.exit(0)
    # Seed the in-memory results from an earlier cached session, if any.
    if options.cache and os.path.isfile(options.cache):
        print 'Loading earlier results from cache file "%s".' % options.cache
        f = open(options.cache,'rb')
        results.update(cPickle.load(f))
        f.close()
    if options.stress:
        print 'Stress testing with %i scenarios.' % len(paths)
        valid = stresstest(paths)
    else:
        print 'Iteratively testing %i scenarios.' % len(paths)
        while 1:
            valid = linearrun(paths,options.repeat)
            if not (valid and options.loop): break
            print 'Looping...'
    # Persist results (including any new first runs) for future sessions.
    if options.cache:
        print 'Writing results to cache file "%s".' % options.cache
        f = open(options.cache,'wb')
        cPickle.dump(results,f,cPickle.HIGHEST_PROTOCOL)
        f.close()
    if not valid:
        print 'Exiting...'
        sys.exit(1)
    sys.exit(0)
| BoldingBruggeman/gotm | gui.py/unittest/simulate.py | Python | gpl-2.0 | 5,624 | [
"NetCDF"
] | 9a84b4819955a94db3c78ba4dadecbfd1b28453c3cb7218d559d1bf06cf5bd05 |
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import unittest
import numpy as np
import os
import mdtraj
from itertools import combinations, product
# from pyemma.coordinates.data import featurizer as ft
from pyemma.coordinates.data.featurization.featurizer import MDFeaturizer, CustomFeature
from pyemma.coordinates.data.featurization.util import _parse_pairwise_input, _describe_atom
from six.moves import range
import pkg_resources
path = pkg_resources.resource_filename(__name__, 'data') + os.path.sep
xtcfile = os.path.join(path, 'bpti_mini.xtc')
pdbfile = os.path.join(path, 'bpti_ca.pdb')
pdbfile_ops_aa = os.path.join(path,'opsin_aa_1_frame.pdb.gz')
pdbfile_ops_Ca = os.path.join(path,'opsin_Ca_1_frame.pdb.gz')
asn_leu_pdb = """
ATOM 559 N ASN A 69 19.168 -0.936 -10.274 1.00 27.50 N
ATOM 560 CA ASN A 69 20.356 -0.049 -10.419 1.00 25.52 C
ATOM 561 C ASN A 69 21.572 -0.418 -9.653 1.00 24.26 C
ATOM 562 O ASN A 69 22.687 -0.336 -10.171 1.00 24.33 O
ATOM 563 CB ASN A 69 19.965 1.410 -10.149 1.00 26.49 C
ATOM 564 CG ASN A 69 18.932 1.881 -11.124 1.00 26.35 C
ATOM 565 OD1 ASN A 69 18.835 1.322 -12.224 1.00 26.77 O
ATOM 566 ND2 ASN A 69 18.131 2.864 -10.745 1.00 24.85 N
ATOM 567 N LEU A 70 21.419 -0.824 -8.404 1.00 23.02 N
ATOM 568 CA LEU A 70 22.592 -1.275 -7.656 1.00 23.37 C
ATOM 569 C LEU A 70 23.391 -2.325 -8.448 1.00 25.78 C
ATOM 570 O LEU A 70 24.647 -2.315 -8.430 1.00 25.47 O
ATOM 571 CB LEU A 70 22.202 -1.897 -6.306 1.00 22.17 C
ATOM 572 CG LEU A 70 23.335 -2.560 -5.519 1.00 22.49 C
ATOM 573 CD1 LEU A 70 24.578 -1.665 -5.335 1.00 22.56 C
ATOM 574 CD2 LEU A 70 22.853 -3.108 -4.147 1.00 24.47 C
""" *2 ### asn-leu-asn-leu
bogus_geom_pdbfile = """
ATOM 000 MW ACE A 00 0.0000 0.000 0.0000 1.00 0.000 X
ATOM 001 CA ASN A 01 1.0000 0.000 0.0000 1.00 0.000 C
ATOM 002 MW ACE A 02 2.0000 0.000 0.0000 1.00 0.000 X
ATOM 003 CA ASN A 03 3.0000 0.000 0.0000 1.00 0.000 C
ATOM 004 MW ACE B 04 4.0000 0.000 0.0000 1.00 0.000 X
ATOM 005 CA ASN B 05 5.0000 0.000 0.0000 1.00 0.000 C
ATOM 006 MW ACE B 06 6.0000 0.000 0.0000 1.00 0.000 X
ATOM 007 CA ASN B 07 7.0000 0.000 0.0000 1.00 0.000 C
"""
def verbose_assertion_minrmsd(ref_Y, test_Y, test_obj):
    """Assert that every column of *test_Y* matches *ref_Y* within test_obj.atol.

    On failure the assertion message reports the largest deviation, the
    offending value pair, and the frame index at which it occurs.
    """
    n_cols = test_Y.shape[1]
    for col in range(n_cols):
        column = test_Y[:, col]
        delta = ref_Y - column
        worst = np.argmax(np.abs(delta))
        message = ('Largest discrepancy between reference (ref_frame %u)'
                   ' and test: %8.2e, for the pair %f, %f at frame %u'
                   % (test_obj.ref_frame, delta[worst],
                      ref_Y[worst], test_Y[worst, col], worst))
        assert np.allclose(ref_Y, column, atol=test_obj.atol), message
class TestFeaturizer(unittest.TestCase):
    """Integration tests for MDFeaturizer features against reference values
    computed directly with mdtraj/numpy."""
    @classmethod
    def setUpClass(cls):
        # Materialize the inline PDB strings as temp files and synthesize a
        # noisy 4001-frame trajectory for the asn-leu peptide.
        import tempfile
        cls.asn_leu_pdbfile = tempfile.mkstemp(suffix=".pdb")[1]
        with open(cls.asn_leu_pdbfile, 'w') as fh:
            fh.write(asn_leu_pdb)
        cls.asn_leu_traj = tempfile.mktemp(suffix='.xtc')
        cls.bogus_geom_pdbfile = tempfile.mkstemp(suffix=".pdb")[1]
        with open(cls.bogus_geom_pdbfile, 'w') as fh:
            fh.write(bogus_geom_pdbfile)
        # create traj for asn_leu
        n_frames = 4001
        traj = mdtraj.load(cls.asn_leu_pdbfile)
        ref = traj.xyz
        new_xyz = np.empty((n_frames, ref.shape[1], 3))
        noise = np.random.random(new_xyz.shape)
        new_xyz[:, :,: ] = noise + ref
        traj.xyz=new_xyz
        traj.time=np.arange(n_frames)
        traj.save(cls.asn_leu_traj)
    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of the temp files created in setUpClass.
        try:
            os.unlink(cls.asn_leu_pdbfile)
        except EnvironmentError:
            pass
        try:
            os.unlink(cls.bogus_geom_pdbfile)
        except EnvironmentError:
            pass
    def setUp(self):
        # Fresh featurizer + trajectory (Ca-only BPTI fixture) for each test.
        self.pdbfile = pdbfile
        self.traj = mdtraj.load(xtcfile, top=self.pdbfile)
        self.feat = MDFeaturizer(self.pdbfile)
        self.atol = 1e-5
        self.ref_frame = 0
        self.atom_indices = np.arange(0, self.traj.n_atoms/2)
    def test_select_backbone(self):
        # smoke test: the selection call must not raise
        inds = self.feat.select_Backbone()
    def test_select_all(self):
        self.feat.add_all()
        assert (self.feat.dimension() == self.traj.n_atoms * 3)
        refmap = np.reshape(self.traj.xyz, (len(self.traj), self.traj.n_atoms * 3))
        assert (np.all(refmap == self.feat.transform(self.traj)))
    def test_select(self):
        sel = np.array([1, 2, 5, 20], dtype=int)
        self.feat.add_selection(sel)
        assert (self.feat.dimension() == sel.shape[0] * 3)
        refmap = np.reshape(self.traj.xyz[:, sel, :], (len(self.traj), sel.shape[0] * 3))
        assert (np.all(refmap == self.feat.transform(self.traj)))
    def test_distances(self):
        sel = np.array([1, 2, 5, 20], dtype=int)
        pairs_expected = np.array([[1, 5], [1, 20], [2, 5], [2, 20], [5, 20]])
        pairs = self.feat.pairs(sel, excluded_neighbors=2)
        assert(pairs.shape == pairs_expected.shape)
        assert(np.all(pairs == pairs_expected))
        self.feat.add_distances(pairs, periodic=False)  # unperiodic distances such that we can compare
        assert(self.feat.dimension() == pairs_expected.shape[0])
        # reference: plain Euclidean distances computed with numpy
        X = self.traj.xyz[:, pairs_expected[:, 0], :]
        Y = self.traj.xyz[:, pairs_expected[:, 1], :]
        D = np.sqrt(np.sum((X - Y) ** 2, axis=2))
        assert(np.allclose(D, self.feat.transform(self.traj)))
    def test_inverse_distances(self):
        sel = np.array([1, 2, 5, 20], dtype=int)
        pairs_expected = np.array([[1, 5], [1, 20], [2, 5], [2, 20], [5, 20]])
        pairs = self.feat.pairs(sel, excluded_neighbors=2)
        assert(pairs.shape == pairs_expected.shape)
        assert(np.all(pairs == pairs_expected))
        self.feat.add_inverse_distances(pairs, periodic=False)  # unperiodic distances such that we can compare
        assert(self.feat.dimension() == pairs_expected.shape[0])
        X = self.traj.xyz[:, pairs_expected[:, 0], :]
        Y = self.traj.xyz[:, pairs_expected[:, 1], :]
        Dinv = 1.0/np.sqrt(np.sum((X - Y) ** 2, axis=2))
        assert(np.allclose(Dinv, self.feat.transform(self.traj)))
    def test_ca_distances(self):
        sel = self.feat.select_Ca()
        assert(np.all(sel == list(range(self.traj.n_atoms))))  # should be all for this Ca-traj
        pairs = self.feat.pairs(sel, excluded_neighbors=0)
        self.feat.add_distances_ca(periodic=False, excluded_neighbors=0)  # unperiodic distances such that we can compare
        assert(self.feat.dimension() == pairs.shape[0])
        X = self.traj.xyz[:, pairs[:, 0], :]
        Y = self.traj.xyz[:, pairs[:, 1], :]
        D = np.sqrt(np.sum((X - Y) ** 2, axis=2))
        assert(np.allclose(D, self.feat.transform(self.traj)))
    def test_ca_distances_with_all_atom_geometries(self):
        # CA distances from an all-atom topology must equal distances computed
        # on the matching CA-only topology.
        feat = MDFeaturizer(pdbfile_ops_aa)
        feat.add_distances_ca(excluded_neighbors=0)
        D_aa = feat.transform(mdtraj.load(pdbfile_ops_aa))
        # Create a reference
        feat_just_ca = MDFeaturizer(pdbfile_ops_Ca)
        feat_just_ca.add_distances(np.arange(feat_just_ca.topology.n_atoms))
        D_ca = feat_just_ca.transform(mdtraj.load(pdbfile_ops_Ca))
        assert(np.allclose(D_aa, D_ca))
    def test_ca_distances_with_all_atom_geometries_and_exclusions(self):
        # Same as above, but with two excluded neighboring residues.
        feat = MDFeaturizer(pdbfile_ops_aa)
        feat.add_distances_ca(excluded_neighbors=2)
        D_aa = feat.transform(mdtraj.load(pdbfile_ops_aa))
        # Create a reference
        feat_just_ca = MDFeaturizer(pdbfile_ops_Ca)
        ca_pairs = feat.pairs(feat_just_ca.select_Ca(),excluded_neighbors=2)
        feat_just_ca.add_distances(ca_pairs)
        D_ca = feat_just_ca.transform(mdtraj.load(pdbfile_ops_Ca))
        assert(np.allclose(D_aa, D_ca))
    def test_ca_distances_with_residues_not_containing_cas_no_exclusions(self):
        # Load test geom
        geom = mdtraj.load(self.pdbfile)
        # No exclusions
        feat_EN0 = MDFeaturizer(self.bogus_geom_pdbfile)
        feat_EN0.add_distances_ca(excluded_neighbors=0)
        ENO_pairs = [[1,3],[1,5],[1,7],
                     [3,5], [3,7],
                     [5,7]
                     ]
        # Check indices
        assert (np.allclose(ENO_pairs, feat_EN0.active_features[0].distance_indexes))
        # Check distances
        D = mdtraj.compute_distances(geom, ENO_pairs)
        assert (np.allclose(D, feat_EN0.transform(geom)))
        # excluded_neighbors=1 ## will yield the same as before, because the first neighbor
        # doesn't conting CA's anyway
        feat_EN1 = MDFeaturizer(self.bogus_geom_pdbfile)
        feat_EN1.add_distances_ca(excluded_neighbors=1)
        EN1_pairs = [[1,3],[1,5],[1,7],
                     [3,5], [3,7],
                     [5,7]
                     ]
        assert (np.allclose(EN1_pairs, feat_EN1.active_features[0].distance_indexes))
        D = mdtraj.compute_distances(geom, EN1_pairs)
        assert (np.allclose(D, feat_EN1.transform(geom)))
    def test_ca_distances_with_residues_not_containing_cas_with_exclusions(self):
        # Load test geom
        geom = mdtraj.load(self.pdbfile)
        # No exclusions
        feat_EN2 = MDFeaturizer(self.bogus_geom_pdbfile)
        feat_EN2.add_distances_ca(excluded_neighbors=2)
        EN2_pairs = [[1,5],[1,7],
                     [3,7],
                     ]
        # Check indices
        assert (np.allclose(EN2_pairs, feat_EN2.active_features[0].distance_indexes))
        # Check distances
        D = mdtraj.compute_distances(geom, EN2_pairs)
        assert (np.allclose(D, feat_EN2.transform(geom)))
        # excluded_neighbors=1 ## will yield the same as before, because the first neighbor
        # doesn't conting CA's anyway
        feat_EN1 = MDFeaturizer(self.bogus_geom_pdbfile)
        feat_EN1.add_distances_ca(excluded_neighbors=1)
        EN1_pairs = [[1,3],[1,5],[1,7],
                     [3,5], [3,7],
                     [5,7]
                     ]
        assert (np.allclose(EN1_pairs, feat_EN1.active_features[0].distance_indexes))
        D = mdtraj.compute_distances(geom, EN1_pairs)
        assert (np.allclose(D, feat_EN1.transform(geom)))
    def test_contacts(self):
        # Binary contact map: 1 where pair distance <= threshold, else 0.
        sel = np.array([1, 2, 5, 20], dtype=int)
        pairs_expected = np.array([[1, 5], [1, 20], [2, 5], [2, 20], [5, 20]])
        pairs = self.feat.pairs(sel, excluded_neighbors=2)
        assert(pairs.shape == pairs_expected.shape)
        assert(np.all(pairs == pairs_expected))
        self.feat.add_contacts(pairs, threshold=0.5, periodic=False)  # unperiodic distances such that we can compare
        assert(self.feat.dimension() == pairs_expected.shape[0])
        X = self.traj.xyz[:, pairs_expected[:, 0], :]
        Y = self.traj.xyz[:, pairs_expected[:, 1], :]
        D = np.sqrt(np.sum((X - Y) ** 2, axis=2))
        C = np.zeros(D.shape)
        I = np.argwhere(D <= 0.5)
        C[I[:, 0], I[:, 1]] = 1.0
        assert(np.allclose(C, self.feat.transform(self.traj)))
    def test_contacts_count_contacts(self):
        # With count_contacts=True the feature collapses to the per-frame
        # number of formed contacts (dimension 1).
        sel = np.array([1, 2, 5, 20], dtype=int)
        pairs_expected = np.array([[1, 5], [1, 20], [2, 5], [2, 20], [5, 20]])
        pairs = self.feat.pairs(sel, excluded_neighbors=2)
        assert(pairs.shape == pairs_expected.shape)
        assert(np.all(pairs == pairs_expected))
        self.feat.add_contacts(pairs, threshold=0.5, periodic=False, count_contacts=True)  # unperiodic distances such that we can compare
        # The dimensionality of the feature is now one
        assert(self.feat.dimension() == 1)
        X = self.traj.xyz[:, pairs_expected[:, 0], :]
        Y = self.traj.xyz[:, pairs_expected[:, 1], :]
        D = np.sqrt(np.sum((X - Y) ** 2, axis=2))
        C = np.zeros(D.shape)
        I = np.argwhere(D <= 0.5)
        C[I[:, 0], I[:, 1]] = 1.0
        # Count the contacts
        C = C.sum(1, keepdims=True)
        assert(np.allclose(C, self.feat.transform(self.traj)))
    # Angle/dihedral features: outputs are radians in [-pi, pi] (or degrees in
    # [-180, 180] with deg=True); cossin=True doubles the dimension.
    def test_angles(self):
        sel = np.array([[1, 2, 5],
                        [1, 3, 8],
                        [2, 9, 10]], dtype=int)
        self.feat.add_angles(sel)
        assert(self.feat.dimension() == sel.shape[0])
        Y = self.feat.transform(self.traj)
        assert(np.alltrue(Y >= -np.pi))
        assert(np.alltrue(Y <= np.pi))
        self.assertEqual(len(self.feat.describe()), self.feat.dimension())
    def test_angles_deg(self):
        sel = np.array([[1, 2, 5],
                        [1, 3, 8],
                        [2, 9, 10]], dtype=int)
        self.feat.add_angles(sel, deg=True)
        assert(self.feat.dimension() == sel.shape[0])
        Y = self.feat.transform(self.traj)
        assert(np.alltrue(Y >= -180.0))
        assert(np.alltrue(Y <= 180.0))
    def test_angles_cossin(self):
        sel = np.array([[1, 2, 5],
                        [1, 3, 8],
                        [2, 9, 10]], dtype=int)
        self.feat.add_angles(sel, cossin=True)
        assert(self.feat.dimension() == 2 * sel.shape[0])
        Y = self.feat.transform(self.traj)
        assert(np.alltrue(Y >= -np.pi))
        assert(np.alltrue(Y <= np.pi))
        desc = self.feat.describe()
        self.assertEqual(len(desc), self.feat.dimension())
    def test_dihedrals(self):
        sel = np.array([[1, 2, 5, 6],
                        [1, 3, 8, 9],
                        [2, 9, 10, 12]], dtype=int)
        self.feat.add_dihedrals(sel)
        assert(self.feat.dimension() == sel.shape[0])
        Y = self.feat.transform(self.traj)
        assert(np.alltrue(Y >= -np.pi))
        assert(np.alltrue(Y <= np.pi))
        self.assertEqual(len(self.feat.describe()), self.feat.dimension())
    def test_dihedrals_deg(self):
        sel = np.array([[1, 2, 5, 6],
                        [1, 3, 8, 9],
                        [2, 9, 10, 12]], dtype=int)
        self.feat.add_dihedrals(sel, deg=True)
        assert(self.feat.dimension() == sel.shape[0])
        Y = self.feat.transform(self.traj)
        assert(np.alltrue(Y >= -180.0))
        assert(np.alltrue(Y <= 180.0))
        self.assertEqual(len(self.feat.describe()), self.feat.dimension())
    def test_dihedrials_cossin(self):
        sel = np.array([[1, 2, 5, 6],
                        [1, 3, 8, 9],
                        [2, 9, 10, 12]], dtype=int)
        self.feat.add_dihedrals(sel, cossin=True)
        assert(self.feat.dimension() == 2 * sel.shape[0])
        Y = self.feat.transform(self.traj)
        assert(np.alltrue(Y >= -np.pi))
        assert(np.alltrue(Y <= np.pi))
        desc = self.feat.describe()
        self.assertEqual(len(desc), self.feat.dimension())
    # Backbone (phi/psi) and chi1 torsion features on the asn-leu fixture.
    def test_backbone_dihedrals(self):
        self.feat = MDFeaturizer(topfile=self.asn_leu_pdbfile)
        self.feat.add_backbone_torsions()
        traj = mdtraj.load(self.asn_leu_pdbfile)
        Y = self.feat.transform(traj)
        assert(np.alltrue(Y >= -np.pi))
        assert(np.alltrue(Y <= np.pi))
        desc = self.feat.describe()
        self.assertEqual(len(desc), self.feat.dimension())
        # test ordering of indices
        backbone_feature = self.feat.active_features[0]
        angle_indices = backbone_feature.angle_indexes
        np.testing.assert_equal(angle_indices[0], backbone_feature._phi_inds[0])
        np.testing.assert_equal(angle_indices[1], backbone_feature._psi_inds[0])
        np.testing.assert_equal(angle_indices[2], backbone_feature._phi_inds[1])
        np.testing.assert_equal(angle_indices[3], backbone_feature._psi_inds[1])
    def test_backbone_dihedrals_deg(self):
        self.feat = MDFeaturizer(topfile=self.asn_leu_pdbfile)
        self.feat.add_backbone_torsions(deg=True)
        traj = mdtraj.load(self.asn_leu_pdbfile)
        Y = self.feat.transform(traj)
        assert(np.alltrue(Y >= -180.0))
        assert(np.alltrue(Y <= 180.0))
        desc = self.feat.describe()
        self.assertEqual(len(desc), self.feat.dimension())
    def test_backbone_dihedrals_cossin(self):
        self.feat = MDFeaturizer(topfile=self.asn_leu_pdbfile)
        self.feat.add_backbone_torsions(cossin=True)
        traj = mdtraj.load(self.asn_leu_traj, top=self.asn_leu_pdbfile)
        Y = self.feat.transform(traj)
        self.assertEqual(Y.shape, (len(traj), 3*4))  # (3 phi + 3 psi)*2 [cos, sin]
        assert(np.alltrue(Y >= -np.pi))
        assert(np.alltrue(Y <= np.pi))
        desc = self.feat.describe()
        self.assertEqual(len(desc), self.feat.dimension(), msg=desc)
        self.assertIn("COS", desc[0])
        self.assertIn("SIN", desc[1])
    def test_backbone_dihedrials_chi(self):
        self.feat = MDFeaturizer(topfile=self.asn_leu_pdbfile)
        self.feat.add_chi1_torsions()
        traj = mdtraj.load(self.asn_leu_pdbfile)
        Y = self.feat.transform(traj)
        assert(np.alltrue(Y >= -np.pi))
        assert(np.alltrue(Y <= np.pi))
        desc = self.feat.describe()
        self.assertEqual(len(desc), self.feat.dimension())
    def test_backbone_dihedrials_chi_cossin(self):
        self.feat = MDFeaturizer(topfile=self.asn_leu_pdbfile)
        self.feat.add_chi1_torsions(cossin=True)
        traj = mdtraj.load(self.asn_leu_pdbfile)
        Y = self.feat.transform(traj)
        assert(np.alltrue(Y >= -np.pi))
        assert(np.alltrue(Y <= np.pi))
        desc = self.feat.describe()
        assert "COS" in desc[0]
        assert "SIN" in desc[1]
        self.assertEqual(len(desc), self.feat.dimension())
    def test_custom_feature(self):
        # TODO: test me
        pass
    # minRMSD feature: both the Trajectory-input and the file-input variants
    # are registered and checked against mdtraj.rmsd.
    def test_MinRmsd(self):
        # Test the Trajectory-input variant
        self.feat.add_minrmsd_to_ref(self.traj[self.ref_frame])
        # and the file-input variant
        self.feat.add_minrmsd_to_ref(xtcfile, ref_frame=self.ref_frame)
        test_Y = self.feat.transform(self.traj).squeeze()
        # now the reference
        ref_Y = mdtraj.rmsd(self.traj, self.traj[self.ref_frame])
        verbose_assertion_minrmsd(ref_Y, test_Y, self)
        assert self.feat.dimension() == 2
        assert len(self.feat.describe())==2
    def test_MinRmsd_with_atom_indices(self):
        # Test the Trajectory-input variant
        self.feat.add_minrmsd_to_ref(self.traj[self.ref_frame], atom_indices=self.atom_indices)
        # and the file-input variant
        self.feat.add_minrmsd_to_ref(xtcfile, ref_frame=self.ref_frame, atom_indices=self.atom_indices)
        test_Y = self.feat.transform(self.traj).squeeze()
        # now the reference
        ref_Y = mdtraj.rmsd(self.traj, self.traj[self.ref_frame], atom_indices=self.atom_indices)
        verbose_assertion_minrmsd(ref_Y, test_Y, self)
        assert self.feat.dimension() == 2
        assert len(self.feat.describe())==2
    def test_MinRmsd_with_atom_indices_precentered(self):
        # Test the Trajectory-input variant
        self.feat.add_minrmsd_to_ref(self.traj[self.ref_frame], atom_indices=self.atom_indices, precentered=True)
        # and the file-input variant
        self.feat.add_minrmsd_to_ref(xtcfile, ref_frame=self.ref_frame, atom_indices=self.atom_indices, precentered=True)
        test_Y = self.feat.transform(self.traj).squeeze()
        # now the reference
        ref_Y = mdtraj.rmsd(self.traj, self.traj[self.ref_frame], atom_indices=self.atom_indices, precentered=True)
        verbose_assertion_minrmsd(ref_Y, test_Y, self)
        assert self.feat.dimension() == 2
        assert len(self.feat.describe())==2
    # Residue minimum-distance features, checked against mdtraj.compute_contacts.
    def test_Residue_Mindist_Ca_all(self):
        n_ca = self.feat.topology.n_atoms
        self.feat.add_residue_mindist(scheme='ca')
        D = self.feat.transform(self.traj)
        Dref = mdtraj.compute_contacts(self.traj, scheme='ca')[0]
        assert np.allclose(D, Dref)
        assert len(self.feat.describe())==self.feat.dimension()
    def test_Residue_Mindist_Ca_all_threshold(self):
        threshold = .7
        self.feat.add_residue_mindist(scheme='ca', threshold=threshold)
        D = self.feat.transform(self.traj)
        Dref = mdtraj.compute_contacts(self.traj, scheme='ca')[0]
        Dbinary = np.zeros_like(Dref)
        I = np.argwhere(Dref <= threshold)
        Dbinary[I[:, 0], I[:, 1]] = 1
        assert np.allclose(D, Dbinary)
        assert len(self.feat.describe())==self.feat.dimension()
    def test_Residue_Mindist_Ca_array(self):
        contacts=np.array([[20,10,], [10,0]])
        self.feat.add_residue_mindist(scheme='ca', residue_pairs=contacts)
        D = self.feat.transform(self.traj)
        Dref = mdtraj.compute_contacts(self.traj, scheme='ca', contacts=contacts)[0]
        assert np.allclose(D, Dref)
        assert len(self.feat.describe())==self.feat.dimension()
    def test_Residue_Mindist_Ca_array_periodic(self):
        traj = mdtraj.load(pdbfile)
        # Atoms most far appart in Z
        atom_minz = traj.xyz.argmin(1).squeeze()[-1]
        atom_maxz = traj.xyz.argmax(1).squeeze()[-1]
        # Residues with the atoms most far appart in Z
        res_minz = traj.topology.atom(atom_minz).residue.index
        res_maxz = traj.topology.atom(atom_maxz).residue.index
        contacts=np.array([[res_minz, res_maxz]])
        # Tweak the trajectory so that a (bogus) PBC exists (otherwise traj._have_unitcell is False)
        traj.unitcell_angles = [90,90,90]
        traj.unitcell_lengths = [1, 1, 1]
        self.feat.add_residue_mindist(scheme='ca', residue_pairs=contacts, periodic=False)
        D = self.feat.transform(traj)
        Dperiodic_true = mdtraj.compute_contacts(traj, scheme='ca', contacts=contacts, periodic=True)[0]
        Dperiodic_false = mdtraj.compute_contacts(traj, scheme='ca', contacts=contacts, periodic=False)[0]
        # This asserts that the periodic option is having an effect at all
        assert not np.allclose(Dperiodic_false, Dperiodic_true, )
        # This asserts that the periodic option is being handled correctly by pyemma
        assert np.allclose(D, Dperiodic_false)
        assert len(self.feat.describe())==self.feat.dimension()
def test_Group_Mindist_One_Group(self):
group0= [0,20,30,0]
self.feat.add_group_mindist(group_definitions=[group0]) # Even with duplicates
D = self.feat.transform(self.traj)
dist_list = list(combinations(np.unique(group0),2))
Dref = mdtraj.compute_distances(self.traj, dist_list)
assert np.allclose(D.squeeze(), Dref.min(1))
assert len(self.feat.describe())==self.feat.dimension()
def test_Group_Mindist_All_Three_Groups(self):
group0 = [0,20,30,0]
group1 = [1,21,31,1]
group2 = [2,22,32,2]
self.feat.add_group_mindist(group_definitions=[group0, group1, group2])
D = self.feat.transform(self.traj)
# Now the references, computed separately for each combination of groups
dist_list_01 = np.array(list(product(np.unique(group0),np.unique(group1))))
dist_list_02 = np.array(list(product(np.unique(group0),np.unique(group2))))
dist_list_12 = np.array(list(product(np.unique(group1),np.unique(group2))))
Dref_01 = mdtraj.compute_distances(self.traj, dist_list_01).min(1)
Dref_02 = mdtraj.compute_distances(self.traj, dist_list_02).min(1)
Dref_12 = mdtraj.compute_distances(self.traj, dist_list_12).min(1)
Dref = np.vstack((Dref_01,Dref_02,Dref_12)).T
assert np.allclose(D.squeeze(), Dref)
assert len(self.feat.describe())==self.feat.dimension()
def test_Group_Mindist_All_Three_Groups_threshold(self):
    """With a threshold, the feature becomes binary: 1 where min distance <= threshold."""
    cutoff = .7
    groups = [[0, 20, 30, 0],
              [1, 21, 31, 1],
              [2, 22, 32, 2]]
    self.feat.add_group_mindist(group_definitions=groups, threshold=cutoff)
    result = self.feat.transform(self.traj)
    # Reference distances, one column per group pair, in order (0,1), (0,2), (1,2).
    ref_columns = []
    for ii, jj in ((0, 1), (0, 2), (1, 2)):
        pair_list = np.array(list(product(np.unique(groups[ii]), np.unique(groups[jj]))))
        ref_columns.append(mdtraj.compute_distances(self.traj, pair_list).min(1))
    ref = np.vstack(ref_columns).T
    # Binarize the reference against the cutoff.
    expected = np.zeros_like(ref)
    expected[ref <= cutoff] = 1
    assert np.allclose(result, expected)
    assert len(self.feat.describe()) == self.feat.dimension()
def test_Group_Mindist_Some_Three_Groups(self):
    """A user-selected subset of group pairs, including one group paired with itself."""
    groups = [[0, 20, 30, 0],
              [1, 21, 31, 1],
              [2, 22, 32, 2]]
    requested_pairs = np.array([[0, 1],
                                [2, 2],
                                [0, 2]])
    self.feat.add_group_mindist(group_definitions=groups, group_pairs=requested_pairs)
    result = self.feat.transform(self.traj)

    def _min_dist(ii, jj):
        # For a group with itself use internal combinations; otherwise the
        # cartesian product of the two (de-duplicated) groups.
        uniq_i, uniq_j = np.unique(groups[ii]), np.unique(groups[jj])
        if ii == jj:
            pair_list = np.array(list(combinations(uniq_i, 2)))
        else:
            pair_list = np.array(list(product(uniq_i, uniq_j)))
        return mdtraj.compute_distances(self.traj, pair_list).min(1)

    # Reference columns in the same order as the requested pairs.
    ref = np.vstack([_min_dist(ii, jj) for ii, jj in requested_pairs]).T
    assert np.allclose(result.squeeze(), ref)
    assert len(self.feat.describe()) == self.feat.dimension()
class TestFeaturizerNoDubs(unittest.TestCase):
    """Checks that re-adding an identical feature does not grow the featurizer's
    active-feature list, and that describe()/label generation does not raise."""

    def testAddFeaturesWithDuplicates(self):
        """this tests adds multiple features twice (eg. same indices) and
        checks whether they are rejected or not"""
        featurizer = MDFeaturizer(pdbfile)
        expected_active = 1
        featurizer.add_angles([[0, 1, 2], [0, 3, 4]])
        featurizer.add_angles([[0, 1, 2], [0, 3, 4]])  # exact duplicate -> rejected
        self.assertEqual(len(featurizer.active_features), expected_active)

        featurizer.add_contacts([[0, 1], [0, 3]])
        expected_active += 1
        self.assertEqual(len(featurizer.active_features), expected_active)
        featurizer.add_contacts([[0, 1], [0, 3]])  # duplicate -> rejected
        self.assertEqual(len(featurizer.active_features), expected_active)

        # try to fool it with ca selection
        ca = featurizer.select_Ca()
        ca = featurizer.pairs(ca, excluded_neighbors=0)
        featurizer.add_distances(ca)
        expected_active += 1
        self.assertEqual(len(featurizer.active_features), expected_active)
        # Same pair set via the convenience API must also be rejected.
        featurizer.add_distances_ca(excluded_neighbors=0)
        self.assertEqual(len(featurizer.active_features), expected_active)

        featurizer.add_inverse_distances([[0, 1], [0, 3]])
        expected_active += 1
        self.assertEqual(len(featurizer.active_features), expected_active)

        featurizer.add_distances([[0, 1], [0, 3]])
        expected_active += 1
        self.assertEqual(len(featurizer.active_features), expected_active)
        featurizer.add_distances([[0, 1], [0, 3]])
        self.assertEqual(len(featurizer.active_features), expected_active)

        def my_func(x):
            return x - 1

        def foo(x):
            return x - 1

        expected_active += 1
        my_feature = CustomFeature(my_func)
        my_feature.dimension = 3
        featurizer.add_custom_feature(my_feature)
        self.assertEqual(len(featurizer.active_features), expected_active)
        featurizer.add_custom_feature(my_feature)  # same feature object -> rejected
        self.assertEqual(len(featurizer.active_features), expected_active)

        # since myfunc and foo are different functions, it should be added
        expected_active += 1
        foo_feat = CustomFeature(foo, dim=3)
        featurizer.add_custom_feature(foo_feat)
        self.assertEqual(len(featurizer.active_features), expected_active)

        expected_active += 1
        ref = mdtraj.load(xtcfile, top=pdbfile)
        featurizer.add_minrmsd_to_ref(ref)
        featurizer.add_minrmsd_to_ref(ref)  # duplicate reference -> rejected
        self.assertEqual(len(featurizer.active_features), expected_active)

        expected_active += 1
        featurizer.add_minrmsd_to_ref(pdbfile)
        featurizer.add_minrmsd_to_ref(pdbfile)
        self.assertEqual(len(featurizer.active_features), expected_active)

        expected_active += 1
        featurizer.add_residue_mindist()
        featurizer.add_residue_mindist()
        self.assertEqual(len(featurizer.active_features), expected_active)

        expected_active += 1
        featurizer.add_group_mindist([[0, 1], [0, 2]])
        featurizer.add_group_mindist([[0, 1], [0, 2]])
        self.assertEqual(len(featurizer.active_features), expected_active)

    def test_labels(self):
        """ just checks for exceptions """
        featurizer = MDFeaturizer(pdbfile)
        featurizer.add_angles([[1, 2, 3], [4, 5, 6]])
        with self.assertRaises(ValueError) as cm:
            featurizer.add_backbone_torsions()
        # BUGFIX: `BaseException.message` was removed in Python 3 and would raise
        # AttributeError here; str(cm.exception) works on both Python 2 and 3.
        # (Message text 'emtpy indices' matches the typo in the raised exception.)
        assert 'emtpy indices' in str(cm.exception)
        featurizer.add_contacts([[0, 1], [0, 3]])
        featurizer.add_distances([[0, 1], [0, 3]])
        featurizer.add_inverse_distances([[0, 1], [0, 3]])
        cs = CustomFeature(lambda x: x - 1, dim=3)
        featurizer.add_custom_feature(cs)
        featurizer.add_minrmsd_to_ref(pdbfile)
        featurizer.add_residue_mindist()
        featurizer.add_group_mindist([[0, 1], [0, 2]])
        featurizer.describe()
class TestPairwiseInputParser(unittest.TestCase):
    """Exercises _parse_pairwise_input with explicit pairs, single groups,
    two groups, redundant entries, and overlapping groups."""

    def setUp(self):
        self.feat = MDFeaturizer(pdbfile)

    def test_trivial(self):
        """An explicit (n, 2) pair array passes through unchanged."""
        pairs = np.array([[0, 1],
                          [0, 2],
                          [0, 3]])
        assert np.allclose(pairs, _parse_pairwise_input(pairs, None, self.feat._logger))

    def test_one_unique(self):
        """A single group expands to all internal combinations, list or array alike."""
        for group in ([0, 1, 2], np.array([0, 1, 2])):
            expected = np.asarray(list(combinations(group, 2)))
            assert np.allclose(expected, _parse_pairwise_input(group, None, self.feat._logger))

    def test_two_uniques(self):
        """Two disjoint groups expand to their full cartesian product, list or array alike."""
        for g1, g2 in (([0, 1, 2], [3, 4, 5]),
                       (np.array([0, 1, 2]), np.array([3, 4, 5]))):
            expected = np.asarray(list(product(g1, g2)))
            assert np.allclose(expected, _parse_pairwise_input(g1, g2, self.feat._logger))

    def test_two_redundants(self):
        """Duplicate entries inside either group are ignored."""
        g1 = np.array([0, 1, 2, 0])
        g2 = np.array([3, 4, 5, 4])
        expected = np.asarray(list(product(np.unique(g1), np.unique(g2))))
        assert np.allclose(expected, _parse_pairwise_input(g1, g2, self.feat._logger))

    def test_two_redundants_overlap(self):
        """Atoms shared between the two groups are excluded from the second one."""
        g1 = np.array([0, 1, 2, 0])
        g2 = np.array([3, 4, 5, 4, 0, 1])
        expected = np.asarray(list(product(np.unique(g1), np.unique(g2[:-2]))))
        assert np.allclose(expected, _parse_pairwise_input(g1, g2, self.feat._logger))
class TestUtils(unittest.TestCase):
    """Tests _describe_atom on a tiny bogus-geometry PDB written to a temp file."""

    @classmethod
    def setUpClass(cls):
        import tempfile
        # Write the module-level bogus PDB text to a throw-away file.
        cls.bogus_geom_pdbfile = tempfile.mkstemp(suffix=".pdb")[1]
        print(cls.bogus_geom_pdbfile)
        with open(cls.bogus_geom_pdbfile, 'w') as fh:
            fh.write(bogus_geom_pdbfile)
        super(TestUtils, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        try:
            os.unlink(cls.bogus_geom_pdbfile)
        except EnvironmentError:
            pass
        super(TestUtils, cls).tearDownClass()

    # BUGFIX: setUp was decorated @classmethod but declared with `self`.
    # unittest invokes setUp on the instance; the classmethod variant received
    # the class as first argument and silently stored `traj` as a class
    # attribute. A plain instance method is the correct form.
    def setUp(self):
        self.traj = mdtraj.load(self.bogus_geom_pdbfile)

    def test_describe_atom(self):
        """_describe_atom yields at least 4 whitespace-separated tokens,
        the last one being the zero-based atom index."""
        str1 = _describe_atom(self.traj.topology, 0)
        str2 = _describe_atom(self.traj.topology, self.traj.n_atoms - 1)
        assert len(str1.split()) >= 4
        assert len(str2.split()) >= 4
        assert str1.split()[-1] == '0'
        assert str2.split()[-1] == '1'
class TestStaticMethods(unittest.TestCase):
    """Checks the pair-generation helper for several neighbor-exclusion windows."""

    def setUp(self):
        self.feat = MDFeaturizer(pdbfile)

    def test_pairs(self):
        """pairs() must return exactly the index pairs separated by more than
        `excluded_neighbors` positions, in lexicographic order."""
        atoms = np.arange(5)
        expected_by_exclusion = {
            3: [[0, 4]],
            2: [[0, 3], [0, 4],
                [1, 4]],
            1: [[0, 2], [0, 3], [0, 4],
                [1, 3], [1, 4],
                [2, 4]],
            0: [[0, 1], [0, 2], [0, 3], [0, 4],
                [1, 2], [1, 3], [1, 4],
                [2, 3], [2, 4],
                [3, 4]],
        }
        for excl, expected in expected_by_exclusion.items():
            actual = self.feat.pairs(atoms, excluded_neighbors=excl)
            assert np.allclose(actual, expected)
# Define some function that somehow mimics one would typically want to do,
# e.g. 1. call mdtraj,
#      2. perform some other operations on the result
#      3. return a numpy array
def some_call_to_mdtraj_some_operations_some_linalg(traj, pairs, means, U):
    """Compute pair distances for *traj*, subtract *means*, project onto *U*,
    and return the projection as float32."""
    distances = mdtraj.compute_distances(traj, pairs)
    centered = distances - means
    projected = (U.T.dot(centered.T)).T
    return projected.astype('float32')
class TestCustomFeature(unittest.TestCase):
    """A CustomFeature wrapping a user function must reproduce the function's
    direct output, describe itself, and report the right dimensionality."""

    def setUp(self):
        self.feat = MDFeaturizer(pdbfile)
        self.traj = mdtraj.load(xtcfile, top=pdbfile)
        self.pairs = [[0, 1], [0, 2], [1, 2]]  # some distances
        self.means = [.5, .75, 1.0]  # bogus means
        # bogus transformation, projects from 3 distances to 2 components
        self.U = np.array([[0, 1],
                           [1, 0],
                           [1, 1]])

    def _register_feature(self):
        # Register the custom function together with its extra arguments;
        # the output dimension equals the number of projection columns.
        self.feat.add_custom_func(some_call_to_mdtraj_some_operations_some_linalg,
                                  self.U.shape[1],
                                  self.pairs,
                                  self.means,
                                  self.U)

    def test_some_feature(self):
        self._register_feature()
        via_featurizer = self.feat.transform(self.traj)
        # Calling the function directly must give the same result.
        direct = some_call_to_mdtraj_some_operations_some_linalg(
            self.traj, self.pairs, self.means, self.U)
        assert np.allclose(via_featurizer, direct)

    def test_describe(self):
        self._register_feature()
        self.feat.describe()

    def test_dimensionality(self):
        self._register_feature()
        assert self.feat.dimension() == self.U.shape[1]
# Run the whole test module when executed directly.
# NOTE: dataset-extraction metadata (repo path, license row, keyword list and
# viewer boilerplate) had been fused onto/after this line; removed.
if __name__ == "__main__":
    unittest.main()