"""Module collecting functions dealing with the GLUE2 information schema
:author: A.Sailer
Known problems:
* ARC CEs do not seem to publish wall or CPU time per queue anywhere
* There is no consistency between which memory information is provided where,
execution environment vs. information for a share
* Some execution environment IDs are used more than once
Printouts with "SCHEMA PROBLEM" point -- in my opinion -- to errors in the
published information, like a foreign key pointing to a non-existent entry.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pprint import pformat
from DIRAC import gLogger, gConfig
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getCESiteMapping, getGOCSiteName
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Core.Utilities.Grid import ldapsearchBDII
__RCSID__ = "$Id$"
sLog = gLogger.getSubLogger(__name__)
def getGlue2CEInfo(vo, host=None):
"""call ldap for GLUE2 and get information
:param str vo: Virtual Organisation
:param str host: host to query for information
:returns: result structure with result['Value'][siteID]['CEs'][ceID]['Queues'][queueName]. For
each siteID, ceID, queueName all the GLUE2 parameters are retrieved
"""
# get all Policies allowing given VO
filt = "(&(objectClass=GLUE2Policy)(|(GLUE2PolicyRule=VO:%s)(GLUE2PolicyRule=vo:%s)))" % (vo, vo)
polRes = ldapsearchBDII(filt=filt, attr=None, host=host, base="o=glue", selectionString="GLUE2")
if not polRes["OK"]:
return S_ERROR("Failed to get policies for this VO")
polRes = polRes["Value"]
sLog.notice("Found %s policies for this VO %s" % (len(polRes), vo))
# get all shares for this policy
# create an or'ed list of all the shares and then call the search
listOfSitesWithPolicies = set()
shareFilter = ""
for policyValues in polRes:
# skip entries without GLUE2DomainID in the DN because we cannot associate them to a site
if "GLUE2DomainID" not in policyValues["attr"]["dn"]:
continue
shareID = policyValues["attr"].get("GLUE2MappingPolicyShareForeignKey", None)
policyID = policyValues["attr"]["GLUE2PolicyID"]
siteName = policyValues["attr"]["dn"].split("GLUE2DomainID=")[1].split(",", 1)[0]
listOfSitesWithPolicies.add(siteName)
if shareID is None: # policy not pointing to ComputingInformation
sLog.debug("Policy %s does not point to computing information" % (policyID,))
continue
sLog.verbose("%s policy %s pointing to %s " % (siteName, policyID, shareID))
sLog.debug("Policy values:\n%s" % pformat(policyValues))
shareFilter += "(GLUE2ShareID=%s)" % shareID
filt = "(&(objectClass=GLUE2Share)(|%s))" % shareFilter
shareRes = ldapsearchBDII(filt=filt, attr=None, host=host, base="o=glue", selectionString="GLUE2")
if not shareRes["OK"]:
sLog.error("Could not get share information", shareRes["Message"])
return shareRes
shareInfoLists = {}
for shareInfo in shareRes["Value"]:
if "GLUE2DomainID" not in shareInfo["attr"]["dn"]:
continue
if "GLUE2ComputingShare" not in shareInfo["objectClass"]:
sLog.debug("Share %r is not a ComputingShare: \n%s" % (shareID, pformat(shareInfo)))
continue
sLog.debug("Found computing share:\n%s" % pformat(shareInfo))
siteName = shareInfo["attr"]["dn"].split("GLUE2DomainID=")[1].split(",", 1)[0]
shareInfoLists.setdefault(siteName, []).append(shareInfo["attr"])
siteInfo = __getGlue2ShareInfo(host, shareInfoLists)
if not siteInfo["OK"]:
sLog.error("Could not get CE info for", "%s: %s" % (shareID, siteInfo["Message"]))
return siteInfo
siteDict = siteInfo["Value"]
sLog.debug("Found Sites:\n%s" % pformat(siteDict))
sitesWithoutShares = set(siteDict) - listOfSitesWithPolicies
if sitesWithoutShares:
sLog.error("Found some sites without any shares", pformat(sitesWithoutShares))
else:
sLog.notice("Found information for all known sites")
# remap siteDict to assign CEs to known sites,
# in case their names differ from the "gocdb name" in the CS.
newSiteDict = {}
ceSiteMapping = getCESiteMapping().get("Value", {})
# pylint thinks siteDict is a tuple, so we cast
for siteName, infoDict in dict(siteDict).items():
for ce, ceInfo in infoDict.get("CEs", {}).items():
ceSiteName = ceSiteMapping.get(ce, siteName)
gocSiteName = getGOCSiteName(ceSiteName).get("Value", siteName)
newSiteDict.setdefault(gocSiteName, {}).setdefault("CEs", {})[ce] = ceInfo
return S_OK(newSiteDict)
def __getGlue2ShareInfo(host, shareInfoLists):
"""get information from endpoints, which are the CE at a Site
:param str host: BDII host to query
:param dict shareInfoDict: dictionary of GLUE2 parameters belonging to the ComputingShare
:returns: result structure S_OK/S_ERROR
"""
executionEnvironments = []
for _siteName, shareInfoDicts in shareInfoLists.items():
for shareInfoDict in shareInfoDicts:
executionEnvironment = shareInfoDict.get("GLUE2ComputingShareExecutionEnvironmentForeignKey", [])
if not executionEnvironment:
sLog.error("No entry for GLUE2ComputingShareExecutionEnvironmentForeignKey", pformat(shareInfoDict))
continue
if isinstance(executionEnvironment, str):
executionEnvironment = [executionEnvironment]
executionEnvironments.extend(executionEnvironment)
resExeInfo = __getGlue2ExecutionEnvironmentInfo(host, executionEnvironments)
if not resExeInfo["OK"]:
sLog.error(
"Cannot get execution environment info for:",
str(executionEnvironments)[:100] + " " + resExeInfo["Message"],
)
return resExeInfo
exeInfos = resExeInfo["Value"]
siteDict = {}
for siteName, shareInfoDicts in shareInfoLists.items():
siteDict[siteName] = {"CEs": {}}
cesDict = siteDict[siteName]["CEs"]
for shareInfoDict in shareInfoDicts:
ceInfo = {}
ceInfo["MaxWaitingJobs"] = shareInfoDict.get("GLUE2ComputingShareMaxWaitingJobs", "-1") # This is not used
ceInfo["Queues"] = {}
queueInfo = {}
queueInfo["GlueCEStateStatus"] = shareInfoDict["GLUE2ComputingShareServingState"]
queueInfo["GlueCEPolicyMaxCPUTime"] = str(
int(int(shareInfoDict.get("GLUE2ComputingShareMaxCPUTime", 86400)) / 60)
)
queueInfo["GlueCEPolicyMaxWallClockTime"] = str(
int(int(shareInfoDict.get("GLUE2ComputingShareMaxWallTime", 86400)) / 60)
)
queueInfo["GlueCEInfoTotalCPUs"] = shareInfoDict.get("GLUE2ComputingShareMaxRunningJobs", "10000")
queueInfo["GlueCECapability"] = ["CPUScalingReferenceSI00=2552"]
try:
maxNOPfromCS = gConfig.getValue(
"/Resources/Computing/CEDefaults/GLUE2ComputingShareMaxSlotsPerJob_limit", 8
)
maxNOPfromGLUE = int(shareInfoDict.get("GLUE2ComputingShareMaxSlotsPerJob", 1))
numberOfProcs = min(maxNOPfromGLUE, maxNOPfromCS)
queueInfo["NumberOfProcessors"] = numberOfProcs
if numberOfProcs != maxNOPfromGLUE:
sLog.info(
"Limited NumberOfProcessors for", "%s from %s to %s" % (siteName, maxNOPfromGLUE, numberOfProcs)
)
except ValueError:
sLog.error(
"Bad content for GLUE2ComputingShareMaxSlotsPerJob:",
siteName + " " + shareInfoDict.get("GLUE2ComputingShareMaxSlotsPerJob"),
)
queueInfo["NumberOfProcessors"] = 1
executionEnvironment = shareInfoDict.get("GLUE2ComputingShareExecutionEnvironmentForeignKey", [])
if isinstance(executionEnvironment, str):
executionEnvironment = [executionEnvironment]
resExeInfo = __getGlue2ExecutionEnvironmentInfoForSite(siteName, executionEnvironment, exeInfos)
if not resExeInfo["OK"]:
continue
exeInfo = resExeInfo.get("Value")
if not exeInfo:
sLog.error("Using dummy values. Did not find information for execution environment", siteName)
exeInfo = {
"GlueHostMainMemoryRAMSize": "1999", # intentionally identifiably dummy value
"GlueHostOperatingSystemVersion": "",
"GlueHostOperatingSystemName": "",
"GlueHostOperatingSystemRelease": "",
"GlueHostArchitecturePlatformType": "x86_64",
"GlueHostBenchmarkSI00": "2500", # needed for the queue to be used by the sitedirector
"MANAGER": "manager:unknownBatchSystem", # need some value for ARC
}
else:
sLog.info("Found information for execution environment for", siteName)
# sometimes the time is still in hours
maxCPUTime = int(queueInfo["GlueCEPolicyMaxCPUTime"])
if maxCPUTime in [12, 24, 36, 48, 168]:
queueInfo["GlueCEPolicyMaxCPUTime"] = str(maxCPUTime * 60)
queueInfo["GlueCEPolicyMaxWallClockTime"] = str(int(queueInfo["GlueCEPolicyMaxWallClockTime"]) * 60)
ceInfo.update(exeInfo)
shareEndpoints = shareInfoDict.get("GLUE2ShareEndpointForeignKey", [])
if isinstance(shareEndpoints, str):
shareEndpoints = [shareEndpoints]
for endpoint in shareEndpoints:
ceType = endpoint.rsplit(".", 1)[1]
# get queue Name, in CREAM this is behind GLUE2entityOtherInfo...
if ceType == "CREAM":
for otherInfo in shareInfoDict["GLUE2EntityOtherInfo"]:
if otherInfo.startswith("CREAMCEId"):
queueName = otherInfo.split("/", 1)[1]
# creamCEs are EOL soon, ignore any info they have
if queueInfo.pop("NumberOfProcessors", 1) != 1:
sLog.verbose("Ignoring MaxSlotsPerJob option for CreamCE", endpoint)
# HTCondorCE, htcondorce
elif ceType.lower().endswith("htcondorce"):
ceType = "HTCondorCE"
queueName = "condor"
else:
sLog.error("Unknown CE Type, please check the available information", ceType)
continue
queueInfo["GlueCEImplementationName"] = ceType
ceName = endpoint.split("_", 1)[0]
cesDict.setdefault(ceName, {})
existingQueues = dict(cesDict[ceName].get("Queues", {}))
existingQueues[queueName] = queueInfo
ceInfo["Queues"] = existingQueues
cesDict[ceName].update(ceInfo)
# ARC CEs do not have endpoints, we have to try something else to get the information about the queue etc.
try:
if not shareEndpoints and shareInfoDict["GLUE2ShareID"].startswith("urn:ogf"):
exeInfo = dict(exeInfo) # silence pylint about tuples
queueInfo["GlueCEImplementationName"] = "ARC"
managerName = exeInfo.pop("MANAGER", "").split(" ", 1)[0].rsplit(":", 1)[1]
managerName = managerName.capitalize() if managerName == "condor" else managerName
queueName = "nordugrid-%s-%s" % (managerName, shareInfoDict["GLUE2ComputingShareMappingQueue"])
ceName = shareInfoDict["GLUE2ShareID"].split("ComputingShare:")[1].split(":")[0]
cesDict.setdefault(ceName, {})
existingQueues = dict(cesDict[ceName].get("Queues", {}))
existingQueues[queueName] = queueInfo
ceInfo["Queues"] = existingQueues
cesDict[ceName].update(ceInfo)
except Exception:
sLog.error("Exception in ARC part for site:", siteName)
return S_OK(siteDict)
def __getGlue2ExecutionEnvironmentInfo(host, executionEnvironments):
"""Find all the executionEnvironments.
:param str host: BDII host to query
:param list executionEnvironments: list of the execution environments to get some information from
:returns: result of the ldapsearch for all executionEnvironments, Glue2 schema
"""
listOfValues = []
# break up to avoid argument list too long, it started failing at about 1900 entries
for exeEnvs in breakListIntoChunks(executionEnvironments, 1000):
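        # breakListIntoChunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]], so
        # every single LDAP query stays below the argument-length limit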
exeFilter = ""
for execEnv in exeEnvs:
exeFilter += "(GLUE2ResourceID=%s)" % execEnv
filt = "(&(objectClass=GLUE2ExecutionEnvironment)(|%s))" % exeFilter
response = ldapsearchBDII(filt=filt, attr=None, host=host, base="o=glue", selectionString="GLUE2")
if not response["OK"]:
return response
if not response["Value"]:
sLog.error("No information found for %s" % executionEnvironments)
continue
listOfValues += response["Value"]
if not listOfValues:
return S_ERROR("No information found for executionEnvironments")
return S_OK(listOfValues)
def __getGlue2ExecutionEnvironmentInfoForSite(sitename, foreignKeys, exeInfos):
"""Get the information about the execution environment for a specific site or ce or something.
:param str sitename: Name of the site we are looking at
:param list foreignKeys: list of ExecutionEnvironmentForeignkeys linked by the site
:param list exeInfos: bdii list of dictionaries containing all the ExecutionEnvironment information for all sites
:return: Dictionary with the information as required by the Bdii2CSagent for this site
"""
# filter those that we want
exeInfos = [exeInfo for exeInfo in exeInfos if exeInfo["attr"]["GLUE2ResourceID"] in foreignKeys]
# take the CE with the lowest MainMemory
exeInfo = sorted(exeInfos, key=lambda k: int(k["attr"]["GLUE2ExecutionEnvironmentMainMemorySize"]))
if not exeInfo:
sLog.error(
"SCHEMA PROBLEM: Did not find execution info for site", sitename + " and keys: " + " ".join(foreignKeys)
)
return S_OK()
sLog.debug("Found ExecutionEnvironments", pformat(exeInfo[0]))
exeInfo = exeInfo[0]["attr"] # pylint: disable=unsubscriptable-object
maxRam = exeInfo.get("GLUE2ExecutionEnvironmentMainMemorySize", "")
architecture = exeInfo.get("GLUE2ExecutionEnvironmentPlatform", "")
architecture = "x86_64" if architecture == "amd64" else architecture
architecture = "x86_64" if architecture == "UNDEFINEDVALUE" else architecture
architecture = "x86_64" if "Intel(R) Xeon(R)" in architecture else architecture
osFamily = exeInfo.get("GLUE2ExecutionEnvironmentOSFamily", "") # e.g. linux
osName = exeInfo.get("GLUE2ExecutionEnvironmentOSName", "")
osVersion = exeInfo.get("GLUE2ExecutionEnvironmentOSVersion", "")
manager = exeInfo.get("GLUE2ExecutionEnvironmentComputingManagerForeignKey", "manager:unknownBatchSystem")
    # translate to Glue1-like keys, because those are used later on
infoDict = {
"GlueHostMainMemoryRAMSize": maxRam,
"GlueHostOperatingSystemVersion": osName,
"GlueHostOperatingSystemName": osFamily,
"GlueHostOperatingSystemRelease": osVersion,
"GlueHostArchitecturePlatformType": architecture.lower(),
"GlueHostBenchmarkSI00": "2500", # needed for the queue to be used by the sitedirector
"MANAGER": manager, # to create the ARC QueueName mostly
}
return S_OK(infoDict)
# --- ic-hep/DIRAC :: src/DIRAC/Core/Utilities/Glue2.py (Python, GPL-3.0) ---
# -*- coding: utf-8 -*-
#
# MNE documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 11 10:45:48 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import os.path as op
from datetime import date
import sphinxgallery
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curdir = op.dirname(__file__)
sys.path.append(op.abspath(op.join(curdir, '..', 'mne')))
sys.path.append(op.abspath(op.join(curdir, 'sphinxext')))
import mne
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
import numpy_ext.numpydoc
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
              # 'sphinx.ext.pngmath',  # pngmath and mathjax cannot both be loaded
              'sphinx.ext.mathjax',
'numpy_ext.numpydoc',
# 'sphinx.ext.intersphinx',
# 'flow_diagram',
'sphinxgallery.gen_gallery']
autosummary_generate = True
autodoc_default_flags = ['inherited-members']
# extensions = ['sphinx.ext.autodoc',
# 'sphinx.ext.doctest',
# 'sphinx.ext.todo',
# 'sphinx.ext.pngmath',
# 'sphinx.ext.inheritance_diagram',
# 'numpydoc',
# 'ipython_console_highlighting',
# 'only_directives']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MNE'
copyright = u'2012-%s, MNE Developers' % date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = mne.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = ['config_doc.rst']
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
exclude_patterns = ['source/generated']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['mne.']
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'navbar_title': ' ',
'source_link_position': "footer",
'bootswatch_theme': "flatly",
'navbar_sidebarrel': False,
'bootstrap_version': "3",
'navbar_links': [("Tutorials", "tutorials"),
("Gallery", "auto_examples/index"),
("Manual", "manual/index"),
("API", "python_reference"),
("FAQ", "faq"),
("Cite", "cite"),
],
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/mne_logo_small.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static', '_images', sphinxgallery.glr_path_static()]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# variables to pass to HTML templating engine
build_dev_html = bool(int(os.environ.get('BUILD_DEV_HTML', False)))
html_context = {'use_google_analytics': True, 'use_twitter': True,
'use_media_buttons': True, 'build_dev_html': build_dev_html}
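# e.g. "BUILD_DEV_HTML=1 make html" (illustrative invocation) switches the
# development-only template behaviour on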
# Output file base name for HTML help builder.
htmlhelp_basename = 'mne-doc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
# ('index', 'MNE.tex', u'MNE Manual',
# u'MNE Contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_static/logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = True
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = True
trim_doctests_flags = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
sphinxgallery_conf = {
    'examples_dirs': ['../examples', '../tutorials'],
    'gallery_dirs': ['auto_examples', 'auto_tutorials'],
'doc_module': ('sphinxgallery', 'numpy'),
'reference_url': {
'mne': None,
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
'mayavi': 'http://docs.enthought.com/mayavi/mayavi'},
'find_mayavi_figures': True,
'default_thumb_file': '_static/mne_helmet.png',
}
# --- rajegannathan/grasp-lift-eeg-cat-dog-solution-updated :: python-packages/mne-python-0.10/doc/conf.py (Python, BSD-3-Clause) ---
# wxapp.py
#
# Copyright 2009 dan collins <quaninux@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# generated by wxGlade 0.6.3 on Mon Sep 15 19:43:15 2008
import wx
import os
#~ from matplotlib import use
#~ use('WXAgg')
#~ from pylab import *
#~ from pdf2py import data,channel,listparse, pdf, readwrite, tapbuild, lA2array, headshape
#~ from mri import img, viewmri, transform, mr2vtk, vtkview, sourcesolution2img,pydicom, mr2nifti
#~ from nifti import *
#~ from gui import file
#~
#~ from meg import megcontour, offset, leadfield, timef, \
#~ plotvtk, sensors, sourcespaceprojection, trigger, averager, epoch, fftmeg, badchannels, \
#~ density
from numpy import array, append, size, shape
#~ from scipy import ndimage
#~ #from rpy import *
#~ from meg import weightfit
import time
from time import sleep
import subprocess
#~ from meg import dbscan
#~ from pdf2py import tap, channel
#~ from mswtools import projectdu
# begin wxGlade: extracode
# end wxGlade
import sys
# end of class frame
class MyFrameDENSITY(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrameDENSITY.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.label_27 = wx.StaticText(self, -1, "--Dipole Density Setup--")
self.GOF = wx.StaticText(self, -1, "GOF scale:")
self.gofval = wx.TextCtrl(self, -1, ".8")
self.Sigma = wx.StaticText(self, -1, "Sigma:")
self.sigmaval = wx.TextCtrl(self, -1, "3")
self.LPA = wx.StaticText(self, -1, "LPA:")
self.lpa_loc = wx.StaticText(self, -1, "[NA,NA,NA]")
self.RPA = wx.StaticText(self, -1, "RPA:")
self.rpa_loc = wx.StaticText(self, -1, "[NA,NA,NA]")
self.NAS = wx.StaticText(self, -1, "NAS:")
self.nas_loc = wx.StaticText(self, -1, "[NA,NA,NA]")
self.numofdips = wx.StaticText(self, -1, "Num of Dipoles:")
self.numdipolesval = wx.StaticText(self, -1, "No Dipoles Loaded")
self.apply = wx.Button(self, wx.ID_APPLY, "")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.run, self.apply)
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyFrameDENSITY.__set_properties
self.SetTitle("Dipole Density")
# end wxGlade
self.Bind(wx.EVT_CLOSE, self.OnClose)
def __do_layout(self):
# begin wxGlade: MyFrameDENSITY.__do_layout
sizer_41 = wx.BoxSizer(wx.VERTICAL)
grid_sizer_5 = wx.GridSizer(1, 2, 2, 2)
grid_sizer_6 = wx.GridSizer(4, 2, 0, 0)
grid_sizer_7 = wx.FlexGridSizer(3, 2, 2, 2)
sizer_41.Add(self.label_27, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_7.Add(self.GOF, 0, 0, 0)
grid_sizer_7.Add(self.gofval, 0, 0, 0)
grid_sizer_7.Add(self.Sigma, 0, 0, 0)
grid_sizer_7.Add(self.sigmaval, 0, 0, 0)
grid_sizer_7.AddGrowableRow(0)
grid_sizer_5.Add(grid_sizer_7, 1, wx.EXPAND, 0)
grid_sizer_6.Add(self.LPA, 0, 0, 0)
grid_sizer_6.Add(self.lpa_loc, 0, 0, 0)
grid_sizer_6.Add(self.RPA, 0, 0, 0)
grid_sizer_6.Add(self.rpa_loc, 0, 0, 0)
grid_sizer_6.Add(self.NAS, 0, 0, 0)
grid_sizer_6.Add(self.nas_loc, 0, 0, 0)
grid_sizer_6.Add(self.numofdips, 0, 0, 0)
grid_sizer_6.Add(self.numdipolesval, 0, 0, 0)
grid_sizer_5.Add(grid_sizer_6, 1, wx.EXPAND, 0)
sizer_41.Add(grid_sizer_5, 1, wx.EXPAND, 12)
sizer_41.Add(self.apply, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
self.SetSizer(sizer_41)
sizer_41.Fit(self)
self.Layout()
# end wxGlade
def OnClose(self, event):
print 'closing win'
self.Hide()
def run(self, event): # wxGlade: MyFrameDENSITY.<event_handler>
from pdf2py import readwrite
from meg import density
from mri import transform
from scipy import ndimage
from nifti import NiftiImage
from numpy import float32, int16
print "Event handler `run'"
print 'dipoles', frame1.points
report = {}
#self.points = array([[0,0,0],[10,0,0],[0,20,0]])#DEBUG-----------------
xyz = transform.meg2mri(frame1.lpa,frame1.rpa,frame1.nas, dipole=frame1.points)
print xyz
print frame1.points
readwrite.writedata(xyz, os.path.dirname(frame1.mripath)+'/'+'xyz')
print 'lpa, rpa, nas', frame1.lpa, frame1.rpa, frame1.nas
print frame1.mr.pixdim
print frame1.VoxDim
xyzscaled = (xyz/frame1.VoxDim).T
print xyzscaled
d = density.calc(xyz)
gofscale = float32(self.gofval.GetValue())
print 'gofscale',gofscale
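        # re-weight the density by goodness of fit: gof values in [gofscale, 1]
        # map linearly onto [0, 1] before scaling the density d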
        s = frame1.gof - gofscale
        sf = (1 / (1 - gofscale)) * s
        ds = d * sf
#ds = d #DEBUG-----------------
report['points'] = frame1.points
report['gof'] = frame1.gof
report['density'] = ds
readwrite.writedata(report, os.path.dirname(frame1.mripath)+'/'+'DensityReport')
z = density.val2img(frame1.mr.data, ds, xyzscaled)
sigma = float32(self.sigmaval.GetValue())
print 'sigma',sigma
#sigma = 3
print 'filtering 1st dimension'
f = ndimage.gaussian_filter1d(z, sigma*1/frame1.VoxDim[0], axis=0)
print 'filtering 2nd dimension'
f = ndimage.gaussian_filter1d(f, sigma*1/frame1.VoxDim[1], axis=1)
print 'filtering 3rd dimension'
f = ndimage.gaussian_filter1d(f, sigma*1/frame1.VoxDim[2], axis=2)
scaledf = int16((z.max()/f.max())*f*1000)
#scaledf = ((z.max()/f.max())*f*1000)
print 'writing nifti output image'
overlay = NiftiImage(int16(scaledf))
#overlay = NiftiImage((scaledf))
overlay.setDescription(frame1.mr.description)
overlay.setFilename(frame1.mr.filename+'dd')
overlay.setQForm(frame1.mr.getQForm())
text = "Select a save file name"
suffix='*dd.nii.gz'; filter='*dd.nii.gz'
dialog = wx.FileDialog(None, text, os.getcwd(), suffix, filter, wx.SAVE)
if dialog.ShowModal() == wx.ID_OK:
fn = (dialog.GetPaths())
print fn
overlay.save(str(fn[0]))
else:
            print 'Nothing was chosen'
dialog.Destroy()
# end of class MyFrameDENSITY
class Guage(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: Guage.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.gauge_1 = wx.Gauge(self, -1, 10, style=wx.GA_HORIZONTAL|wx.GA_SMOOTH)
self.__set_properties()
self.__do_layout()
# end wxGlade
def __set_properties(self):
# begin wxGlade: Guage.__set_properties
self.SetTitle("guage")
self.gauge_1.SetMinSize((400, 20))
# end wxGlade
def __do_layout(self):
# begin wxGlade: Guage.__do_layout
sizer_36 = wx.BoxSizer(wx.VERTICAL)
sizer_36.Add(self.gauge_1, 42, 0, 0)
self.SetSizer(sizer_36)
sizer_36.Fit(self)
self.Layout()
# end wxGlade
def start(self, value):
max = value
app = wx.PySimpleApp()
dlg = wx.ProgressDialog("Progress dialog","test",maximum = max,style = wx.PD_ELAPSED_TIME| wx.PD_ESTIMATED_TIME| wx.PD_REMAINING_TIME)
keepGoing = True
skip = False
count = 0
while keepGoing and count < max:
count += 1
#wx.MilliSleep(1000)
#time.sleep(1)
newtext = "(before) count: %s, index: %s, skip: %s " % \
(count, keepGoing, skip)
#print newtext
(keepGoing, skip) = dlg.Update(count, newtext)
newtext = "(after) count: %s, index: %s, skip: %s " % \
(count, keepGoing, skip)
#print newtext
dlg.Destroy()
# end of class Guage
class MyFramePROJECTUTILS(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFramePROJECTUTILS.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.frame_ProjectUtils_statusbar = self.CreateStatusBar(1, 0)
self.tree_ctrl_2 = wx.TreeCtrl(self, -1, style=wx.TR_HAS_BUTTONS|wx.TR_NO_LINES|wx.TR_DEFAULT_STYLE|wx.SUNKEN_BORDER)
self.__set_properties()
self.__do_layout()
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyFramePROJECTUTILS.__set_properties
self.SetTitle("Project UTILS")
self.frame_ProjectUtils_statusbar.SetStatusWidths([-1])
# statusbar fields
frame_ProjectUtils_statusbar_fields = ["statusbar"]
for i in range(len(frame_ProjectUtils_statusbar_fields)):
self.frame_ProjectUtils_statusbar.SetStatusText(frame_ProjectUtils_statusbar_fields[i], i)
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyFramePROJECTUTILS.__do_layout
sizer_39 = wx.BoxSizer(wx.VERTICAL)
sizer_40 = wx.BoxSizer(wx.HORIZONTAL)
sizer_40.Add(self.tree_ctrl_2, 1, wx.EXPAND, 0)
sizer_39.Add(sizer_40, 1, wx.EXPAND, 0)
self.SetSizer(sizer_39)
sizer_39.Fit(self)
self.Layout()
# end wxGlade
# end of class MyFramePROJECTUTILS
class TAPWIN(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: TAPWIN.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
# Menu Bar
self.frameTAPWIN_menubar = wx.MenuBar()
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu.Append(1, "Generate-Template", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(2, "Save-Template", "", wx.ITEM_NORMAL)
self.frameTAPWIN_menubar.Append(wxglade_tmp_menu, "File")
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu.Append(3, "Avg Contour", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(4, "Avg Projection", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(wx.NewId(), "item", "", wx.ITEM_NORMAL)
self.frameTAPWIN_menubar.Append(wxglade_tmp_menu, "View")
self.SetMenuBar(self.frameTAPWIN_menubar)
# Menu Bar end
self.frameTAPWIN_statusbar = self.CreateStatusBar(1, 0)
self.label_01 = wx.StaticText(self, -1, "template")
self.choice_1 = wx.Choice(self, -1, choices=[])
self.button_27 = wx.Button(self, -1, "Get Posted Run")
        self.radio_box_3 = wx.RadioBox(self, -1, "realtime view", choices=["average", "continuous"], majorDimension=1, style=wx.RA_SPECIFY_ROWS)
self.text_ctrl_13 = wx.TextCtrl(self, -1, "")
self.label_23 = wx.StaticText(self, -1, "number of samples\n after trigger to epoch")
self.static_line_6 = wx.StaticLine(self, -1)
self.label_26 = wx.StaticText(self, -1, "Display Parameters")
self.checkbox_11 = wx.CheckBox(self, -1, "contour")
self.text_ctrl_14 = wx.TextCtrl(self, -1, "")
self.checkbox_13 = wx.CheckBox(self, -1, "source projection")
self.text_ctrl_15 = wx.TextCtrl(self, -1, "")
self.checkbox_12 = wx.CheckBox(self, -1, "butterfly")
self.checkbox_14 = wx.CheckBox(self, -1, "current source density")
self.button_28 = wx.ToggleButton(self, -1, "Tap Data")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_MENU, self.maketemplate, id=1)
self.Bind(wx.EVT_MENU, self.savetemplate, id=2)
self.Bind(wx.EVT_MENU, self.avgcontour, id=3)
self.Bind(wx.EVT_MENU, self.avgprojection, id=4)
self.Bind(wx.EVT_CHOICE, self.selchoice, self.choice_1)
self.Bind(wx.EVT_BUTTON, self.getposted, self.button_27)
self.Bind(wx.EVT_TOGGLEBUTTON, self.tapit, self.button_28)
# end wxGlade
def __set_properties(self):
# begin wxGlade: TAPWIN.__set_properties
self.SetTitle("frameTAPWIN")
self.frameTAPWIN_statusbar.SetStatusWidths([-1])
# statusbar fields
frameTAPWIN_statusbar_fields = ["statusbar"]
for i in range(len(frameTAPWIN_statusbar_fields)):
self.frameTAPWIN_statusbar.SetStatusText(frameTAPWIN_statusbar_fields[i], i)
self.radio_box_3.SetSelection(0)
self.checkbox_11.SetValue(1)
self.text_ctrl_14.SetToolTipString("ms post trigger")
self.checkbox_13.Enable(False)
self.text_ctrl_15.SetToolTipString("ms post trigger")
self.checkbox_12.Enable(False)
self.checkbox_12.SetValue(1)
self.checkbox_14.Enable(False)
self.checkbox_14.SetValue(1)
# end wxGlade
self.Bind(wx.EVT_CLOSE, self.OnClose)
#self.scantemplates = dbscan.run()
try:
pass
#self.choice_1.SetItems(self.scantemplates)
except TypeError:
print 'something wrong with your msw data dir structure'
def __do_layout(self):
# begin wxGlade: TAPWIN.__do_layout
sizer_31 = wx.BoxSizer(wx.VERTICAL)
sizer_38_copy_1_copy = wx.BoxSizer(wx.HORIZONTAL)
sizer_38_copy_1 = wx.BoxSizer(wx.HORIZONTAL)
sizer_38_copy = wx.BoxSizer(wx.HORIZONTAL)
sizer_38 = wx.BoxSizer(wx.HORIZONTAL)
sizer_37 = wx.BoxSizer(wx.HORIZONTAL)
sizer_35 = wx.BoxSizer(wx.HORIZONTAL)
sizer_35.Add(self.label_01, 0, wx.ALL, 15)
sizer_35.Add(self.choice_1, 0, wx.ALL, 16)
sizer_31.Add(sizer_35, 1, wx.EXPAND, 0)
sizer_31.Add(self.button_27, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
sizer_31.Add(self.radio_box_3, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
sizer_37.Add(self.text_ctrl_13, 0, 0, 0)
sizer_37.Add(self.label_23, 0, 0, 0)
sizer_31.Add(sizer_37, 1, wx.EXPAND, 0)
sizer_31.Add(self.static_line_6, 0, wx.EXPAND, 0)
sizer_31.Add(self.label_26, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 11)
sizer_38.Add(self.checkbox_11, 0, 0, 0)
sizer_38.Add(self.text_ctrl_14, 0, 0, 0)
sizer_31.Add(sizer_38, 0, wx.ALIGN_CENTER_VERTICAL|wx.SHAPED, 0)
sizer_38_copy.Add(self.checkbox_13, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
sizer_38_copy.Add(self.text_ctrl_15, 0, wx.ALIGN_RIGHT|wx.ALIGN_CENTER_HORIZONTAL, 0)
sizer_31.Add(sizer_38_copy, 0, 0, 0)
sizer_38_copy_1.Add(self.checkbox_12, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
sizer_31.Add(sizer_38_copy_1, 0, 0, 0)
sizer_38_copy_1_copy.Add(self.checkbox_14, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
sizer_31.Add(sizer_38_copy_1_copy, 0, 0, 0)
sizer_31.Add(self.button_28, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 0)
self.SetSizer(sizer_31)
sizer_31.Fit(self)
self.Layout()
# end wxGlade
def OnClose(self, event):
print 'closing win'
self.Hide()
def maketemplate(self, event): # wxGlade: TAPWIN.<event_handler>
print "Event handler `maketemplate' not implemented!"
frame1.openfile(event)
def loadtemplate(self, event): # wxGlade: TAPWIN.<event_handler>
print "Event handler `loadtemplate' not implemented!"
def savetemplate(self, event): # wxGlade: TAPWIN.<event_handler>
from pdf2py import readwrite
print "Event handler `savetemplate' not implemented"
os.environ['pymeg']
taptemplate = readwrite.readdata('taptemplate')
readwrite.writedata()
    def selchoice(self, event): # wxGlade: TAPWIN.<event_handler>
        from pdf2py import channel
        print "Event handler `selchoice' "
print 'selecting',self.choice_1.GetStringSelection()
print self.scantemplates[str(self.choice_1.GetStringSelection())]
ch = channel.index(self.scantemplates[str(self.choice_1.GetStringSelection())], 'meg')
def getposted(self, event): # wxGlade: TAPWIN.<event_handler>
print "Event handler `getposted' "
p = subprocess.Popen('get_posted_sel', stdout=subprocess.PIPE)
out = p.stdout.readlines()
s = out[0];
print 's', s
self.stage = os.environ['STAGE']
datastring = out[0].strip('\n').replace('/','%').replace('@','/').replace(' ','@')
self.posted = [self.stage+'/data/sam_data0/'+datastring]
print 'posted',self.posted
self.SetStatusText(str(self.posted), 0)
    def tapit(self, event): # wxGlade: TAPWIN.<event_handler>
        from pdf2py import channel, tapbuild
        print "Event handler `tapit' "
if self.button_28.GetValue() == True:
template = self.scantemplates[str(self.choice_1.GetStringSelection())]
datafilename = template.split('/')[-1]
print datafilename
ch=channel.index(template, 'meg')
acqfile = self.posted[0]+'/'+datafilename
print 'acqfile', acqfile
tapped = tapbuild.get(acqfile, template)
epochwidth = int(self.text_ctrl_13.GetLineText(0))#20 #number of samples after the trigger you wish to call an epoch
            contour = None
            sp = None
            if self.checkbox_11.IsChecked() == True:
                try:
                    contour = int(self.text_ctrl_14.GetLineText(0))
                    print 'contour val', contour
                except ValueError:  # field does not hold an integer
                    print 'no contour'
            if self.checkbox_13.IsChecked() == True:
                try:
                    sp = int(self.text_ctrl_15.GetLineText(0))
                    print 'sp val', sp
                except ValueError:  # field does not hold an integer
                    print 'no sp'
            tapped.avg(epochwidth, ch, contour=contour, butterfly=None, csd=None, sp=sp)
def avgcontour(self, event): # wxGlade: TAPWIN.<event_handler>
print "Event handler `avgcontour' not implemented"
event.Skip()
def avgprojection(self, event): # wxGlade: TAPWIN.<event_handler>
print "Event handler `avgprojection' not implemented"
event.Skip()
# end of class TAPWIN
class MyFrameBADCH(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrameBADCH.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.frameBADCH_statusbar = self.CreateStatusBar(1, 0)
self.label_1 = wx.StaticText(self, -1, "Frequency based bad channel detection. \n Calculate FFT first on channels of interest.")
self.static_line_5 = wx.StaticLine(self, -1)
self.label_25 = wx.StaticText(self, -1, "Difference Ratio:")
self.text_ctrl_12 = wx.TextCtrl(self, -1, "2")
self.checkbox_2 = wx.CheckBox(self, -1, "HighPass Cutoff:")
self.text_ctrl_2 = wx.TextCtrl(self, -1, "3")
self.label_2 = wx.StaticText(self, -1, "hz")
self.checkbox_3 = wx.CheckBox(self, -1, "LowPass Cutoff:")
self.text_ctrl_3 = wx.TextCtrl(self, -1, "200")
self.label_3 = wx.StaticText(self, -1, "hz")
self.checkbox_4 = wx.CheckBox(self, -1, "PowerLine Notch:")
self.label_24 = wx.StaticText(self, -1, "60,120,180")
self.label_3_copy = wx.StaticText(self, -1, "hz")
self.button_25 = wx.Button(self, -1, "Calculate Bad Channels")
self.list_box_3 = wx.ListBox(self, -1, choices=["No Bad Channels Calculated"], style=wx.LB_HSCROLL)
self.button_26 = wx.Button(self, -1, "Remove Bad Channels from Selected")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.calcbadch, self.button_25)
self.Bind(wx.EVT_BUTTON, self.removechsel, self.button_26)
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyFrameBADCH.__set_properties
self.SetTitle("frameBADCH")
self.SetBackgroundColour(wx.Colour(143, 143, 188))
self.frameBADCH_statusbar.SetStatusWidths([-1])
# statusbar fields
frameBADCH_statusbar_fields = ["no bad channels removed"]
for i in range(len(frameBADCH_statusbar_fields)):
self.frameBADCH_statusbar.SetStatusText(frameBADCH_statusbar_fields[i], i)
self.checkbox_2.SetValue(1)
self.checkbox_3.SetValue(1)
self.checkbox_4.SetValue(1)
self.list_box_3.SetMinSize((120, 163))
self.list_box_3.SetSelection(0)
self.button_26.Enable(False)
# end wxGlade
self.Bind(wx.EVT_CLOSE, self.OnClose)
def __do_layout(self):
# begin wxGlade: MyFrameBADCH.__do_layout
sizer_32 = wx.BoxSizer(wx.VERTICAL)
sizer_33_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
sizer_33_copy = wx.BoxSizer(wx.HORIZONTAL)
sizer_33 = wx.BoxSizer(wx.HORIZONTAL)
sizer_34 = wx.BoxSizer(wx.HORIZONTAL)
sizer_32.Add(self.label_1, 0, wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL, 0)
sizer_32.Add(self.static_line_5, 0, wx.EXPAND, 0)
sizer_34.Add(self.label_25, 0, 0, 0)
sizer_34.Add(self.text_ctrl_12, 0, 0, 0)
sizer_32.Add(sizer_34, 1, wx.EXPAND, 0)
sizer_33.Add(self.checkbox_2, 0, 0, 0)
sizer_33.Add(self.text_ctrl_2, 0, 0, 0)
sizer_33.Add(self.label_2, 0, 0, 0)
sizer_32.Add(sizer_33, 1, wx.EXPAND, 0)
sizer_33_copy.Add(self.checkbox_3, 0, 0, 0)
sizer_33_copy.Add(self.text_ctrl_3, 0, 0, 0)
sizer_33_copy.Add(self.label_3, 0, 0, 0)
sizer_32.Add(sizer_33_copy, 1, wx.EXPAND, 0)
sizer_33_copy_copy.Add(self.checkbox_4, 0, 0, 0)
sizer_33_copy_copy.Add(self.label_24, 0, 0, 0)
sizer_33_copy_copy.Add(self.label_3_copy, 0, 0, 0)
sizer_32.Add(sizer_33_copy_copy, 1, wx.EXPAND, 0)
sizer_32.Add(self.button_25, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
sizer_32.Add(self.list_box_3, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
sizer_32.Add(self.button_26, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
self.SetSizer(sizer_32)
sizer_32.Fit(self)
self.Layout()
# end wxGlade
def OnClose(self, event):
print 'closing win'
self.Hide()
def calcbadch(self, event): # wxGlade: MyFrameBADCH.<event_handler>
from meg import badchannels
print "Event handler `calcbadch'"
try:
frame1.fftpow
except AttributeError:
dlg = wx.MessageDialog(self, 'First Calculate FFT.', 'fft error', wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
return
thresh = int(self.text_ctrl_12.GetLineText(0))
if self.checkbox_2.IsChecked() == True:
minhz = int(self.text_ctrl_2.GetLineText(0))
print minhz
if self.checkbox_3.IsChecked() == True:
maxhz = int(self.text_ctrl_3.GetLineText(0))
print maxhz
if self.checkbox_4.IsChecked() == True:
powernotch = 'yes'
else:
powernotch = 'no'
#data,frame1.badch,badmat,badmax= badchannels.calc(frame1.datapdf, frame1.fftpow, frame1.ch, thresh=thresh, freqarray=frame1.fftfreqs, minhz=minhz, maxhz=maxhz, powernotch=powernotch)
bad = badchannels.calc(frame1.datapdf, frame1.fftpow, frame1.ch, thresh=thresh, freqarray=frame1.fftfreqs, minhz=minhz, maxhz=maxhz, powernotch=powernotch)
frame1.badch = bad['badch']
self.list_box_3.SetItems(frame1.badch)
self.button_26.Enable(True)
def removechsel(self, event): # wxGlade: MyFrameBADCH.<event_handler>
print "Event handler `removechsel' "
for c in range(0,size(frame1.badch)):
frame1.d.ch2keep(frame1.ch.channelsortedlabels != frame1.badch[c])
frame1.ch.channelsortedlabels = frame1.ch.channelsortedlabels[frame1.ch.channelsortedlabels != frame1.badch[c]]
frame1.chantypeind = frame1.chantypeind[frame1.ch.channelsortedlabels != frame1.badch[c]]
frame1.chanlabel = frame1.chanlabel[frame1.ch.channelsortedlabels != frame1.badch[c]]
frame1.d.numofchannels = size(frame1.ch.channelsortedlabels)
frame1.fftpow = frame1.fftpow[:,frame1.ch.channelsortedlabels != frame1.badch[c]]
frame1.ch.chanlocs = frame1.ch.chanlocs[:, frame1.ch.channelsortedlabels != frame1.badch[c]]
frame1.data_block = frame1.d.data_block
print shape(eval('frame1.'+frame1.selitem))
self.SetStatusText("removed channels from: %s" % frame1.selitem)
# end of class MyFrameBADCH
class MyFrameFFT(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrameFFT.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.frameFFT_statusbar = self.CreateStatusBar(2, 0)
self.button_18 = wx.Button(self, -1, "Get Selected Data")
self.radio_box_1 = wx.RadioBox(self, -1, "FFT from...", choices=["Single Channel", "All Channels", "ICA Component", "Projection"], majorDimension=0, style=wx.RA_SPECIFY_ROWS)
self.list_box_2 = wx.ListBox(self, -1, choices=["test", "test2"], style=wx.LB_MULTIPLE|wx.LB_EXTENDED|wx.LB_NEEDED_SB)
self.label_21 = wx.StaticText(self, -1, "number of epochs")
self.text_ctrl_11 = wx.TextCtrl(self, -1, "")
self.button_19 = wx.Button(self, -1, "run fft")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_RADIOBOX, self.getselected, self.radio_box_1)
self.Bind(wx.EVT_LISTBOX_DCLICK, self.getclick, self.list_box_2)
self.Bind(wx.EVT_LISTBOX, self.getclick, self.list_box_2)
self.Bind(wx.EVT_BUTTON, self.runfft, self.button_19)
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyFrameFFT.__set_properties
self.SetTitle("FFT config")
self.SetSize((295, 302))
self.frameFFT_statusbar.SetStatusWidths([-1, -1])
# statusbar fields
frameFFT_statusbar_fields = ["No Data Selected:", "No Items Selected:"]
for i in range(len(frameFFT_statusbar_fields)):
self.frameFFT_statusbar.SetStatusText(frameFFT_statusbar_fields[i], i)
self.button_18.Enable(False)
self.button_18.Hide()
self.radio_box_1.SetSelection(0)
self.list_box_2.SetSelection(0)
# end wxGlade
self.Bind(wx.EVT_CLOSE, self.OnClose)
def __do_layout(self):
# begin wxGlade: MyFrameFFT.__do_layout
sizer_23 = wx.BoxSizer(wx.VERTICAL)
sizer_26 = wx.BoxSizer(wx.HORIZONTAL)
sizer_23.Add(self.button_18, 0, 0, 0)
sizer_26.Add(self.radio_box_1, 0, 0, 0)
sizer_26.Add(self.list_box_2, 0, wx.ALL|wx.EXPAND, 10)
sizer_23.Add(sizer_26, 1, wx.EXPAND, 0)
sizer_23.Add(self.label_21, 0, 0, 0)
sizer_23.Add(self.text_ctrl_11, 0, wx.BOTTOM, 20)
sizer_23.Add(self.button_19, 0, 0, 0)
self.SetSizer(sizer_23)
self.Layout()
# end wxGlade
def OnClose(self, event):
print 'closing win'
self.Hide()
def getselecteddata(self, event): # wxGlade: MyFrameFFT.<event_handler>
print "Event handler `getseleted'"
self.SetStatusText("You selected data: %s" % frame1.selitem, 0)
def getselected(self, event): # wxGlade: MyFrameFFT.<event_handler>
print "Event handler `getseleted'"
try:
frame1.selitem
self.list_box_2.SetItems(frame1.chanlabel)
except AttributeError:
print 'no data selected'
dlg = wx.MessageDialog(self, 'Select some data first from workspace.', 'data select error', wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
return
self.srate = 1/frame1.p.hdr.header_data.sample_period
print 'srate of file',self.srate
if self.radio_box_1.GetSelection() == 0: #single channel
self.list_box_2.Enable(True)
self.list_box_2.SetSelection(1, select=True)
self.fftdata = eval('frame1.'+frame1.selitem)[:,frame1.chanlabel == str(frame1.listitem)]
print shape(self.fftdata)
if self.radio_box_1.GetSelection() == 1: #all channels
self.list_box_2.Enable(False)
print 'fft of all channels from file', str(frame1.selitem)
self.fftdata = eval('frame1.'+frame1.selitem)[:,:]
print shape(self.fftdata)
if self.radio_box_1.GetSelection() == 2: #ica
pass
if self.radio_box_1.GetSelection() == 3: #projection
pass
self.SetStatusText("You selected: %s" % frame1.selitem)
self.list_box_2.SetItems(frame1.chanlabel)
def getclick(self, event): # wxGlade: MyFrameFFT.<event_handler>
print "Event handler `getclick' "
self.listselecteditem = list(self.list_box_2.GetSelections())
print type(self.listselecteditem), self.listselecteditem
self.SetStatusText("You selected: %s" % str(self.listselecteditem), 1)
print 'fft of', frame1.chanlabel[self.listselecteditem]
self.fftdata = eval('frame1.'+frame1.selitem)[:,self.listselecteditem]
def runfft(self, event): # wxGlade: MyFrameFFT.<event_handler>
from meg import fftmeg
print "Event handler `runfft'"
print self.text_ctrl_11.GetLineText(0)
if self.text_ctrl_11.GetLineText(0) != '':
epochs = int(eval(self.text_ctrl_11.GetLineText(0)))
else:
print 'fftepochs',str(eval('frame1.'+frame1.selitem+'epochs'))
epochs=eval('frame1.'+frame1.selitem+'epochs')
print 'num of epochs',epochs
fft = fftmeg.calc(self.fftdata, self.srate, epochs=epochs)
frame1.fftpow = fft.pow
frame1.fftfreqs = fft.freq
frame1.FFT = frame1.tree_ctrl_1.AppendItem(frame1.PROCESSES, 'fftpow')
print shape(self.fftdata)
self.Hide()
# end of class MyFrameFFT
class MyFrame2DPLOT(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrame2DPLOT.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.frame2DPLOT_statusbar = self.CreateStatusBar(1, 0)
self.label_28 = wx.StaticText(self, -1, "Plot Data Type")
self.checkbox_1 = wx.CheckBox(self, -1, "Signal")
self.checkbox_2 = wx.CheckBox(self, -1, "Reference")
self.checkbox_3 = wx.CheckBox(self, -1, "Trigger")
self.checkbox_4 = wx.CheckBox(self, -1, "EEG")
self.checkbox_5 = wx.CheckBox(self, -1, "Utility")
self.checkbox_6 = wx.CheckBox(self, -1, "Derived")
self.checkbox_7 = wx.CheckBox(self, -1, "Shorted")
self.checkbox_8 = wx.CheckBox(self, -1, "Unknown")
self.checkbox_10 = wx.CheckBox(self, -1, "FFT")
self.checkbox_9 = wx.CheckBox(self, -1, "Misc Data")
self.radio_box_2 = wx.RadioBox(self, -1, "Plot Type", choices=["Butterfly", "Spaced"], majorDimension=0, style=wx.RA_SPECIFY_ROWS)
self.button_17 = wx.Button(self, -1, "plot")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.plotselected, self.button_17)
# end wxGlade
self.Bind(wx.EVT_CLOSE, self.OnClose)
def __set_properties(self):
# begin wxGlade: MyFrame2DPLOT.__set_properties
self.SetTitle("Plot Controller")
self.frame2DPLOT_statusbar.SetStatusWidths([-1])
# statusbar fields
frame2DPLOT_statusbar_fields = ["plot statusbar"]
for i in range(len(frame2DPLOT_statusbar_fields)):
self.frame2DPLOT_statusbar.SetStatusText(frame2DPLOT_statusbar_fields[i], i)
self.checkbox_1.Enable(False)
self.checkbox_2.Enable(False)
self.checkbox_3.Enable(False)
self.checkbox_4.Enable(False)
self.checkbox_5.Enable(False)
self.checkbox_6.Enable(False)
self.checkbox_7.Enable(False)
self.checkbox_8.Enable(False)
self.checkbox_10.Enable(False)
self.radio_box_2.SetSelection(0)
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyFrame2DPLOT.__do_layout
sizer_22 = wx.BoxSizer(wx.HORIZONTAL)
sizer_27 = wx.BoxSizer(wx.VERTICAL)
sizer_24 = wx.BoxSizer(wx.VERTICAL)
sizer_25 = wx.BoxSizer(wx.VERTICAL)
sizer_25.Add(self.label_28, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5)
sizer_25.Add(self.checkbox_1, 0, 0, 0)
sizer_25.Add(self.checkbox_2, 0, 0, 0)
sizer_25.Add(self.checkbox_3, 0, 0, 0)
sizer_25.Add(self.checkbox_4, 0, 0, 0)
sizer_25.Add(self.checkbox_5, 0, 0, 0)
sizer_25.Add(self.checkbox_6, 0, 0, 0)
sizer_25.Add(self.checkbox_7, 0, 0, 0)
sizer_25.Add(self.checkbox_8, 0, 0, 0)
sizer_25.Add(self.checkbox_10, 0, wx.TOP, 10)
sizer_25.Add(self.checkbox_9, 0, wx.BOTTOM, 10)
sizer_22.Add(sizer_25, 1, wx.EXPAND, 0)
sizer_24.Add(self.radio_box_2, 0, wx.TOP|wx.BOTTOM|wx.ALIGN_BOTTOM|wx.ALIGN_CENTER_HORIZONTAL, 29)
sizer_22.Add(sizer_24, 1, wx.EXPAND, 0)
sizer_27.Add(self.button_17, 0, wx.ALL|wx.ALIGN_BOTTOM|wx.ALIGN_CENTER_HORIZONTAL, 9)
sizer_22.Add(sizer_27, 1, wx.EXPAND, 0)
self.SetSizer(sizer_22)
sizer_22.Fit(self)
self.Layout()
# end wxGlade
def OnClose(self, event):
print 'closing win'
#del self.tbIcon
self.Hide()
def typeselect(self, event): # wxGlade: MyFrame2DPLOT.<event_handler>
print "Event handler `typeselect' "
pass
def checkandclear(self, event): # check and clear
print "Event handler `checkandclear' "
pass
def plotselected(self, event): # wxGlade: MyFrame2DPLOT.<event_handler>
print "Event handler `plotselected'"
from pylab import figure,plot,show, subplot, connect, subplots_adjust
from meg import fftmeg
ind2plot = [];
data2plot = eval('frame1.'+frame1.selitem)
if size(data2plot,0) > 10000:
print 'too many pnts to plot'
return
self.SetStatusText("selected to plot: %s" % frame1.selitem)
self.Hide()
def event_response(event):
print event.name
print event.xdata
frame1.SetStatusText("You selected: %s" % event.xdata)
indsel = fftmeg.nearest(frame1.timeaxis, event.xdata)
print 'you selected index value',indsel
event.xdata = indsel[0]
frame1.SetPyData(int(event.xdata), data=data)
figure();
try:
numplots = frame1.numplots
except AttributeError:
numplots = 1
if self.checkbox_9.IsChecked() == True:
numplots = 1
print numplots
print 'timeaxis shape',shape(frame1.timeaxis)
for i in range(0, numplots):
startval = (i)*frame1.timeaxis.size
endval = (i+1)*frame1.timeaxis.size
subplot(1,numplots,i+1)
print i,startval,endval
            if self.checkbox_1.IsChecked() == True: frame1.chindex = frame1.chantypeind == 'meg'; plot(frame1.timeaxis, data2plot[startval:endval, frame1.chindex])
            if self.checkbox_2.IsChecked() == True: frame1.chindex = frame1.chantypeind == 'ref'; plot(frame1.timeaxis, data2plot[startval:endval, frame1.chindex])
            if self.checkbox_3.IsChecked() == True: frame1.chindex = frame1.chantypeind == 'trig'; plot(frame1.timeaxis, data2plot[startval:endval, frame1.chindex])
            if self.checkbox_4.IsChecked() == True: frame1.chindex = frame1.chantypeind == 'eeg'; plot(frame1.timeaxis, data2plot[startval:endval, frame1.chindex])
            if self.checkbox_5.IsChecked() == True: frame1.chindex = frame1.chantypeind == 'util'; plot(frame1.timeaxis, data2plot[startval:endval, frame1.chindex])
            if self.checkbox_6.IsChecked() == True: frame1.chindex = frame1.chantypeind == 'derived'; plot(frame1.timeaxis, data2plot[startval:endval, frame1.chindex])
            if self.checkbox_7.IsChecked() == True: frame1.chindex = frame1.chantypeind == 'shorted'; plot(frame1.timeaxis, data2plot[startval:endval, frame1.chindex])
            if self.checkbox_8.IsChecked() == True: frame1.chindex = frame1.chantypeind == 'unknown'; plot(frame1.timeaxis, data2plot[startval:endval, frame1.chindex])
            if self.checkbox_9.IsChecked() == True: plot(data2plot[:, :])
            if self.checkbox_10.IsChecked() == True: plot(frame1.timeaxis, data2plot[:, :])
#if i > 0: setp(gca(), 'yticklabels', [])
cid = connect('button_press_event', event_response)
subplots_adjust(wspace=0)
print 'plotting', frame1.chantypeind
show()
# end of class MyFrame2DPLOT
class MyFrameCHAN(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrameCHAN.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.label_20 = wx.StaticText(self, -1, "Load Channels")
self.signalbutton = wx.ToggleButton(self, -1, "signal")
self.refbutton = wx.ToggleButton(self, -1, "reference")
self.trigbutton = wx.ToggleButton(self, -1, "trigger")
self.eegbutton = wx.ToggleButton(self, -1, "eeg")
self.utilbutton = wx.ToggleButton(self, -1, "utility")
self.derivedbutton = wx.ToggleButton(self, -1, "derived")
self.shortedbutton = wx.ToggleButton(self, -1, "shorted")
self.unknownbutton = wx.ToggleButton(self, -1, "unknown")
self.editlabel = wx.StaticText(self, -1, "Channels")
self.list_ctrl_chlist = wx.ListCtrl(self, -1, style=wx.LC_REPORT|wx.LC_EDIT_LABELS|wx.SUNKEN_BORDER)
self.button_delchans = wx.Button(self, -1, "delete channels")
self.button_24 = wx.Button(self, -1, "clear channels")
self.getchanindices = wx.Button(self, -1, "Load Channels")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_TOGGLEBUTTON, self.getchind, self.signalbutton)
self.Bind(wx.EVT_TOGGLEBUTTON, self.getchind, self.refbutton)
self.Bind(wx.EVT_TOGGLEBUTTON, self.getchind, self.trigbutton)
self.Bind(wx.EVT_TOGGLEBUTTON, self.getchind, self.eegbutton)
self.Bind(wx.EVT_TOGGLEBUTTON, self.getchind, self.utilbutton)
self.Bind(wx.EVT_TOGGLEBUTTON, self.getchind, self.derivedbutton)
self.Bind(wx.EVT_TOGGLEBUTTON, self.getchind, self.shortedbutton)
self.Bind(wx.EVT_TOGGLEBUTTON, self.getchind, self.unknownbutton)
self.Bind(wx.EVT_LIST_DELETE_ITEM, self.delchan, self.list_ctrl_chlist)
self.Bind(wx.EVT_BUTTON, self.delchan, self.button_delchans)
self.Bind(wx.EVT_BUTTON, self.delchannels, self.button_24)
self.Bind(wx.EVT_BUTTON, self.loadchannels, self.getchanindices)
# end wxGlade
self.Bind(wx.EVT_CLOSE, self.OnClose)
def __set_properties(self):
# begin wxGlade: MyFrameCHAN.__set_properties
self.SetTitle("Load Channels")
self.SetBackgroundColour(wx.Colour(143, 143, 188))
self.signalbutton.SetFocus()
self.getchanindices.SetBackgroundColour(wx.Colour(182, 182, 238))
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyFrameCHAN.__do_layout
sizer_17 = wx.BoxSizer(wx.VERTICAL)
sizer_19 = wx.BoxSizer(wx.HORIZONTAL)
sizer_21 = wx.BoxSizer(wx.VERTICAL)
sizer_20 = wx.BoxSizer(wx.HORIZONTAL)
grid_sizer_4 = wx.GridSizer(8, 1, 0, 0)
sizer_17.Add(self.label_20, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 20)
grid_sizer_4.Add(self.signalbutton, 0, 0, 0)
grid_sizer_4.Add(self.refbutton, 0, 0, 0)
grid_sizer_4.Add(self.trigbutton, 0, 0, 0)
grid_sizer_4.Add(self.eegbutton, 0, 0, 0)
grid_sizer_4.Add(self.utilbutton, 0, 0, 0)
grid_sizer_4.Add(self.derivedbutton, 0, 0, 0)
grid_sizer_4.Add(self.shortedbutton, 0, 0, 0)
grid_sizer_4.Add(self.unknownbutton, 0, 0, 0)
sizer_20.Add(grid_sizer_4, 1, wx.EXPAND, 0)
sizer_19.Add(sizer_20, 1, wx.EXPAND, 0)
sizer_21.Add(self.editlabel, 0, 0, 0)
sizer_21.Add(self.list_ctrl_chlist, 1, wx.EXPAND, 0)
sizer_21.Add(self.button_delchans, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
sizer_19.Add(sizer_21, 1, wx.EXPAND, 0)
sizer_17.Add(sizer_19, 1, wx.EXPAND, 0)
sizer_17.Add(self.button_24, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
sizer_17.Add(self.getchanindices, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 19)
self.SetSizer(sizer_17)
sizer_17.Fit(self)
self.Layout()
# end wxGlade
self.list_ctrl_chlist.InsertColumn(0, 'Channel')
def OnClose(self, event):
print 'closing win'
self.Hide()
def getchind(self, event, command='none'): # wxGlade: MyFrameCHAN.<event_handler>
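        """Collect channel indices for every toggled channel-type button.

        Each pressed toggle looks up its channel indices with
        pdf2py.channel.index, records the type in frameCHAN.chdict and
        enables the matching checkbox in the 2D-plot window.
        """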
from pdf2py import channel
print "Event handler `getchind'"
frame2DPLOT.Show()
#frame2DPLOT.Hide()
frameCHAN.chdict = {};
if self.signalbutton.GetValue() == True:
frame1.meg = channel.index(frame1.datapdf, 'meg'); frameCHAN.chdict['meg'] = 'true'; frame2DPLOT.checkbox_1.Enable(True)
if self.refbutton.GetValue() == True:
frame1.ref = channel.index(frame1.datapdf, 'ref'); frameCHAN.chdict['ref'] = 'true'; frame2DPLOT.checkbox_2.Enable(True)
if self.trigbutton.GetValue() == True:
frame1.trig = channel.index(frame1.datapdf, 'trig'); frameCHAN.chdict['trig'] = 'true'; frame2DPLOT.checkbox_3.Enable(True)
if self.eegbutton.GetValue() == True:
frame1.eeg = channel.index(frame1.datapdf, 'eeg'); frameCHAN.chdict['eeg'] = 'true'; frame2DPLOT.checkbox_4.Enable(True)
if self.utilbutton.GetValue() == True:
frame1.util = channel.index(frame1.datapdf, 'util'); frameCHAN.chdict['util'] = 'true'; frame2DPLOT.checkbox_5.Enable(True)
if self.derivedbutton.GetValue() == True:
frame1.derived = channel.index(frame1.datapdf, 'derived'); frameCHAN.chdict['derived'] = 'true'; frame2DPLOT.checkbox_6.Enable(True)
if self.shortedbutton.GetValue() == True:
frame1.shorted = channel.index(frame1.datapdf, 'shorted'); frameCHAN.chdict['shorted'] = 'true'; frame2DPLOT.checkbox_7.Enable(True)
        if self.unknownbutton.GetValue() == True:
            frame1.unknown = channel.index(frame1.datapdf, 'unknown'); frameCHAN.chdict['unknown'] = 'true'; frame2DPLOT.checkbox_8.Enable(True)
# for t in range(0, len(frameCHAN.chdict)):
# for i in range(0, len(chanindices[t])):
self.populatechannels()
print 'debug ch list'
print frame1.chanlabel
def populatechannels(self):
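        """Concatenate the indices and labels of all selected channel types.

        Fills frame1.chanind, frame1.chanlabel and frame1.chantypeind from
        frameCHAN.chdict and refreshes the channel list control.
        """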
try:
frame1.tree_ctrl_1.Delete(frame1.CHANNELS)
except AttributeError:
            print 'cannot delete item'
chanindices = []; chantype = []; chanlabel = [];
print frameCHAN.chdict
        for chtype in frameCHAN.chdict: # concatenate channels of each selected type
            chanindices.append(eval('frame1.' + chtype + '.channelindexhdr'))
            chantype.append(chtype)
            chanlabel.append(eval('frame1.' + chtype + '.channelsortedlabels'))
            frame1.tree_ctrl_1.AppendItem(frame1.DATA, str(chtype))
chantypeind = []
chanlabelsind = []
for t in range(0, len(frameCHAN.chdict)):
for i in range(0, len(chanindices[t])):
chantypeind.append(chantype[t])
#chanlabelsind.append(chanlabel[i])
from numpy import hstack
try:
frame1.chanlabel = hstack(chanlabel)
except ValueError:
pass #nothing selected
#frame1.chantype = chantype
chantypeind = array(chantypeind)
frame1.chantypeind = chantypeind
try:
frame1.chanind = hstack(chanindices)
except ValueError:
pass #nothing selected
self.list_ctrl_chlist.DeleteAllItems()
for i in frame1.chanlabel:
index = self.list_ctrl_chlist.InsertStringItem(sys.maxint, str(i))
self.list_ctrl_chlist.SetStringItem(index, 0, i)
def loadchannels(self, event): # wxGlade: MyFrameCHAN.<event_handler>
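        """Read the data block for the currently selected channels.

        Loads every point in the file for frame1.chanind, publishes the data
        block, epoch count and time axis on frame1 and keeps the meg channel
        indices around for the later leadfield computation.
        """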
print "Event handler `loadchannels' "
from pdf2py import channel
#READ THE DATA
        frame1.d.getdata(0, frame1.d.pnts_in_file, chindex=frame1.chanind)
frame1.SetPyData(event, 'd')
frame1.data_block = frame1.d.data_block
frame1.megdataepochs = frame1.d.numofepochs
frame1.timeaxis = frame1.d.wintime
#get meg channels for leadfield
frame1.ch = channel.index(frame1.datapdf, 'meg')
self.Hide()
def delchannels(self, event): # wxGlade: MyFrameCHAN.<event_handler>
print "Event handler `delchannels' "
del frameCHAN.chdict
self.list_ctrl_chlist.DeleteAllItems()
    def delchan(self, event): # wxGlade: MyFrameCHAN.<event_handler>
        print len(frame1.chanlabel)
        # walk the list backwards so deleting a row does not shift the
        # indices of the rows still to be checked; DeleteItem expects the
        # row index, not a wx.ListItem
        for i in range(len(frame1.chanlabel) - 1, -1, -1):
            if self.list_ctrl_chlist.IsSelected(i):
                print i, self.list_ctrl_chlist.GetItemText(i)
                self.list_ctrl_chlist.DeleteItem(i)
# end of class MyFrameCHAN
class MyFrameCUT(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrameCUT.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.frameCUT_statusbar = self.CreateStatusBar(1, 0)
self.label_16 = wx.StaticText(self, -1, "Resize data relative to ...")
self.combo_box_6 = wx.ComboBox(self, -1, choices=["Epochs", "Trigger"], style=wx.CB_DROPDOWN)
self.resizewhat = wx.StaticText(self, -1, "Resize What...")
self.combo_box_7 = wx.ComboBox(self, -1, choices=[], style=wx.CB_DROPDOWN)
self.static_line_3 = wx.StaticLine(self, -1)
self.label_17 = wx.StaticText(self, -1, "")
self.label_18 = wx.StaticText(self, -1, "window start(ms)")
self.text_ctrl_6 = wx.TextCtrl(self, -1, "")
self.label_19 = wx.StaticText(self, -1, "window end(ms)")
self.text_ctrl_7 = wx.TextCtrl(self, -1, "")
self.button_15 = wx.Button(self, -1, "Epoch DATA")
self.button_16 = wx.Button(self, -1, "Average DATA")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_TEXT, self.cuttype, self.combo_box_6)
self.Bind(wx.EVT_BUTTON, self.epochdata, self.button_15)
self.Bind(wx.EVT_BUTTON, self.averagedata, self.button_16)
# end wxGlade
self.Bind(wx.EVT_CLOSE, self.OnClose)
def __set_properties(self):
# begin wxGlade: MyFrameCUT.__set_properties
self.SetTitle("Resize Data")
self.frameCUT_statusbar.SetStatusWidths([-1])
# statusbar fields
frameCUT_statusbar_fields = ["frameAVG_statusbar"]
for i in range(len(frameCUT_statusbar_fields)):
self.frameCUT_statusbar.SetStatusText(frameCUT_statusbar_fields[i], i)
self.combo_box_6.SetToolTipString("select method to average. Epoch means file is already epoched")
self.combo_box_6.SetSelection(-1)
self.label_17.SetMinSize((300, 157))
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyFrameCUT.__do_layout
sizer_13 = wx.BoxSizer(wx.VERTICAL)
sizer_18 = wx.BoxSizer(wx.HORIZONTAL)
grid_sizer_3 = wx.GridSizer(2, 2, 2, 2)
sizer_13.Add(self.label_16, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 12)
sizer_13.Add(self.combo_box_6, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5)
sizer_13.Add(self.resizewhat, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
sizer_13.Add(self.combo_box_7, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
sizer_13.Add(self.static_line_3, 0, wx.EXPAND, 0)
sizer_13.Add(self.label_17, 0, wx.TOP|wx.ALIGN_CENTER_HORIZONTAL, 17)
grid_sizer_3.Add(self.label_18, 0, wx.EXPAND, 0)
grid_sizer_3.Add(self.text_ctrl_6, 0, wx.EXPAND, 0)
grid_sizer_3.Add(self.label_19, 0, wx.EXPAND, 0)
grid_sizer_3.Add(self.text_ctrl_7, 0, wx.EXPAND, 0)
sizer_13.Add(grid_sizer_3, 1, wx.EXPAND, 0)
sizer_18.Add(self.button_15, 0, wx.ALL, 11)
sizer_18.Add(self.button_16, 0, wx.ALL, 11)
sizer_13.Add(sizer_18, 1, wx.EXPAND, 0)
self.SetSizer(sizer_13)
sizer_13.Fit(self)
self.Layout()
# end wxGlade
def OnClose(self, event):
print 'closing win'
self.Hide()
def cuttype(self, event): # wxGlade: MyFrameCUT.<event_handler>
        print "Event handler `cuttype'"
self.populatebox()
self.sel = self.combo_box_6.GetStringSelection()
if frame1.p.hdr.header_data.total_epochs[0] > 1: #epoched
self.epochduration = frame1.p.hdr.epoch_data[0].epoch_duration[0]*1000
if frame1.p.hdr.event_data[0].start_lat == 0:
                self.label_17.SetLabel('if using a trigger to average, the window start must be greater than 0, as the trigger appears to coincide with the beginning of each epoch')
else:
            print 'either an average or a continuous file'
self.epochduration = frame1.d.pnts_in_file*frame1.p.hdr.header_data.sample_period*1000
self.text_ctrl_6.SetValue('0')
self.text_ctrl_7.SetValue(str(self.epochduration))
if self.combo_box_6.GetStringSelection() == 'Epochs':
from numpy import arange
print 'you selected epochs'
if frame1.p.hdr.header_data.total_epochs[0] == 1:
                dlg = wx.MessageDialog(self, "I don't think you meant to do that... the file doesn't appear to be an epoch file", 'epoch error', wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
return
else:
frame1.ind = arange(0, frame1.d.pnts_in_file, frame1.d.wintime.shape[0])
wins = self.text_ctrl_6.GetLineText(0)
wine = self.text_ctrl_7.GetLineText(0)
if self.combo_box_6.GetStringSelection() == 'Trigger':
print 'you selected trigger based epoching'
try:
wins = frame1.ind - int(eval(self.text_ctrl_6.GetLineText(0)))
wine = frame1.ind - int(eval(self.text_ctrl_7.GetLineText(0)))
print wins, wine
print frame1.ind
self.timeaxis = arange(wins,wine,frame1.p.hdr.header_data.sample_period)
print 'timeaxis',shape(self.timeaxis)
except AttributeError:
                dlg = wx.MessageDialog(self, 'I think you meant to get triggers first...', 'trigger error', wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
frameTRIG.Show()
return
def averagedata(self, event): # wxGlade: MyFrameCUT.<event_handler>
print "Event handler `averagedata' "
self.epochdata(event)
#frame1.avg = averager.on_epochs(frame1.d.data_block, frame1.p.hdr.header_data.total_epochs[0], self.skipfrom, self.skipto)
reshapedepochs = frame1.epoch.reshape(frame1.epochtrials,frame1.epochpnts, frame1.epochnumch)
frame1.avg = mean(reshapedepochs, 0)
print 'avg shape',frame1.avg.shape
frame1.AVG = frame1.tree_ctrl_1.AppendItem(frame1.PROCESSES, 'avg')
self.Hide()
def populatebox(self): # wxGlade: MyFrameCUT.<event_handler>
print "Event handler `populatebox' "
self.combo_box_7.SetItems(frame1.chantype)
def epochdata(self, event): # wxGlade: MyFrameCUT.<event_handler>
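        """Cut the selected data set into epochs around frame1.ind.

        The window start/end boxes are in ms and are converted to sample
        indices as index = ms / 1000.0 * (1 / sample_period).
        """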
print "Event handler `epochdata' "
from meg import epoch
wins = self.text_ctrl_6.GetLineText(0)
wine = self.text_ctrl_7.GetLineText(0)
self.skipfrom = int(eval(wins))/(self.epochduration*frame1.p.hdr.header_data.sample_period)
self.skipto = int(eval(wine))/(self.epochduration*frame1.p.hdr.header_data.sample_period)
self.indfrom = int((eval(wins))/1000.0 * (1/frame1.p.hdr.header_data.sample_period))
self.indto = int((eval(wine))/1000.0 * (1/frame1.p.hdr.header_data.sample_period))
print self.indfrom, self.indto
print int(eval(wine)), self.epochduration, frame1.p.hdr.header_data.sample_period
print self.skipfrom,self.skipto
epochs = size(frame1.ind)
print epochs, shape(frame1.d.data_block)
#if epoch
if frame1.p.hdr.header_data.total_epochs[0] > 1:
self.cutdata = eval('frame1.'+frame1.selitem)[:,:]
frame1.epoch = epoch.epochs(self.cutdata, epochs, self.skipfrom, self.skipto)
#if contin
if frame1.p.hdr.header_data.total_epochs[0] == 1:
self.cutdata = eval('frame1.'+frame1.selitem)[:,:]
frame1.epoch = epoch.cont(self.cutdata, epochs, self.indfrom, self.indto, frame1.ind)
frame1.epochtrials = epochs
frame1.epochepochs = epochs
frame1.epochpnts = self.indto - self.indfrom
frame1.epochnumch = size(frame1.d.data_block,1)
print shape(frame1.epoch)
frame1.EPOCHED = frame1.tree_ctrl_1.AppendItem(frame1.PROCESSES, 'epoch')
self.Hide()
# end of class MyFrameCUT
class MyFrameEPOCH(wx.Frame):
def __init__(self, *args, **kwds):
# content of this block not found: did you rename this class?
pass
def __set_properties(self):
# content of this block not found: did you rename this class?
pass
def __do_layout(self):
# content of this block not found: did you rename this class?
pass
def getaveragetype(self, event): # wxGlade: MyFrameEPOCH.<event_handler>
print "Event handler `getaveragetype' not implemented!"
event.Skip()
def epochdata(self, event): # wxGlade: MyFrameEPOCH.<event_handler>
print "Event handler `epochdata' "
print self.combo_box_6_copy.GetStringSelection()
if self.combo_box_6_copy.GetStringSelection() == 'Trigger':
pass
# end of class MyFrameEPOCH
class MyFrameTRIG(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrameTRIG.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.frameTRIG_statusbar = self.CreateStatusBar(1, 0)
self.label_15 = wx.StaticText(self, -1, "Select Trigger Type")
self.combo_box_4 = wx.ComboBox(self, -1, choices=["", "TRIGGER", "RESPONSE"], style=wx.CB_DROPDOWN)
self.combo_box_5 = wx.ComboBox(self, -1, choices=[], style=wx.CB_DROPDOWN)
self.button_14 = wx.Button(self, -1, "Get Triggers")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_TEXT, self.gettrigtype, self.combo_box_4)
self.Bind(wx.EVT_TEXT, self.getind, self.combo_box_5)
self.Bind(wx.EVT_BUTTON, self.settrig, self.button_14)
# end wxGlade
self.Bind(wx.EVT_CLOSE, self.OnClose)
def __set_properties(self):
# begin wxGlade: MyFrameTRIG.__set_properties
self.SetTitle("Triggers")
self.frameTRIG_statusbar.SetStatusWidths([-1])
# statusbar fields
frameTRIG_statusbar_fields = ["Number of Triggers: NA"]
for i in range(len(frameTRIG_statusbar_fields)):
self.frameTRIG_statusbar.SetStatusText(frameTRIG_statusbar_fields[i], i)
self.combo_box_4.SetSelection(0)
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyFrameTRIG.__do_layout
sizer_14 = wx.BoxSizer(wx.VERTICAL)
sizer_14.Add(self.label_15, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 12)
sizer_14.Add(self.combo_box_4, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 10)
sizer_14.Add(self.combo_box_5, 0, wx.BOTTOM|wx.ALIGN_CENTER_HORIZONTAL, 14)
sizer_14.Add(self.button_14, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
self.SetSizer(sizer_14)
sizer_14.Fit(self)
self.Layout()
# end wxGlade
def OnClose(self, event):
print 'closing win'
self.Hide()
def gettrigtype(self, event): # wxGlade: MyFrameTRIG.<event_handler>
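        """Scan the trigger channels for the distinct trigger values.

        trigger.vals is assumed to return the unique values plus the nonzero
        indices/values of the trigger channel data; each unique value is
        offered in the trigger-selection combo box.
        """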
print "Event handler `gettrigtype'"
from meg import trigger
if self.combo_box_4.GetCurrentSelection() == 1:
            self.uvals, self.nzind, self.nz = trigger.vals(frame1.d.data_block[:, frame1.chantypeind == 'trig'])
print self.uvals
for t in self.uvals:
self.combo_box_5.AppendItems([str([t])])
def getind(self, event): # wxGlade: MyFrameTRIG.<event_handler>
from meg import trigger
print "Event handler `getind' "
frame1.sel = self.combo_box_5.GetStringSelection()
ind = trigger.ind(frame1.sel, self.nzind,self.nz )
self.SetStatusText("Number of Triggers: "+str(len(ind)))
def settrig(self, event): # wxGlade: MyFrameTRIG.<event_handler>
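        """Store the sample indices of the chosen trigger value in frame1.ind.

        Note that the indices are also dumped to the hard-coded debug path
        '/home/danc/trig' below.
        """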
from pdf2py import readwrite
from meg import trigger
print "Event handler `settrig' "
frame1.ind = trigger.ind(frame1.sel, self.nzind,self.nz )
frame1.TRIG = frame1.tree_ctrl_1.AppendItem(frame1.SESSION, 'Trigger')
readwrite.writedata(frame1.ind, '/home/danc/trig')
self.Hide()
# end of class MyFrameTRIG
class MyFrameCH(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrameCH.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
        self.combo_box_3 = wx.ComboBox(self, -1, choices=["MEG", "TRIGGER", "REFERENCE", "EEG", "UTIL", "DERIVED", "SHORTED", "EXTERNAL"], style=wx.CB_DROPDOWN)
self.button_13 = wx.Button(self, -1, "Get Channels")
self.list_box_1 = wx.ListBox(self, -1, choices=[])
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_TEXT, self.getchoice, self.combo_box_3)
self.Bind(wx.EVT_BUTTON, self.getchtype, self.button_13)
# end wxGlade
self.Bind(wx.EVT_CLOSE, self.OnClose)
def __set_properties(self):
# begin wxGlade: MyFrameCH.__set_properties
self.SetTitle("Load Channels")
self.combo_box_3.SetSelection(0)
self.list_box_1.SetMinSize((139, 263))
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyFrameCH.__do_layout
sizer_15 = wx.BoxSizer(wx.HORIZONTAL)
sizer_16 = wx.BoxSizer(wx.VERTICAL)
sizer_16.Add(self.combo_box_3, 0, wx.ALL, 8)
sizer_16.Add(self.button_13, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 9)
sizer_15.Add(sizer_16, 1, wx.EXPAND, 0)
sizer_15.Add(self.list_box_1, 0, 0, 0)
self.SetSizer(sizer_15)
sizer_15.Fit(self)
self.Layout()
# end wxGlade
def OnClose(self, event):
print 'closing win'
#del self.tbIcon
self.Hide()
def getchoice(self, event): # wxGlade: MyFrameCH.<event_handler>
print "Event handler `getchoice' "
event.Skip()
def getchtype(self, event): # wxGlade: MyFrameCH.<event_handler>
print "Event handler `getchtype' "
        # map the combo-box position onto the pdf channel-type strings
        # (same order as the choices list in __init__)
        chtypes = ['meg', 'trig', 'ref', 'eeg', 'util', 'derived', 'shorted', 'ext']
        sel = self.combo_box_3.GetCurrentSelection()
        if 0 <= sel < len(chtypes):
            frame1.ch = channel.index(frame1.datapdf, chtypes[sel])
#READ THE DATA
frame1.d.getdata(0, frame1.d.pnts_in_file, chindex=frame1.ch.channelindexhdr)
frame1.SetPyData(event, 'd')
try:
frame1.tree_ctrl_1.Delete(frame1.CHANNEL)
except AttributeError:
            print 'cannot delete item'
frame1.CHANNEL = frame1.tree_ctrl_1.AppendItem(frame1.MEGDATA, 'Channels')
self.Hide()
# end of class MyFrameCH
class MyFrameAVG(wx.Frame):
def __init__(self, *args, **kwds):
        # content of this block not found: did you rename this class?
        # NOTE: this placeholder defines no OnClose handler, so the original
        # EVT_CLOSE binding here would raise an AttributeError on an
        # uninitialised frame; it stays disabled until the class is restored.
        pass
        #self.Bind(wx.EVT_CLOSE, self.OnClose)
def __set_properties(self):
# content of this block not found: did you rename this class?
pass
def __do_layout(self):
# content of this block not found: did you rename this class?
pass
def averagedata(self, event): # wxGlade: MyFrameAVG.<event_handler>
print "Event handler `averagedata' not implemented"
event.Skip()
# end of class MyFrameAVG
class MyFrameSP(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrameSP.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.frameSP_statusbar = self.CreateStatusBar(2, 0)
self.label_10 = wx.StaticText(self, -1, "Source Projection Method", style=wx.ALIGN_CENTRE)
self.label_12 = wx.StaticText(self, -1, "Manual:")
self.label_11 = wx.StaticText(self, -1, "x,y,z")
self.text_ctrl_3 = wx.TextCtrl(self, -1, "")
        self.label_13 = wx.StaticText(self, -1, "qx,qy,qz")
self.text_ctrl_4 = wx.TextCtrl(self, -1, "")
self.button_12 = wx.Button(self, -1, "Run Projection from Manual Specs")
self.static_line_4 = wx.StaticLine(self, -1)
self.label_14 = wx.StaticText(self, -1, "Custom Weight From Selection:")
self.label_22 = wx.StaticText(self, -1, "make sure to select new data from \n the main window, if you wish to \n apply weights to it!")
self.button_20 = wx.ToggleButton(self, -1, "Generate Weight from Plot")
self.text_ctrl_9 = wx.TextCtrl(self, -1, "seconds")
self.text_ctrl_10 = wx.TextCtrl(self, -1, "epochs")
self.button_22 = wx.Button(self, -1, "Weights from Time")
self.text_ctrl_8 = wx.TextCtrl(self, -1, "freq in hz")
self.button_23 = wx.Button(self, -1, "Weights from Freq")
self.combo_box_2 = wx.ComboBox(self, -1, choices=["", "Selection", "MEG data"], style=wx.CB_DROPDOWN)
self.button_21 = wx.Button(self, -1, "Get Selected Data to Apply Weights")
self.button_3 = wx.Button(self, -1, "Apply Weights to Posted")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.getmanualsp, self.button_12)
self.Bind(wx.EVT_TOGGLEBUTTON, self.pickfromplot, self.button_20)
self.Bind(wx.EVT_BUTTON, self.manualtime, self.button_22)
self.Bind(wx.EVT_BUTTON, self.manualfreq, self.button_23)
self.Bind(wx.EVT_TEXT, self.getchoice, self.combo_box_2)
self.Bind(wx.EVT_COMBOBOX, self.getchoice, self.combo_box_2)
self.Bind(wx.EVT_BUTTON, self.refreshstatus, self.button_21)
self.Bind(wx.EVT_BUTTON, self.getweightchoice, self.button_3)
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyFrameSP.__set_properties
self.SetTitle("Source Projection")
self.SetBackgroundColour(wx.Colour(143, 143, 188))
self.frameSP_statusbar.SetStatusWidths([-1, -1])
# statusbar fields
frameSP_statusbar_fields = ["No Weights:", "Data Posted:"]
for i in range(len(frameSP_statusbar_fields)):
self.frameSP_statusbar.SetStatusText(frameSP_statusbar_fields[i], i)
self.label_10.SetFont(wx.Font(12, wx.MODERN, wx.ITALIC, wx.BOLD, 0, "Sans"))
self.label_12.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD, 1, ""))
self.label_14.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD, 1, ""))
self.combo_box_2.SetBackgroundColour(wx.Colour(207, 194, 129))
self.combo_box_2.Enable(False)
self.combo_box_2.Hide()
self.combo_box_2.SetSelection(-1)
self.button_3.Enable(False)
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyFrameSP.__do_layout
sizer_2 = wx.BoxSizer(wx.VERTICAL)
sizer_30 = wx.BoxSizer(wx.HORIZONTAL)
sizer_29 = wx.BoxSizer(wx.HORIZONTAL)
sizer_28 = wx.BoxSizer(wx.HORIZONTAL)
sizer_12 = wx.BoxSizer(wx.HORIZONTAL)
sizer_2.Add(self.label_10, 0, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL, 7)
sizer_2.Add(self.label_12, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
sizer_12.Add(self.label_11, 0, 0, 0)
sizer_12.Add(self.text_ctrl_3, 0, 0, 0)
sizer_12.Add(self.label_13, 0, 0, 0)
sizer_12.Add(self.text_ctrl_4, 0, 0, 0)
sizer_2.Add(sizer_12, 1, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL, 10)
sizer_2.Add(self.button_12, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 0)
sizer_2.Add(self.static_line_4, 0, wx.ALL|wx.EXPAND, 22)
sizer_2.Add(self.label_14, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
sizer_28.Add(self.label_22, 0, wx.BOTTOM, 16)
sizer_28.Add(self.button_20, 0, 0, 0)
sizer_2.Add(sizer_28, 1, wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL, 0)
sizer_29.Add(self.text_ctrl_9, 0, 0, 0)
sizer_29.Add(self.text_ctrl_10, 0, 0, 0)
sizer_29.Add(self.button_22, 0, 0, 0)
sizer_2.Add(sizer_29, 1, 0, 0)
sizer_30.Add(self.text_ctrl_8, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
sizer_30.Add(self.button_23, 0, 0, 0)
sizer_2.Add(sizer_30, 1, 0, 0)
sizer_2.Add(self.combo_box_2, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
sizer_2.Add(self.button_21, 0, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL, 16)
sizer_2.Add(self.button_3, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 10)
self.SetSizer(sizer_2)
sizer_2.Fit(self)
self.Layout()
# end wxGlade
self.Bind(wx.EVT_CLOSE, self.OnClose)
def OnClose(self, event):
print 'closing win'
#del self.tbIcon
self.Hide()
def getmanualsp(self, event): # wxGlade: MyFrameSP.<event_handler>
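        """Project the data onto a manually specified source.

        Builds a leadfield at the x,y,z point typed into the dialog, runs the
        source-space projection with the orientation qx,qy,qz and saves the
        result next to the MEG file as 'manualssp'.
        """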
        print "Event handler `getmanualsp'"
from meg import leadfield, sourcespaceprojection
from numpy import array
from pdf2py import readwrite
qxqyqz = self.text_ctrl_4.GetLineText(0)
xyz = self.text_ctrl_3.GetLineText(0)
print xyz
lf = leadfield.calc(frame1.datapdf, frame1.ch, grid=array(eval(xyz)))
frame1.projection = sourcespaceprojection.calc(data=frame1.d.data_block, L=lf.leadfield, qn=array(eval(qxqyqz)))
frame1.SetStatusText("Projection Done")
frame1.PROJECTION = frame1.tree_ctrl_1.AppendItem(frame1.PROCESSES, 'projection')
print 'saving ssp', os.path.basename(frame1.megpath)
readwrite.writedata(frame1.projection, os.path.dirname(frame1.megpath)+'/manualssp')
def getweightchoice(self, event): # wxGlade: MyFrameSP.<event_handler>
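        """Apply the picked weight to the currently selected data.

        The weight is the row of the selected data set at the picked index
        frame1.x, restricted to the channel mask frame1.chindex. Note the
        hard-coded debug output path '/home/danc/spdata' below.
        """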
        print "Event handler `getweightchoice'"
from pdf2py import readwrite
from meg import sourcespaceprojection
weight=eval('frame1.'+frame1.selitem+'[frame1.x,frame1.chindex]')
self.SetStatusText("weight size: %s" % shape(weight), 0)
frame1.projection = sourcespaceprojection.calc(eval('frame1.'+frame1.selitem+'[:,frame1.chindex]'), weight=weight)
readwrite.writedata(frame1.projection, '/home/danc/spdata')
print shape(frame1.projection), type(frame1.projection)
frame1.SetStatusText("Projection Done")
frame1.PROJECTION = frame1.tree_ctrl_1.AppendItem(frame1.PROCESSES, 'projection')
self.Hide()
def getchoice(self, event): # wxGlade: MyFrameSP.<event_handler>
        print "Event handler `getchoice'"
        print self.combo_box_2.GetStringSelection()
if self.combo_box_2.GetStringSelection() == 'Selection':
try:
frame1.plot2ddata(eval('frame1.'+frame1.selitem)) #check if something selected
except AttributeError:
print 'nothing selected'
                dlg = wx.MessageDialog(self, 'No workspace data selected in the PyMEG left window. Do that first', 'Selection error', wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
self.combo_box_2.SetSelection(0)
if self.combo_box_2.GetStringSelection() == 'MEG data':
frame1.plotdata(event)
print 'computing projection on weights'
if self.combo_box_2.GetStringSelection() == 'MEG average':
#frame1.data2plot
frame1.plotdata(event, data=frame1.avg[:,frame1.chantypeind == 'meg'])
print 'computing projection on weights from average'
if self.combo_box_2.GetStringSelection() == 'MEG epoch':
#frame1.data2plot
frame1.plotdata(event, data=frame1.epoch[:,frame1.chantypeind == 'meg'])
print 'computing projection on weights from epochs'
if self.combo_box_2.GetStringSelection() == 'fftpow':
pass
if self.combo_box_2.GetSelection() > 0:
self.button_3.Enable(True)
def pickfromplot(self, event): # wxGlade: MyFrameSP.<event_handler>
print "Event handler `pickfromplot' "
try:
frame1.plot2ddata(eval('frame1.'+frame1.selitem)) #check if something selected
self.button_3.Enable(True)
except AttributeError:
print 'nothing selected'
            dlg = wx.MessageDialog(self, 'No workspace data selected in the PyMEG left window. Do that first', 'Selection error', wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
self.combo_box_2.SetSelection(0)
def refreshstatus(self, event): # wxGlade: MyFrameSP.<event_handler>
print "Event handler `refreshstatus' "
self.SetStatusText("Data selected: %s" % frame1.selitem, 1)
def manualtime(self, event): # wxGlade: MyFrameSP.<event_handler>
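        """Turn a time (s) and epoch count into a data index for the weight.

        fftmeg.nearest is assumed to return the index of the time-axis value
        closest to the requested time; it is scaled by the epoch number and
        the result is stored in frame1.x.
        """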
        print "Event handler `manualtime'"
        from meg import fftmeg  # fftmeg was only imported in manualfreq; needed here too
        indtoval = fftmeg.nearest(frame1.timeaxis, float(self.text_ctrl_9.GetLineText(0))) * int(float(self.text_ctrl_10.GetLineText(0)))
print 'indextovalue',indtoval
frame1.x = indtoval
self.button_3.Enable(True)
def manualfreq(self, event): # wxGlade: MyFrameSP.<event_handler>
from meg import fftmeg
        print "Event handler `manualfreq'"
        indtoval = fftmeg.nearest(frame1.timeaxis, float(self.text_ctrl_8.GetLineText(0)))
print 'indextovalue',indtoval
frame1.x = indtoval
self.button_3.Enable(True)
# end of class MyFrameSP
class MyFrameCOREG(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrameCOREG.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.frameCOREG_statusbar = self.CreateStatusBar(1, 0)
self.button_2_copy = wx.Button(self, -1, "Load Analyze MRI Volume")
self.button_3_copy = wx.Button(self, -1, "coregister fiducials")
self.checkbox_1_copy = wx.CheckBox(self, -1, "nas")
self.checkbox_2_copy = wx.CheckBox(self, -1, "lpa")
self.checkbox_3_copy = wx.CheckBox(self, -1, "rpa")
self.save_index = wx.Button(self, -1, "save index points")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.loadmri, self.button_2_copy)
self.Bind(wx.EVT_BUTTON, self.coregistermri, self.button_3_copy)
self.Bind(wx.EVT_CHECKBOX, self.getnas, self.checkbox_1_copy)
self.Bind(wx.EVT_CHECKBOX, self.getlpa, self.checkbox_2_copy)
self.Bind(wx.EVT_CHECKBOX, self.getrpa, self.checkbox_3_copy)
self.Bind(wx.EVT_BUTTON, self.saveindexpoints, self.save_index)
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyFrameCOREG.__set_properties
self.SetTitle("coregister MRI")
self.SetBackgroundColour(wx.Colour(143, 143, 188))
self.frameCOREG_statusbar.SetStatusWidths([-1])
# statusbar fields
frameCOREG_statusbar_fields = ["frameCOREG_statusbar"]
for i in range(len(frameCOREG_statusbar_fields)):
self.frameCOREG_statusbar.SetStatusText(frameCOREG_statusbar_fields[i], i)
self.button_3_copy.Enable(False)
self.checkbox_1_copy.Enable(False)
self.checkbox_2_copy.Enable(False)
self.checkbox_3_copy.Enable(False)
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyFrameCOREG.__do_layout
sizer_11 = wx.BoxSizer(wx.VERTICAL)
sizer_2_copy = wx.BoxSizer(wx.HORIZONTAL)
sizer_11.Add(self.button_2_copy, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 9)
sizer_11.Add(self.button_3_copy, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 9)
sizer_2_copy.Add(self.checkbox_1_copy, 0, wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 0)
sizer_2_copy.Add(self.checkbox_2_copy, 0, wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 0)
sizer_2_copy.Add(self.checkbox_3_copy, 0, wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 0)
sizer_11.Add(sizer_2_copy, 1, wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 9)
sizer_11.Add(self.save_index, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
self.SetSizer(sizer_11)
sizer_11.Fit(self)
self.Layout()
self.Centre()
# end wxGlade
self.Bind(wx.EVT_CLOSE, self.OnClose)
def OnClose(self, event):
print 'closing win'
#del self.tbIcon
self.Hide()
def loadmri(self, event): # wxGlade: MyFrameCOREG.<event_handler>
print "Event handler `loadmri' "
try:
self.SetStatusText("MRI Loaded: %s" % frame1.mrimypath,0)
self.button_2_copy.Enable(False)
self.button_3_copy.Enable(True)
except AttributeError:
frame1.openmri(event)
self.button_2_copy.Enable(False)
def coregistermri(self, event): # wxGlade: MyFrameCOREG.<event_handler>
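        """Open the MRI viewer so the fiducial points can be picked.

        viewmri.display is assumed to return a picker whose ind1/ind2/ind3
        attributes hold the currently selected voxel; the nas/lpa/rpa
        checkboxes read from it.
        """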
from mri import viewmri
print "Event handler `coregistermri'"
self.checkbox_1_copy.Enable(True)
self.checkbox_2_copy.Enable(True)
self.checkbox_3_copy.Enable(True)
self.mrpnts = viewmri.display(frame1.nim, pixdim=frame1.nim.voxdim)
def getnas(self, event): # wxGlade: MyFrameCOREG.<event_handler>
print "Event handler `getnas' "
frame1.nas = (array([self.mrpnts.ind3,self.mrpnts.ind2,self.mrpnts.ind1])*frame1.VoxDim).round()
self.SetStatusText("nasion %s" % unicode(frame1.nas))
def getlpa(self, event): # wxGlade: MyFrameCOREG.<event_handler>
print "Event handler `getlpa'"
frame1.lpa = (array([self.mrpnts.ind3,self.mrpnts.ind2,self.mrpnts.ind1])*frame1.VoxDim).round()
self.SetStatusText("lpa %s" % unicode(frame1.lpa))
def getrpa(self, event): # wxGlade: MyFrameCOREG.<event_handler>
print "Event handler `getrpa'"
frame1.rpa = (array([self.mrpnts.ind3,self.mrpnts.ind2,self.mrpnts.ind1])*frame1.VoxDim).round()
self.SetStatusText("rpa %s" % unicode(frame1.rpa))
def saveindexpoints(self, event): # wxGlade: MyFrameCOREG.<event_handler>
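        """Write the picked fiducials into the MRI header.

        lpa/rpa/nas (already scaled by the voxel dimensions) are stored as a
        compact string in the image description field and the volume is saved
        back to frame1.mripath.
        """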
print "Event handler `saveindexpoints' "
ind = str([frame1.lpa,frame1.rpa,frame1.nas]).replace(' ','')
print ind
frame1.nim.setDescription(ind)
print 'saving index points in mri', ind
print frame1.mripath
frame1.nim.save(str(frame1.mripath))
# end of class MyFrameCOREG
class MyFrameWEIGHTFIT(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrameWEIGHTFIT.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.frame_4_statusbar = self.CreateStatusBar(1, 0)
self.label_6 = wx.StaticText(self, -1, "Generate localization from which:", style=wx.ALIGN_CENTRE)
self.label_7 = wx.StaticText(self, -1, "time point in data")
self.button_8 = wx.Button(self, -1, "Plot Data")
self.label_8 = wx.StaticText(self, -1, "ICA components")
self.button_9 = wx.Button(self, -1, "Run ICA")
self.label_9 = wx.StaticText(self, -1, "Already computed weight")
self.button_10 = wx.Button(self, -1, "Retrieve Selection")
self.button_11 = wx.Button(self, -1, "Localize")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.pickfromplot, self.button_8)
self.Bind(wx.EVT_BUTTON, self.weightfit, self.button_11)
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyFrameWEIGHTFIT.__set_properties
self.SetTitle("frameWEIGHT")
self.frame_4_statusbar.SetStatusWidths([-1])
# statusbar fields
frame_4_statusbar_fields = ["frame_4_statusbar"]
for i in range(len(frame_4_statusbar_fields)):
self.frame_4_statusbar.SetStatusText(frame_4_statusbar_fields[i], i)
self.button_11.SetBackgroundColour(wx.Colour(143, 143, 188))
# end wxGlade
self.Bind(wx.EVT_CLOSE, self.OnClose)
def __do_layout(self):
# begin wxGlade: MyFrameWEIGHTFIT.__do_layout
sizer_10 = wx.BoxSizer(wx.VERTICAL)
grid_sizer_2 = wx.GridSizer(3, 2, 2, 2)
sizer_10.Add(self.label_6, 0, wx.EXPAND, 0)
grid_sizer_2.Add(self.label_7, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_2.Add(self.button_8, 0, 0, 0)
grid_sizer_2.Add(self.label_8, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_2.Add(self.button_9, 0, 0, 0)
grid_sizer_2.Add(self.label_9, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_2.Add(self.button_10, 0, 0, 0)
sizer_10.Add(grid_sizer_2, 1, wx.EXPAND, 0)
sizer_10.Add(self.button_11, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 15)
self.SetSizer(sizer_10)
sizer_10.Fit(self)
self.Layout()
# end wxGlade
def OnClose(self, event):
print 'closing win'
self.Hide()
def pickfromplot(self, event): # wxGlade: MyFrameWEIGHTFIT.<event_handler>
print "Event handler `pickfromplot'"
frame1.openfilecheck(event)
frame1.plotdata(event)
def weightfit(self, event): # wxGlade: MyFrameWEIGHTFIT.<event_handler>
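        """Localise the weight picked at index frame1.x.

        Requires a leadfield (frame1.lf, generated on demand); fits the
        weight row against it with meg.weightfit and, if an MRI is available,
        builds and displays a correlation image of the fit.
        """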
print "Event handler `weightfit' "
frame1.openfilecheck(event)
try:
frame1.lf
except AttributeError:
dlg = wx.MessageDialog(self, 'No leadfields detected. Do that first', 'Leadfield detection error', wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
frame1.leadfieldgen(event)
data2plot = eval('frame1.'+frame1.selitem)
print 'calculating weights from', str(frame1.selitem), 'at index value', str(frame1.x)
wmat = data2plot[frame1.x,:]
print 'weight shape',shape(wmat)
print 'fitting weights'
w = weightfit.calc(frame1.datapdf, frame1.lf.leadfield, wmat)
print 'Done. Fit result size is', shape(w.corr_mat)
self.SetStatusText("Fit Completed %s" % unicode(shape(w.corr_mat)))
frame1.SetStatusText("Fit Completed %s" % unicode(shape(w.corr_mat)))
frame1.FIT = frame1.tree_ctrl_1.AppendItem(frame1.PROCESSES, 'fit')
frame1.fit = w;
try:
frame1.fitimage = sourcesolution2img.build(w.corr_mat, frame1.dec)
viewmri.display(frame1.fitimage[0], colormap=cm.hot)
frame1.FITIMAGE = frame1.tree_ctrl_1.AppendItem(frame1.PROCESSES, 'fitimage')
except NameError:
print 'no mri to make image from'
# end of class MyFrameWEIGHTFIT
class MyFrameICA(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrameICA.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.label_5 = wx.StaticText(self, -1, "independent components: ", style=wx.ALIGN_CENTRE)
self.text_ctrl_5 = wx.TextCtrl(self, -1, "10", style=wx.TE_CENTRE)
self.button_7 = wx.Button(self, -1, "Run ICA", style=wx.BU_EXACTFIT)
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.runica, self.button_7)
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyFrameICA.__set_properties
self.SetTitle("Independent Component Analysis")
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyFrameICA.__do_layout
sizer_8 = wx.BoxSizer(wx.VERTICAL)
sizer_9 = wx.BoxSizer(wx.HORIZONTAL)
sizer_9.Add(self.label_5, 0, 0, 0)
sizer_9.Add(self.text_ctrl_5, 0, 0, 0)
sizer_8.Add(sizer_9, 1, 0, 0)
sizer_8.Add(self.button_7, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
self.SetSizer(sizer_8)
sizer_8.Fit(self)
self.Layout()
self.Centre()
# end wxGlade
def runica(self, event): # wxGlade: MyFrameICA.<event_handler>
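        """Run R's fastICA (via rpy) on the transposed data block.

        The number of components comes from the text field; the result is
        stored in frame1.ica and reported in the status bar.
        """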
print "Event handler `runica'"
frame1.openfilecheck(event)
print 'running ICA'
r.library('fastICA')
ts = time.time()
frame1.ica = r.fastICA(frame1.d.data_block.T, int(self.text_ctrl_5.GetLineText(0)), verbose='TRUE')
self.Hide()
telapsed = time.time()-ts
print 'done. elapsed time', telapsed, 'seconds'
frame1.ICA = frame1.tree_ctrl_1.AppendItem(frame1.MEGDATA, 'ICA')
frame1.SetStatusText("ICA components %s" % unicode(shape(frame1.ica)))
# end of class MyFrameICA
class MyFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrame.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.label_1 = wx.StaticText(self, -1, "Time Freq Values")
self.label_4 = wx.StaticText(self, -1, "data")
self.combo_box_1 = wx.ComboBox(self, -1, choices=["raw data", "projection"], style=wx.CB_DROPDOWN)
self.label_4_copy = wx.StaticText(self, -1, "channel label")
self.text_ctrl_chl = wx.TextCtrl(self, -1, "'A1'")
self.label_4_copy_1 = wx.StaticText(self, -1, "cycles")
self.text_ctrl_cyc = wx.TextCtrl(self, -1, "[3.0, 0.5]")
self.label_4_copy_2 = wx.StaticText(self, -1, "frequency range")
self.text_ctrl_freqr = wx.TextCtrl(self, -1, "[5.0, 100]")
self.label_4_copy_3 = wx.StaticText(self, -1, "padratio")
self.text_ctrl_padr = wx.TextCtrl(self, -1, "4")
self.label_4_copy_4 = wx.StaticText(self, -1, "timesout")
self.text_ctrl_timout = wx.TextCtrl(self, -1, "200")
self.label_4_copy_5 = wx.StaticText(self, -1, "frames")
self.text_ctrl_4_copy_3 = wx.TextCtrl(self, -1, "[3.0, 0.5]")
self.label_4_copy_6 = wx.StaticText(self, -1, "trials")
self.text_ctrl_4_copy_4 = wx.TextCtrl(self, -1, "None")
self.label_4_copy_7 = wx.StaticText(self, -1, "sample rate")
self.text_ctrl_4_copy_5 = wx.TextCtrl(self, -1, "None")
self.timef_run = wx.Button(self, -1, "Run Timef")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_TEXT, self.getchoice, self.combo_box_1)
self.Bind(wx.EVT_BUTTON, self.timefhandle, self.timef_run)
# end wxGlade
self.Bind(wx.EVT_CLOSE, self.OnClose)
def __set_properties(self):
# begin wxGlade: MyFrame.__set_properties
self.SetTitle("TimeFreq Parameters")
self.SetBackgroundColour(wx.Colour(143, 143, 188))
self.label_1.SetFont(wx.Font(12, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "Sans"))
self.combo_box_1.SetBackgroundColour(wx.Colour(195, 203, 113))
self.combo_box_1.SetSelection(-1)
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyFrame.__do_layout
sizer_7 = wx.BoxSizer(wx.VERTICAL)
grid_sizer_1 = wx.GridSizer(11, 2, 0, 0)
sizer_7.Add(self.label_1, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 0)
grid_sizer_1.Add(self.label_4, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_1.Add(self.combo_box_1, 0, 0, 0)
grid_sizer_1.Add(self.label_4_copy, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_1.Add(self.text_ctrl_chl, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_1.Add(self.label_4_copy_1, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_1.Add(self.text_ctrl_cyc, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_1.Add(self.label_4_copy_2, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_1.Add(self.text_ctrl_freqr, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_1.Add(self.label_4_copy_3, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_1.Add(self.text_ctrl_padr, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_1.Add(self.label_4_copy_4, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_1.Add(self.text_ctrl_timout, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_1.Add(self.label_4_copy_5, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_1.Add(self.text_ctrl_4_copy_3, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_1.Add(self.label_4_copy_6, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_1.Add(self.text_ctrl_4_copy_4, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_1.Add(self.label_4_copy_7, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_1.Add(self.text_ctrl_4_copy_5, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
sizer_7.Add(grid_sizer_1, 1, 0, 0)
sizer_7.Add(self.timef_run, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 0)
self.SetSizer(sizer_7)
sizer_7.Fit(self)
self.Layout()
# end wxGlade
def OnClose(self, event):
print 'closing win'
self.Hide()
def timefhandle(self, event): # wxGlade: MyFrame.<event_handler>
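        """Run the Morlet-wavelet time-frequency transform (meg.timef).

        Works either on the raw data (per channel label) or on a computed
        source projection; frames, trials and sample rate are taken from the
        pdf header and the result is saved next to the MEG file as 'tft'.
        """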
from meg import timef
from pdf2py import readwrite
print "Event handler `timefhandle' "
frame1.openfilecheck(event)
if self.combo_box_1.GetCurrentSelection() == 0:
data = frame1.datapdf
chl = str(eval(self.text_ctrl_chl.GetLineText(0)))
if self.combo_box_1.GetCurrentSelection() == 1:
print 'sp shape',shape(frame1.projection)
data = frame1.projection
chl=None
srate=1/frame1.p.hdr.header_data.sample_period
frames=int(frame1.p.hdr.epoch_data[0].epoch_duration/frame1.p.hdr.header_data.sample_period)-1
trials=size(frame1.p.hdr.epoch_data)
cyc = array(eval(self.text_ctrl_cyc.GetLineText(0)))
freqr = array(eval(self.text_ctrl_freqr.GetLineText(0)))
padr = int(eval(self.text_ctrl_padr.GetLineText(0)))
timout = int(eval(self.text_ctrl_timout.GetLineText(0)))
frame1.t = timef.initialize()
print type(data),shape(data)
frame1.t.calc(data=data, chlabel=chl, cycles=cyc, freqrange=freqr, padratio=padr, timesout=timout,frames=frames, trials=trials, srate=srate)
frame1.TIMEF = frame1.tree_ctrl_1.AppendItem(frame1.PROCESSES, 'timef')
frame1.TIMEFpow = frame1.tree_ctrl_1.AppendItem(frame1.TIMEF, 'induced_power')
frame1.TIMEFpow = frame1.tree_ctrl_1.AppendItem(frame1.TIMEF, 'induced_powerlog')
frame1.TIMEFplf = frame1.tree_ctrl_1.AppendItem(frame1.TIMEF, 'phaselocking_factor')
frame1.TIMEFplf = frame1.tree_ctrl_1.AppendItem(frame1.TIMEF, 'power_of_continious_data')
self.Hide()
print 'saving tft', os.path.basename(frame1.megpath)
readwrite.writedata(frame1.t, os.path.dirname(frame1.megpath)+'/tft')
def getchoice(self, event): # wxGlade: MyFrame.<event_handler>
        print "Event handler `getchoice'"
print self.combo_box_1.GetCurrentSelection()
        if self.combo_box_1.GetCurrentSelection() == 0:
            self.text_ctrl_chl.Enable(True)
if self.combo_box_1.GetCurrentSelection() == 1:
print 'sp shape',shape(frame1.projection)
self.text_ctrl_chl.Enable(False)
# end of class MyFrame
class MyFrameGrid(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrameGrid.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.sizer_6_staticbox = wx.StaticBox(self, -1, "Decimation Factor for Brain Space")
self.frame_2_statusbar = self.CreateStatusBar(1, 0)
self.label_2 = wx.StaticText(self, -1, "Manual Specification of Grid Points:\nEnter X,Y,Z coordinates (mm)\nex. [[55,-65,82.3],[52.2,63.4,84]]", style=wx.ALIGN_CENTRE)
self.text_ctrl_2 = wx.TextCtrl(self, -1, "[]", style=wx.TE_CENTRE)
self.button_1 = wx.Button(self, -1, "Manual Add of Grid Points", style=wx.BU_BOTTOM)
self.label_3 = wx.StaticText(self, -1, "Source Space Volume Method", style=wx.ALIGN_CENTRE)
self.button_2 = wx.Button(self, -1, "Load Head MRI Volume")
self.button_4 = wx.Button(self, -1, "Load Extracted Brain Volume")
self.text_ctrl_1 = wx.TextCtrl(self, -1, "10")
self.button_5 = wx.Button(self, -1, "Generate Source Space Grid")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.gridgetxyz, self.button_1)
self.Bind(wx.EVT_BUTTON, self.gridloadmri, self.button_2)
self.Bind(wx.EVT_BUTTON, self.loadbrain, self.button_4)
self.Bind(wx.EVT_BUTTON, self.gridcalc, self.button_5)
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyFrameGrid.__set_properties
self.SetTitle("Grid Selection")
self.SetBackgroundColour(wx.Colour(143, 143, 188))
self.frame_2_statusbar.SetStatusWidths([-1])
# statusbar fields
frame_2_statusbar_fields = ["frame_2_statusbar"]
for i in range(len(frame_2_statusbar_fields)):
self.frame_2_statusbar.SetStatusText(frame_2_statusbar_fields[i], i)
self.text_ctrl_2.SetMinSize((180, 37))
self.button_1.SetBackgroundColour(wx.Colour(128, 128, 128))
self.button_4.Enable(False)
self.text_ctrl_1.Enable(False)
self.button_5.SetBackgroundColour(wx.Colour(128, 128, 128))
# end wxGlade
self.Bind(wx.EVT_CLOSE, self.OnClose)
def __do_layout(self):
# begin wxGlade: MyFrameGrid.__do_layout
sizer_3 = wx.BoxSizer(wx.HORIZONTAL)
sizer_5 = wx.BoxSizer(wx.VERTICAL)
sizer_6 = wx.StaticBoxSizer(self.sizer_6_staticbox, wx.HORIZONTAL)
sizer_4 = wx.BoxSizer(wx.VERTICAL)
sizer_4.Add(self.label_2, 0, wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 0)
sizer_4.Add(self.text_ctrl_2, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 0)
sizer_4.Add(self.button_1, 0, wx.ALIGN_BOTTOM|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 0)
sizer_3.Add(sizer_4, 1, wx.ALIGN_BOTTOM|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL|wx.SHAPED, 0)
sizer_3.Add((20, 20), 0, wx.ALL|wx.SHAPED, 2)
sizer_5.Add(self.label_3, 0, wx.TOP|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 19)
sizer_5.Add(self.button_2, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 11)
sizer_5.Add(self.button_4, 0, wx.TOP|wx.ALIGN_CENTER_HORIZONTAL, 8)
sizer_6.Add(self.text_ctrl_1, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 0)
sizer_5.Add(sizer_6, 1, wx.BOTTOM|wx.EXPAND, 5)
sizer_5.Add(self.button_5, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 0)
sizer_3.Add(sizer_5, 1, wx.EXPAND, 6)
self.SetSizer(sizer_3)
sizer_3.Fit(self)
self.Layout()
self.Centre()
# end wxGlade
def OnClose(self, event):
print 'closing win'
self.Hide()
def gridgetxyz(self, event): # wxGlade: MyFrameGrid.<event_handler>
print "Event handler `gridgetxyz'"
pnts = array(eval(self.text_ctrl_2.GetLineText(0)))
print pnts
if len(pnts.shape) == 1:
pnts = array([pnts])
        if shape(pnts)[1] != 3:
            print 'each grid point needs 3 coordinates ([x,y,z])'
            return
        numgridpnts = pnts.shape
frame1.grid = pnts.T
frame2.SetStatusText("number of pnts %s" % unicode(numgridpnts))
frame1.GRID = frame1.tree_ctrl_1.AppendItem(frame1.PROCESSES, 'grid')
self.Hide()
def gridloadmri(self, event): # wxGlade: MyFrameGrid.<event_handler>
print "Event handler `gridloadmri'"
try:
frameCOREG.mrpnts
except AttributeError:
frameCOREG.loadmri(event)
time.sleep(1)
frameCOREG.Show()
frameCOREG.button_3_copy.Enable(True)
self.button_4.Enable(True)
self.text_ctrl_1.Enable(True)
frame2.SetStatusText("MRI Loaded: %s" % frame1.mrimypath,0)
def loadbrain(self, event): # wxGlade: MyFrameGrid.<event_handler>
        print "Event handler `loadbrain'"
frame1.openbrain(event)
def gridcalc(self, event): # wxGlade: MyFrameGrid.<event_handler>
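        """Build a source-space grid from the extracted brain volume.

        Decimates the brain image by the chosen factor, moves the voxel
        coordinates into MEG space via transform.meg2mri/mri2meg using the
        lpa/rpa/nas fiducials, scales them with transform.scalesourcespace
        and stores the transposed result in frame1.grid.
        """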
print "Event handler `gridcalc'"
frame1.dec = img.decimate(frame1.brain, eval(self.text_ctrl_1.GetLineText(0)))
[t,r] = transform.meg2mri(frame1.lpa,frame1.rpa,frame1.nas)
megxyz = transform.mri2meg(t,r,frame1.dec.mrixyz)
try:
datapdf = frame1.megpath
except AttributeError:
frame1.openfile(event)
datapdf = frame1.megpath
print frame1.megpath
self.scaledmegxyz = transform.scalesourcespace(datapdf, megxyz)
        frame2.SetStatusText("source space points %s" % unicode(shape(self.scaledmegxyz)))
print shape(self.scaledmegxyz)
frame1.grid = self.scaledmegxyz.T
frame1.GRID = frame1.tree_ctrl_1.AppendItem(frame1.PROCESSES, 'grid')
self.Hide()
# end of class MyFrameGrid
class MyFrame1(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrame1.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.window_1 = wx.SplitterWindow(self, -1, style=wx.SP_NOBORDER)
# Menu Bar
self.framewithnotebook_menubar = wx.MenuBar()
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu.Append(1, "New Session", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(2, "Load MEG", "Load 4D MEG File", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(3, "Load MRI", "load mri file", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(4, "Load Stripped MRI", "Load Brain Extracted Volume", wx.ITEM_NORMAL)
wxglade_tmp_menu.AppendSeparator()
wxglade_tmp_menu.Append(7, "Load Dipole File(s)", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(8, "load hs_file", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.AppendSeparator()
wxglade_tmp_menu.Append(9, "Project Info", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(10, "Macro", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(11, "Save Workspace", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.AppendSeparator()
self.Exit = wx.MenuItem(wxglade_tmp_menu, wx.NewId(), "Exit PyMEG", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.AppendItem(self.Exit)
self.framewithnotebook_menubar.Append(wxglade_tmp_menu, "File")
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu.Append(12, "Channels", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(13, "Events", "", wx.ITEM_NORMAL)
self.framewithnotebook_menubar.Append(wxglade_tmp_menu, "Edit")
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu_sub = wx.Menu()
wxglade_tmp_menu_sub.Append(150, "Data-Tap", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.AppendMenu(wx.NewId(), "Acquisition", wxglade_tmp_menu_sub, "")
wxglade_tmp_menu.Append(30, "Epoch and or Average Data", "resize data", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(31, "Epoch Data", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(32, "Remove DC offset", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(33, "Coregister MRI", "coregister mri fiducials", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(34, "LeadField", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(35, "Source Space Grid", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(36, "Source Projection", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(37, "Source Simulation", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(38, "ICA", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(39, "Time Frequency Transform", "compute time freq transform using Morlet Wavelets", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(40, "Fast Fourier Transform", "fft", wx.ITEM_NORMAL)
wxglade_tmp_menu_sub = wx.Menu()
wxglade_tmp_menu_sub.Append(41, "Weight Matrix", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.AppendMenu(wx.NewId(), "Localize", wxglade_tmp_menu_sub, "")
wxglade_tmp_menu.Append(42, "Bad Channel Detection", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(43, "Dipole Density", "", wx.ITEM_NORMAL)
self.framewithnotebook_menubar.Append(wxglade_tmp_menu, "Tools")
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu_sub = wx.Menu()
wxglade_tmp_menu_sub.Append(50, "Channel Data", "", wx.ITEM_NORMAL)
wxglade_tmp_menu_sub.Append(51, "ERP", "", wx.ITEM_NORMAL)
wxglade_tmp_menu_sub.Append(52, "headshape", "", wx.ITEM_NORMAL)
wxglade_tmp_menu_sub.Append(53, "sensors", "", wx.ITEM_NORMAL)
wxglade_tmp_menu_sub.Append(54, "fiducials", "", wx.ITEM_NORMAL)
wxglade_tmp_menu_sub.Append(55, "headshape,sensors,fiducials", "", wx.ITEM_NORMAL)
wxglade_tmp_menu_sub.Append(56, "headshape,fids,dipoles", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.AppendMenu(wx.NewId(), "MEG", wxglade_tmp_menu_sub, "")
wxglade_tmp_menu_sub = wx.Menu()
wxglade_tmp_menu_sub.Append(57, "2D", "", wx.ITEM_NORMAL)
wxglade_tmp_menu_sub.Append(58, "3D", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.AppendMenu(wx.NewId(), "MRI", wxglade_tmp_menu_sub, "")
self.framewithnotebook_menubar.Append(wxglade_tmp_menu, "Plot")
wxglade_tmp_menu = wx.Menu()
wxglade_tmp_menu.Append(200, "psel", "", wx.ITEM_NORMAL)
wxglade_tmp_menu.Append(201, "acquisition menu", "", wx.ITEM_NORMAL)
self.framewithnotebook_menubar.Append(wxglade_tmp_menu, "MSI-tools")
self.exportitems = wx.Menu()
self.itemtovtk = wx.MenuItem(self.exportitems, wx.NewId(), "Item to VTK", "", wx.ITEM_NORMAL)
self.exportitems.AppendItem(self.itemtovtk)
self.framewithnotebook_menubar.Append(self.exportitems, "Export")
wxglade_tmp_menu = wx.Menu()
self.framewithnotebook_menubar.Append(wxglade_tmp_menu, "Help")
self.SetMenuBar(self.framewithnotebook_menubar)
# Menu Bar end
self.framewithnotebook_statusbar = self.CreateStatusBar(3, 0)
# Tool Bar
self.frame1_toolbar = wx.ToolBar(self, -1, style=wx.TB_HORIZONTAL|wx.TB_DOCKABLE|wx.TB_3DBUTTONS|wx.TB_TEXT|wx.TB_NODIVIDER|wx.TB_NOALIGN)
self.SetToolBar(self.frame1_toolbar)
self.frame1_toolbar.AddLabelTool(101, "MEG load", wx.Bitmap("filelight.png", wx.BITMAP_TYPE_ANY), wx.NullBitmap, wx.ITEM_NORMAL, "Load Data File", "")
self.frame1_toolbar.AddLabelTool(102, "PlotMEG", wx.Bitmap("plotdata2.png", wx.BITMAP_TYPE_ANY), wx.NullBitmap, wx.ITEM_NORMAL, "2D Plot", "")
self.frame1_toolbar.AddLabelTool(103, "megcontour", wx.Bitmap("contour3.png", wx.BITMAP_TYPE_ANY), wx.NullBitmap, wx.ITEM_NORMAL, "Contour Plot Of Data Point", "")
self.frame1_toolbar.AddLabelTool(105, "Plot Selected", wx.Bitmap("fityk.png", wx.BITMAP_TYPE_ANY), wx.NullBitmap, wx.ITEM_NORMAL, "", "")
self.frame1_toolbar.AddSeparator()
self.frame1_toolbar.AddLabelTool(104, "MRI load", wx.Bitmap("brain1.png", wx.BITMAP_TYPE_ANY), wx.NullBitmap, wx.ITEM_NORMAL, "LoadMR", "")
self.frame1_toolbar.AddLabelTool(106, "MACRO", wx.Bitmap("brain2.png", wx.BITMAP_TYPE_ANY), wx.NullBitmap, wx.ITEM_NORMAL, "", "")
# Tool Bar end
        self.tree_ctrl_1 = wx.TreeCtrl(self.window_1, -1, style=wx.TR_HAS_BUTTONS|wx.TR_NO_LINES|wx.TR_EDIT_LABELS|wx.TR_MULTIPLE|wx.TR_EXTENDED|wx.TR_DEFAULT_STYLE)
self.list_ctrl_1 = wx.ListCtrl(self.window_1, -1, style=wx.LC_REPORT|wx.LC_AUTOARRANGE|wx.LC_EDIT_LABELS|wx.SUNKEN_BORDER)
self.button_6 = wx.Button(self, -1, "Delete Item")
self.static_line_2 = wx.StaticLine(self, -1)
self.static_line_1 = wx.StaticLine(self, -1)
self.text = wx.StaticText(self, -1, "argument")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_MENU, self.newsession, id=1)
self.Bind(wx.EVT_MENU, self.openfile, id=2)
self.Bind(wx.EVT_MENU, self.openmri, id=3)
self.Bind(wx.EVT_MENU, self.openbrain, id=4)
self.Bind(wx.EVT_MENU, self.loaddipoles, id=7)
self.Bind(wx.EVT_MENU, self.loadhs, id=8)
self.Bind(wx.EVT_MENU, self.projectutils, id=9)
self.Bind(wx.EVT_MENU, self.batch, id=10)
self.Bind(wx.EVT_MENU, self.saveworkspace, id=11)
self.Bind(wx.EVT_MENU, self.quitapp, self.Exit)
self.Bind(wx.EVT_MENU, self.loadchan, id=12)
self.Bind(wx.EVT_MENU, self.getevents, id=13)
self.Bind(wx.EVT_MENU, self.tapwin, id=150)
self.Bind(wx.EVT_MENU, self.cutdata, id=30)
self.Bind(wx.EVT_MENU, self.epochdata, id=31)
self.Bind(wx.EVT_MENU, self.offsetcorrect, id=32)
self.Bind(wx.EVT_MENU, self.coregistermri, id=33)
self.Bind(wx.EVT_MENU, self.leadfieldgen, id=34)
self.Bind(wx.EVT_MENU, self.sourcespacegrid, id=35)
self.Bind(wx.EVT_MENU, self.sourceprojection, id=36)
self.Bind(wx.EVT_MENU, self.ica, id=38)
self.Bind(wx.EVT_MENU, self.timef, id=39)
self.Bind(wx.EVT_MENU, self.fft, id=40)
self.Bind(wx.EVT_MENU, self.weightfit, id=41)
self.Bind(wx.EVT_MENU, self.badch, id=42)
self.Bind(wx.EVT_MENU, self.dipoledensity, id=43)
self.Bind(wx.EVT_MENU, self.plot2ddata, id=50)
self.Bind(wx.EVT_MENU, self.plotheadshape, id=52)
self.Bind(wx.EVT_MENU, self.plotsensors, id=53)
self.Bind(wx.EVT_MENU, self.plotindex, id=54)
self.Bind(wx.EVT_MENU, self.plothssensind, id=55)
self.Bind(wx.EVT_MENU, self.plothsinddips, id=56)
self.Bind(wx.EVT_MENU, self.mri2D, id=57)
self.Bind(wx.EVT_MENU, self.mri3D, id=58)
self.Bind(wx.EVT_MENU, self.psel, id=200)
self.Bind(wx.EVT_MENU, self.ape, id=201)
self.Bind(wx.EVT_TOOL, self.openfile, id=101)
self.Bind(wx.EVT_TOOL, self.plotdata, id=102)
self.Bind(wx.EVT_TOOL, self.megcontour, id=103)
self.Bind(wx.EVT_TOOL, self.plotselected, id=105)
self.Bind(wx.EVT_TOOL, self.openmri, id=104)
self.Bind(wx.EVT_TOOL, self.batch, id=106)
self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.treeitemact, self.tree_ctrl_1)
self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.listitemselected, self.list_ctrl_1)
self.Bind(wx.EVT_BUTTON, self.treedelitem, self.button_6)
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyFrame1.__set_properties
self.SetTitle("PyMEG GUI")
self.SetSize((645, 668))
self.SetToolTipString("pymegGUI")
self.framewithnotebook_statusbar.SetStatusWidths([-1, -1, -1])
# statusbar fields
framewithnotebook_statusbar_fields = ["Status:", "No value:", "No element:"]
for i in range(len(framewithnotebook_statusbar_fields)):
self.framewithnotebook_statusbar.SetStatusText(framewithnotebook_statusbar_fields[i], i)
self.frame1_toolbar.SetToolBitmapSize((10, 10))
self.frame1_toolbar.Realize()
self.tree_ctrl_1.SetBackgroundColour(wx.Colour(143, 143, 188))
self.list_ctrl_1.SetBackgroundColour(wx.Colour(137, 137, 180))
self.list_ctrl_1.SetToolTipString("data properties")
self.window_1.SetMinSize((645, 237))
self.button_6.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD, 1, ""))
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyFrame1.__do_layout
sizer_1 = wx.BoxSizer(wx.VERTICAL)
self.window_1.SplitVertically(self.tree_ctrl_1, self.list_ctrl_1)
sizer_1.Add(self.window_1, 1, wx.EXPAND, 0)
sizer_1.Add(self.button_6, 0, 0, 0)
sizer_1.Add(self.static_line_2, 0, wx.EXPAND, 0)
sizer_1.Add(self.static_line_1, 0, wx.EXPAND, 0)
sizer_1.Add(self.text, 0, wx.EXPAND, 0)
self.SetSizer(sizer_1)
sizer_1.SetSizeHints(self)
self.Layout()
# end wxGlade
def replaceitem(self, event, parent, item, text):
try:
self.tree_ctrl_1.Delete(item)
except AttributeError:
            print 'nothing to replace'
item = self.tree_ctrl_1.AppendItem(parent, text)
def createworkspace(self):
try:
self.WORKSPACE
except AttributeError:
self.WORKSPACE = self.tree_ctrl_1.AddRoot('Workspace')
def openfile(self, event, batch='none'): # wxGlade: MyFrame1.<event_handler>
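        # 'batch' may carry a path to a 4D MEG data file; with the default
        # value 'none' the user is prompted with a file dialog instead.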
from pdf2py import data, pdf
print 'opening file'
try:
self.WORKSPACE
except AttributeError:
self.WORKSPACE = self.tree_ctrl_1.AddRoot('Workspace')
self.SESSION = self.tree_ctrl_1.AppendItem(self.WORKSPACE, 'Session')
self.DATA = self.tree_ctrl_1.AppendItem(self.SESSION, 'DataChannels')
try:
            self.MEG  # existence check; created in the except branch if missing
except AttributeError:
self.MEG = self.tree_ctrl_1.AppendItem(self.SESSION, 'MEG')
if batch != 'none':
self.megpath = path = batch
else:
dlg = wx.FileDialog(self, "Select a 4D MEG file", os.getcwd(), "", "*", wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
self.megpath = path = dlg.GetPath()
dlg.Destroy()
self.megmypath = mypath = os.path.basename(self.megpath)
self.filename = self.megmypath
self.SetStatusText("Data Loaded: %s" % self.megmypath,0)
try:
self.tree_ctrl_1.Delete(self.MEGDATA)
except AttributeError:
            print "can't delete item"
self.MEGDATA = self.tree_ctrl_1.AppendItem(self.MEG, 'data_block')#mypath)
        self.PROCESSES = self.tree_ctrl_1.AppendItem(self.MEG, 'PROCESSES')
self.list_ctrl_1.InsertColumn(0, 'Val')
self.list_ctrl_1.InsertColumn(0, 'Val')
self.list_ctrl_1.InsertColumn(0, 'Data')
self.list_ctrl_1.SetColumnWidth(0, 140)
self.list_ctrl_1.SetColumnWidth(1, 193)
self.list_ctrl_1.SetColumnWidth(2, 140)
self.datapdf = self.megpath
self.d = data.read(self.megpath)
try:
self.ch
except AttributeError:
print 'select channels first'
frameCHAN.Show()
self.p = pdf.read(self.datapdf)
try:
self.p.hs
self.HS = self.tree_ctrl_1.AppendItem(self.MEGDATA, 'headshape')
except AttributeError:
print 'no headshape to load'
try:
self.p.cfg
self.CFG = self.tree_ctrl_1.AppendItem(self.MEGDATA, 'config')
except AttributeError:
print 'no config file to load'
dataprefix = 'd'
self.HDR = self.tree_ctrl_1.AppendItem(self.MEGDATA, 'header')
self.tree_ctrl_1.AppendItem(self.HDR, 'epoch_data')
self.tree_ctrl_1.AppendItem(self.HDR, 'event_data')
self.tree_ctrl_1.AppendItem(self.HDR, 'header_data')
self.tree_ctrl_1.AppendItem(self.HDR, 'header_offset')
self.CR = self.tree_ctrl_1.AppendItem(self.HDR, 'channel_ref_data')
self.tree_ctrl_1.AppendItem(self.CR, 'attributes')
self.tree_ctrl_1.AppendItem(self.CR, 'chan_no')
self.tree_ctrl_1.AppendItem(self.CR, 'checksum')
self.tree_ctrl_1.AppendItem(self.CR, 'index')
self.tree_ctrl_1.AppendItem(self.CR, 'scale')
self.tree_ctrl_1.AppendItem(self.CR, 'valid_min_max')
self.tree_ctrl_1.AppendItem(self.CR, 'whatisit')
self.tree_ctrl_1.AppendItem(self.CR, 'yaxis_label')
self.tree_ctrl_1.AppendItem(self.CR, 'ymax')
self.tree_ctrl_1.AppendItem(self.CR, 'ymin')
def sessionhandler(self, event):
pdb = []; cdb = []
print self.tree_ctrl_1.GetChildrenCount(self.SESSION)
cookie = -1
if self.tree_ctrl_1.ItemHasChildren(self.SESSION) == True:
parent, cookie = self.tree_ctrl_1.GetFirstChild( self.SESSION )
for i in arange(self.tree_ctrl_1.GetChildrenCount(self.SESSION)):
if self.tree_ctrl_1.ItemHasChildren(parent) == False:
child, cookie = self.tree_ctrl_1.GetNextChild( parent, cookie )
print self.tree_ctrl_1.GetItemText(parent),'no child',self.tree_ctrl_1.GetItemText(child)
else:
child, cookie = self.tree_ctrl_1.GetFirstChild( parent )
print self.tree_ctrl_1.GetItemText(parent),'has child',self.tree_ctrl_1.GetItemText(child)
                parent = child  # descend one level regardless of whether the child has children
print pdb
print cdb
def openfilecheck(self, event):
try:
            self.datapdf  # existence check only
except AttributeError:
dlg = wx.MessageDialog(self, 'First you need to load MEG data file', 'MEG file error', wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
self.openfile(event)
datapdf = self.megpath
def closeapp(self, event): # wxGlade: MyFrame1.<event_handler>
print "Closing Pymeg"
self.Close(True)
def testevent(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `press' not implemented!"
self.button_1.SetLabel("Ouch!")
self.label_1.SetLabel("Ouch!")
event.Skip()
def quitapp(self, event): # wxGlade: MyFrame1.<event_handler>
print "Closing Application"
self.Close(True)
def plotdata(self, event, data=None): # wxGlade: MyFrame1.<event_handler>
print "plotting data", self.megmypath
from pylab import figure, plot, connect, show
def event_response(event):
print event.name
print event.xdata
self.SetStatusText("You selected time point: %s" % event.xdata)
            if data is None:
self.SetPyData(int(event.xdata))
else:
self.SetPyData(int(event.xdata), data=data)
#self.treebeginlabel('plotdata')
        if data is None:
            print 'no item selected; defaulting to the raw data block'
            data = self.d.data_block  # fall back so plot() is not handed None
        figure();plot(data);
cid = connect('button_press_event', event_response)
show()
def toolbartest(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `toolbartest' not implemented"
event.Skip()
def SetPyData(self, event, data=None): # wxGlade: MyFrame1.<event_handler>
self.x = event
try:
self.dataitems
except AttributeError:
self.dataitems = []
self.dataitems.append(data)
else:
self.dataitems.append(data)
#print 'dataitems',self.dataitems
def megcontour(self, event, data=None): # wxGlade: MyFrame1.<event_handler>
print "Event handler `megcontour'"
figure()
data2plot = eval('frame1.'+frame1.selitem)
print 'plotting', str(frame1.selitem)
try:
megcontour.display(data2plot[self.x,frame1.chantypeind == 'meg'], frame1.ch.chanlocs, subplot='on')
except AttributeError:
dlg = wx.MessageDialog(self, 'No Signal Channels Loaded', 'signal ch error', wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def GetPyData(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `GetPyData' not implemented"
event.Skip()
def treedelitem(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `treedelitem' "
sel = self.tree_ctrl_1.GetSelection()
self.tree_ctrl_1.Delete(sel)
event.Skip()
def treeitemact(self, event): # wxGlade: MyFrame1.<event_handler>
self.list_ctrl_1.DeleteAllItems()
self.itemID = self.tree_ctrl_1.GetSelection()
self.selitem = selitem = self.tree_ctrl_1.GetItemText(self.itemID)
self.rootitemID = self.tree_ctrl_1.GetRootItem()
self.rootitem = rootitem = self.tree_ctrl_1.GetItemText(self.rootitemID)
self.parentitemID = self.tree_ctrl_1.GetItemParent(self.itemID)
self.parentitem = parentitem = self.tree_ctrl_1.GetItemText(self.parentitemID)
self.SetStatusText("You selected data: %s" % selitem)
self.tree_ctrl_1.ExpandAllChildren(self.itemID)
print selitem, rootitem, parentitem
print 'extracting', selitem
from pdf2py import listparse
if parentitem == 'MEG':
data2pass = self.d
items = listparse.megdata(parentitem, selitem, data2pass)
if selitem == 'header':
data2pass = self.d.hdr
items = listparse.header(parentitem, selitem, data2pass)
if parentitem == 'channel_ref_data':
data2pass = self.d.hdr
items = listparse.channel_ref_data(parentitem, selitem, data2pass)
if selitem == 'epoch_data':
data2pass = self.d.hdr.epoch_data[0]
items = listparse.epoch_data(parentitem, selitem, data2pass)
if selitem == 'event_data':
data2pass = self.d.hdr.event_data
items = listparse.event_data(parentitem, selitem, data2pass)
if selitem == 'header_data':
data2pass = self.d.hdr.header_data
items = listparse.header_data(parentitem, selitem, data2pass)
if selitem == 'headshape':
data2pass = self.p.hs
items = listparse.headshape(parentitem, selitem, data2pass)
if selitem == 'config':
data2pass = self.p.cfg
items = listparse.config(parentitem, selitem, data2pass)
if selitem == 'mr_header':
data2pass = self.mr.header
items = listparse.mr_header(parentitem, selitem, data2pass)
if selitem == 'leadfields':
data2pass = self.lf.leadfield
items = listparse.leadfields(parentitem, selitem, data2pass)
if selitem == 'grid':
data2pass = self.grid
items = listparse.grid(parentitem, selitem, data2pass)
if selitem == 'timef':
data2pass = frametimef.t
items = listparse.timef(parentitem, selitem, data2pass)
if selitem == 'ICA':
data2pass = self.ica
items = listparse.ica(parentitem, selitem, data2pass)
self.tree_ctrl_1.AppendItem(self.ICA, 'Activation Matrix')
self.tree_ctrl_1.AppendItem(self.ICA, 'Weight Matrix')
if selitem == 'Gradiometer Channels':
data2pass = frameCHAN.meg
items = listparse.channels(parentitem, selitem, data2pass)
if selitem == 'Trigger Channels':
data2pass = frameCHAN.trig
items = listparse.channels(parentitem, selitem, data2pass)
if selitem == 'fftpow':
data2pass = frame1.fftpow
items = listparse.fftpow(parentitem, selitem, frame1.fftfreqs, frame1.fftpow)
if selitem == 'fit':
data2pass = frame1.fit
items = listparse.fit(parentitem, selitem, data2pass)
if selitem == 'Project_Utils':
frame1.projectstats()
if selitem == 'dipoles':
from pylab import figure,subplot,scatter,show, plot, legend
print 'test'
figure()
subplot(2,2,1);scatter(self.points[:,0], self.points[:,1])
subplot(2,2,2);scatter(self.points[:,0], self.points[:,2])
subplot(2,2,3);scatter(self.points[:,1], self.points[:,2])
subplot(2,2,4);plot(self.dips['params']);legend(self.dips['labels'])
show()
if selitem == 'DiskUsage_by_PID':
# make a square figure and axes
figure(1, figsize=(6,6))
ax = axes([0.1, 0.1, 0.8, 0.8])
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
fracs = [15,30,45, 10]
explode=(0, 0.05, 0, 0)
pie(fracs, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True)
title('Raining Hogs and Dogs', bbox={'facecolor':'0.8', 'pad':5})
show()
try:
for i in items:
index = self.list_ctrl_1.InsertStringItem(sys.maxint, i[0]) #num_items, selitem)
self.list_ctrl_1.SetStringItem(index, 1, i[1])
num_items = self.list_ctrl_1.GetItemCount()
except UnboundLocalError:
print 'no matching item'
def textentry(self, event): # wxGlade: MyFrame1.<event_handler>
dlg = wx.TextEntryDialog(self, 'Enter some text','Text Entry')
dlg.SetValue("Default")
if dlg.ShowModal() == wx.ID_OK:
self.SetStatusText('You entered: %s\n' % dlg.GetValue())
dlg.Destroy()
def buttonhand(self, event): # wxGlade: MyFrame1.<event_handler>
print self.tx.GetValue()
def loadchan(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `loadchan' "
try:
self.datapdf
except AttributeError:
print 'no data loaded yet'
            dlg = wx.MessageDialog(self, "I don't think you meant to do that... No Data Loaded Yet", 'data error', wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
frame1.openfile(event)
frameCHAN.Show()
def loadchan2(self, event):
print "Event handler `loadchan' "
def loadfunction(event):
print 'loading channels', chchoice.GetStringSelection()
pnl2.DestroyChildren()
try:
self.megpath
except AttributeError:
st3 = wx.StaticText(pnl2, 1, 'No Path to Data File')
self.openfile('null')
self.ch = channel.index(self.megpath, chchoice.GetStringSelection())
frm.SetStatusText('Number of Ch Loaded: '+str(self.ch.channelsortedlabels.shape[0]))
st3 = wx.TextCtrl(pnl2, -1, str(self.ch.channelsortedlabels),style=wx.TE_MULTILINE, size=(205,195))
self.tree_ctrl_1.AppendItem(self.MEGPROC,'Channels: '+str(chchoice.GetStringSelection()))
frm = wx.Frame(None, title="Load Channels")
hbox = wx.BoxSizer(wx.VERTICAL)
pnl1 = wx.Panel(frm, -1)
st = wx.StaticText(pnl1, -1, 'Select Channels:')
hbox.Add(st, 1)
chchoice = wx.Choice(pnl1, -1, choices=["meg", "ref", "trig"])
hbox.Add(chchoice,1)
loadbutton = wx.Button(pnl1, -1, 'Load')
hbox.Add(loadbutton, 1)
hbox2 = wx.BoxSizer(wx.VERTICAL)
hbox.Add(hbox2, 0, wx.LEFT | wx.TOP, 10)
pnl1.SetSizer(hbox)
pnl1.Fit()
pnl2 = wx.Panel(frm, -1, (150, 20), (210, 210), style=wx.SUNKEN_BORDER)
st3 = wx.StaticText(pnl2, -1, 'No Channels Loaded Yet')
frm.Show()
frm.Centre()
loadbutton.Bind(wx.EVT_BUTTON, loadfunction)
frm.CreateStatusBar()
def openmri(self, event): # wxGlade: MyFrame1.<event_handler>
from mri import mr2nifti, img
from numpy import ndarray
print "Event handler `openmri'"
try:
            self.WORKSPACE  # existence check; created in the except branch if missing
except AttributeError:
self.WORKSPACE = self.tree_ctrl_1.AddRoot('Workspace')
try:
            self.MRI  # existence check; created in the except branch if missing
except AttributeError:
self.MRI = self.tree_ctrl_1.AppendItem(self.WORKSPACE, 'MRI')
dlg = wx.FileDialog(self, "Select an MRI file", os.getcwd(), "", "*", wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
self.mripath = path = dlg.GetPath()
dlg.Destroy()
print path
self.mrimypath = mypath = os.path.basename(path)
self.SetStatusText("MRI Loaded: %s" % mypath,0)
self.MRDATA = self.tree_ctrl_1.AppendItem(self.MRI, mypath)
self.list_ctrl_1.InsertColumn(0, 'Data')
self.list_ctrl_1.InsertColumn(0, 'Value')
self.list_ctrl_1.SetColumnWidth(0, 140)
self.list_ctrl_1.SetColumnWidth(1, 193)
else:
dlg.Destroy()
return
if str(self.mripath).find('img') != -1: #img
self.mr = self.nim = img.read(str(self.mripath))
print 'reading analyze'
elif str(self.mripath).find('nii.gz') != -1: #nii.gz
print 'reading compressed nifti'
self.mr = self.nim = img.read(str(self.mripath))
print self.mr.pixdim, self.mr.getQForm()
elif str(self.mripath).find('nii') != -1: #nii
print 'reading nifti'
self.mr = self.nim = img.read(str(self.mripath))
else: #try dicom
from mri import pydicom
pathtodicom = os.path.dirname(str(self.mripath))
pre = os.path.basename(self.mripath)[0:2]
self.mr = pydicom.read(pathtodicom, prefix=pre)
text = "Select a save file name"
suffix='.nii.gz'; filter='*.nii.gz'
for i in self.mr.seqdict.keys():
dialog = wx.FileDialog(None, text, os.getcwd(), suffix, filter, wx.SAVE)
if dialog.ShowModal() == wx.ID_OK:
fn = (dialog.GetPaths())
print fn
mr2nifti.start(self.mr, str(fn[0]))
else:
                    print 'Nothing was chosen'
dialog.Destroy()
return
dataprefix = 'm'
self.HDR = self.tree_ctrl_1.AppendItem(self.MRDATA, 'mr_header')
try:
if type(eval(self.nim.description)[0]) == ndarray: #index points saved here. major hack
self.lpa = eval(self.nim.description)[0]
self.rpa = eval(self.nim.description)[1]
self.nas = eval(self.nim.description)[2]
dlg = wx.MessageDialog(self, 'Your file is already Coregistered', 'MRI file Info', wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
except SyntaxError:
dlg = wx.MessageDialog(self, 'Your file is not Coregistered', 'MRI file error', wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
#self.VoxDim = abs(sum(self.mr.getQForm()[0:3],axis=1)) #get vox dims from QForm matrix
self.VoxDim = self.mr.voxdim[::-1] #get vox dims from reversed voxdim
def openbrain(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `openbrain'"
try:
            self.WORKSPACE  # existence check; created in the except branch if missing
except AttributeError:
self.WORKSPACE = self.tree_ctrl_1.AddRoot('Workspace')
try:
            self.MRI  # existence check; created in the except branch if missing
except AttributeError:
self.MRI = self.tree_ctrl_1.AppendItem(self.WORKSPACE, 'MRI')
dlg = wx.FileDialog(self, "Select a Analyze Brain Extracted MRI file", os.getcwd(), "", "*.img", wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
self.brainpath = path = dlg.GetPath()
self.brainmypath = mypath = os.path.basename(path)
self.SetStatusText("MRI Brain Loaded: %s" % mypath,0)
self.MRIBrain = self.tree_ctrl_1.AppendItem(frame1.WORKSPACE, 'MRI_Brain')
self.MRDATA = self.tree_ctrl_1.AppendItem(self.MRIBrain, mypath)
#self.MRIPROC = self.tree_ctrl_1.AppendItem(self.MRI, 'Processes')
self.list_ctrl_1.InsertColumn(0, 'Data')
self.list_ctrl_1.InsertColumn(0, 'Value')
self.list_ctrl_1.SetColumnWidth(0, 140)
self.list_ctrl_1.SetColumnWidth(1, 153)
dlg.Destroy()
self.mr = self.brain = img.read(str(self.brainpath))
self.HDR = self.tree_ctrl_1.AppendItem(self.MRDATA, 'mr_header')
def coregistermri(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `coregistermri'"
try:
self.mripath
except AttributeError:
dlg = wx.MessageDialog(self, 'First you need to load MRI data file', 'MRI file error', wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
self.openmri(event)
#viewmri.display(self.nim)
frameCOREG.Show()
frameCOREG.loadmri(event)
def leadfieldgen(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `leadfieldgen'"
try:
            self.datapdf  # existence check only
except AttributeError:
dlg = wx.MessageDialog(self, 'First you need to load MEG data file', 'MEG file error', wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
self.openfile(event)
datapdf = self.megpath
try:
self.grid
except AttributeError:
dlg = wx.MessageDialog(self, 'No grid points detected. Do that first.', 'Grid error', wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
frame2.Show()
#frame2.gridcalc(event)
self.lf = leadfield.calc(self.datapdf, self.ch, self.grid)
#frame1.LF = frame1.tree_ctrl_1.AppendItem(frame1.MEGDATA, 'grid')
self.LF = self.tree_ctrl_1.AppendItem(self.PROCESSES, 'leadfields')
numlf = shape(self.lf.leadfield)
self.leadfields = self.lf.leadfield
print numlf
self.SetStatusText("LeadFields Calculated: %s" % unicode(numlf))
def sourcespacegrid(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `sourcespacegrid' "
frame2.Show()
def timef(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `timef'"
frametimef.Show()
def plotselected(self, event): # wxGlade: MyFrame1.<event_handler>
from pdf2py import headshape
from pylab import figure, show, plot, imshow, colorbar
from numpy import real
print "Event handler `plotselected'"
selitem = self.tree_ctrl_1.GetItemText(self.itemID)
try:
data2plot = eval('frame1.'+selitem)
except AttributeError:
print "can't find data matching that item"
if selitem == 'data_block':
frame1.numplots = frame1.d.numofepochs
frame1.timeaxis = frame1.d.wintime
self.plot2ddata(event)
if selitem == 'induced_power':
t = self.t
figure();imshow(real(t.P), \
extent=(int(t.timevals[0]), int(t.timevals[-1]), int(t.freqrange[1]), \
int(t.freqrange[0])) \
,aspect='auto')
colorbar()
show()
if selitem == 'induced_powerlog':
t = self.t
figure();imshow(real(t.Plog), \
extent=(int(t.timevals[0]), int(t.timevals[-1]), int(t.freqrange[1]), \
int(t.freqrange[0])) \
,aspect='auto')
colorbar()
show()
if selitem == 'phaselocking_factor':
t = self.t
figure();imshow(abs(t.itcvals), \
extent=(int(t.timevals[0]), int(t.timevals[-1]), int(t.freqrange[1]), \
int(t.freqrange[0])) \
,aspect='auto')
colorbar()
show()
if selitem == 'power_of_continious_data':
t = self.t
figure();imshow(abs(t.tmpallallepochs), \
extent=(int(t.timevals[0]), int(t.timevals[-1]), int(t.freqrange[1]), \
int(t.freqrange[0])) \
,aspect='auto')
colorbar()
show()
if selitem == 'ICA':
figure();plot(self.ica['A'].T);show();
if selitem == 'headshape':
self.plotheadshape(event)
if selitem == 'grid':
plotvtk.display(self.grid)
if selitem == 'Activation Matrix':
figure()
for i in arange(shape(self.ica['A'])[0]):
plot(self.ica['A'][i], label=str(i))
legend()
show()
if selitem == 'Weight Matrix':
megcontour.display(self.ica['S'].T, self.ch.chanlocs, subplot='on')
if selitem == 'projection':
#self.plot2ddata(event)
figure();plot(self.projection);show()
if selitem == 'avg':
#self.plotdata(event, frame1.avg)
self.data2plot = frame1.avg
self.plot2ddata(event)
if selitem == 'epoch':
self.data2plot = frame1.data_blockepoch
frame1.numplots = frameCUT.numofepochs
frame1.timeaxis = frameCUT.timeaxis
self.plot2ddata(frame1.epoch)
if selitem == 'Gradiometer Channels':
self.plotsensors(event)
if selitem == 'Gradiometer Data':
self.plotdata(event, self.d.data_block[:,self.chantypeind == 'meg'])
if selitem == 'Trigger Data':
self.plotdata(event, self.d.data_block[:,self.chantypeind == 'trig'])
if selitem == 'fftpow':
frame1.timeaxis = frame1.fftfreqs
self.plot2ddata(event)
frame1.numplots = 1
frame2DPLOT.checkbox_10.Enable(True)
#figure();plot(frame1.fftfreqs, frame1.fftpow);show()
#figure();plot(self.ica['A'].T);show();
if selitem == 'offset':
frame1.numplots = frame1.d.numofepochs
frame1.timeaxis = frame1.d.wintime
self.plot2ddata(event)
if selitem == 'leadfields':
print 'how the hell do you expect me to plot this?'
if selitem == 'fit':
plotvtk.display(self.fit)
if selitem == 'fitimage':
viewmri.display(frame1.fitimage[0], colormap=cm.hot)
def ica(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `ica' "
frameICA.Show()
def weightfit(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `weightfit' "
frameWEIGHT.Show()
def plotheadshape(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `plotheadshape' "
from meg import plotvtk
plotvtk.display(self.p.hs.hs_point)
def plotsensors(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `plotsensors' "
from meg import sensors, plotvtk
s=sensors.locations(self.datapdf)
plotvtk.display(s.megchlpos)
def plotindex(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `plotindex' "
from meg import plotvtk
from numpy import hstack
ind=hstack([self.p.hs.index_lpa,self.p.hs.index_rpa,self.p.hs.index_nasion]).reshape(3,3)
plotvtk.display(ind)
def plothssensind(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `plothssensind' "
from meg import sensors, plotvtk
from numpy import hstack
ind=hstack([self.p.hs.index_lpa,self.p.hs.index_rpa,self.p.hs.index_nasion]).reshape(3,3)
s=sensors.locations(self.datapdf)
plotvtk.display(self.p.hs.hs_point, s.megchlpos, ind)
def plothsinddips(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `plothsinddips'"
try:
ind=hstack([self.p.hs.index_lpa,self.p.hs.index_rpa,self.p.hs.index_nasion]).reshape(3,3)
except AttributeError:
            print 'prerequisite headshape file not found; load it first'
dlg = wx.MessageDialog(self, 'First you need to load headshape data file', 'hs file error', wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
self.loadhs(event)
ind=hstack([self.p.hs.index_lpa,self.p.hs.index_rpa,self.p.hs.index_nasion]).reshape(3,3)
s = self.points/1000
plotvtk.display(self.p.hs.hs_point, s, ind)
def mri2D(self, event): # wxGlade: MyFrame1.<event_handler>
from mri import viewmri
print "Event handler `mri2D' "
try:
viewmri.display(self.nim)
except AttributeError:
dlg = wx.MessageDialog(self, 'First you need to load MRI data file', 'MRI file error', wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
self.openmri(event)
viewmri.display(self.nim)
def mri3D(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `mri3D'"
try:
viewmri.display(self.nim)
except AttributeError:
dlg = wx.MessageDialog(self, 'First you need to load MRI data file', 'MRI file error', wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
self.openmri(event)
mr2vtk.convert(self.nim.data, path='~/', filename='tmpmrvtk')
vtkview.vtkrender(d1=os.getenv('HOME')+'/tmpmrvtk.vtk')
def newsession(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `newsession'"
try:
self.WORKSPACE
except AttributeError:
self.WORKSPACE = self.tree_ctrl_1.AddRoot('Workspace')
try:
self.SESSION
self.id = 1
except AttributeError:
self.id = 1
self.SESSION = self.tree_ctrl_1.AppendItem(self.WORKSPACE, 'Session'+str(self.id))
else:
self.id = self.id+1
self.SESSION = self.tree_ctrl_1.AppendItem(self.WORKSPACE, 'Session'+str(self.id))
def saveworkspace(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `saveworkspace' "
self.sessionhandler(event)
def sourceprojection(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `sourceprojection' not implemented"
frameSP.Show()
def cutdata(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `averagedata' not implemented"
frameCUT.Show()
def getevents(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `getevents' "
frameTRIG.Show()
def offsetcorrect(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `offsetcorrect'"
try:
eval('self.'+frame1.selitem)
except AttributeError:
print 'item doesnt exist'
return
frame1.OFFSET = frame1.tree_ctrl_1.AppendItem(frame1.PROCESSES, 'offset')
frame1.offset = offset.correct(eval('self.'+frame1.selitem))
frame1.offsetepochs = frame1.data_blockepochs
def epochdata(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `epochdata' "
#frameEPOCH.Show()
frameCUT.Show()
def plot2ddata(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `plot2ddata' not implemented"
frame2DPLOT.Show()
def recordmacro(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `recordmacro' not implemented"
event.Skip()
def playbackmacro(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `playbackmacro' not implemented"
#/home/danc/vault/decrypted/programming/python/pymeg/gui/e,rfhp1.0Hz,COH
def listitemselected(self, event): # wxGlade: MyFrame1.<event_handler>
self.listind = self.list_ctrl_1.GetFocusedItem()
self.listitem = self.list_ctrl_1.GetItem(self.listind,1).GetText()
self.listdata = self.list_ctrl_1.GetItem(self.listind,0).GetText()
self.SetStatusText("You selected value: %s" % self.listitem, 1)
self.SetStatusText("You selected value: %s" % self.listdata, 2)
self.listitem = str(self.listitem)
self.listdata = str(self.listdata)
def fft(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `fft' "
frameFFT.Show()
#sourcesolution2img.build
def badch(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `badch' "
frameBADCH.Show()
def batch(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `batch' "
#self.openfile(event, batch='/home/danc/vault/decrypted/programming/python/pymeg/gui/data/0868ball/ballbounc3/10%01%08@11:08/1/e,rfDC')
#self.openfile(event, batch='/home/danc/vault/decrypted/programming/python/pymeg/gui/e,rfhp1.0Hz,COH')
#self.openfile(event, batch='/home/danc/python/data/1001/e,rfhp1.0Hz,COH')
self.openfile(event, batch='/home/danc/data/0611/0611piez/e,rfhp1.0Hz,COH')
#self.openfile(event, batch='/home/danc/vault/decrypted/programming/python/pymeg/gui/data/0888nback/c,rfhp0.1Hz')
frameCHAN.signalbutton.SetValue(True)
frameCHAN.getchind(event)
frameCHAN.loadchannels(event)
frame1.selitem = 'data_block'
frameFFT.Show()
frameFFT.radio_box_1.SetSelection(1)
frameFFT.getselected(event)
frameFFT.text_ctrl_11.SetValue('5')
frameFFT.runfft(event)
#frame2DPLOT.Show()
return
frameBADCH.calcbadch(event)
frameBADCH.Show()
frameBADCH.removechsel(event)
def psel(self, event): # wxGlade: MyFrame1.<event_handler>
subprocess.call('psel')
def ape(self, event): # wxGlade: MyFrame1.<event_handler>
subprocess.call('ape')
def tapwin(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `tapwin' "
frameTAPWIN.Show()
def projectutils(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `projectutils' not implemented"
try:
self.WORKSPACE
except AttributeError:
self.WORKSPACE = self.tree_ctrl_1.AddRoot('Workspace')
self.PROJECT = self.tree_ctrl_1.AppendItem(self.WORKSPACE, 'Project_Utils')
self.DUPID = self.tree_ctrl_1.AppendItem(self.PROJECT, 'DiskUsage_by_PID')
self.list_ctrl_1.InsertColumn(0, 'Val')
self.list_ctrl_1.InsertColumn(0, 'Val')
self.list_ctrl_1.InsertColumn(0, 'Data')
self.list_ctrl_1.SetColumnWidth(0, 140)
self.list_ctrl_1.SetColumnWidth(1, 193)
self.list_ctrl_1.SetColumnWidth(2, 140)
def projectstats(self):
statdict = {}
statdict['project'] = 'test'
stage = os.environ['STAGE']
p = subprocess.Popen('du -s '+stage, shell=True, stdout=subprocess.PIPE)
out = p.stdout.readlines()
statdict['Disk Usage'] = str(int(out[0].split('\t')[0])/1000.0)+'MB'
statdict['Disk Allocated'] = '10GB'
statdict['Free Space'] = str(1-(int(out[0].split('\t')[0])/1000.0)/10000)+'%'
projectdu()
try:
for i in statdict:
index = self.list_ctrl_1.InsertStringItem(sys.maxint, i) #num_items, selitem)
self.list_ctrl_1.SetStringItem(index, 1, statdict[i])
num_items = self.list_ctrl_1.GetItemCount()
except UnboundLocalError:
print 'no matching item'
def loaddipolereport(self, event): # wxGlade: MyFrame1.<event_handler>
print "use load Dipole instead`loaddipolereport' "
def loaddipoles(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `loaddipoles'"
from pdf2py import readwrite,lA2array
self.createworkspace()
from meg import dipole
#datafile = file.open()
dlg = wx.FileDialog(self, "Select a Dipole file(s)", os.getcwd(), "",wildcard = "Dipole File (*lA)|*lA|Dipole Report(*.drf)|*.drf")
#dlg = wx.FileDialog(self, "Select a Dipole file(s)", os.getcwd(), "", "*lA|*txt", wx.MULTIPLE)
datafile = []
if dlg.ShowModal() == wx.ID_OK:
dlg.Destroy()
for i in range(0, len(dlg.GetPaths())):
datafile.append(str(dlg.GetPaths()[i]))
print 'Selected:', datafile
else:
print 'Nothing was selected.'
dlg.Destroy()
self.points = array([])
self.dips = {}
self.dips['params'] = array([])
self.gof = array([])
for i in datafile:
if datafile[0].split(',')[-1] == 'lA': #pdf
lA = lA2array.calc(i)
self.points = append(self.points, lA.dips[:,1:4]*1000) #xyz from meters to mm
else:
lA = dipole.parsereport(i)
self.points = append(self.points, lA.dips[:,1:4]*10) #xyz from cm to mm
self.dips['params'] = append(self.dips['params'], lA.dips[:,:]) #xyz in
sz = size(lA.dips[:,:],1)
print shape(self.points)
gof_ind = lA.labels.index('GoF')
self.gof = append(self.gof, lA.dips[:,gof_ind])
self.points = self.points.reshape(len(self.points)/3,3)
self.dips['params'] = self.dips['params'].reshape(len(self.dips['params'])/sz,sz)
self.dips['labels'] = lA.labels
readwrite.writedata(self.dips, os.path.dirname(i)+'/'+'ALLDIPS')
self.DIPOLES = self.tree_ctrl_1.AppendItem(self.WORKSPACE, 'dipoles')
def dipoledensity(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `dipoledensity'"
try: self.points
except AttributeError:
print 'prerequisite dipole file not found'
self.loaddipoles(event)
try: self.mr
except AttributeError:
print 'prerequisite mri file not found'
self.openmri(event)
try:
self.lpa
except AttributeError:
dlg = wx.MessageDialog(self, 'Your file is not Coregistered', 'MRI file error', wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
#dlg.Destroy()
frameCOREG.Show()
frameCOREG.loadmri(event)
frameDENSITY.lpa_loc.SetLabel(str(frame1.lpa))
frameDENSITY.rpa_loc.SetLabel(str(frame1.rpa))
frameDENSITY.nas_loc.SetLabel(str(frame1.nas))
frameDENSITY.numdipolesval.SetLabel(str(size(frame1.points,0)))
frameDENSITY.Show()
def loadhs(self, event): # wxGlade: MyFrame1.<event_handler>
print "Event handler `loadhs'"
dlg = wx.FileDialog(self, "Select a HS file", os.getcwd(), "", "*", wx.OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            hsfile = dlg.GetPath()
            self.p = headshape.read(str(hsfile))
            self.p.hs = self.p
        dlg.Destroy()
# end of class MyFrame1
if __name__ == "__main__":
app = wx.PySimpleApp(0)
wx.InitAllImageHandlers()
frame1 = MyFrame1(None, -1, "")
app.SetTopWindow(frame1)
frame1.Show()
frameGUAGE = Guage(None, -1, "")
#frameGUAGE.Show()
frame2 = MyFrameGrid(None, -1, "")
frametimef = MyFrame(None, -1, "")
frameICA = MyFrameICA(None, -1, "")
frameWEIGHT = MyFrameWEIGHTFIT(None, -1, "")
frameCOREG = MyFrameCOREG(None, -1, "")
frameSP = MyFrameSP(None, -1, "")
frameCH = MyFrameCH(None, -1, "")
frameTRIG = MyFrameTRIG(None, -1, "")
frameCUT = MyFrameCUT(None, -1, "")
frameEPOCH = MyFrameEPOCH(None, -1, "")
frameCHAN = MyFrameCHAN(None, -1, "")
frame2DPLOT = MyFrame2DPLOT(None, -1, "")
frameFFT = MyFrameFFT(None, -1, "")
frameBADCH = MyFrameBADCH(None, -1, "")
frameTAPWIN = TAPWIN(None, -1, "")
frameDENSITY = MyFrameDENSITY(None, -1, "")
#frameDENSITY.Show()
#frame1.batch(None)
app.MainLoop()
|
badbytes/pymeg
|
gui/wx/PyMEG.py
|
Python
|
gpl-3.0
| 150,866
|
[
"VTK"
] |
0472c48e61ea04f49de83e84068a0f49d58c80f8d2edb1104c057a0569869c40
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import absolute_import
from numpy.testing import assert_
import functools
from MDAnalysis.tests.datafiles import (
TPR,
TPR400, TPR402, TPR403, TPR404, TPR405, TPR406, TPR407,
TPR450, TPR451, TPR452, TPR453, TPR454, TPR455, TPR455Double,
TPR460, TPR461, TPR502, TPR504, TPR505, TPR510, TPR510_bonded,
TPR2016, TPR2016_bonded,
)
from MDAnalysisTests.topology.base import ParserBase
import MDAnalysis.topology.TPRParser
class TPRAttrs(ParserBase):
parser = MDAnalysis.topology.TPRParser.TPRParser
expected_attrs = ['ids', 'names', 'resids', 'resnames']
guessed_attrs = ['elements']
class TestTPR(TPRAttrs):
"""
this test the data/adk_oplsaa.tpr which is of tpx version 58
"""
filename = TPR
expected_n_atoms = 47681
expected_n_residues = 11302
expected_n_segments = 3
# The following classes test the same system grompped by different versions of gromacs.
# FORMAT: TPRABC, where the numbers ABC indicate the version of gromacs that
# generated the corresponding tpr file.
class TPRBase(TPRAttrs):
expected_n_atoms = 2263
expected_n_residues = 230
expected_n_segments = 2
# All these classes should be generated in a loop. Yet, nose test generation
# seems to work only with functions, and not with classes.
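# A sketch of the loop-based generation alluded to above (kept as a comment,
# since nose would not collect classes created this way):
# for _version, _tpr in (('400', TPR400), ('402', TPR402)):
#     globals()['TestTPR' + _version] = type(
#         'TestTPR' + _version, (TPRBase,), {'filename': _tpr})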
class TestTPR400(TPRBase):
filename = TPR400
class TestTPR402(TPRBase):
filename = TPR402
class TestTPR403(TPRBase):
filename = TPR403
class TestTPR404(TPRBase):
filename = TPR404
class TestTPR405(TPRBase):
filename = TPR405
class TestTPR406(TPRBase):
filename = TPR406
class TestTPR407(TPRBase):
filename = TPR407
class TestTPR450(TPRBase):
filename = TPR450
class TestTPR451(TPRBase):
filename = TPR451
class TestTPR452(TPRBase):
filename = TPR452
class TestTPR453(TPRBase):
filename = TPR453
class TestTPR454(TPRBase):
filename = TPR454
class TestTPR455(TPRBase):
filename = TPR455
class TPRDouble(TPRAttrs):
expected_n_atoms = 21692
expected_n_residues = 4352
expected_n_segments = 7
class TestTPR455Double(TPRDouble):
filename = TPR455Double
class TPR46xBase(TPRAttrs):
expected_n_atoms = 44052
expected_n_residues = 10712
expected_n_segments = 8
class TestTPR460(TPR46xBase):
filename = TPR460
class TestTPR461(TPR46xBase):
filename = TPR461
class TestTPR502(TPRBase):
filename = TPR502
class TestTPR504(TPRBase):
filename = TPR504
class TestTPR505(TPRBase):
filename = TPR505
class TestTPR510(TPRBase):
filename = TPR510
class TestTPR2016(TPRBase):  # 'Test' prefix so nose collects it and the TPR2016 data file constant is not shadowed
    filename = TPR2016
def _test_is_in_topology(name, elements, topology_path, topology_section):
"""
Test if an interaction appears as expected in the topology
"""
universe = MDAnalysis.Universe(topology_path)
parser = MDAnalysis.topology.TPRParser.TPRParser(topology_path)
top = parser.parse()
for element in elements:
assert_(element in getattr(top, topology_section).values,
'Interaction type "{}" not found'.format(name))
def test_all_bonds():
"""Test that all bond types are parsed as expected"""
topologies = (TPR510_bonded, TPR2016_bonded)
bonds = {'BONDS':[(0, 1)], 'G96BONDS':[(1, 2)], 'MORSE':[(2, 3)],
'CUBICBONDS':[(3, 4)], 'CONNBONDS':[(4, 5)], 'HARMONIC':[(5, 6)],
'FENEBONDS':[(6, 7)], 'RESTRAINTPOT':[(7, 8)],
'TABBONDS':[(8, 9)], 'TABBONDSNC':[(9, 10)],
'CONSTR':[(10, 11)], 'CONSTRNC':[(11, 12)],}
bond_type_in_topology = functools.partial(_test_is_in_topology,
topology_section='bonds')
for topology in topologies:
for bond_type, elements in bonds.items():
yield (bond_type_in_topology, bond_type, elements, topology)
def test_all_angles():
topologies = (TPR510_bonded, TPR2016_bonded)
angles = {'ANGLES':[(0, 1, 2)], 'G96ANGLES':[(1, 2, 3)],
'CROSS_BOND_BOND':[(2, 3, 4)], 'CROSS_BOND_ANGLE':[(3, 4, 5)],
'UREY_BRADLEY':[(4, 5, 6)], 'QANGLES':[(5, 6, 7)],
'RESTRANGLES':[(6, 7, 8)], 'TABANGLES':[(7, 8, 9)],}
angle_type_in_topology = functools.partial(_test_is_in_topology,
topology_section='angles')
for topology in topologies:
for angle_type, elements in angles.items():
yield (angle_type_in_topology, angle_type, elements, topology)
def test_all_dihedrals():
topologies = (TPR510_bonded, TPR2016_bonded)
dihs = {'PDIHS':[(0, 1, 2, 3), (1, 2, 3, 4), (7, 8, 9, 10)],
'RBDIHS':[(4, 5, 6, 7)], 'RESTRDIHS':[(8, 9, 10, 11)],
'CBTDIHS':[(9, 10, 11, 12)], 'FOURDIHS':[(6, 7, 8, 9)],
'TABDIHS':[(10, 11, 12, 13)],}
dih_type_in_topology = functools.partial(_test_is_in_topology,
topology_section='dihedrals')
for topology in topologies:
for dih_type, elements in dihs.items():
yield (dih_type_in_topology, dih_type, elements, topology)
def test_all_impropers():
topologies = (TPR510_bonded, TPR2016_bonded)
imprs = {'IDIHS':[(2, 3, 4, 5), (3, 4, 5, 6)], 'PIDIHS':[(5, 6, 7, 8)]}
impr_type_in_topology = functools.partial(_test_is_in_topology,
topology_section='impropers')
for topology in topologies:
for impr_type, elements in imprs.items():
yield (impr_type_in_topology, impr_type, elements, topology)
|
kain88-de/mdanalysis
|
testsuite/MDAnalysisTests/topology/test_tprparser.py
|
Python
|
gpl-2.0
| 6,550
|
[
"Gromacs",
"MDAnalysis"
] |
5ec39900b18981f5c82f557d19fbfb33bd50cc9e08c19c982dfc2fe65271be98
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
This module contains tests for the FoilPresenter song importer.
"""
import os
from unittest import TestCase
from mock import patch, MagicMock
from openlp.plugins.songs.lib import VerseType
from openlp.plugins.songs.lib.foilpresenterimport import FoilPresenter
TEST_PATH = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '..', '..', '..', 'resources', 'foilpresentersongs'))
class TestFoilPresenter(TestCase):
"""
Test the functions in the :mod:`foilpresenterimport` module.
"""
    #TODO: The following methods still need tests written:
# xml_to_song
# _child
# _process_authors
# _process_cclinumber
# _process_comments
# _process_copyright
# _process_lyrics
# _process_songbooks
# _process_titles
# _process_topics
def setUp(self):
self.child_patcher = patch('openlp.plugins.songs.lib.foilpresenterimport.FoilPresenter._child')
self.clean_song_patcher = patch('openlp.plugins.songs.lib.foilpresenterimport.clean_song')
self.objectify_patcher = patch('openlp.plugins.songs.lib.foilpresenterimport.objectify')
self.process_authors_patcher = \
patch('openlp.plugins.songs.lib.foilpresenterimport.FoilPresenter._process_authors')
self.process_cclinumber_patcher = \
patch('openlp.plugins.songs.lib.foilpresenterimport.FoilPresenter._process_cclinumber')
self.process_comments_patcher = \
patch('openlp.plugins.songs.lib.foilpresenterimport.FoilPresenter._process_comments')
self.process_lyrics_patcher = \
patch('openlp.plugins.songs.lib.foilpresenterimport.FoilPresenter._process_lyrics')
self.process_songbooks_patcher = \
patch('openlp.plugins.songs.lib.foilpresenterimport.FoilPresenter._process_songbooks')
self.process_titles_patcher = \
patch('openlp.plugins.songs.lib.foilpresenterimport.FoilPresenter._process_titles')
self.process_topics_patcher = \
patch('openlp.plugins.songs.lib.foilpresenterimport.FoilPresenter._process_topics')
self.re_patcher = patch('openlp.plugins.songs.lib.foilpresenterimport.re')
self.song_patcher = patch('openlp.plugins.songs.lib.foilpresenterimport.Song')
self.song_xml_patcher = patch('openlp.plugins.songs.lib.foilpresenterimport.SongXML')
self.translate_patcher = patch('openlp.plugins.songs.lib.foilpresenterimport.translate')
self.mocked_child = self.child_patcher.start()
self.mocked_clean_song = self.clean_song_patcher.start()
self.mocked_objectify = self.objectify_patcher.start()
self.mocked_process_authors = self.process_authors_patcher.start()
self.mocked_process_cclinumber = self.process_cclinumber_patcher.start()
self.mocked_process_comments = self.process_comments_patcher.start()
self.mocked_process_lyrics = self.process_lyrics_patcher.start()
self.mocked_process_songbooks = self.process_songbooks_patcher.start()
self.mocked_process_titles = self.process_titles_patcher.start()
self.mocked_process_topics = self.process_topics_patcher.start()
self.mocked_re = self.re_patcher.start()
self.mocked_song = self.song_patcher.start()
self.mocked_song_xml = self.song_xml_patcher.start()
self.mocked_translate = self.translate_patcher.start()
self.mocked_child.return_value = 'Element Text'
self.mocked_translate.return_value = 'Translated String'
self.mocked_manager = MagicMock()
self.mocked_song_import = MagicMock()
def tearDown(self):
self.child_patcher.stop()
self.clean_song_patcher.stop()
self.objectify_patcher.stop()
self.process_authors_patcher.stop()
self.process_cclinumber_patcher.stop()
self.process_comments_patcher.stop()
self.process_lyrics_patcher.stop()
self.process_songbooks_patcher.stop()
self.process_titles_patcher.stop()
self.process_topics_patcher.stop()
self.re_patcher.stop()
self.song_patcher.stop()
self.song_xml_patcher.stop()
self.translate_patcher.stop()
def create_foil_presenter_test(self):
"""
Test creating an instance of the FoilPresenter class
"""
# GIVEN: A mocked out "manager" and "SongImport" instance
mocked_manager = MagicMock()
mocked_song_import = MagicMock()
        # WHEN: A FoilPresenter instance is created
foil_presenter_instance = FoilPresenter(mocked_manager, mocked_song_import)
# THEN: The instance should not be None
self.assertIsNotNone(foil_presenter_instance, 'FoilPresenter instance should not be none')
def no_xml_test(self):
"""
        Test calling xml_to_song without the xml argument
        """
        # GIVEN: A mocked out "manager" and "SongImport" as well as a foil_presenter instance
mocked_manager = MagicMock()
mocked_song_import = MagicMock()
foil_presenter_instance = FoilPresenter(mocked_manager, mocked_song_import)
        # WHEN: xml_to_song is called without a valid argument
for arg in [None, False, 0, '']:
result = foil_presenter_instance.xml_to_song(arg)
            # THEN: xml_to_song should return None
self.assertEqual(result, None, 'xml_to_song should return None when called with %s' % arg)
def encoding_declaration_removal_test(self):
"""
Test that the encoding declaration is removed
"""
# GIVEN: A reset mocked out re and an instance of foil_presenter
        self.mocked_re.reset_mock()
foil_presenter_instance = FoilPresenter(self.mocked_manager, self.mocked_song_import)
# WHEN: xml_to_song is called with a string with an xml encoding declaration
foil_presenter_instance.xml_to_song('<?xml version="1.0" encoding="UTF-8"?>\n<foilpresenterfolie>')
# THEN: the xml encoding declaration should have been stripped
self.mocked_re.compile.sub.called_with('\n<foilpresenterfolie>')
def no_encoding_declaration_test(self):
"""
        Check that the xml string is left intact when no encoding declaration is made
"""
# GIVEN: A reset mocked out re and an instance of foil_presenter
        self.mocked_re.reset_mock()
foil_presenter_instance = FoilPresenter(self.mocked_manager, self.mocked_song_import)
# WHEN: xml_to_song is called with a string without an xml encoding declaration
foil_presenter_instance.xml_to_song('<foilpresenterfolie>')
        # THEN: the string should have been left intact
self.mocked_re.compile.sub.called_with('<foilpresenterfolie>')
def process_lyrics_no_verses_test(self):
"""
Test that _process_lyrics handles song files that have no verses.
"""
# GIVEN: A mocked foilpresenterfolie with no attribute strophe, a mocked song and a
# foil presenter instance
self.process_lyrics_patcher.stop()
        self.mocked_song_xml.reset_mock()
mock_foilpresenterfolie = MagicMock()
del mock_foilpresenterfolie.strophen.strophe
mocked_song = MagicMock()
foil_presenter_instance = FoilPresenter(self.mocked_manager, self.mocked_song_import)
# WHEN: _process_lyrics is called
result = foil_presenter_instance._process_lyrics(mock_foilpresenterfolie, mocked_song)
# THEN: _process_lyrics should return None and the song_import logError method should have been called once
self.assertIsNone(result)
self.mocked_song_import.logError.assert_called_once_with('Element Text', 'Translated String')
self.process_lyrics_patcher.start()
|
marmyshev/item_title
|
tests/functional/openlp_plugins/songs/test_foilpresenterimport.py
|
Python
|
gpl-2.0
| 9,878
|
[
"Brian"
] |
1a1b36c85bc2dc4d65d156c1c673cbacd652348ace998aeaebc29550a1c4e423
|
########################################################################
# $HeadURL$
########################################################################
""" DIRAC FileCatalog plugin class to manage file metadata. This contains only
non-indexed metadata for the moment.
"""
__RCSID__ = "$Id$"
# import time
import types
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.Utilities import queryTime
from DIRAC.Core.Utilities.List import intListToString
FILE_STANDARD_METAKEYS = [ 'SE', 'CreationDate', 'ModificationDate', 'LastAccessDate', 'User',
                           'Group', 'Path', 'Name' ]
class FileMetadata:
_tables = {}
_tables["FC_FileMeta"] = { "Fields": {
"FileID": "INTEGER NOT NULL",
"MetaKey": "VARCHAR(31) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT 'Noname'",
"MetaValue": "VARCHAR(31) NOT NULL DEFAULT 'Noname'"
},
"UniqueIndexes": { "FileID": ["MetaKey"] }
}
_tables["FC_FileMetaFields"] = { "Fields": {
"MetaID": "INT AUTO_INCREMENT",
"MetaName": "VARCHAR(64) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL",
"MetaType": "VARCHAR(128) NOT NULL"
},
"PrimaryKey": "MetaID"
}
def __init__(self,database = None):
self.db = None
if database is not None:
self.setDatabase( database )
def setDatabase( self, database ):
self.db = database
result = self.db._createTables( self._tables )
if not result['OK']:
gLogger.error( "Failed to create tables", str( self._tables.keys() ) )
elif result['Value']:
gLogger.info( "Tables created: %s" % ','.join( result['Value'] ) )
return result
##############################################################################
#
# Manage Metadata fields
#
##############################################################################
def addMetadataField( self, pname, ptype, credDict ):
""" Add a new metadata parameter to the Metadata Database.
pname - parameter name, ptype - parameter type in the MySQL notation
"""
if pname in FILE_STANDARD_METAKEYS:
return S_ERROR( 'Illegal use of reserved metafield name' )
result = self.db.dmeta.getMetadataFields( credDict )
if not result['OK']:
return result
if pname in result['Value'].keys():
return S_ERROR( 'The metadata %s is already defined for Directories' % pname )
result = self.getFileMetadataFields( credDict )
if not result['OK']:
return result
if pname in result['Value'].keys():
if ptype.lower() == result['Value'][pname].lower():
return S_OK( 'Already exists' )
else:
return S_ERROR( 'Attempt to add an existing metadata with different type: %s/%s' %
( ptype, result['Value'][pname] ) )
valueType = ptype
if ptype == "MetaSet":
valueType = "VARCHAR(64)"
req = "CREATE TABLE FC_FileMeta_%s ( FileID INTEGER NOT NULL, Value %s, PRIMARY KEY (FileID), INDEX (Value) )" \
% ( pname, valueType )
result = self.db._query( req )
if not result['OK']:
return result
result = self.db._insert( 'FC_FileMetaFields', ['MetaName', 'MetaType'], [pname, ptype] )
if not result['OK']:
return result
metadataID = result['lastRowId']
result = self.__transformMetaParameterToData( pname )
if not result['OK']:
return result
return S_OK( "Added new metadata: %d" % metadataID )
def deleteMetadataField( self, pname, credDict ):
""" Remove metadata field
"""
req = "DROP TABLE FC_FileMeta_%s" % pname
result = self.db._update( req )
error = ''
if not result['OK']:
error = result["Message"]
req = "DELETE FROM FC_FileMetaFields WHERE MetaName='%s'" % pname
result = self.db._update( req )
if not result['OK']:
if error:
result["Message"] = error + "; " + result["Message"]
return result
def getFileMetadataFields( self, credDict ):
""" Get all the defined metadata fields
"""
req = "SELECT MetaName,MetaType FROM FC_FileMetaFields"
result = self.db._query( req )
if not result['OK']:
return result
metaDict = {}
for row in result['Value']:
metaDict[row[0]] = row[1]
return S_OK( metaDict )
###########################################################
#
# Set and get metadata for files
#
###########################################################
def setMetadata( self, path, metadict, credDict ):
""" Set the value of a given metadata field for the the given directory path
"""
result = self.getFileMetadataFields( credDict )
if not result['OK']:
return result
metaFields = result['Value']
result = self.db.fileManager._findFiles( [path] )
if not result['OK']:
return result
if result['Value']['Successful']:
fileID = result['Value']['Successful'][path]['FileID']
else:
return S_ERROR( 'File %s not found' % path )
for metaName, metaValue in metadict.items():
if not metaName in metaFields:
result = self.__setFileMetaParameter( fileID, metaName, metaValue, credDict )
else:
result = self.db._insert( 'FC_FileMeta_%s' % metaName, ['FileID', 'Value'], [fileID, metaValue] )
if not result['OK']:
if result['Message'].find( 'Duplicate' ) != -1:
req = "UPDATE FC_FileMeta_%s SET Value='%s' WHERE FileID=%d" % ( metaName, metaValue, fileID )
result = self.db._update( req )
if not result['OK']:
return result
else:
return result
return S_OK()
def removeMetadata( self, path, metadata, credDict ):
""" Remove the specified metadata for the given file
"""
result = self.getFileMetadataFields( credDict )
if not result['OK']:
return result
metaFields = result['Value']
result = self.db.fileManager._findFiles( [path] )
if not result['OK']:
return result
if result['Value']['Successful']:
fileID = result['Value']['Successful'][path]['FileID']
else:
return S_ERROR( 'File %s not found' % path )
failedMeta = {}
for meta in metadata:
if meta in metaFields:
# Indexed meta case
req = "DELETE FROM FC_FileMeta_%s WHERE FileID=%d" % ( meta, fileID )
result = self.db._update( req )
if not result['OK']:
          failedMeta[meta] = result['Message']
else:
# Meta parameter case
req = "DELETE FROM FC_FileMeta WHERE MetaKey='%s' AND FileID=%d" % ( meta, fileID )
result = self.db._update( req )
if not result['OK']:
          failedMeta[meta] = result['Message']
    if failedMeta:
      metaExample = failedMeta.keys()[0]
      result = S_ERROR( 'Failed to remove %d metadata, e.g. %s' % ( len( failedMeta ), failedMeta[metaExample] ) )
      result['FailedMetadata'] = failedMeta
      return result
    else:
      return S_OK()
def __getFileID( self, path ):
result = self.db.fileManager._findFiles( [path] )
if not result['OK']:
return result
if result['Value']['Successful']:
fileID = result['Value']['Successful'][path]['FileID']
else:
return S_ERROR( 'File not found' )
return S_OK( fileID )
def __setFileMetaParameter( self, fileID, metaName, metaValue, credDict ):
""" Set an meta parameter - metadata which is not used in the the data
search operations
"""
result = self.db._insert( 'FC_FileMeta',
['FileID', 'MetaKey', 'MetaValue'],
[fileID, metaName, str( metaValue )] )
return result
def setFileMetaParameter( self, path, metaName, metaValue, credDict ):
result = self.__getFileID( path )
if not result['OK']:
return result
fileID = result['Value']
return self.__setFileMetaParameter( fileID, metaName, metaValue, credDict )
def _getFileUserMetadataByID( self, fileIDList, credDict, connection = False ):
""" Get file user metadata for the list of file IDs
"""
# First file metadata
result = self.getFileMetadataFields( credDict )
if not result['OK']:
return result
metaFields = result['Value']
stringIDs = ','.join( [ '%s' % fId for fId in fileIDList ] )
metaDict = {}
for meta in metaFields:
req = "SELECT Value,FileID FROM FC_FileMeta_%s WHERE FileID in (%s)" % ( meta, stringIDs )
result = self.db._query( req, conn = connection )
if not result['OK']:
return result
for value, fileID in result['Value']:
metaDict.setdefault( fileID, {} )
metaDict[fileID][meta] = value
req = "SELECT FileID,MetaKey,MetaValue from FC_FileMeta where FileID in (%s)" % stringIDs
result = self.db._query( req, conn = connection )
if not result['OK']:
return result
for fileID, key, value in result['Value']:
metaDict.setdefault( fileID, {} )
metaDict[fileID][key] = value
return S_OK( metaDict )
def getFileUserMetadata( self, path, credDict ):
""" Get metadata for the given file
"""
# First file metadata
result = self.getFileMetadataFields( credDict )
if not result['OK']:
return result
metaFields = result['Value']
result = self.__getFileID( path )
if not result['OK']:
return result
fileID = result['Value']
metaDict = {}
metaTypeDict = {}
for meta in metaFields:
req = "SELECT Value,FileID FROM FC_FileMeta_%s WHERE FileID=%d" % ( meta, fileID )
result = self.db._query( req )
if not result['OK']:
return result
if result['Value']:
metaDict[meta] = result['Value'][0][0]
metaTypeDict[meta] = metaFields[meta]
result = self.getFileMetaParameters( path, credDict )
if result['OK']:
metaDict.update( result['Value'] )
for meta in result['Value']:
metaTypeDict[meta] = 'NonSearchable'
result = S_OK( metaDict )
result['MetadataType'] = metaTypeDict
return result
def __getFileMetaParameters( self, fileID, credDict ):
req = "SELECT FileID,MetaKey,MetaValue from FC_FileMeta where FileID=%d " % fileID
result = self.db._query( req )
if not result['OK']:
return result
if not result['Value']:
return S_OK( {} )
metaDict = {}
for fileID, key, value in result['Value']:
if key in metaDict:
if type( metaDict[key] ) == types.ListType:
metaDict[key].append( value )
else:
# list.append() returns None, so build the new list explicitly
metaDict[key] = [ metaDict[key], value ]
else:
metaDict[key] = value
return S_OK( metaDict )
def getFileMetaParameters( self, path, credDict ):
""" Get meta parameters for the given file
"""
result = self.__getFileID( path )
if not result['OK']:
return result
fileID = result['Value']
return self.__getFileMetaParameters( fileID, credDict )
def __transformMetaParameterToData( self, metaname ):
""" Relocate the meta parameters of all the directories to the corresponding
indexed metadata table
"""
req = "SELECT FileID,MetaValue from FC_FileMeta WHERE MetaKey='%s'" % metaname
result = self.db._query( req )
if not result['OK']:
return result
if not result['Value']:
return S_OK()
insertValueList = []
for fileID, meta in result['Value']:
insertValueList.append( "( %d,'%s' )" % ( fileID, meta ) )
req = "INSERT INTO FC_FileMeta_%s (FileID,Value) VALUES %s" % ( metaname, ', '.join( insertValueList ) )
result = self.db._update( req )
if not result['OK']:
return result
req = "DELETE FROM FC_FileMeta WHERE MetaKey='%s'" % metaname
result = self.db._update( req )
return result
def __createMetaSelection( self, meta, value, table = '' ):
if type( value ) == types.DictType:
selectList = []
for operation, operand in value.items():
if operation in ['>', '<', '>=', '<=']:
if type( operand ) == types.ListType:
return S_ERROR( 'Illegal query: list of values for comparison operation' )
if type( operand ) in [types.IntType, types.LongType]:
selectList.append( "%sValue%s%d" % ( table, operation, operand ) )
elif type( operand ) == types.FloatType:
selectList.append( "%sValue%s%f" % ( table, operation, operand ) )
else:
selectList.append( "%sValue%s'%s'" % ( table, operation, operand ) )
elif operation == 'in' or operation == "=":
if type( operand ) == types.ListType:
vString = ','.join( [ "'" + str( x ) + "'" for x in operand] )
selectList.append( "%sValue IN (%s)" % ( table, vString ) )
else:
selectList.append( "%sValue='%s'" % ( table, operand ) )
elif operation == 'nin' or operation == "!=":
if type( operand ) == types.ListType:
vString = ','.join( [ "'" + str( x ) + "'" for x in operand] )
selectList.append( "%sValue NOT IN (%s)" % ( table, vString ) )
else:
selectList.append( "%sValue!='%s'" % ( table, operand ) )
selectString = ' AND '.join( selectList )
elif type( value ) == types.ListType:
vString = ','.join( [ "'" + str( x ) + "'" for x in value] )
selectString = "%sValue IN (%s)" % ( table, vString )
else:
if value == "Any":
selectString = ''
else:
selectString = "%sValue='%s' " % ( table, value )
return S_OK( selectString )
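# A sketch of the SQL fragments __createMetaSelection produces (values
# hypothetical; for dict queries the AND order follows dict iteration):
#
#   __createMetaSelection('X', {'>': 10, '<=': 20}, 'M.')  ->  "M.Value>10 AND M.Value<=20"
#   __createMetaSelection('X', ['a', 'b'], 'M.')           ->  "M.Value IN ('a','b')"
#   __createMetaSelection('X', 'Any', 'M.')                ->  ""  (no constraint)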
def __findFilesForMetaValue( self, meta, value, dirList ):
""" Find files in the given list of directories corresponding to the given
selection criteria
"""
result = self.__createMetaSelection( meta, value, "M." )
if not result['OK']:
return result
selectString = result['Value']
dirString = ','.join( [ str( x ) for x in dirList] )
req = " SELECT F.FileID, F.DirID FROM FC_FileMeta_%s AS M, FC_Files AS F" % meta
if dirString:
req += " WHERE F.DirID in (%s)" % dirString
if selectString:
if dirString:
req += " AND %s AND F.FileID=M.FileID" % selectString
else:
req += " WHERE %s AND F.FileID=M.FileID" % selectString
result = self.db._query( req )
if not result['OK']:
return result
if not result['Value']:
return S_OK( [] )
fileList = [ row[0] for row in result['Value'] ]
def __findFilesForSE( self, se, dirList ):
""" Find files in the given list of directories having replicas in the given se(s)
"""
seList = se
if type( se ) in types.StringTypes:
seList = [se]
seIDs = []
for se in seList:
result = self.db.seManager.getSEID( se )
if not result['OK']:
return result
seIDs.append( result['Value'] )
seString = intListToString( seIDs )
dirString = intListToString( dirList )
req = "SELECT F.FileID FROM FC_Files as F, FC_Replicas as R WHERE F.DirID IN (%s)" % dirString
req += " AND R.SEID IN (%s) AND F.FileID=R.FileID" % seString
result = self.db._query( req )
if not result['OK']:
return result
if not result['Value']:
return S_OK( [] )
fileList = [ row[0] for row in result['Value'] ]
def __findFilesForStandardMetaValue( self, meta, value, dirList ):
""" Find files in the given list of directories corresponding to the given
selection criteria using standard file metadata
"""
return S_OK( [] )
def __buildSEQuery( self, storageElement ):
""" Return a tuple with table and condition to locate files in a given SE
"""
if not storageElement:
return S_OK( [] )
result = self.db.seManager.getSEID( storageElement )
if not result['OK']:
return result
seID = result['Value']
table = 'FC_Replicas'
query = '%%s.SEID = %s' % seID
return S_OK( [ ( table, query ) ] )
def __buildUserMetaQuery( self, userMetaDict ):
""" Return a list of tuples with tables and conditions to locate files for a given user Metadata
"""
if not userMetaDict:
return S_OK( [] )
result = []
for meta, value in userMetaDict.items():
table = 'FC_FileMeta_%s' % meta
if type( value ) in types.StringTypes and value.lower() == 'any':
# 'ANY'
query = ''
result.append( ( table, query ) )
elif type( value ) == types.ListType:
if not value:
query = ''
result.append( ( table, query ) )
else:
escapeValues = self.db._escapeValues( value )
if not escapeValues['OK']:
return escapeValues
query = '%%s.Value IN ( %s )' % ', '.join( escapeValues['Value'] )
result.append( ( table, query ) )
elif type( value ) == types.DictType:
for operation, operand in value.items():
if type( operand ) == types.ListType:
escapeValues = self.db._escapeValues( operand )
if not escapeValues['OK']:
return escapeValues
escapedOperand = ', '.join( escapeValues['Value'] )
elif type( operand ) in [types.IntType, types.LongType]:
escapedOperand = '%d' % operand
elif type( operand ) == types.FloatType:
escapedOperand = '%f' % operand
else:
escapedOperand = self.db._escapeString( operand )
if not escapedOperand['OK']:
return escapedOperand
escapedOperand = escapedOperand['Value']
if operation in ['>', '<', '>=', '<=']:
if type( operand ) == types.ListType:
return S_ERROR( 'Illegal query: list of values for comparison operation' )
else:
query = '%%s.Value %s %s' % ( operation, escapedOperand )
result.append( ( table, query ) )
elif operation == 'in' or operation == "=":
if type( operand ) == types.ListType:
query = '%%s.Value IN ( %s )' % escapedOperand
result.append( ( table, query ) )
else:
query = '%%s.Value = %s' % escapedOperand
result.append( ( table, query ) )
elif operation == 'nin' or operation == "!=":
if type( operand ) == types.ListType:
query = '%%s.Value NOT IN ( %s )' % escapedOperand
result.append( ( table, query ) )
else:
query = '%%s.Value != %s' % escapedOperand
result.append( ( table, query ) )
else:
escapedValue = self.db._escapeString( value )
if not escapedValue['OK']:
return escapedValue
query = '%%s.Value = %s' % escapedValue['Value']
result.append( ( table, query ) )
return S_OK( result )
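# A sketch of the (table, condition) tuples produced (meta names assumed):
#
#   __buildUserMetaQuery({'Experiment': 'run7', 'Energy': {'>': 100}})
#     -> [('FC_FileMeta_Experiment', "%s.Value = 'run7'"),
#         ('FC_FileMeta_Energy', '%s.Value > 100')]
#
# The '%s' placeholder is later filled with a table alias (M1, M2, ...)
# by __findFilesByMetadata.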
def __buildStandardMetaQuery( self, standardMetaDict ):
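# Not implemented yet: standard metadata conditions are currently ignored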
result = []
return S_OK( result )
def __findFilesByMetadata( self, metaDict, dirList, credDict ):
""" Find a list of file IDs meeting the metaDict requirements and belonging
to directories in dirList
"""
# 1.- classify Metadata keys
storageElement = None
standardMetaDict = {}
userMetaDict = {}
for meta, value in metaDict.items():
if meta == "SE":
storageElement = value
elif meta in FILE_STANDARD_METAKEYS:
standardMetaDict[meta] = value
else:
userMetaDict[meta] = value
tablesAndConditions = []
# 2.- standard search
result = self.__buildStandardMetaQuery( standardMetaDict )
if not result['OK']:
return result
tablesAndConditions.extend( result['Value'] )
# 3.- user search
result = self.__buildUserMetaQuery( userMetaDict )
if not result['OK']:
return result
tablesAndConditions.extend( result['Value'] )
# 4.- SE constrain
result = self.__buildSEQuery( storageElement )
if not result['OK']:
return result
tablesAndConditions.extend( result['Value'] )
query = 'SELECT F.FileID FROM '
conditions = []
tables = [ 'FC_Files as F' ]
if dirList:
dirString = intListToString( dirList )
conditions.append( "F.DirID in (%s)" % dirString )
counter = 0
for table, condition in tablesAndConditions:
counter += 1
tables.append( '%s as M%d' % ( table, counter ) )
table = 'M%d' % counter
condition = condition % table + ' AND F.FileID = %s.FileID' % table
conditions.append( '( %s )' % condition )
query += ', '.join( tables )
if conditions:
query += ' WHERE %s' % ' AND '.join( conditions )
result = self.db._query( query )
if not result['OK']:
return result
if not result['Value']:
return S_OK( [] )
fileList = [ row[0] for row in result['Value'] ]
return S_OK( fileList )
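# A sketch of the assembled query for one user-metadata condition plus a
# directory constraint (IDs and values hypothetical):
#
#   SELECT F.FileID FROM FC_Files as F, FC_FileMeta_Experiment as M1
#   WHERE F.DirID in (7,8) AND ( M1.Value = 'run7' AND F.FileID = M1.FileID )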
@queryTime
def findFilesByMetadata( self, metaDict, path, credDict, extra = False ):
""" Find Files satisfying the given metadata
"""
if not path:
path = '/'
# 1.- Get Directories matching the metadata query
result = self.db.dmeta.findDirIDsByMetadata( metaDict, path, credDict )
if not result['OK']:
return result
dirList = result['Value']
dirFlag = result['Selection']
# 2.- Get known file metadata fields
# fileMetaDict = {}
result = self.getFileMetadataFields( credDict )
if not result['OK']:
return result
fileMetaKeys = result['Value'].keys() + list( FILE_STANDARD_METAKEYS )
fileMetaDict = dict( item for item in metaDict.items() if item[0] in fileMetaKeys )
fileList = []
lfnIdDict = {}
lfnList = []
if dirFlag != 'None':
# None means that no Directory satisfies the given query, thus the search is empty
if dirFlag == 'All':
# All means that there is no Directory level metadata in query, full name space is considered
dirList = []
if fileMetaDict:
# 3.- Do search in File Metadata
result = self.__findFilesByMetadata( fileMetaDict, dirList, credDict )
if not result['OK']:
return result
fileList = result['Value']
elif dirList:
# 4.- if not File Metadata, return the list of files in given directories
return self.db.dtree.getFileLFNsInDirectoryByDirectory( dirList, credDict )
else:
# if there is no File Metadata and no Dir Metadata, return an empty list
lfnList = []
if fileList:
# 5.- get the LFN
result = self.db.fileManager._getFileLFNs( fileList )
if not result['OK']:
return result
lfnList = result['Value']['Successful'].values()
if extra:
lfnIdDict = result['Value']['Successful']
result = S_OK( lfnList )
if extra:
result['LFNIDDict'] = lfnIdDict
return result
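# A hedged usage sketch (the instance, metadata names and path are assumptions):
#
#   result = fileMetadata.findFilesByMetadata({'Experiment': 'run7', 'SE': 'DISK-1'},
#                                             '/vo/data', credDict)
#   if result['OK']:
#     lfnList = result['Value']  # LFNs of the matching files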
|
Sbalbp/DIRAC
|
DataManagementSystem/DB/FileCatalogComponents/FileMetadata.py
|
Python
|
gpl-3.0
| 23,429
|
[
"DIRAC"
] |
af8557e41155b513f4bae1a58555fbfcb273141b042185344f68d22e9a2b9cb4
|
#! /usr/bin/env python
# This script reproduces the spike synchronization
# behavior of integrate-and-fire neurons in response to a subthreshold
# oscillation. This phenomenon is shown in Fig. 1 of
# C.D. Brody and J.J. Hopfield
# Simple Networks for Spike-Timing-Based Computation,
# with Application to Olfactory Processing
# Neuron 37, 843-852 (2003)
# Neurons receive a weak 35Hz oscillation, a Gaussian noise current
# and an increasing DC. The time-locking capability is shown to
# depend on the input current given. The result is then plotted using pylab.
# All parameters are taken from the above paper.
#
# units are the usual NEST units: pA,pF,ms,mV,Hz
#
# Sven Schrader
import nest
import nest.raster_plot
import pylab
import numpy
# number of neurons
N=1000 #Jellyfish
#N=1000000 #Cockroach
#N=4000000 #Mouse cortex = 04:02:27
bias_begin=140. # bias current from...
bias_end=200. # ...to (ms)
T=600 # simulation time (ms)
def bias(n):
# constructs the dictionary with current ramp
return { 'I_e': (n * (bias_end-bias_begin)/N + bias_begin) }
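# With N=1000 the ramp runs from just above 140 pA for the first neuron to
# 200 pA for the last: bias(1) = 140.06, bias(1000) = 200.0 (NEST assigns
# neuron GIDs starting at 1, so n ranges over 1..N below).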
driveparams = {'amplitude':50., 'frequency':35.}
noiseparams = {'mean':0.0, 'std':200.}
sdparams = { 'to_file':True, 'to_screen':False}
neuronparams = { 'tau_m':20., 'V_th':20., 'E_L':10.,
't_ref':2., 'V_reset':0., 'C_m':200., 'V_m':0.}
neurons = nest.Create('iaf_psc_alpha',N)
sd = nest.Create('spike_detector')
noise = nest.Create('noise_generator')
drive = nest.Create('ac_generator')
nest.SetStatus(drive, [driveparams] )
nest.SetStatus(noise, [noiseparams] )
nest.SetStatus(sd, [sdparams] )
nest.SetStatus(neurons, [neuronparams])
nest.SetStatus(neurons, map(bias, neurons))
nest.DivergentConnect(drive, neurons)
nest.DivergentConnect(noise, neurons)
nest.ConvergentConnect(neurons, sd)
nest.Simulate(T)
print "nest model processing complete with %s neurons" % N
nest.raster_plot.from_device(sd)
nest.raster_plot.show()
|
magnastrazh/NEUCOGAR
|
nest/dopamine/test_files/Brody_and_Hopfield_model.py
|
Python
|
gpl-2.0
| 1,958
|
[
"Gaussian",
"NEURON"
] |
0af47a4285e724b407bd5cfbec46b8b68b4cfe388682fb015c23d2dff1e4a0f3
|
MAX_COORD_INTEGER = 16384
def basenpc_coords(ent):
"""basenpc_coords returns the game coordinates of the given tarrasque BaseNPC-derived entity"""
cellwidth = 1 << ent.properties[(u'DT_BaseEntity', u'm_cellbits')]
x = ((ent.properties[(u'DT_DOTA_BaseNPC', u'm_cellX')] * cellwidth) - MAX_COORD_INTEGER)\
+ ent.properties[(u'DT_DOTA_BaseNPC', u'm_vecOrigin')][0]
y = ((ent.properties[(u'DT_DOTA_BaseNPC', u'm_cellY')] * cellwidth) - MAX_COORD_INTEGER)\
+ ent.properties[(u'DT_DOTA_BaseNPC', u'm_vecOrigin')][1]
return (x, y)
def baseent_coords(ent):
"""baseent_coords returns the game coordinates of the given tarrasque BaseEntity-derived entity (e.g. runes_"""
cellwidth = 1 << ent.properties[(u'DT_BaseEntity', u'm_cellbits')]
x = ((ent.properties[(u'DT_BaseEntity', u'm_cellX')] * cellwidth) - MAX_COORD_INTEGER)\
+ ent.properties[(u'DT_BaseEntity', u'm_vecOrigin')][0]
y = ((ent.properties[(u'DT_BaseEntity', u'm_cellY')] * cellwidth) - MAX_COORD_INTEGER)\
+ ent.properties[(u'DT_BaseEntity', u'm_vecOrigin')][1]
return (x, y)
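# A worked example with hypothetical property values: for m_cellbits = 7,
# cellwidth = 1 << 7 = 128; with m_cellX = 130 and m_vecOrigin[0] = 50.0,
# x = (130 * 128) - 16384 + 50.0 = 306.0.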
def unitIdx(ent):
return ent.properties[(u'DT_DOTA_BaseNPC', u'm_iUnitNameIndex')]
class HeroNameDict(dict):
"""
Helper for converting between various hero names/UnitNameIndex
Usage:
HeroNameDict[3] returns hero with UnitNameIndex=3 (Axe)
HeroNameDict['npc_dota_hero_axe'] returns Axe
HeroNameDict['Axe'] returns Axe
HeroNameDict['DT_DOTA_Unit_Hero_Axe'] returns Axe
The returned dict contains 4 fields:
dt_name: "DT_DOTA_Unit_Hero_Axe"
id: 3
localized_name: "Axe"
name: "npc_dota_hero_axe"
Note:
UnitNameIndex != HeroID from the API or game files (npc_heroes.txt)
However, UnitNameIndex and HeroID do match for indexes/IDs over 30
Note:
The lookup is inefficient, but memoized
"""
# source: http://code.activestate.com/recipes/578231-probably-the-fastest-memoization-decorator-in-the-/
def __missing__(self, key):
ret = None
if isinstance(key, basestring):
if key.startswith('DT_'):
ret = self.__iter_til_find__('dt_name', key)
elif key.startswith('npc_'):
ret = self.__iter_til_find__('name', key)
else:
ret = self.__iter_til_find__('localized_name', key)
elif isinstance(key, int):
ret = self.__iter_til_find__('id', key)
else:
raise KeyError("key:{} must instead be a string or int".format(key))
self[key] = ret # memoize
return ret
def __iter_til_find__(self, field, key):
for elem in self._heroes:
if elem[field] == key:
return elem
raise KeyError("didn't find hero for field:{} key:{}".format(field, key))
def __str__(self):
return "<HeroNameDict object at {}>".format(hex(id(self)))
_heroes = \
[{'dt_name': 'DT_DOTA_Unit_Hero_AntiMage',
'id': 2,
'localized_name': 'Anti-Mage',
'name': 'npc_dota_hero_antimage'},
{'dt_name': 'DT_DOTA_Unit_Hero_Axe',
'id': 3,
'localized_name': 'Axe',
'name': 'npc_dota_hero_axe'},
{'dt_name': 'DT_DOTA_Unit_Hero_Bane',
'id': 4,
'localized_name': 'Bane',
'name': 'npc_dota_hero_bane'},
{'dt_name': 'DT_DOTA_Unit_Hero_Bloodseeker',
'id': 5,
'localized_name': 'Bloodseeker',
'name': 'npc_dota_hero_bloodseeker'},
{'dt_name': 'DT_DOTA_Unit_Hero_CrystalMaiden',
'id': 6,
'localized_name': 'Crystal Maiden',
'name': 'npc_dota_hero_crystal_maiden'},
{'dt_name': 'DT_DOTA_Unit_Hero_DrowRanger',
'id': 7,
'localized_name': 'Drow Ranger',
'name': 'npc_dota_hero_drow_ranger'},
{'dt_name': 'DT_DOTA_Unit_Hero_Earthshaker',
'id': 8,
'localized_name': 'Earthshaker',
'name': 'npc_dota_hero_earthshaker'},
{'dt_name': 'DT_DOTA_Unit_Hero_Juggernaut',
'id': 9,
'localized_name': 'Juggernaut',
'name': 'npc_dota_hero_juggernaut'},
{'dt_name': 'DT_DOTA_Unit_Hero_Mirana',
'id': 10,
'localized_name': 'Mirana',
'name': 'npc_dota_hero_mirana'},
{'dt_name': 'DT_DOTA_Unit_Hero_Nevermore',
'id': 11,
'localized_name': 'Shadow Fiend',
'name': 'npc_dota_hero_nevermore'},
{'dt_name': 'DT_DOTA_Unit_Hero_Morphling',
'id': 12,
'localized_name': 'Morphling',
'name': 'npc_dota_hero_morphling'},
{'dt_name': 'DT_DOTA_Unit_Hero_PhantomLancer',
'id': 13,
'localized_name': 'Phantom Lancer',
'name': 'npc_dota_hero_phantom_lancer'},
{'dt_name': 'DT_DOTA_Unit_Hero_Puck',
'id': 14,
'localized_name': 'Puck',
'name': 'npc_dota_hero_puck'},
{'dt_name': 'DT_DOTA_Unit_Hero_Pudge',
'id': 15,
'localized_name': 'Pudge',
'name': 'npc_dota_hero_pudge'},
{'dt_name': 'DT_DOTA_Unit_Hero_Razor',
'id': 16,
'localized_name': 'Razor',
'name': 'npc_dota_hero_razor'},
{'dt_name': 'DT_DOTA_Unit_Hero_SandKing',
'id': 17,
'localized_name': 'Sand King',
'name': 'npc_dota_hero_sand_king'},
{'dt_name': 'DT_DOTA_Unit_Hero_StormSpirit',
'id': 18,
'localized_name': 'Storm Spirit',
'name': 'npc_dota_hero_storm_spirit'},
{'dt_name': 'DT_DOTA_Unit_Hero_Sven',
'id': 19,
'localized_name': 'Sven',
'name': 'npc_dota_hero_sven'},
{'dt_name': 'DT_DOTA_Unit_Hero_Tiny',
'id': 20,
'localized_name': 'Tiny',
'name': 'npc_dota_hero_tiny'},
{'dt_name': 'DT_DOTA_Unit_Hero_VengefulSpirit',
'id': 21,
'localized_name': 'Vengeful Spirit',
'name': 'npc_dota_hero_vengefulspirit'},
{'dt_name': 'DT_DOTA_Unit_Hero_Windrunner',
'id': 22,
'localized_name': 'Windrunner',
'name': 'npc_dota_hero_windrunner'},
{'dt_name': 'DT_DOTA_Unit_Hero_Zuus',
'id': 23,
'localized_name': 'Zeus',
'name': 'npc_dota_hero_zuus'},
{'dt_name': 'DT_DOTA_Unit_Hero_Kunkka',
'id': 24,
'localized_name': 'Kunkka',
'name': 'npc_dota_hero_kunkka'},
{'dt_name': 'DT_DOTA_Unit_Hero_Lina',
'id': 25,
'localized_name': 'Lina',
'name': 'npc_dota_hero_lina'},
{'dt_name': 'DT_DOTA_Unit_Hero_Lich',
'id': 26,
'localized_name': 'Lich',
'name': 'npc_dota_hero_lich'},
{'dt_name': 'DT_DOTA_Unit_Hero_Lion',
'id': 27,
'localized_name': 'Lion',
'name': 'npc_dota_hero_lion'},
{'dt_name': 'DT_DOTA_Unit_Hero_ShadowShaman',
'id': 28,
'localized_name': 'Shadow Shaman',
'name': 'npc_dota_hero_shadow_shaman'},
{'dt_name': 'DT_DOTA_Unit_Hero_Slardar',
'id': 29,
'localized_name': 'Slardar',
'name': 'npc_dota_hero_slardar'},
{'dt_name': 'DT_DOTA_Unit_Hero_Tidehunter',
'id': 30,
'localized_name': 'Tidehunter',
'name': 'npc_dota_hero_tidehunter'},
{'dt_name': 'DT_DOTA_Unit_Hero_WitchDoctor',
'id': 31,
'localized_name': 'Witch Doctor',
'name': 'npc_dota_hero_witch_doctor'},
{'dt_name': 'DT_DOTA_Unit_Hero_Riki',
'id': 32,
'localized_name': 'Riki',
'name': 'npc_dota_hero_riki'},
{'dt_name': 'DT_DOTA_Unit_Hero_Enigma',
'id': 33,
'localized_name': 'Enigma',
'name': 'npc_dota_hero_enigma'},
{'dt_name': 'DT_DOTA_Unit_Hero_Tinker',
'id': 34,
'localized_name': 'Tinker',
'name': 'npc_dota_hero_tinker'},
{'dt_name': 'DT_DOTA_Unit_Hero_Sniper',
'id': 35,
'localized_name': 'Sniper',
'name': 'npc_dota_hero_sniper'},
{'dt_name': 'DT_DOTA_Unit_Hero_Necrolyte',
'id': 36,
'localized_name': 'Necrolyte',
'name': 'npc_dota_hero_necrolyte'},
{'dt_name': 'DT_DOTA_Unit_Hero_Warlock',
'id': 37,
'localized_name': 'Warlock',
'name': 'npc_dota_hero_warlock'},
{'dt_name': 'DT_DOTA_Unit_Hero_Beastmaster',
'id': 38,
'localized_name': 'Beastmaster',
'name': 'npc_dota_hero_beastmaster'},
{'dt_name': 'DT_DOTA_Unit_Hero_QueenOfPain',
'id': 39,
'localized_name': 'Queen of Pain',
'name': 'npc_dota_hero_queenofpain'},
{'dt_name': 'DT_DOTA_Unit_Hero_Venomancer',
'id': 40,
'localized_name': 'Venomancer',
'name': 'npc_dota_hero_venomancer'},
{'dt_name': 'DT_DOTA_Unit_Hero_FacelessVoid',
'id': 41,
'localized_name': 'Faceless Void',
'name': 'npc_dota_hero_faceless_void'},
{'dt_name': 'DT_DOTA_Unit_Hero_SkeletonKing',
'id': 42,
'localized_name': 'Skeleton King',
'name': 'npc_dota_hero_skeleton_king'},
{'dt_name': 'DT_DOTA_Unit_Hero_DeathProphet',
'id': 43,
'localized_name': 'Death Prophet',
'name': 'npc_dota_hero_death_prophet'},
{'dt_name': 'DT_DOTA_Unit_Hero_PhantomAssassin',
'id': 44,
'localized_name': 'Phantom Assassin',
'name': 'npc_dota_hero_phantom_assassin'},
{'dt_name': 'DT_DOTA_Unit_Hero_Pugna',
'id': 45,
'localized_name': 'Pugna',
'name': 'npc_dota_hero_pugna'},
{'dt_name': 'DT_DOTA_Unit_Hero_TemplarAssassin',
'id': 46,
'localized_name': 'Templar Assassin',
'name': 'npc_dota_hero_templar_assassin'},
{'dt_name': 'DT_DOTA_Unit_Hero_Viper',
'id': 47,
'localized_name': 'Viper',
'name': 'npc_dota_hero_viper'},
{'dt_name': 'DT_DOTA_Unit_Hero_Luna',
'id': 48,
'localized_name': 'Luna',
'name': 'npc_dota_hero_luna'},
{'dt_name': 'DT_DOTA_Unit_Hero_DragonKnight',
'id': 49,
'localized_name': 'Dragon Knight',
'name': 'npc_dota_hero_dragon_knight'},
{'dt_name': 'DT_DOTA_Unit_Hero_Dazzle',
'id': 50,
'localized_name': 'Dazzle',
'name': 'npc_dota_hero_dazzle'},
{'dt_name': 'DT_DOTA_Unit_Hero_Rattletrap',
'id': 51,
'localized_name': 'Clockwerk',
'name': 'npc_dota_hero_rattletrap'},
{'dt_name': 'DT_DOTA_Unit_Hero_Leshrac',
'id': 52,
'localized_name': 'Leshrac',
'name': 'npc_dota_hero_leshrac'},
{'dt_name': 'DT_DOTA_Unit_Hero_Furion',
'id': 53,
'localized_name': "Nature's Prophet",
'name': 'npc_dota_hero_furion'},
{'dt_name': 'DT_DOTA_Unit_Hero_Life_Stealer',
'id': 54,
'localized_name': 'Lifestealer',
'name': 'npc_dota_hero_life_stealer'},
{'dt_name': 'DT_DOTA_Unit_Hero_DarkSeer',
'id': 55,
'localized_name': 'Dark Seer',
'name': 'npc_dota_hero_dark_seer'},
{'dt_name': 'DT_DOTA_Unit_Hero_Clinkz',
'id': 56,
'localized_name': 'Clinkz',
'name': 'npc_dota_hero_clinkz'},
{'dt_name': 'DT_DOTA_Unit_Hero_Omniknight',
'id': 57,
'localized_name': 'Omniknight',
'name': 'npc_dota_hero_omniknight'},
{'dt_name': 'DT_DOTA_Unit_Hero_Enchantress',
'id': 58,
'localized_name': 'Enchantress',
'name': 'npc_dota_hero_enchantress'},
{'dt_name': 'DT_DOTA_Unit_Hero_Huskar',
'id': 59,
'localized_name': 'Huskar',
'name': 'npc_dota_hero_huskar'},
{'dt_name': 'DT_DOTA_Unit_Hero_NightStalker',
'id': 60,
'localized_name': 'Night Stalker',
'name': 'npc_dota_hero_night_stalker'},
{'dt_name': 'DT_DOTA_Unit_Hero_Broodmother',
'id': 61,
'localized_name': 'Broodmother',
'name': 'npc_dota_hero_broodmother'},
{'dt_name': 'DT_DOTA_Unit_Hero_BountyHunter',
'id': 62,
'localized_name': 'Bounty Hunter',
'name': 'npc_dota_hero_bounty_hunter'},
{'dt_name': 'DT_DOTA_Unit_Hero_Weaver',
'id': 63,
'localized_name': 'Weaver',
'name': 'npc_dota_hero_weaver'},
{'dt_name': 'DT_DOTA_Unit_Hero_Jakiro',
'id': 64,
'localized_name': 'Jakiro',
'name': 'npc_dota_hero_jakiro'},
{'dt_name': 'DT_DOTA_Unit_Hero_Batrider',
'id': 65,
'localized_name': 'Batrider',
'name': 'npc_dota_hero_batrider'},
{'dt_name': 'DT_DOTA_Unit_Hero_Chen',
'id': 66,
'localized_name': 'Chen',
'name': 'npc_dota_hero_chen'},
{'dt_name': 'DT_DOTA_Unit_Hero_Spectre',
'id': 67,
'localized_name': 'Spectre',
'name': 'npc_dota_hero_spectre'},
{'dt_name': 'DT_DOTA_Unit_Hero_DoomBringer',
'id': 69,
'localized_name': 'Doom',
'name': 'npc_dota_hero_doom_bringer'},
{'dt_name': 'DT_DOTA_Unit_Hero_AncientApparition',
'id': 68,
'localized_name': 'Ancient Apparition',
'name': 'npc_dota_hero_ancient_apparition'},
{'dt_name': 'DT_DOTA_Unit_Hero_Ursa',
'id': 70,
'localized_name': 'Ursa',
'name': 'npc_dota_hero_ursa'},
{'dt_name': 'DT_DOTA_Unit_Hero_SpiritBreaker',
'id': 71,
'localized_name': 'Spirit Breaker',
'name': 'npc_dota_hero_spirit_breaker'},
{'dt_name': 'DT_DOTA_Unit_Hero_Gyrocopter',
'id': 72,
'localized_name': 'Gyrocopter',
'name': 'npc_dota_hero_gyrocopter'},
{'dt_name': 'DT_DOTA_Unit_Hero_Alchemist',
'id': 73,
'localized_name': 'Alchemist',
'name': 'npc_dota_hero_alchemist'},
{'dt_name': 'DT_DOTA_Unit_Hero_Invoker',
'id': 74,
'localized_name': 'Invoker',
'name': 'npc_dota_hero_invoker'},
{'dt_name': 'DT_DOTA_Unit_Hero_Silencer',
'id': 75,
'localized_name': 'Silencer',
'name': 'npc_dota_hero_silencer'},
{'dt_name': 'DT_DOTA_Unit_Hero_Obsidian_Destroyer',
'id': 76,
'localized_name': 'Outworld Devourer',
'name': 'npc_dota_hero_obsidian_destroyer'},
{'dt_name': 'DT_DOTA_Unit_Hero_Lycan',
'id': 77,
'localized_name': 'Lycanthrope',
'name': 'npc_dota_hero_lycan'},
{'dt_name': 'DT_DOTA_Unit_Hero_Brewmaster',
'id': 78,
'localized_name': 'Brewmaster',
'name': 'npc_dota_hero_brewmaster'},
{'dt_name': 'DT_DOTA_Unit_Hero_Shadow_Demon',
'id': 79,
'localized_name': 'Shadow Demon',
'name': 'npc_dota_hero_shadow_demon'},
{'dt_name': 'DT_DOTA_Unit_Hero_LoneDruid',
'id': 80,
'localized_name': 'Lone Druid',
'name': 'npc_dota_hero_lone_druid'},
{'dt_name': 'DT_DOTA_Unit_Hero_ChaosKnight',
'id': 81,
'localized_name': 'Chaos Knight',
'name': 'npc_dota_hero_chaos_knight'},
{'dt_name': 'DT_DOTA_Unit_Hero_Meepo',
'id': 82,
'localized_name': 'Meepo',
'name': 'npc_dota_hero_meepo'},
{'dt_name': 'DT_DOTA_Unit_Hero_Treant',
'id': 83,
'localized_name': 'Treant Protector',
'name': 'npc_dota_hero_treant'},
{'dt_name': 'DT_DOTA_Unit_Hero_Ogre_Magi',
'id': 84,
'localized_name': 'Ogre Magi',
'name': 'npc_dota_hero_ogre_magi'},
{'dt_name': 'DT_DOTA_Unit_Hero_Undying',
'id': 85,
'localized_name': 'Undying',
'name': 'npc_dota_hero_undying'},
{'dt_name': 'DT_DOTA_Unit_Hero_Rubick',
'id': 86,
'localized_name': 'Rubick',
'name': 'npc_dota_hero_rubick'},
{'dt_name': 'DT_DOTA_Unit_Hero_Disruptor',
'id': 87,
'localized_name': 'Disruptor',
'name': 'npc_dota_hero_disruptor'},
{'dt_name': 'DT_DOTA_Unit_Hero_Nyx_Assassin',
'id': 88,
'localized_name': 'Nyx Assassin',
'name': 'npc_dota_hero_nyx_assassin'},
{'dt_name': 'DT_DOTA_Unit_Hero_Naga_Siren',
'id': 89,
'localized_name': 'Naga Siren',
'name': 'npc_dota_hero_naga_siren'},
{'dt_name': 'DT_DOTA_Unit_Hero_KeeperOfTheLight',
'id': 90,
'localized_name': 'Keeper of the Light',
'name': 'npc_dota_hero_keeper_of_the_light'},
{'dt_name': 'DT_DOTA_Unit_Hero_Wisp',
'id': 91,
'localized_name': 'Io',
'name': 'npc_dota_hero_wisp'},
{'dt_name': 'DT_DOTA_Unit_Hero_Visage',
'id': 92,
'localized_name': 'Visage',
'name': 'npc_dota_hero_visage'},
{'dt_name': 'DT_DOTA_Unit_Hero_Slark',
'id': 93,
'localized_name': 'Slark',
'name': 'npc_dota_hero_slark'},
{'dt_name': 'DT_DOTA_Unit_Hero_Medusa',
'id': 94,
'localized_name': 'Medusa',
'name': 'npc_dota_hero_medusa'},
{'dt_name': 'DT_DOTA_Unit_Hero_TrollWarlord',
'id': 95,
'localized_name': 'Troll Warlord',
'name': 'npc_dota_hero_troll_warlord'},
{'dt_name': 'DT_DOTA_Unit_Hero_Centaur',
'id': 96,
'localized_name': 'Centaur Warrunner',
'name': 'npc_dota_hero_centaur'},
{'dt_name': 'DT_DOTA_Unit_Hero_Magnataur',
'id': 97,
'localized_name': 'Magnus',
'name': 'npc_dota_hero_magnataur'},
{'dt_name': 'DT_DOTA_Unit_Hero_Shredder',
'id': 98,
'localized_name': 'Timbersaw',
'name': 'npc_dota_hero_shredder'},
{'dt_name': 'DT_DOTA_Unit_Hero_Bristleback',
'id': 99,
'localized_name': 'Bristleback',
'name': 'npc_dota_hero_bristleback'},
{'dt_name': 'DT_DOTA_Unit_Hero_Tusk',
'id': 100,
'localized_name': 'Tusk',
'name': 'npc_dota_hero_tusk'},
{'dt_name': 'DT_DOTA_Unit_Hero_Skywrath_Mage',
'id': 101,
'localized_name': 'Skywrath Mage',
'name': 'npc_dota_hero_skywrath_mage'},
{'dt_name': 'DT_DOTA_Unit_Hero_Abaddon',
'id': 102,
'localized_name': 'Abaddon',
'name': 'npc_dota_hero_abaddon'},
{'dt_name': 'DT_DOTA_Unit_Hero_Elder_Titan',
'id': 103,
'localized_name': 'Elder Titan',
'name': 'npc_dota_hero_elder_titan'},
{'dt_name': 'DT_DOTA_Unit_Hero_Legion_Commander',
'id': 104,
'localized_name': 'Legion Commander',
'name': 'npc_dota_hero_legion_commander'},
{'dt_name': 'DT_DOTA_Unit_Hero_EmberSpirit',
'id': 106,
'localized_name': 'Ember Spirit',
'name': 'npc_dota_hero_ember_spirit'},
{'dt_name': 'DT_DOTA_Unit_Hero_EarthSpirit',
'id': 107,
'localized_name': 'Earth Spirit',
'name': 'npc_dota_hero_earth_spirit'},
{'dt_name': 'DT_DOTA_Unit_Hero_AbyssalUnderlord',
'id': 108,
'localized_name': 'Abyssal Underlord',
'name': 'npc_dota_hero_abyssal_underlord'},
{'dt_name': 'DT_DOTA_Unit_Hero_Terrorblade',
'id': 109,
'localized_name': 'Terrorblade',
'name': 'npc_dota_hero_terrorblade'},
]
HeroNameDict = HeroNameDict()
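# Example lookups, per the class docstring (all return the same Axe entry):
#
#   HeroNameDict[3]                        # by UnitNameIndex
#   HeroNameDict['npc_dota_hero_axe']      # by internal name
#   HeroNameDict['Axe']                    # by localized name
#   HeroNameDict['DT_DOTA_Unit_Hero_Axe']  # by DT name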
|
grschafer/alacrity
|
alacrity/parsers/utils.py
|
Python
|
mit
| 18,327
|
[
"CRYSTAL",
"TINKER"
] |
99ba176dd812becb0e5909188864db030c8104380797f314f0585172e0f03a16
|
#!/usr/bin/env python
import unittest
from ct.crypto import error
from ct.crypto.asn1 import tag
from ct.crypto.asn1 import types
from ct.crypto.asn1 import type_test_base
class TagDecoratorTest(unittest.TestCase):
"""Test the automatic creation of tags."""
def test_universal_tag(self):
class Test(object):
tags = ()
tagger = types.Universal(5, tag.PRIMITIVE)
tagger(Test)
self.assertEqual(1, len(Test.tags))
expected_tag = tag.Tag(5, tag.UNIVERSAL, tag.PRIMITIVE)
self.assertEqual(expected_tag, Test.tags[0])
def test_explicit_tag(self):
class Test(object):
tags = ()
tagger1 = types.Explicit(5, tag_class=tag.APPLICATION)
tagger1(Test)
self.assertEqual(1, len(Test.tags))
expected_tag1 = tag.Tag(5, tag.APPLICATION, tag.CONSTRUCTED)
self.assertEqual(expected_tag1, Test.tags[0])
tagger2 = types.Explicit(3, tag_class=tag.CONTEXT_SPECIFIC)
tagger2(Test)
self.assertEqual(2, len(Test.tags))
self.assertEqual(expected_tag1, Test.tags[0])
expected_tag2 = tag.Tag(3, tag.CONTEXT_SPECIFIC, tag.CONSTRUCTED)
self.assertEqual(expected_tag2, Test.tags[1])
def test_implicit_tag(self):
class Test(object):
tags = ()
tagger = types.Implicit(5, tag_class=tag.APPLICATION)
# Cannot implicitly tag an untagged type.
self.assertRaises(TypeError, tagger, Test)
# Add a tag and try again.
Test.tags = (tag.Tag(0, tag.UNIVERSAL, tag.PRIMITIVE),)
tagger(Test)
self.assertEqual(1, len(Test.tags))
expected_tag = tag.Tag(5, tag.APPLICATION, tag.PRIMITIVE)
self.assertEqual(expected_tag, Test.tags[0])
# Repeat the test with a constructed encoding.
Test.tags = (tag.Tag(0, tag.UNIVERSAL, tag.CONSTRUCTED),)
tagger(Test)
self.assertEqual(1, len(Test.tags))
expected_tag = tag.Tag(5, tag.APPLICATION, tag.CONSTRUCTED)
self.assertEqual(expected_tag, Test.tags[0])
# A dummy class we use to test that values are encoded as tag-length-value
# triplets.
class Dummy(types.Simple):
# Fake.
tags = (tag.Tag(1, tag.UNIVERSAL, tag.PRIMITIVE),)
@classmethod
def _convert_value(cls, value):
if isinstance(value, str):
return value
raise TypeError("Can't make a dummy from %s" % type(value))
def _decode_value(self, buf, strict=True):
return buf
def _encode_value(self):
return self._value
def __str__(self):
# Inject a marker to test human_readable().
return "dummy!" + str(self._value)
# And a simple sequence to test some properties of constructed objects.
class DummySequence(types.Sequence):
LOOK = {True: types.Integer}
components = (
types.Component("bool", types.Boolean),
types.Component("int", types.Integer, optional=True),
types.Component("oct", types.OctetString, default="hi"),
types.Component("any", types.Any, defined_by="bool", lookup=LOOK)
)
class TagLengthValueTest(unittest.TestCase):
"""Test Tag-Length-Value encoding."""
def test_encode_decode_int(self):
signed_integer_encodings = (
(0, "00"),
(127, "7f"),
(128, "0080"),
(256, "0100"),
(-1, "ff"),
(-128, "80"),
(-129, "ff7f")
)
for value, enc in signed_integer_encodings:
self.assertEqual(types.encode_int(value).encode("hex"), enc)
self.assertEqual(types.decode_int(enc.decode("hex")), value)
unsigned_integer_encodings = (
(0, "00"),
(127, "7f"),
(128, "80"),
(256, "0100")
)
for value, enc in unsigned_integer_encodings:
self.assertEqual(
types.encode_int(value, signed=False).encode("hex"), enc)
self.assertEqual(
types.decode_int(enc.decode("hex"), signed=False), value)
def test_encode_read_length(self):
length_encodings = (
(0, "00"),
(1, "01"),
(38, "26"),
(127, "7f"),
(129, "8181"),
(201, "81c9"),
(65535, "82ffff"),
(65536, "83010000")
)
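# DER length encoding in brief: values up to 127 use one byte (short form);
# longer values use a prefix byte 0x80|n giving the number of length bytes
# that follow, e.g. 201 -> "81c9" and 65535 -> "82ffff" above.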
for value, enc in length_encodings:
self.assertEqual(types.encode_length(value).encode("hex"), enc)
self.assertEqual(types.read_length(enc.decode("hex")), (value, ""))
# Test that the reader stops after the specified number of bytes.
longer = enc + "00"
self.assertEqual(types.read_length(longer.decode("hex")),
(value, "\x00"))
longer = enc + "ff"
self.assertEqual(types.read_length(longer.decode("hex")),
(value, "\xff"))
# And test that it complains when there are not enough bytes.
shorter = enc[:-2]
self.assertRaises(error.ASN1Error,
types.read_length, shorter.decode("hex"))
def test_read_indefinite_length(self):
indef_length = "80".decode("hex")
self.assertRaises(error.ASN1Error, types.read_length, indef_length)
self.assertEqual(types.read_length(indef_length, strict=False),
(-1, ""))
self.assertEqual(types.read_length(indef_length + "hello", strict=False),
(-1, "hello"))
def test_encode_decode_read(self):
value = "hello"
d = Dummy(value=value)
enc = d.encode()
encoded_length = types.encode_length(len(value))
expected = Dummy.tags[0].value + encoded_length + value
self.assertEqual(expected.encode("hex"), enc.encode("hex"))
decoded_dummy = Dummy.decode(enc)
self.assertTrue(isinstance(decoded_dummy, Dummy))
self.assertEqual(decoded_dummy.value, value)
read_dummy, rest = Dummy.read(enc)
self.assertTrue(isinstance(read_dummy, Dummy))
self.assertEqual(read_dummy.value, value)
self.assertEqual("", rest)
def test_read_from_beginning(self):
value = "hello"
d = Dummy(value=value)
self.assertEqual("hello", d.value)
enc = d.encode()
encoded_length = types.encode_length(len(d.value))
expected = Dummy.tags[0].value + encoded_length + d.value
self.assertEqual(expected.encode("hex"), enc.encode("hex"))
longer_buffer = enc + "ello"
# We can't decode because there are leftover bytes...
self.assertRaises(error.ASN1Error, Dummy.decode, longer_buffer)
# ... but we can read from the beginning of the buffer.
read_dummy, rest = Dummy.read(longer_buffer)
self.assertTrue(isinstance(read_dummy, Dummy))
self.assertEqual("hello", read_dummy.value)
self.assertEqual("ello", rest)
def test_encode_decode_read_multiple_tags(self):
@types.Explicit(8)
class NewDummy(Dummy):
pass
value = "hello"
d = NewDummy(value=value)
enc = d.encode()
encoded_inner_length = types.encode_length(len(value))
inner = Dummy.tags[0].value + encoded_inner_length + value
encoded_length = types.encode_length(len(inner))
expected = NewDummy.tags[1].value + encoded_length + inner
self.assertEqual(expected.encode("hex"), enc.encode("hex"))
decoded_dummy = NewDummy.decode(enc)
self.assertTrue(isinstance(decoded_dummy, NewDummy))
self.assertEqual(decoded_dummy.value, value)
read_dummy, rest = NewDummy.read(enc)
self.assertTrue(isinstance(read_dummy, NewDummy))
self.assertEqual(read_dummy.value, value)
self.assertEqual("", rest)
indef_encoding = "a880010568656c6c6f0000".decode("hex")
self.assertRaises(error.ASN1Error, NewDummy.decode, indef_encoding)
self.assertEqual(NewDummy.decode(indef_encoding, strict=False),
NewDummy(value="hello"))
class BooleanTest(type_test_base.TypeTestBase):
asn1_type = types.Boolean
repeated = False
keyed = False
initializers = (
(False, 0),
(True, 1),
)
bad_initializers = (
# Everything is converted to a bool and accepted.
)
encode_test_vectors = (
(True, "0101ff"),
(False, "010100")
)
bad_encodings = (
# Empty value.
("0100"),
# Longer than 1 byte.
("01020000"),
("0102ffff"),
# Indefinite length
("0180ff0000")
)
bad_strict_encodings = (
# Nonzero byte for True.
(True, "010101"),
(True, "0101ab")
)
class IntegerTest(type_test_base.TypeTestBase):
asn1_type = types.Integer
repeated = False
keyed = False
initializers = (
(0,),
(1,),
(-1,),
(1000000,),
)
bad_initializers = (
# Everything that can be converted to an int is accepted.
)
encode_test_vectors = (
(0, "020100"),
(127, "02017f"),
(128, "02020080"),
(256, "02020100"),
(-1, "0201ff"),
(-128, "020180"),
(-129, "0202ff7f")
)
bad_encodings = (
# Empty value.
("0200"),
# Indefinite length.
("0280ff0000")
)
bad_strict_encodings = (
# Leading 0-octets.
(0, "02020000"),
(127, "0202007f"),
# Leading ff-octets.
(-1, "0202ffff"),
(-128, "0202ff80")
)
class OctetStringTest(type_test_base.TypeTestBase):
asn1_type = types.OctetString
repeated = False
keyed = False
initializers = (
("hello",),
("\xff\x00",),
)
bad_initializers = (
# Nothing exciting.
)
encode_test_vectors = (
# Empty strings are allowed.
("", "0400"),
("hello", "040568656c6c6f"),
("\xff\x00", "0402ff00")
)
bad_encodings = (
# Indefinite length.
("0480abcdef0000"),
)
bad_strict_encodings = ()
# Skip other string type tests as there's currently no exciting specialization
# for those.
class BitStringTest(type_test_base.TypeTestBase):
asn1_type = types.BitString
repeated = False
keyed = False
initializers = (
("",),
("0",),
("1",),
("010100010110",),
)
bad_initializers = (
("hello", ValueError),
("0123cdef", ValueError),
("\xff\x00", ValueError)
)
encode_test_vectors = (
# From the ASN.1 spec.
# 0a3b5f291cd
("00001010001110110101111100101001000111001101", "0307040a3b5f291cd0"),
# More test vectors with different amounts of padding
("", "030100"),
("0", "03020700"),
("1", "03020780"),
("0000000", "03020100"),
("0000001", "03020102"),
("1000000", "03020180"),
("00000000", "03020000"),
("11111111", "030200ff"),
("0000000001", "0303060040"),
)
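# Reading one vector above: "03020780" is tag 03 (BIT STRING), length 02,
# first content octet 07 (seven unused padding bits), then 80 = the single
# bit '1' followed by seven zero padding bits.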
bad_encodings = (
# Empty value - padding byte must always be present.
("0300"),
# Padding but no other bytes.
("030101"),
("030107"),
# Invalid padding value.
("030108"),
("030180"),
("03020800"),
("03028000"),
# Invalid padding bits.
("030201ff"),
("030205f0"),
("030207f0"),
# Indefinite length.
("038007800000")
)
bad_strict_encodings = ()
# Mix-in from object so the tests are not run for the base class itself.
class RepeatedTest(object):
def test_modify_repeated(self):
d = Dummy(value="world")
d2 = Dummy(value="hello")
s = self.asn1_type(value=[d])
self.assertFalse(s.modified())
original_enc = s.encode()
s[0] = d2
self.assertTrue(s.modified())
self.assertEqual(s, [d2])
self.assertNotEqual(s.encode(), original_enc)
del s[0]
self.assertTrue(s.modified())
self.assertFalse(list(s))
self.assertNotEqual(s.encode(), original_enc)
# Back to original; but the modified bit is never cleared.
s.append(d)
self.assertTrue(s.modified())
self.assertEqual(s, [d])
self.assertEqual(s.encode(), original_enc)
class SequenceOfTest(type_test_base.TypeTestBase, RepeatedTest):
# Test with a dummy class.
class SequenceOfDummies(types.SequenceOf):
component = Dummy
asn1_type = SequenceOfDummies
immutable = False
keyed = False
initializers = (
([Dummy(value="world"), Dummy(value="hello"), Dummy(value="\x00")],
["world", "hello", "\x00"],
[Dummy(value="world"), "hello", "\x00"]),
([], ()),
)
bad_initializers = (
# Can't coerce to Dummy.
([3], TypeError),
([True], TypeError),
# Can't iterate.
(True, TypeError)
)
encode_test_vectors = (
([], "3000"),
([Dummy(value="hello"), Dummy(value="\x00\xff")],
"300b010568656c6c6f010200ff"),
# Different order produces a different encoding.
([Dummy(value="\x00\xff"), Dummy(value="hello")],
"300b010200ff010568656c6c6f")
)
bad_encodings = (
# Bad element length.
"3003010200",
# Bad component tag.
"30020200",
# Indef length with no EOC.
"3080010568656c0000010200ff",
)
bad_strict_encodings = ()
def test_indefinite_length_encoding(self):
# We cannot use bad_strict_encodings because of the re-encoding bug:
# indefinite length is not preserved.
# For good measure, we add an EOC in the contents.
value = self.asn1_type([Dummy(value="hel\x00\x00"),
Dummy(value="\x00\xff")])
indef_length_encoding = "3080010568656c0000010200ff0000".decode("hex")
self.assertRaises(error.ASN1Error,
self.asn1_type.decode, indef_length_encoding)
o = self.asn1_type.decode(indef_length_encoding, strict=False)
self.assertEqual(o, value)
class SetOfTest(type_test_base.TypeTestBase, RepeatedTest):
class SetOfDummies(types.SetOf):
component = Dummy
asn1_type = SetOfDummies
immutable = False
keyed = False
initializers = (
([Dummy(value="world"), Dummy(value="\x00"), Dummy(value="world")],
["world", "\x00", "world"],
[Dummy(value="world"), "\x00", "world"]),
([], ()),
)
bad_initializers = (
# Can't coerce to Dummy.
([3], TypeError),
([True], TypeError),
# Can't iterate.
(True, TypeError)
)
encode_test_vectors = (
([], "3100"),
# Elements are sorted according to their encoding.
([Dummy(value="\x00\xff"), Dummy(value="hello")],
"310b010200ff010568656c6c6f"),
)
bad_encodings = (
# Bad element length.
"31010200",
# Bad component tag.
"31020200",
# Indef length with no EOC.
"3180010568656c0000010200ff",
)
bad_strict_encodings = (
)
def test_encoding_is_order_independent(self):
elems = [Dummy(value="world"), Dummy(value="hello")]
dummies = self.asn1_type(elems)
elems2 = [Dummy(value="hello"), Dummy(value="world")]
dummies2 = self.asn1_type(elems2)
# Encodings compare equal even though the sets don't.
self.assertEqual(dummies.encode(), dummies2.encode())
def test_indefinite_length_encoding(self):
# We cannot use bad_strict_encodings because of the re-encoding bug:
# indefinite length is not preserved.
# For good measure, we add an EOC in the contents.
value = self.asn1_type([Dummy(value="hel\x00\x00"),
Dummy(value="\x00\xff")])
indef_length_encoding = "3180010568656c0000010200ff0000".decode("hex")
self.assertRaises(error.ASN1Error,
self.asn1_type.decode, indef_length_encoding)
o = self.asn1_type.decode(indef_length_encoding, strict=False)
self.assertEqual(o, value)
class AnyTest(type_test_base.TypeTestBase):
asn1_type = types.Any
repeated = False
keyed = False
initializers = (
# Decoded and undecoded initializers.
# Test with a few simple types.
(types.Boolean(value=True).encode(), types.Boolean(value=True)),
(types.Integer(value=3).encode(), types.Integer(value=3)),
(types.OctetString("hello").encode(), types.OctetString("hello")),
# We don't currently check that the encoded value encodes a valid
# tag-length-value triplet, so this will also succeed.
("0000ff",),
("",)
)
bad_initializers = (
(types.Any("hello"), TypeError),
)
encode_test_vectors = (
# A Boolean True.
("\x01\x01\xff", "0101ff"),
# An Integer 3.
("\x02\x01\x03", "020103"),
# An octet string "hello".
("\x04\x05\x68\x65\x6c\x6c\x6f", "040568656c6c6f"),
)
bad_encodings = ()
bad_strict_encodings = ()
def test_decode_inner(self):
dummy = Dummy(value="hello")
a = types.Any(dummy)
self.assertTrue(a.decoded)
self.assertEqual(a.decoded_value, dummy)
enc = dummy.encode()
a2 = types.Any(enc)
self.assertFalse(a2.decoded)
self.assertEqual(a, a2)
self.assertEqual(a.value, a2.value)
a2.decode_inner(value_type=Dummy)
self.assertTrue(a2.decoded)
self.assertEqual(a2.decoded_value, dummy)
class ChoiceTest(type_test_base.TypeTestBase):
class MyChoice(types.Choice):
components = {
"bool": types.Boolean,
"int": types.Integer,
"oct": types.OctetString,
}
asn1_type = MyChoice
immutable = False
repeated = False
keyed = True
initializers = (
({"bool": types.Boolean(value=False)}, {"bool": False}),
({"int": types.Integer(value=3)}, {"int": 3}),
({"oct": types.OctetString(value="hello")}, {"oct": "hello"}),
({}, {"bool": None}, {"int": None}, {"oct": None})
)
bad_initializers = (
# Multiple values set at once.
({"bool": False, "int": 3}, ValueError),
# Invalid key.
({"boo": False}, ValueError),
)
encode_test_vectors = (
({"bool": True}, "0101ff"),
({"int": 3}, "020103"),
({"oct": "hello"}, "040568656c6c6f"),
)
bad_encodings = ()
bad_strict_encodings = ()
def test_modify(self):
m = self.MyChoice(value={"bool": True})
self.assertFalse(m.modified())
m["bool"] = False
self.assertTrue(m.modified())
self.assertFalse(m["bool"])
# Back to original; but the modified bit is never cleared.
m["bool"] = True
self.assertTrue(m.modified())
self.assertTrue(m["bool"])
class SequenceTest(type_test_base.TypeTestBase):
asn1_type = DummySequence
immutable = False
repeated = False
keyed = True
initializers = (
# Fully specified, Any can be decoded.
({"bool": True, "int": 3, "oct": "hello", "any": "\x02\x01\x05"},),
# Fully specified, Any cannot be decoded.
({"bool": False, "int": 3, "oct": "hello", "any": "\x02\x01\x05"},),
# Partially specified.
({"bool": True, "int": None, "oct": "hi", "any": None},
{"bool": True},),
({"bool": None, "int": 3, "oct": "hi", "any": None}, {"int": 3},),
# Setting the defaults is the same as setting nothing.
({"bool": None, "int": None, "oct": "hi", "any": None},
{"bool": None, "int": None, "oct": None, "any": None},
{},
{"oct": "hi"}),
)
bad_initializers = (
# Invalid key.
({"boo": False}, ValueError),
# Invalid component.
({"int": "hello"}, ValueError)
)
encode_test_vectors = (
({"bool": True, "int": 3, "oct": "hello", "any": "\x02\x01\x05"},
"30100101ff020103040568656c6c6f020105"),
# Missing optional.
({"bool": True, "oct": "hello", "any": "\x02\x01\x05"},
"300d0101ff040568656c6c6f020105"),
# Missing default.
({"bool": True, "int": 3, "any": "\x02\x01\x05"},
"30090101ff020103020105"),
# Default value set.
({"bool": True, "int": 3, "oct": "hi", "any": "\x02\x01\x05"},
"30090101ff020103020105"),
)
bad_encodings = (
# Indef length with no EOC.
"30800101ff020103040568656c0000020105",
)
bad_strict_encodings = ()
def test_modify(self):
s = DummySequence(value={"bool": True, "int": 2})
self.assertFalse(s.modified())
s["bool"] = False
self.assertTrue(s.modified())
self.assertFalse(s["bool"])
self.assertEqual(s["int"], 2)
# Back to original; but the modified bit is never cleared.
s["bool"] = True
self.assertTrue(s.modified())
self.assertTrue(s["bool"])
self.assertEqual(s["int"], 2)
def test_decode_any(self):
seq = self.asn1_type({"bool": True, "int": 3, "oct": "hello",
"any": "\x02\x01\x05"})
enc = seq.encode()
dec = self.asn1_type.decode(enc)
self.assertTrue(dec["any"].decoded)
self.assertEqual(dec["any"].decoded_value, 5)
# Lookup key not in dictionary.
seq = self.asn1_type({"bool": False, "int": 3, "oct": "hello",
"any": "\x02\x01\x05"})
enc = seq.encode()
seq = self.asn1_type.decode(enc)
self.assertFalse(seq["any"].decoded)
# Corrupt any.
# We don't currently verify the Any spec when creating an element.
seq = self.asn1_type({"bool": True, "int": 3, "oct": "hello",
"any": "\x01\x01\x05"})
enc = seq.encode()
# Can't decode in strict mode.
self.assertRaises(error.ASN1Error, self.asn1_type.decode, enc)
dec = self.asn1_type.decode(enc, strict=False)
self.assertFalse(dec["any"].decoded)
def test_indefinite_length_encoding(self):
# We cannot use bad_strict_encodings because of the re-encoding bug:
# indefinite length is not preserved.
# For good measure, we add an EOC in the contents.
value = self.asn1_type(value={"bool": True, "int": 3,
"oct": "hel\x00\x00",
"any": "\x02\x01\x05"})
indef_length_encoding = (
"30800101ff020103040568656c00000201050000".decode("hex"))
self.assertRaises(error.ASN1Error,
self.asn1_type.decode, indef_length_encoding)
o = self.asn1_type.decode(indef_length_encoding, strict=False)
self.assertEqual(o, value)
# Some attempted test coverage for recursive mutable types.
class RecursiveTest(type_test_base.TypeTestBase):
class SequenceOfSequence(types.SequenceOf):
component = DummySequence
asn1_type = SequenceOfSequence
immutable = False
repeated = True
keyed = False
initializers = (
# Fully specified sequence.
([{"bool": True, "int": 3, "oct": "hello", "any": "\x02\x01\x05"}],),
# Partially specified sequence.
([{"bool": True, "int": None, "oct": "hi", "any": None}],
[{"bool": True}],),
# Empty sequence.
([],)
)
bad_initializers = (
# Invalid key in component.
([{"boo": False}], ValueError),
# Invalid value in component.
([{"int": "hello"}], ValueError),
# Invalid component: not iterable.
(types.Boolean(True), TypeError),
# Invalid component: iterable but wrong components.
([types.Boolean(True)], TypeError)
)
encode_test_vectors = (
([{"bool": True, "int": 3, "oct": "hello", "any": "\x02\x01\x05"}],
"301230100101ff020103040568656c6c6f020105"),
)
bad_encodings = ()
bad_strict_encodings = ()
def test_modify_recursively(self):
d = DummySequence(value={"bool": True, "int":3, "any": "\x02\x01\x05"})
s = self.SequenceOfSequence(value=[d])
self.assertFalse(s.modified())
original_enc = s.encode()
# Modify subcomponent.
s[0]["bool"] = False
self.assertTrue(s.modified())
self.assertNotEqual(s.encode(), original_enc)
# Reset.
s[0]["bool"] = True
self.assertTrue(s.modified())
self.assertEqual(s.encode(), original_enc)
class PrintTest(unittest.TestCase):
def test_simple_human_readable(self):
dummy = Dummy("hello")
# Ensure there's some content.
self.assertTrue(str(dummy))
self.assertTrue(str(dummy) in dummy.human_readable(wrap=0))
def test_simple_human_readable_prints_label(self):
s = Dummy("hello").human_readable(label="world")
self.assertTrue("world" in s)
def test_simple_human_readable_lines_wrap(self):
dummy = Dummy(value="hello")
wrap = 3
for line in dummy.human_readable_lines(wrap=wrap):
self.assertTrue(len(line) <= wrap)
def test_string_value_int(self):
i = types.Integer(value=123456789)
self.assertTrue("123456789" in str(i))
def test_string_value_bool(self):
b = types.Boolean(value=True)
self.assertTrue("true" in str(b).lower())
b = types.Boolean(value=False)
self.assertTrue("false" in str(b).lower())
def test_string_value_string(self):
# Currently all string types are just str, with no encoding.
hello = "\x68\x65\x6c\x6c\x6f"
invalid_printable_char = "*"
opaque = "\xd7\xa9\xd7\x9c\xd7\x95\xd7\x9d"
string_types = [types.TeletexString, types.PrintableString,
types.UniversalString, types.UTF8String,
types.BMPString, types.IA5String,
types.VisibleString]
should_fail = {
hello: [],
invalid_printable_char: [types.PrintableString],
opaque: [types.PrintableString,
types.IA5String,
types.VisibleString],
}
strings = [hello, invalid_printable_char, opaque]
for t in string_types:
# TODO(laiqu) make this fail for strings other than printable, ia5
# and visible (and possibly make more specific character sets for
# ia5/visible).
for str_ in strings:
if t not in should_fail[str_]:
s = t(serialized_value=str_, strict=True)
self.assertTrue(str_ in str(s))
else:
self.assertRaises(error.ASN1Error, t,
serialized_value=str_, strict=True)
def test_string_value_bitstring(self):
# 0x1ae
b = str(types.BitString(value="0110101110"))
self.assertTrue("1" in b)
self.assertTrue("ae" in b.lower())
def test_string_value_octetstring(self):
b = str(types.OctetString(value="\x42\xac"))
self.assertTrue("42" in b)
self.assertTrue("ac" in b.lower())
def test_constructed_human_readable(self):
dummy = DummySequence({"bool": True, "int": 3})
s = dummy.human_readable(wrap=0)
self.assertTrue("bool" in s)
self.assertTrue("true" in s.lower())
self.assertTrue("int" in s)
self.assertTrue("3" in s)
# Present since a default is set.
self.assertTrue("oct" in s)
# Not present and no default.
self.assertFalse("any" in s)
if __name__ == '__main__':
unittest.main()
|
rep/certificate-transparency
|
python/ct/crypto/asn1/types_test.py
|
Python
|
apache-2.0
| 28,141
|
[
"exciting"
] |
046f0c5e6aac00bed9da73476966ce7e0ee49a8a5849ce997df6dad175bb0996
|
import time
from org.gumtree.gumnix.sics.control.events import DynamicControllerListenerAdapter
from org.gumtree.gumnix.sics.control import IStateMonitorListener
from org.gumtree.gumnix.sics.io import SicsProxyListenerAdapter
from org.eclipse.swt.events import DisposeListener
from org.eclipse.swt.widgets import TypedListener
#from org.gumtree.util.messaging import EventHandler
import sys, os
sys.path.append(str(os.path.dirname(get_project_path('Internal'))))
from Internal import sicsext, HISTORY_KEY_WORDS
from Internal.sicsext import *
from au.gov.ansto.bragg.nbi.ui.scripting import ConsoleEventHandler
from org.eclipse.swt.widgets import Display
from java.lang import Runnable
from java.lang import System
from java.io import File
from time import strftime, localtime
import traceback
import math
sics.ready = False
__script__.title = 'Initialised'
__script__.version = ''
__data_folder__ = 'W:/data/current'
#__data_folder__ = 'Z:/testing/pelican'
__export_folder__ = 'W:/data/current/reports'
__buffer_log_file__ = __export_folder__
Dataset.__dicpath__ = get_absolute_path('/Internal/path_table')
System.setProperty('sics.data.path', __data_folder__)
try:
__dispose_all__(None)
except:
pass
fi = File(__buffer_log_file__)
if not fi.exists():
if not fi.mkdirs():
print 'Error: failed to make directory: ' + __buffer_log_file__
__history_log_file__ = __buffer_log_file__ + '/History.txt'
__buffer_log_file__ += '/LogFile.txt'
__buffer_logger__ = open(__buffer_log_file__, 'a')
__history_logger__ = open(__history_log_file__, 'a')
print 'Waiting for SICS connection'
while sics.getSicsController() == None:
time.sleep(1)
time.sleep(3)
__scan_status_node__ = sics.getSicsController().findComponentController('/commands/scan/runscan/feedback/status')
__scan_variable_node__ = sics.getSicsController().findComponentController('/commands/scan/runscan/scan_variable')
__save_count_node__ = sics.getSicsController().findComponentController('/experiment/save_count')
__file_name_node__ = sics.getSicsController().findComponentController('/experiment/file_name')
__file_status_node__ = sics.getSicsController().findComponentController('/experiment/file_status')
#saveCount = int(saveCountNode.getValue().getIntData())
__cur_status__ = str(__scan_status_node__.getValue().getStringData())
__file_name__ = str(__file_name_node__.getValue().getStringData())
class __Display_Runnable__(Runnable):
def __init__(self):
pass
def run(self):
global __UI__
global __dispose_listener__
__UI__.addDisposeListener(__dispose_listener__)
__file_to_add__ = None
__newfile_enabled__ = True
def add_dataset():
global __newfile_enabled__
if not __newfile_enabled__ :
return
if __file_to_add__ is None:
return
global __DATASOURCE__
try:
__DATASOURCE__.addDataset(__file_to_add__, True)
except:
print 'error in adding dataset: ' + __file_to_add__
class __SaveCountListener__(DynamicControllerListenerAdapter):
def __init__(self):
self.saveCount = __save_count_node__.getValue().getIntData()
pass
def valueChanged(self, controller, newValue):
global __file_to_add__
newCount = int(newValue.getStringData())
if newCount != self.saveCount:
self.saveCount = newCount
try:
axis_name.value = __scan_variable_node__.getValue().getStringData()
except:
pass
try:
checkFile = File(__file_name_node__.getValue().getStringData())
checkFile = File(__data_folder__ + "/" + checkFile.getName())
__file_to_add__ = checkFile.getAbsolutePath()
if not checkFile.exists():
print "The target file: " + __file_to_add__ + " cannot be found"
return
runnable = __Display_Runnable__()
runnable.run = add_dataset
Display.getDefault().asyncExec(runnable)
except:
print 'failed to add dataset ' + __file_to_add__
__saveCountListener__ = __SaveCountListener__()
__save_count_node__.addComponentListener(__saveCountListener__)
def update_buffer_log_folder():
global __buffer_log_file__, __export_folder__, __buffer_logger__, __history_log_file__, __history_logger__
__buffer_log_file__ = __export_folder__
fi = File(__buffer_log_file__)
if not fi.exists():
if not fi.mkdirs():
print 'Error: failed to make directory: ' + __buffer_log_file__
__history_log_file__ = __buffer_log_file__ + '/History.txt'
__buffer_log_file__ += '/LogFile.txt'
if __buffer_logger__:
__buffer_logger__.close()
__buffer_logger__ = open(__buffer_log_file__, 'a')
if __history_logger__:
__history_logger__.close()
__history_logger__ = open(__history_log_file__, 'a')
def __run_script__(dss):
pass
class __State_Monitor__(IStateMonitorListener):
def __init__(self):
pass
def stateChanged(self, state, infoMessage):
print state
print infoMessage
pass
def __dispose__():
pass
# __scan_status_node__.removeComponentListener(__statusListener__)
# __m2_node__.removeComponentListener(__m2_listener__)
# __s1_node__.removeComponentListener(__s1_listener__)
# __s2_node__.removeComponentListener(__s2_listener__)
# __a2_node__.removeComponentListener(__a2_listener__)
def __load_experiment_data__():
basename = sicsext.getBaseFilename()
fullname = str(System.getProperty('sics.data.path') + '/' + basename)
df.datasets.clear()
ds = df[fullname]
data = ds[str(data_name.value)]
axis = ds[str(axis_name.value)]
if data.size > axis.size:
data = data[:axis.size]
ds2 = Dataset(data, axes=[axis])
ds2.title = ds.id
ds2.location = fullname
Plot1.set_dataset(ds2)
Plot1.x_label = axis_name.value
Plot1.y_label = str(data_name.value)
Plot1.title = str(data_name.value) + ' vs ' + axis_name.value
Plot1.pv.getPlot().setMarkerEnabled(True)
# This function is called when pushing the Run button in the control UI.
def __std_run_script__(fns):
# Use the provided resources, please don't remove.
global Plot1
global Plot2
global Plot3
# check if a list of file names has been given
    if fns is None or len(fns) == 0:
        print 'no input datasets'
    else:
for fn in fns:
# load dataset with each file name
ds = Plot1.ds
            if ds is not None and len(ds) > 0:
if ds[0].location == fn:
return
df.datasets.clear()
ds = df[fn]
dname = str(data_name.value)
bm2 = ds[dname]
qm = ds[str(axis_name.value)]
ds2 = Dataset(bm2, axes=[qm])
ds2.title = ds.id
ds2.location = fn
Plot1.set_dataset(ds2)
Plot1.x_label = axis_name.value
Plot1.y_label = dname
Plot1.title = dname + ' vs ' + axis_name.value
Plot1.pv.getPlot().setMarkerEnabled(True)
peak_pos.value = float('NaN')
fit_curve()
def __dataset_added__(fns = None):
pass
def __std_fit_curve__():
global Plot1
ds = Plot1.ds
if ds is None or len(ds) == 0:
slog('Error: no curve to fit in Plot1.')
return
    # iterate over a copy, in case remove_dataset mutates the underlying list
    for d in list(ds):
if d.title == 'fitting':
Plot1.remove_dataset(d)
d0 = ds[0]
try:
fitting = Fitting(GAUSSIAN_FITTING)
fitting.set_histogram(d0)
res = fitting.fit()
res.var[:] = 0
res.title = 'fitting'
Plot1.add_dataset(res)
slog(str(fitting.params))
mean = fitting.mean
slog('POS_OF_PEAK=' + str(mean))
slog('FWHM=' + str(2.35482 * math.fabs(fitting.params['sigma'])))
peak_pos.value = mean
except:
slog('failed to fit with Gaussian curve.')
return
def previous_step():
load_script(previous_file)
def next_step():
load_script(next_file)
def logBook(text):
global __buffer_logger__
global __history_logger__
try:
tsmp = strftime("[%Y-%m-%d %H:%M:%S]", localtime())
__buffer_logger__.write(tsmp + ' ' + text + '\n')
__buffer_logger__.flush()
for item in HISTORY_KEY_WORDS:
if text.startswith(item):
__history_logger__.write(tsmp + ' ' + text + '\n')
__history_logger__.flush()
except:
traceback.print_exc(file=sys.stdout)
print 'failed to log'
def slog(text):
logln(text + '\n')
logBook(text)
class BatchStatusListener(SicsProxyListenerAdapter):
def __init__(self):
pass
def proxyConnected(self):
pass
def proxyConnectionReqested(self):
pass
def proxyDisconnected(self):
pass
def messageReceived(self, message, channelId):
if str(channelId) == 'rawBatch':
logBook(message)
def messageSent(self, message, channelId):
pass
try:
sics.SicsCore.getSicsManager().proxy().removeProxyListener(__batch_status_listener__)
except:
pass
__batch_status_listener__ = BatchStatusListener()
sics.SicsCore.getSicsManager().proxy().addProxyListener(__batch_status_listener__)
class SICSConsoleEventHandler(ConsoleEventHandler):
def __init__(self, topic):
ConsoleEventHandler.__init__(self, topic)
def handleEvent(self, event):
data = str(event.getProperty('sentMessage'))
logBook(data)
__sics_console_event_handler_sent__ = SICSConsoleEventHandler('org/gumtree/ui/terminal/telnet/sent')
__sics_console_event_handler_received__ = SICSConsoleEventHandler('org/gumtree/ui/terminal/telnet/received')
__sics_console_event_handler_sent__.activate()
__sics_console_event_handler_received__.activate()
class __Dispose_Listener__(DisposeListener):
def __init__(self):
pass
def widgetDisposed(self, event):
pass
def __dispose_all__(event):
global __batch_status_listener__
global __sics_console_event_handler_sent__
global __sics_console_event_handler_received__
global __statusListener__
global __save_count_node__
global __saveCountListener__
sics.SicsCore.getSicsManager().proxy().removeProxyListener(__batch_status_listener__)
__sics_console_event_handler_sent__.deactivate()
__sics_console_event_handler_received__.deactivate()
__save_count_node__.removeComponentListener(__saveCountListener__)
if __buffer_logger__:
__buffer_logger__.close()
if __history_logger__:
__history_logger__.close()
__dispose_listener__ = __Dispose_Listener__()
__dispose_listener__.widgetDisposed = __dispose_all__
__display_run__ = __Display_Runnable__()
Display.getDefault().asyncExec(__display_run__)
sics.ready = True
load_script('KKB_Scan_v3.py')
|
Gumtree/Kookaburra_scripts
|
Internal/Initialise_scan.py
|
Python
|
epl-1.0
| 11,345
|
[
"Gaussian"
] |
b3d8edd6695991a242315810b15d6495a79478879ead764e7ce6eef2b47515ae
|
#!/usr/bin/env python3
from olctools.accessoryFunctions.accessoryFunctions import MetadataObject
from genemethods.geneseekr.geneseekr import GeneSeekr
from genemethods.geneseekr.blast import BLAST
import multiprocessing
from glob import glob
from time import time
import os
test_path = os.path.abspath(os.path.dirname(__file__))
__author__ = 'adamkoziol'
def variables():
v = MetadataObject()
datapath = os.path.join(test_path, 'testdata')
v.sequencepath = os.path.join(datapath, 'sequences')
v.targetpath = os.path.join(datapath, 'databases', 'card_aa')
v.reportpath = os.path.join(datapath, 'reports')
v.cutoff = 70
v.evalue = '1E-05'
v.align = False
v.unique = False
v.resfinder = False
v.virulencefinder = False
v.numthreads = multiprocessing.cpu_count()
v.start = time()
return v
def method_init(analysistype, program, align, unique):
global var
var = variables()
var.analysistype = analysistype
var.program = program
var.align = align
var.unique = unique
method = BLAST(var)
return method
blastx_method = method_init(analysistype='geneseekr',
program='blastx',
align=True,
unique=True)
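# NOTE: the tests below are order-dependent. pytest runs them top to bottom,
# and later tests rely on module-level globals (geneseekr, blastx_report,
# geneseekr_csv, ...) that earlier tests populate.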
def test_parser():
assert os.path.basename(blastx_method.targets[0]) == 'amr.tfa'
def test_combined_files():
assert os.path.isfile(blastx_method.combinedtargets)
def test_strains():
assert os.path.isfile(blastx_method.strains[0])
def test_strain():
assert os.path.basename(blastx_method.strains[0]) == '2018-SEQ-0552.fasta'
def test_makeblastdb():
global geneseekr
geneseekr = GeneSeekr()
geneseekr.makeblastdb(fasta=blastx_method.combinedtargets,
program=blastx_method.program)
assert os.path.isfile(os.path.join(var.targetpath, 'combinedtargets.psq'))
def test_variable_populate():
global targetfolders
global targetfiles
global records
targetfolders, targetfiles, records = \
geneseekr.target_folders(metadata=blastx_method.metadata,
analysistype=blastx_method.analysistype)
def test_targetfolders():
assert os.path.basename(list(targetfolders)[0]) == 'card_aa'
def test_targetfiles():
assert targetfiles[0] == blastx_method.combinedtargets
def test_records():
assert records[targetfiles[0]]['yojI']
def test_blastx():
global blastx_report
blastx_method.metadata = geneseekr.run_blast(metadata=blastx_method.metadata,
analysistype=blastx_method.analysistype,
program=blastx_method.program,
outfmt=blastx_method.outfmt,
evalue=blastx_method.evalue,
num_threads=blastx_method.cpus)
blastx_report = os.path.join(var.reportpath, '2018-SEQ-0552_blastx_geneseekr.tsv')
assert os.path.isfile(blastx_report)
def test_enhance_report_parsing():
geneseekr.parseable_blast_outputs(metadata=blastx_method.metadata,
analysistype=blastx_method.analysistype,
fieldnames=blastx_method.fieldnames,
program=blastx_method.program)
header = open(blastx_report).readline()
assert header.split('\t')[0] == 'query_id'
def test_blastx_results():
with open(blastx_report) as blast_results:
next(blast_results)
data = blast_results.readline()
results = data.split('\t')
assert int(results[2]) >= 50
def test_blast_parse():
blastx_method.metadata = geneseekr.unique_parse_blast(metadata=blastx_method.metadata,
analysistype=blastx_method.analysistype,
fieldnames=blastx_method.fieldnames,
cutoff=blastx_method.cutoff,
program=blastx_method.program)
for sample in blastx_method.metadata:
assert sample.geneseekr.queryranges['Contig_54_76.3617'] == [[29664, 31283], [11054, 11845]]
def test_filter():
blastx_method.metadata = geneseekr.filter_unique(metadata=blastx_method.metadata,
analysistype=blastx_method.analysistype)
for sample in blastx_method.metadata:
assert sample.geneseekr.blastlist[0]['percentidentity'] >= 70
def test_dict_create():
blastx_method.metadata = geneseekr.dict_initialise(metadata=blastx_method.metadata,
analysistype=blastx_method.analysistype)
for sample in blastx_method.metadata:
assert type(sample.geneseekr.protseq) is dict
def test_target_folders():
global targetfolders, targetfiles, records
targetfolders, targetfiles, records = \
geneseekr.target_folders(metadata=blastx_method.metadata,
analysistype=blastx_method.analysistype)
assert records[targetfiles[0]]['yojI']
def test_report_creation():
blastx_method.metadata = geneseekr.reporter(metadata=blastx_method.metadata,
analysistype=blastx_method.analysistype,
reportpath=blastx_method.reportpath,
align=blastx_method.align,
records=records,
program=blastx_method.program,
cutoff=blastx_method.cutoff)
def test_report_csv():
global geneseekr_csv
geneseekr_csv = os.path.join(blastx_method.reportpath, 'geneseekr_blastx.csv')
assert os.path.isfile(geneseekr_csv)
def test_detailed_report_csv():
global geneseekr_detailed_csv
geneseekr_detailed_csv = os.path.join(blastx_method.reportpath, 'geneseekr_blastx_detailed.csv')
assert os.path.isfile(geneseekr_detailed_csv)
def test_report_xls():
global geneseekr_xls
geneseekr_xls = os.path.join(blastx_method.reportpath, 'geneseekr_blastx.xlsx')
assert os.path.isfile(geneseekr_xls)
def test_parse_results():
for sample in blastx_method.metadata:
assert sample.geneseekr.blastresults['OXA_12'] == 91.86
def test_aaseq():
for sample in blastx_method.metadata:
assert sample.geneseekr.blastlist[0]['query_sequence'][:5] == 'MELLS' or \
sample.geneseekr.blastlist[0]['query_sequence'][:5] == 'MSRIL'
def test_fasta_create():
global fasta_file
geneseekr.export_fasta(metadata=blastx_method.metadata,
analysistype=blastx_method.analysistype,
reportpath=blastx_method.reportpath,
cutoff=blastx_method.cutoff,
program=blastx_method.program)
fasta_file = os.path.join(var.reportpath, '2018-SEQ-0552_geneseekr.fasta')
assert os.path.isfile(fasta_file)
header = open(fasta_file, 'r').readline().rstrip()
assert header == '>2018-SEQ-0552_OXA_12'
def test_combined_targets_clean():
os.remove(blastx_method.combinedtargets)
def test_makeblastdb_clean():
databasefiles = glob(os.path.join(var.targetpath, 'combinedtargets.p*'))
for dbfile in databasefiles:
os.remove(dbfile)
def test_remove_blastx_report():
os.remove(blastx_report)
def test_remove_geneseekr_csv():
os.remove(geneseekr_csv)
def test_remove_fasta_file():
os.remove(fasta_file)
def test_removed_detailed_geneseekr_csv():
os.remove(geneseekr_detailed_csv)
def test_remove_geneseekr_xls():
os.remove(geneseekr_xls)
def test_remove_report_path():
os.rmdir(blastx_method.reportpath)
|
OLC-Bioinformatics/GeneSeekr
|
tests/test_blastx.py
|
Python
|
mit
| 8,006
|
[
"BLAST"
] |
71f9a5fab138a54afba9aa10516ffbd9f015c024be25ea028057ea4b456d1ddf
|
from __future__ import print_function, division
import math
import numpy as np
from sklearn.cluster import MiniBatchKMeans
class hard_EM_GMM(object):
""" A class for performing hard-EM clustering into a Gaussian
mixture model.
Hard-EM clustering is like 'normal' EM clustering, but each
object can only 'belong' to a single cluster, i.e. instead of
membership probabilities being stored for each object, we
    instead just record the cluster with the maximum membership
probability.
Attributes
----------
Ndata : int
The number of data points
Parameters
----------
X : ndarray(Ndata, Ndim)
The observed data
Nclusters : int
The number of clusters/components
"""
def __init__(self, X, Nclusters):
self.X = X
fallback_sigma = np.cov(self.X, rowvar=False)
self.Ndata = X.shape[0]
self.Ndim = X.shape[1]
self.Nclusters = Nclusters
self.clusters = []
for n in range(Nclusters):
self.clusters.append(EMGMM_cluster(self.Ndim, self.Ndata,
fallback_sigma))
        # builtin int: the np.int alias was removed in NumPy 1.24
        self.assignments = np.zeros(self.Ndata, dtype=int)
self.__mus = None
self.__sigmas = None
self.__weights = None
def random_seed(self):
""" random_seed()
Start the clusters by 'seeding' them with an individual
datum each. Then filter all other data onto their
nearest cluster.
Parameters
----------
None
Returns
-------
None
"""
seed_inds = np.random.choice(np.arange(self.Ndata), self.Nclusters,
replace=False)
for i, index in enumerate(seed_inds):
self.assignments[index] = i
# Set data for each cluster and then params
self.clusters[i].add_datum(self.X[index])
self.clusters[i].set_params()
# Assign all data to clusters
self.assign_data()
def kmeans_init(self):
""" kmeans_init()
Use a k-means clustering to provide the initial
assignments for the EM.
Parameters
----------
None
Returns
-------
None
"""
mbk = MiniBatchKMeans(init='k-means++', n_clusters=self.Nclusters,
batch_size=50)
mbk.fit(self.X)
self.assignments = mbk.labels_.copy()
for i in range(self.Ndata):
self.clusters[self.assignments[i]].add_datum(self.X[i])
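        # Cluster parameters are not set here; fit() calls set_params()
        # before the first E-step, so these k-means assignments seed the M-step.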
def assign_data(self):
""" assign_data()
assign the data points onto the best fitting
cluster for each (the E-step)
Parameters
----------
None
Returns
-------
None
"""
for cluster in self.clusters:
cluster.clear_data()
max_logprob = np.zeros(self.Ndata) - np.inf
        max_j = np.zeros(self.Ndata, dtype=int) - 1
for j in range(self.Nclusters):
logprob = self.clusters[j].logprob(self.X)
mask = logprob > max_logprob
max_logprob[mask] = logprob[mask]
max_j[mask] = j
self.assignments = max_j
for i in range(self.Ndata):
self.clusters[max_j[i]].add_datum(self.X[i])
def set_params(self):
""" set_params()
            Set the parameters of each cluster to their maximum
likelihood values (the M-step)
Parameters
----------
None
Returns
-------
None
"""
for i in range(self.Nclusters):
self.clusters[i].set_params()
def fit(self, Nsteps):
""" fit(Nsteps)
            Fit the GMM to the data using Nsteps iterations of
hard EM
Parameters
----------
Nsteps : int
The number of steps of EM to perform
Returns
-------
None
"""
for i in range(Nsteps):
self.set_params()
self.assign_data()
@property
def clustered_data(self):
""" mus
Return the clustered data
Returns
-------
data : list (ndarray(..., Ndim))
A list of ndarrays, each array contains the data assigned
to a single cluster
"""
data = []
for i in range(self.Nclusters):
mask = self.assignments == i
if np.sum(mask) > 0:
data.append(self.X[mask])
return data
@property
def mus(self):
""" mus
            Return the means of the clusters
Returns
-------
mus : ndarray(Nclusters, Ndim)
The means of the clusters, the first index iterates
over the clusters
"""
self.__mus = np.zeros((self.Nclusters, self.Ndim))
for i in range(self.Nclusters):
self.__mus[i] = self.clusters[i].mu
return self.__mus
@property
def sigmas(self):
""" sigmas
            The covariance matrices of the clusters
Returns
-------
sigmas : ndarray(Nclusters, Ndim, Ndim)
The covariance matrices of the clusters, the first
index iterates over the clusters
"""
self.__sigmas = np.zeros((self.Nclusters, self.Ndim, self.Ndim))
for i in range(self.Nclusters):
self.__sigmas[i] = self.clusters[i].sigma
return self.__sigmas
@property
def weights(self):
""" weights
Return the weights of the clusters
Returns
-------
        weights : ndarray(Nclusters)
The weights of the clusters, the index iterates
over the clusters
"""
self.__weights = np.zeros(self.Nclusters)
for i in range(self.Nclusters):
self.__weights[i] = self.clusters[i].weight
return self.__weights
@classmethod
def init_fit(cls, X, Nclusters, Nsteps, init_method='kmeans'):
""" init_fit(X, Nclusters, Nsteps)
Factory method to init and then perform hard-EM
fitting on data
Parameters
----------
X : ndarray(Ndata, Ndim)
The observed data
Nclusters : int
The number of clusters/components
Nsteps : int
The number of steps of EM to perform
init_method : str or function
The method used to provide the initial assignment of
data to clusters. Can be 'kmeans', 'random' or a user
supplied function
Returns
-------
        EM_obj : hard_EM_GMM
            A hard_EM_GMM object on which Nsteps iterations of
hard-EM have been performed
"""
EM_obj = cls(X, Nclusters)
if init_method == 'kmeans':
EM_obj.kmeans_init()
elif init_method == 'random':
EM_obj.random_seed()
else:
EM_obj = init_method(X, Nclusters)
EM_obj.fit(Nsteps)
return EM_obj
class EMGMM_cluster(object):
""" A class that describes an individual cluster in a GMM
scheme that is found/refined by EM
Attributes
----------
mu : ndarray(Ndim)
The mean vector of the cluster
sigma : ndarray(Ndim, Ndim)
The covariance of each cluster
weight : float
The weight of the cluster
Parameters
----------
Ndim : int
        The number of dimensions of the space in which the cluster is
defined
Ndata : int
The total number of data points to be clustered
    fallback_sigma : ndarray(Ndim, Ndim)
        a covariance matrix to fall back on if the cluster contains
        fewer than two points
"""
def __init__(self, Ndim, Ndata, fallback_sigma):
self.Ndim = Ndim
self.Ndata = Ndata
self.fallback_sigma = fallback_sigma
self.mu = np.zeros(self.Ndim)
self.sigma = np.zeros((self.Ndim, self.Ndim))
self.weight = 0.
self.data = []
def clear_data(self):
""" clear_data()
            Clear the list of data assigned to this cluster
Parameters
----------
None
Returns
-------
None
"""
self.data = []
def add_datum(self, datum):
""" add_datum(datum, datum_uncert)
Add a datum to the cluster
"""
self.data.append(datum)
def set_params(self):
""" set_params(X, X_uncert)
Set the parameters of the cluster given the
noisy data assigned to it
Parameters
----------
None
Returns
-------
None
"""
if len(self.data) > 0:
self.mu = np.mean(self.data, axis=0)
else:
self.mu = np.zeros(self.Ndim)
if len(self.data) > 1:
self.sigma = np.cov(self.data, rowvar=False)
else:
self.sigma = self.fallback_sigma
self.weight = len(self.data)/self.Ndata
def logprob(self, x):
""" prob(x)
Find the (log)probability of a datum x given it is
a member of this cluster
Parameters
----------
        x : ndarray(..., Ndim)
            The positions of the data
        Returns
        -------
        logprob : ndarray
            The log probability of each datum assuming it is a
            member of this cluster
"""
q = np.linalg.solve(self.sigma, (x-self.mu).T).T
if self.weight > 0:
log_prob = (math.log(self.weight)
- np.linalg.slogdet(self.sigma)[1]/2
- np.sum((x-self.mu) * q, axis=1)/2)
else:
log_prob = np.zeros(x.shape[0]) - np.inf
return log_prob
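# --- Usage sketch (illustration only, not part of the original module) ---
# A minimal demo of hard_EM_GMM.init_fit on synthetic data; the 2-D blobs and
# all parameter choices below are assumptions made purely for this example.
if __name__ == '__main__':
    np.random.seed(42)
    # two well-separated 2-D Gaussian blobs of 100 points each
    blob_a = np.random.randn(100, 2) + np.array([5.0, 5.0])
    blob_b = np.random.randn(100, 2) - np.array([5.0, 5.0])
    X = np.vstack([blob_a, blob_b])
    em = hard_EM_GMM.init_fit(X, Nclusters=2, Nsteps=20, init_method='kmeans')
    print('cluster means:\n', em.mus)
    print('cluster weights:', em.weights)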
|
stuartsale/pyBHC
|
pyBHC/hardEM.py
|
Python
|
bsd-3-clause
| 10,575
|
[
"Gaussian"
] |
597824fab2d7482707e530718f567ffee888f8f064a56209a7e003a913d9f0af
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
"""
Defines an abstract base class contract for Transformation object.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Sep 23, 2011"
import abc
from monty.json import MSONable
import six
class AbstractTransformation(six.with_metaclass(abc.ABCMeta, MSONable)):
"""
Abstract transformation class.
"""
@abc.abstractmethod
def apply_transformation(self, structure):
"""
Applies the transformation to a structure. Depending on whether a
transformation is one-to-many, there may be an option to return a
ranked list of structures.
Args:
structure:
input structure
            return_ranked_list:
                Boolean stating whether or not multiple structures are
                returned. If return_ranked_list is a number, that number of
                structures is returned. This keyword is expected only on
                one-to-many transformations (see `is_one_to_many`).
Returns:
depending on returned_ranked list, either a transformed structure
or
a list of dictionaries, where each dictionary is of the form
            {'structure': ..., 'other_arguments': ...}
the key 'transformation' is reserved for the transformation that
was actually applied to the structure.
This transformation is parsed by the alchemy classes for generating
a more specific transformation history. Any other information will
be stored in the transformation_parameters dictionary in the
transmuted structure class.
"""
return
@abc.abstractproperty
def inverse(self):
"""
Returns the inverse transformation if available.
Otherwise, should return None.
"""
return
@abc.abstractproperty
def is_one_to_many(self):
"""
Determines if a Transformation is a one-to-many transformation. If a
Transformation is a one-to-many transformation, the
apply_transformation method should have a keyword arg
"return_ranked_list" which allows for the transformed structures to be
returned as a ranked list.
"""
return False
@property
def use_multiprocessing(self):
"""
Indicates whether the transformation can be applied by a
subprocessing pool. This should be overridden to return True for
transformations that the transmuter can parallelize.
"""
return False
@classmethod
def from_dict(cls, d):
return cls(**d["init_args"])
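# --- Example (illustration only, not part of the upstream module) ---
# A minimal sketch of a concrete subclass; `IdentityTransformation` is a
# hypothetical name introduced here purely to show the contract.
class IdentityTransformation(AbstractTransformation):
    """Trivial transformation that returns the input structure unchanged."""
    def apply_transformation(self, structure):
        # One-to-one: always returns a single structure.
        return structure
    @property
    def inverse(self):
        # The identity transformation is its own inverse.
        return IdentityTransformation()
    @property
    def is_one_to_many(self):
        return False
    def as_dict(self):
        # Matches the "init_args" layout consumed by from_dict above.
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "init_args": {}}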
|
migueldiascosta/pymatgen
|
pymatgen/transformations/transformation_abc.py
|
Python
|
mit
| 2,801
|
[
"pymatgen"
] |
6f8879435dc80d16647d90108624f36bb18ab8786c56bb43157df9c0ebf1ef11
|
#!/usr/bin/env python3
'''Manual DDNS testing'''
from dnstest.utils import *
from dnstest.test import Test
import random
t = Test()
def check_soa(master, prev_soa):
soa_resp = master.dig("ddns.", "SOA")
compare(prev_soa, soa_resp.resp.answer, "SOA changed when it shouldn't")
def verify(master, zone, dnssec):
if not dnssec:
return
master.flush(wait=True)
master.zone_verify(zone)
def do_normal_tests(master, zone, dnssec=False):
# add node
check_log("Node addition")
up = master.update(zone)
up.add("rrtest.ddns.", 3600, "A", "1.2.3.4")
up.send("NOERROR")
resp = master.dig("rrtest.ddns.", "A")
resp.check(rcode="NOERROR", rdata="1.2.3.4")
verify(master, zone, dnssec)
# add record to existing rrset
check_log("Node update - new record")
up = master.update(zone)
up.add("rrtest.ddns.", 3600, "A", "1.2.3.5")
up.send("NOERROR")
resp = master.dig("rrtest.ddns.", "A")
resp.check(rcode="NOERROR", rdata="1.2.3.4")
resp.check(rcode="NOERROR", rdata="1.2.3.5")
verify(master, zone, dnssec)
# add records to existing rrset
check_log("Node update - new records")
up = master.update(zone)
up.add("rrtest.ddns.", 3600, "A", "1.2.3.7")
up.add("rrtest.ddns.", 3600, "A", "1.2.3.0")
up.send("NOERROR")
resp = master.dig("rrtest.ddns.", "A")
resp.check(rcode="NOERROR", rdata="1.2.3.0")
resp.check(rcode="NOERROR", rdata="1.2.3.4")
resp.check(rcode="NOERROR", rdata="1.2.3.5")
resp.check(rcode="NOERROR", rdata="1.2.3.7")
verify(master, zone, dnssec)
# add rrset to existing node
check_log("Node update - new rrset")
up = master.update(zone)
up.add("rrtest.ddns.", 3600, "TXT", "abcedf")
up.send("NOERROR")
resp = master.dig("rrtest.ddns.", "TXT")
resp.check(rcode="NOERROR", rdata="abcedf")
resp = master.dig("rrtest.ddns.", "A")
resp.check(rcode="NOERROR", rdata="1.2.3.0")
resp.check(rcode="NOERROR", rdata="1.2.3.4")
resp.check(rcode="NOERROR", rdata="1.2.3.5")
resp.check(rcode="NOERROR", rdata="1.2.3.7")
verify(master, zone, dnssec)
# remove rrset
check_log("Node update - rrset removal")
up = master.update(zone)
up.delete("rrtest.ddns.", "TXT")
up.send("NOERROR")
resp = master.dig("rrtest.ddns.", "TXT")
resp.check(rcode="NOERROR")
compare(resp.count(section="answer"), 0, "TXT rrset removal")
resp = master.dig("rrtest.ddns.", "A")
resp.check(rcode="NOERROR", rdata="1.2.3.0")
resp.check(rcode="NOERROR", rdata="1.2.3.4")
resp.check(rcode="NOERROR", rdata="1.2.3.5")
resp.check(rcode="NOERROR", rdata="1.2.3.7")
verify(master, zone, dnssec)
# remove record
check_log("Node update - record removal")
up = master.update(zone)
up.delete("rrtest.ddns.", "A", "1.2.3.5")
up.send("NOERROR")
resp = master.dig("rrtest.ddns.", "A")
resp.check(rcode="NOERROR", nordata="1.2.3.5")
resp.check(rcode="NOERROR", rdata="1.2.3.0")
resp.check(rcode="NOERROR", rdata="1.2.3.4")
resp.check(rcode="NOERROR", rdata="1.2.3.7")
verify(master, zone, dnssec)
# remove records
check_log("Node update - records removal")
up = master.update(zone)
up.delete("rrtest.ddns.", "A", "1.2.3.0")
up.delete("rrtest.ddns.", "A", "1.2.3.7")
up.send("NOERROR")
resp = master.dig("rrtest.ddns.", "A")
resp.check(rcode="NOERROR", nordata="1.2.3.0")
resp.check(rcode="NOERROR", nordata="1.2.3.7")
resp.check(rcode="NOERROR", rdata="1.2.3.4")
verify(master, zone, dnssec)
# replace with different TTL
check_log("Replace with other TTL")
up = master.update(zone)
up.delete("rrtest.ddns.", "ANY")
up.add("rrtest.ddns.", 7, "A", "1.2.3.8")
up.send("NOERROR")
resp = master.dig("rrtest.ddns.", "A")
resp.check(rcode="NOERROR", rdata="1.2.3.8")
verify(master, zone, dnssec)
# remove node
check_log("Node removal")
up = master.update(zone)
up.delete("rrtest.ddns.", "ANY")
up.send("NOERROR")
resp = master.dig("rrtest.ddns.", "A")
resp.check(rcode="NXDOMAIN")
verify(master, zone, dnssec)
# add delegation
check_log("Delegation addition")
up = master.update(zone)
up.add("deleg.ddns.", 3600, "NS", "a.deleg.ddns.")
up.add("a.deleg.ddns.", 3600, "A", "1.2.3.4")
up.send("NOERROR")
resp = master.dig("deleg.ddns.", "NS")
resp.check_record(section="authority", rtype="NS", rdata="a.deleg.ddns.")
resp.check_record(section="additional", rtype="A", rdata="1.2.3.4")
verify(master, zone, dnssec)
# add delegation w/o glue
check_log("Delegation w/o glue")
up = master.update(zone)
up.add("deleglue.ddns.", 3600, "NS", "a.deleglue.ddns.")
up.send("NOERROR")
resp = master.dig("deleglue.ddns.", "NS")
resp.check_record(section="authority", rtype="NS", rdata="a.deleglue.ddns.")
resp.check_no_rr(section="additional", rname="a.deleglue.ddns.", rtype="A")
verify(master, zone, dnssec)
# add glue to delegation
check_log("Glue for existing delegation")
up = master.update(zone)
up.add("a.deleglue.ddns.", 3600, "A", "10.20.30.40")
up.send("NOERROR")
resp = master.dig("deleglue.ddns.", "NS")
resp.check_record(section="authority", rtype="NS", rdata="a.deleglue.ddns.")
resp.check_record(section="additional", rtype="A", rdata="10.20.30.40")
verify(master, zone, dnssec)
# remove delegation, keep glue
check_log("Remove delegation, keep glue")
up = master.update(zone)
up.delete("deleglue.ddns.", "NS")
up.send("NOERROR")
resp = master.dig("deleglue.ddns.", "NS")
resp.check(rcode="NOERROR")
resp.check_record(section="authority", rtype="SOA")
resp.check_no_rr(section="additional", rname="a.deleglue.ddns.", rtype="A")
resp = master.dig("a.deleglue.ddns.", "A")
resp.check(rcode="NOERROR")
resp.check_record(section="answer", rtype="A", rdata="10.20.30.40")
verify(master, zone, dnssec)
# add delegation to existing glue
check_log("Add delegation to existing glue")
up = master.update(zone)
up.add("deleglue.ddns.", 3600, "NS", "a.deleglue.ddns.")
up.send("NOERROR")
resp = master.dig("deleglue.ddns.", "NS")
resp.check_record(section="authority", rtype="NS", rdata="a.deleglue.ddns.")
resp.check_record(section="additional", rtype="A", rdata="10.20.30.40")
verify(master, zone, dnssec)
# make a delegation from NONAUTH node
check_log("NONAUTH to DELEG")
up = master.update(zone)
up.add("a.deleglue.ddns.", 3600, "NS", "a.deleglue.ddns.")
up.delete("deleglue.ddns.", "NS", "a.deleglue.ddns.")
up.send("NOERROR")
resp = master.dig("x.a.deleglue.ddns.", "A")
resp.check(rcode="NOERROR")
resp.check_record(section="authority", rtype="NS", rdata="a.deleglue.ddns.")
resp.check_record(section="additional", rtype="A", rdata="10.20.30.40")
verify(master, zone, dnssec)
# reverse of previous
check_log("DELEG to NONAUTH")
up = master.update(zone)
up.delete("a.deleglue.ddns.", "NS", "a.deleglue.ddns.")
up.add("deleglue.ddns.", 3600, "NS", "a.deleglue.ddns.")
up.send("NOERROR")
resp = master.dig("deleglue.ddns.", "NS")
resp.check(rcode="NOERROR")
resp.check_record(section="authority", rtype="NS", rdata="a.deleglue.ddns.")
resp.check_record(section="additional", rtype="A", rdata="10.20.30.40")
verify(master, zone, dnssec)
# add SVCB w/o glue
check_log("glueless SVCB")
up = master.update(zone)
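    # Fall back to the generic TYPE64 wire format when the local dnspython
    # does not yet understand the SVCB presentation format.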
try:
up.add("svcb.ddns.", 3600, "SVCB", "0 target.svcb.ddns.")
except:
up.add("svcb.ddns.", 3600, "TYPE64", "\# 20 00000674617267657404737663620464646E7300")
up.send("NOERROR")
resp = master.dig("svcb.ddns.", "TYPE64", dnssec=dnssec)
resp.check(rcode="NOERROR")
resp.check_count(0, rtype="AAAA", section="additional")
# add glue to SVCB
check_log("Add glue to SVCB")
up = master.update(zone)
up.add("target.svcb.ddns.", 3600, "AAAA", "1::2")
try:
up.add("target.svcb.ddns.", 3600, "SVCB", "2 . alpn=h2")
except:
up.add("target.svcb.ddns.", 3600, "TYPE64", "\# 10 00020000010003026832")
up.send("NOERROR")
resp = master.dig("svcb.ddns.", "TYPE64", dnssec=dnssec)
resp.check(rcode="NOERROR")
resp.check_count(1, rtype="AAAA", section="additional")
resp.check_count(1, rtype="TYPE64", section="additional")
if dnssec:
resp.check_count(3, rtype="RRSIG", section="additional")
# remove glue from SVCB
check_log("Remove glue from SVCB")
up = master.update(zone)
up.delete("target.svcb.ddns.", "AAAA")
up.delete("target.svcb.ddns.", "TYPE64")
up.send("NOERROR")
resp = master.dig("svcb.ddns.", "TYPE64", dnssec=dnssec)
resp.check(rcode="NOERROR")
resp.check_count(0, rtype="AAAA", section="additional")
resp.check_count(0, rtype="RRSIG", section="additional")
# now remove SVCB in order to make ldns-verify work
up = master.update(zone)
up.delete("svcb.ddns.", "TYPE64")
up.send()
# add CNAME to node with A records, should be ignored
check_log("Add CNAME to A node")
up = master.update(zone)
up.add("dns1.ddns.", "3600", "CNAME", "ignore.me.ddns.")
up.send("NOERROR")
resp = master.dig("dns1.ddns.", "CNAME")
compare(resp.count(), 0, "Added CNAME when it shouldn't")
verify(master, zone, dnssec)
# create new node by adding RR + try to add CNAME
# the update should ignore the CNAME
check_log("Add new node + add CNAME to it")
up = master.update(zone)
up.add("rrtest2.ddns.", "3600", "MX", "10 something.ddns.")
up.add("rrtest2.ddns.", "3600", "CNAME", "ignore.me.ddns.")
up.send("NOERROR")
resp = master.dig("rrtest2.ddns.", "ANY")
resp.check(rcode="NOERROR")
resp.check_record(rtype="MX", rdata="10 something.ddns.")
resp = master.dig("rrtest2.ddns.", "CNAME")
compare(resp.count(section="answer"), 0, "Added CNAME when it shouldn't")
verify(master, zone, dnssec)
# add A to CNAME node, should be ignored
check_log("Add A to CNAME node")
up = master.update(zone)
up.add("cname.ddns.", "3600", "A", "1.2.3.4")
up.send("NOERROR")
resp = master.dig("cname.ddns.", "ANY")
resp.check(rcode="NOERROR")
resp.check_record(rtype="A", nordata="1.2.3.4")
resp.check_record(rtype="CNAME", rdata="mail.ddns.")
verify(master, zone, dnssec)
# add new node with CNAME + add A to the same node, A should be ignored
check_log("Add new CNAME node + add A to it")
up = master.update(zone)
up.add("rrtest3.ddns.", "3600", "CNAME", "dont.ignore.me.ddns.")
up.add("rrtest3.ddns.", "3600", "TXT", "ignore")
up.send("NOERROR")
resp = master.dig("rrtest3.ddns.", "ANY")
resp.check(rcode="NOERROR")
resp.check_record(rtype="TXT", nordata="ignore")
resp.check_record(rtype="CNAME", rdata="dont.ignore.me.ddns.")
verify(master, zone, dnssec)
# add CNAME to CNAME node, should be replaced
check_log("CNAME to CNAME addition")
up = master.update(zone)
up.add("cname.ddns.", 3600, "CNAME", "new-cname.ddns.")
up.send("NOERROR")
resp = master.dig("cname.ddns.", "CNAME")
resp.check(rcode="NOERROR", rdata="new-cname.ddns.")
resp.check(rcode="NOERROR", nordata="mail.ddns.")
verify(master, zone, dnssec)
# add new CNAME node + another CNAME to it; last CNAME should stay in zone
check_log("Add two CNAMEs to a new node")
up = master.update(zone)
up.add("rrtest4.ddns.", "3600", "CNAME", "ignore.me.ddns.")
up.add("rrtest4.ddns.", "3600", "CNAME", "dont.ignore.me.ddns.")
up.send("NOERROR")
resp = master.dig("rrtest3.ddns.", "ANY")
resp.check(rcode="NOERROR")
resp.check_record(rtype="CNAME", rdata="dont.ignore.me.ddns.")
resp.check_record(rtype="CNAME", nordata="ignore.me.ddns")
verify(master, zone, dnssec)
# add SOA with higher than current serial, serial starting from 2010111213
check_log("Newer SOA addition")
up = master.update(zone)
up.add("ddns.", 3600, "SOA",
"dns1.ddns. hostmaster.ddns. 2011111213 10800 3600 1209600 7200")
up.send("NOERROR")
resp = master.dig("ddns.", "SOA")
resp.check(rcode="NOERROR",
rdata="dns1.ddns. hostmaster.ddns. 2011111213 10800 3600 1209600 7200")
verify(master, zone, dnssec)
# add SOA with higher serial + remove it in the same UPDATE
# should result in replacing the SOA (i.e. the remove should be ignored)
check_log("Newer SOA addition + removal")
up = master.update(zone)
up.add("ddns.", 3600, "SOA",
"dns1.ddns. hostmaster.ddns. 2012111213 10800 3600 1209600 7200")
up.delete("ddns.", "SOA",
"dns1.ddns. hostmaster.ddns. 2012111213 10800 3600 1209600 7200")
up.send("NOERROR")
resp = master.dig("ddns.", "SOA")
resp.check(rcode="NOERROR",
rdata="dns1.ddns. hostmaster.ddns. 2012111213 10800 3600 1209600 7200")
verify(master, zone, dnssec)
# add SOA with higher serial + remove all SOA in the same UPDATE
# the removal should be ignored, only replacing the SOA
check_log("Newer SOA addition + removal of all SOA")
up = master.update(zone)
up.add("ddns.", 3600, "SOA",
"dns1.ddns. hostmaster.ddns. 2013111213 10800 3600 1209600 7200")
up.delete("ddns.", "SOA")
up.send("NOERROR")
resp = master.dig("ddns.", "SOA")
resp.check(rcode="NOERROR")
resp.check_record(rtype="SOA", rdata="dns1.ddns. hostmaster.ddns. 2013111213 10800 3600 1209600 7200")
verify(master, zone, dnssec)
# add SOA with lower serial, should be ignored
check_log("Older SOA addition")
up = master.update(zone)
up.add("ddns.", 3600, "SOA",
"dns1.ddns. hostmaster.ddns. 2010111213 10800 3600 1209600 7200")
up.send("NOERROR")
resp = master.dig("ddns.", "SOA")
resp.check(rcode="NOERROR",
rdata="dns1.ddns. hostmaster.ddns. 2013111213 10800 3600 1209600 7200")
verify(master, zone, dnssec)
# add SOA with different TTL
check_log("SOA different TTL")
up = master.update(zone)
up.add("ddns.", 1800, "SOA",
"dns1.ddns. hostmaster.ddns. 2014111213 10800 1800 1209600 7200")
up.send("NOERROR")
resp = master.dig("ddns.", "SOA")
resp.check(rcode="NOERROR",
rdata="dns1.ddns. hostmaster.ddns. 2014111213 10800 1800 1209600 7200")
verify(master, zone, dnssec)
# add and remove the same record
check_log("Add and remove same record")
up = master.update(zone)
up.add("testaddrem.ddns.", 3600, "TXT", "record")
up.delete("testaddrem.ddns.", "TXT", "record")
up.send("NOERROR")
resp = master.dig("testaddrem.ddns.", "TXT")
resp.check(rcode="NXDOMAIN")
verify(master, zone, dnssec)
# add and remove the same record, delete whole RRSet
check_log("Add and remove same record, delete whole")
up = master.update(zone)
up.add("testaddrem.ddns.", 3600, "TXT", "record")
up.delete("testaddrem.ddns.", "TXT")
up.send("NOERROR")
resp = master.dig("testaddrem.ddns.", "TXT")
resp.check(rcode="NXDOMAIN")
verify(master, zone, dnssec)
# remove non-existent record
check_log("Remove non-existent record")
up = master.update(zone)
up.delete("testaddrem.ddns.", "TXT", "record")
up.send("NOERROR")
verify(master, zone, dnssec)
# remove NS from APEX (NS should stay)
check_log("Remove NS")
up = master.update(zone)
up.delete("ddns.", "NS")
up.send("NOERROR")
resp = master.dig("ddns.", "NS")
resp.check(rcode="NOERROR")
resp.check_record(rtype="NS", rdata="dns1.ddns.")
resp.check_record(rtype="NS", rdata="dns2.ddns.")
verify(master, zone, dnssec)
# remove all from APEX (NS should stay)
check_log("Remove all NS")
up = master.update(zone)
up.delete("ddns.", "ANY")
up.send("NOERROR")
resp = master.dig("ddns.", "NS")
resp.check(rcode="NOERROR")
resp.check_record(rtype="NS", rdata="dns1.ddns.")
resp.check_record(rtype="NS", rdata="dns2.ddns.")
resp = master.dig("ddns.", "MX")
resp.check(rcode="NOERROR")
compare(resp.count(section="answer"), 0, "MX rrset removal")
verify(master, zone, dnssec)
# remove all NS + add 1 new; result: 3 RRs
check_log("Remove all NS + add 1 new")
up = master.update(zone)
up.delete("ddns.", "NS")
up.add("ddns.", 3600, "NS", "dns3.ddns.")
up.send("NOERROR")
resp = master.dig("ddns.", "NS")
resp.check(rcode="NOERROR")
resp.check_record(rtype="NS", rdata="dns1.ddns.")
resp.check_record(rtype="NS", rdata="dns2.ddns.")
resp.check_record(rtype="NS", rdata="dns3.ddns.")
verify(master, zone, dnssec)
# remove NSs one at a time + add one new
# the last one + the new one should remain in the zone
check_log("Remove NSs one at a time + add 1 new")
up = master.update(zone)
up.delete("ddns.", "NS", "dns1.ddns.")
up.delete("ddns.", "NS", "dns2.ddns.")
up.delete("ddns.", "NS", "dns3.ddns.")
up.add("ddns.", 3600, "NS", "dns4.ddns.")
up.send("NOERROR")
resp = master.dig("ddns.", "NS")
resp.check(rcode="NOERROR", nordata="dns1.ddns.")
resp.check(nordata="dns2.ddns.")
resp.check_record(rtype="NS", rdata="dns3.ddns.")
resp.check_record(rtype="NS", rdata="dns4.ddns.")
verify(master, zone, dnssec)
# add new NS + remove all one at a time
# only the new NS should remain in the zone
check_log("Add 1 NS + remove all NSs one at a time")
up = master.update(zone)
up.add("ddns.", 3600, "NS", "dns5.ddns.")
up.delete("ddns.", "NS", "dns3.ddns.")
up.delete("ddns.", "NS", "dns4.ddns.")
up.send("NOERROR")
resp = master.dig("ddns.", "NS")
resp.check(rcode="NOERROR", nordata="dns3.ddns.")
resp.check(nordata="dns4.ddns.")
resp.check_record(rtype="NS", rdata="dns5.ddns.")
verify(master, zone, dnssec)
# add new NS + remove the old one; only the new one should remain
check_log("Add 1 NS + remove old NS")
up = master.update(zone)
up.add("ddns.", 3600, "NS", "dns1.ddns.")
up.delete("ddns.", "NS", "dns5.ddns.")
up.send("NOERROR")
resp = master.dig("ddns.", "NS")
resp.check(rcode="NOERROR", nordata="dns5.ddns.")
resp.check_record(rtype="NS", rdata="dns1.ddns.")
verify(master, zone, dnssec)
# remove old NS + add new NS; both should remain in the zone
check_log("Remove old NS + add 1 NS")
up = master.update(zone)
up.delete("ddns.", "NS", "dns1.ddns.")
up.add("ddns.", 3600, "NS", "dns2.ddns.")
up.send("NOERROR")
resp = master.dig("ddns.", "NS")
resp.check(rcode="NOERROR")
resp.check_record(rtype="NS", rdata="dns1.ddns.")
resp.check_record(rtype="NS", rdata="dns2.ddns.")
verify(master, zone, dnssec)
# remove NSs one at a time; the last one should remain in the zone
check_log("Remove NSs one at a time")
up = master.update(zone)
up.delete("ddns.", "NS", "dns1.ddns.")
up.delete("ddns.", "NS", "dns2.ddns.")
up.send("NOERROR")
resp = master.dig("ddns.", "NS")
resp.check(rcode="NOERROR", nordata="dns1.ddns.")
resp.check_record(rtype="NS", rdata="dns2.ddns.")
verify(master, zone, dnssec)
# add new NS + remove ALL NS; should ignore the remove and add the NS
check_log("Add new NS + remove ALL NSs at once")
up = master.update(zone)
up.add("ddns.", 3600, "NS", "dns1.ddns.")
up.delete("ddns.", "NS")
up.send("NOERROR")
resp = master.dig("ddns.", "NS")
resp.check_record(rtype="NS", rdata="dns1.ddns.")
resp.check_record(rtype="NS", rdata="dns2.ddns.")
verify(master, zone, dnssec)
# add empty generic record
check_log("Add empty generic record")
up = master.update(zone)
up.add("empty.ddns.", 300, "TYPE999", "\# 0")
up.send("NOERROR")
resp = master.dig("empty.ddns.", "TYPE999")
resp.check_record(rtype="TYPE999", rdata="\# 0")
verify(master, zone, dnssec)
# add NAPTR record (NAPTR has special processing)
check_log("Add NAPTR record")
up = master.update(zone)
up.add("3.1.1.1.1.1.1.1.1.2.7.9.9.ddns.", 172800, "NAPTR", "1 1 \"u\" \"E2U+sip\" \"!^.*$!sip:123@freeswitch.org!\" .")
up.send("NOERROR")
resp = master.dig("3.1.1.1.1.1.1.1.1.2.7.9.9.ddns.", "NAPTR")
resp.check_record(rtype="NAPTR", rdata="1 1 \"u\" \"E2U+sip\" \"!^.*$!sip:123@freeswitch.org!\" .")
verify(master, zone, dnssec)
# modify zone apex
check_log("Add TXT into apex")
up = master.update(zone)
up.add("ddns.", 300, "TXT", "This is apeeex!")
up.send("NOERROR")
resp = master.dig("ddns.", "TXT")
resp.check_record(rtype="TXT", rdata="This is apeeex!")
verify(master, zone, dnssec)
if dnssec:
# add DS for existing delegation
check_log("DS addition")
up = master.update(zone)
up.add("deleg.ddns.", 3600, "DS",
"54576 10 2 397E50C85EDE9CDE33F363A9E66FD1B216D788F8DD438A57A423A386869C8F06")
up.send("NOERROR")
resp = master.dig("deleg.ddns.", "NS", dnssec=True)
resp.check(rcode="NOERROR")
resp.check_record(section="authority", rtype="DS",
rdata="54576 10 2 397E50C85EDE9CDE33F363A9E66FD1B216D788F8DD438A57A423A386869C8F06")
resp.check_record(section="authority", rtype="NS", rdata="a.deleg.ddns.")
resp.check_record(section="authority", rtype="RRSIG")
verify(master, zone, dnssec)
# add AAAA to existing A glue
check_log("glue augmentation")
up = master.update(zone)
up.add("a.deleg.ddns.", 3600, "AAAA", "1::2")
up.send("NOERROR")
resp = master.dig("xy.deleg.ddns.", "A", dnssec=True)
resp.check_rr(section="authority", rname="deleg.ddns.", rtype="NS")
resp.check_rr(section="authority", rname="deleg.ddns.", rtype="RRSIG")
resp.check_rr(section="additional", rname="a.deleg.ddns.", rtype="AAAA")
resp.check_no_rr(section="additional", rname="a.deleg.ddns.", rtype="RRSIG")
verify(master, zone, dnssec)
def do_refusal_tests(master, zone, dnssec=False):
forbidden = [{'type':"RRSIG", 'data':"A 5 2 1800 20140331062706 20140317095503 132 nic.cz. rc7TwX4GnExDQBNDCdbgf0PS7zabtymSKQ0VhmbFJAcYZxN+yFF9PXAo SpsDVR5H0PIuUM4oqoe7gsKfqqpTdOuB9M6cN/Mni99u7XfKHkopDjYc qTJXKn3x2TER4WkGtG5uthuSEc9lseCr6XqAqkDnJlUa6pB2a3mEHwu/ Elk="},
{'type':"NSEC", 'data':"0-0.se. NS SOA TXT RRSIG NSEC DNSKEY"},
{'type':"NSEC3", 'data':"1 0 10 B8399FF56C1C0C7E D0RS5MTK2AT5SVG2S9LRMM4L2J63V6GL NS"}]
# Store initial SOA
soa_resp = master.dig("ddns.", "SOA")
prev_soa = soa_resp.resp.answer
# Add DDNS forbidden records
check_log("Adding forbidden records")
for f in forbidden:
up = master.update(zone)
up.add("forbidden.ddns.", 3600, f['type'], f['data'])
up.send("REFUSED")
resp = master.dig("forbidden.ddns", "ANY")
resp.check(rcode="NXDOMAIN")
check_soa(master, prev_soa)
# Remove DDNS forbidden records
check_log("Removing forbidden records")
for f in forbidden:
up = master.update(zone)
up.delete("forbidden.ddns.", f['type'])
up.send("REFUSED")
check_soa(master, prev_soa)
# Add normal records and then forbidden one
check_log("Refusal rollback")
up = master.update(zone)
up.add("rollback.ddns.", 3600, "TXT", "do not add me")
up.add("forbidden.ddns.", 3600, forbidden[0]['type'], forbidden[0]['data'])
up.send("REFUSED")
resp = master.dig("rollback.ddns", "ANY")
resp.check(rcode="NXDOMAIN")
resp = master.dig("forbidden.ddns", "ANY")
resp.check(rcode="NXDOMAIN")
check_soa(master, prev_soa)
# Add DNAME children
check_log("Add DNAME children rollback")
up = master.update(zone)
up.add("rollback.ddns.", 3600, "TXT", "do not add me")
up.add("under.dname.ddns.", 3600, "DNAME", "ddns.")
up.send("REFUSED")
resp = master.dig("rollback.ddns", "ANY")
resp.check(rcode="NXDOMAIN")
check_soa(master, prev_soa)
# Add DNAME grand-children
check_log("Add DNAME grand-children rollback")
up = master.update(zone)
up.add("rollback.ddns.", 3600, "TXT", "do not add me")
up.add("deep.under.dname.ddns.", 3600, "DNAME", "ddns.")
up.send("REFUSED")
resp = master.dig("rollback.ddns", "ANY")
resp.check(rcode="NXDOMAIN")
check_soa(master, prev_soa)
# Out-of-zone data
check_log("Out-of-zone data")
up = master.update(zone)
up.add("what.the.hell.am.i.doing.here.", "3600", "TXT", "I don't belong here")
up.send("NOTZONE")
check_soa(master, prev_soa)
# Remove 'all' SOA, ignore
check_log("Remove all SOA")
up = master.update(zone)
up.delete("ddns.", "SOA")
up.send("NOERROR")
check_soa(master, prev_soa)
# Remove specific SOA, ignore
check_log("Remove specific SOA")
up = master.update(zone)
up.delete("ddns.", "SOA", "dns1.ddns. hostmaster.ddns. 2011111213 10800 3600 1209600 7200")
up.send("NOERROR")
check_soa(master, prev_soa)
if dnssec:
# Add DNSKEY
check_log("DNSKEY addition")
up = master.update(zone)
up.add("ddns.", "3600", "DNSKEY",
"256 3 5 AwEAAbs0AlA6xWQn/lECfGt3S6TaeEmgJfEVVEMh06iNMNWMRHOfbqLF h3N52Ob7trmzlrzGlGLPnAZJvMB8lsFGC5CtaLUBD+4xCh5tl5QifZ+y o+MJvPGlVQI2cs7aMWV9CyFrRmuRcJaSZU2uBz9KFJ955UCq/WIy5KqS 7qaKLzzN")
up.send("REFUSED")
resp = master.dig("ddns.", "DNSKEY")
resp.check(rcode="NOERROR",
nordata="256 3 5 AwEAAbs0AlA6xWQn/lECfGt3S6TaeEmgJfEVVEMh06iNMNWMRHOfbqLF h3N52Ob7trmzlrzGlGLPnAZJvMB8lsFGC5CtaLUBD+4xCh5tl5QifZ+y o+MJvPGlVQI2cs7aMWV9CyFrRmuRcJaSZU2uBz9KFJ955UCq/WIy5KqS 7qaKLzzN")
# Add NSEC3PARAM
check_log("NSEC3PARAM addition")
up = master.update(zone)
up.add("ddns.", "0", "NSEC3PARAM", "1 0 10 B8399FF56C1C0C7E")
up.send("REFUSED")
resp = master.dig("ddns.", "NSEC3PARAM")
resp.check(rcode="NOERROR", nordata="1 0 10 B8399FF56C1C0C7E")
check_soa(master, prev_soa)
# Add DNSKEY
check_log("non-apex DNSKEY addition")
up = master.update(zone)
up.add("nonapex.ddns.", "3600", "DNSKEY",
"256 3 5 AwEAAbs0AlA6xWQn/lECfGt3S6TaeEmgJfEVVEMh06iNMNWMRHOfbqLF h3N52Ob7trmzlrzGlGLPnAZJvMB8lsFGC5CtaLUBD+4xCh5tl5QifZ+y o+MJvPGlVQI2cs7aMWV9CyFrRmuRcJaSZU2uBz9KFJ955UCq/WIy5KqS 7qaKLzzN")
up.send("NOERROR")
resp = master.dig("nonapex.ddns.", "DNSKEY")
resp.check(rcode="NOERROR",
rdata="256 3 5 AwEAAbs0AlA6xWQn/lECfGt3S6TaeEmgJfEVVEMh06iNMNWMRHOfbqLF h3N52Ob7trmzlrzGlGLPnAZJvMB8lsFGC5CtaLUBD+4xCh5tl5QifZ+y o+MJvPGlVQI2cs7aMWV9CyFrRmuRcJaSZU2uBz9KFJ955UCq/WIy5KqS 7qaKLzzN")
zone = t.zone("ddns.", storage=".")
master_plain = t.server("knot")
t.link(zone, master_plain, ddns=True)
master_nsec = t.server("knot")
t.link(zone, master_nsec, ddns=True)
master_nsec.dnssec(zone).enable = True
master_nsec3 = t.server("knot")
t.link(zone, master_nsec3, ddns=True)
master_nsec3.dnssec(zone).enable = True
master_nsec3.dnssec(zone).nsec3 = True
master_nsec3.dnssec(zone).nsec3_opt_out = (random.random() < 0.5)
t.start()
# DNSSEC-less test
check_log("============ Plain test ===========")
do_normal_tests(master_plain, zone)
do_refusal_tests(master_plain, zone)
# DNSSEC with NSEC test
check_log("============ NSEC test ============")
do_normal_tests(master_nsec, zone, dnssec=True)
do_refusal_tests(master_nsec, zone, dnssec=True)
# DNSSEC with NSEC3 test
check_log("============ NSEC3 test ===========")
do_normal_tests(master_nsec3, zone, dnssec=True)
do_refusal_tests(master_nsec3, zone, dnssec=True)
t.end()
|
CZ-NIC/knot
|
tests-extra/tests/ddns/basic/test.py
|
Python
|
gpl-3.0
| 27,901
|
[
"Elk"
] |
927930d0e63c36598e77bff8a1718f540e52ffe9bfd014691cca00e2b795c72e
|
# tests for openbabel in python
# A test of some simple SMILES manipulation
# by Richard West <r.west@neu.edu>
# Three SMILES, first two obviously the same, third one a resonance isomer.
smis=['[CH2]C=CCO', 'C([CH2])=CCO','C=C[CH]CO']
import pybel
canonicals = [pybel.readstring("smi", smile).write("can").strip() for smile in smis]
assert len(canonicals) == 3
assert len(set(canonicals)) == 2
# go via InChI to recognize resonance isomer
inchis = [pybel.readstring("smi", smile).write("inchi").strip() for smile in smis]
canonicals = [pybel.readstring("inchi", inchi).write("can").strip() for inchi in inchis]
assert len(set(canonicals)) == 1
|
dmaticzka/bioconda-recipes
|
recipes/openbabel/2.4.1/run_test.py
|
Python
|
mit
| 646
|
[
"Pybel"
] |
994ba4ea83090bde6bdacc345d32a5d1e0f6da06b650294ba0d815e95d2b585f
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark import since, keyword_only
from pyspark.ml.param.shared import *
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaWrapper
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame
__all__ = ['AFTSurvivalRegression', 'AFTSurvivalRegressionModel',
'DecisionTreeRegressor', 'DecisionTreeRegressionModel',
'GBTRegressor', 'GBTRegressionModel',
'GeneralizedLinearRegression', 'GeneralizedLinearRegressionModel',
'GeneralizedLinearRegressionSummary', 'GeneralizedLinearRegressionTrainingSummary',
'IsotonicRegression', 'IsotonicRegressionModel',
'LinearRegression', 'LinearRegressionModel',
'LinearRegressionSummary', 'LinearRegressionTrainingSummary',
'RandomForestRegressor', 'RandomForestRegressionModel']
@inherit_doc
class LinearRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter,
HasRegParam, HasTol, HasElasticNetParam, HasFitIntercept,
HasStandardization, HasSolver, HasWeightCol, HasAggregationDepth, HasLoss,
JavaMLWritable, JavaMLReadable):
"""
Linear regression.
The learning objective is to minimize the specified loss function, with regularization.
This supports two kinds of loss:
* squaredError (a.k.a squared loss)
* huber (a hybrid of squared error for relatively small errors and absolute error for \
relatively large ones, and we estimate the scale parameter from training data)
This supports multiple types of regularization:
* none (a.k.a. ordinary least squares)
* L2 (ridge regression)
* L1 (Lasso)
* L2 + L1 (elastic net)
Note: Fitting with huber loss only supports none and L2 regularization.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, 2.0, Vectors.dense(1.0)),
... (0.0, 2.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"])
>>> lr = LinearRegression(maxIter=5, regParam=0.0, solver="normal", weightCol="weight")
>>> model = lr.fit(df)
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> abs(model.transform(test0).head().prediction - (-1.0)) < 0.001
True
>>> abs(model.coefficients[0] - 1.0) < 0.001
True
>>> abs(model.intercept - 0.0) < 0.001
True
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> abs(model.transform(test1).head().prediction - 1.0) < 0.001
True
>>> lr.setParams("vector")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
>>> lr_path = temp_path + "/lr"
>>> lr.save(lr_path)
>>> lr2 = LinearRegression.load(lr_path)
>>> lr2.getMaxIter()
5
>>> model_path = temp_path + "/lr_model"
>>> model.save(model_path)
>>> model2 = LinearRegressionModel.load(model_path)
>>> model.coefficients[0] == model2.coefficients[0]
True
>>> model.intercept == model2.intercept
True
>>> model.numFeatures
1
>>> model.write().format("pmml").save(model_path + "_2")
.. versionadded:: 1.4.0
"""
solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
"options: auto, normal, l-bfgs.", typeConverter=TypeConverters.toString)
loss = Param(Params._dummy(), "loss", "The loss function to be optimized. Supported " +
"options: squaredError, huber.", typeConverter=TypeConverters.toString)
epsilon = Param(Params._dummy(), "epsilon", "The shape parameter to control the amount of " +
"robustness. Must be > 1.0. Only valid when loss is huber",
typeConverter=TypeConverters.toFloat)
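    # The 1.35 default below matches Huber's classic tuning constant (~1.345),
    # which gives roughly 95% asymptotic efficiency under Gaussian noise.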
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
standardization=True, solver="auto", weightCol=None, aggregationDepth=2,
loss="squaredError", epsilon=1.35):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
standardization=True, solver="auto", weightCol=None, aggregationDepth=2, \
loss="squaredError", epsilon=1.35)
"""
super(LinearRegression, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.LinearRegression", self.uid)
self._setDefault(maxIter=100, regParam=0.0, tol=1e-6, loss="squaredError", epsilon=1.35)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
standardization=True, solver="auto", weightCol=None, aggregationDepth=2,
loss="squaredError", epsilon=1.35):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
standardization=True, solver="auto", weightCol=None, aggregationDepth=2, \
loss="squaredError", epsilon=1.35)
Sets params for linear regression.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return LinearRegressionModel(java_model)
@since("2.3.0")
def setEpsilon(self, value):
"""
Sets the value of :py:attr:`epsilon`.
"""
return self._set(epsilon=value)
@since("2.3.0")
def getEpsilon(self):
"""
Gets the value of epsilon or its default value.
"""
return self.getOrDefault(self.epsilon)
class LinearRegressionModel(JavaModel, JavaPredictionModel, GeneralJavaMLWritable, JavaMLReadable):
"""
Model fitted by :class:`LinearRegression`.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def coefficients(self):
"""
Model coefficients.
"""
return self._call_java("coefficients")
@property
@since("1.4.0")
def intercept(self):
"""
Model intercept.
"""
return self._call_java("intercept")
@property
@since("2.3.0")
def scale(self):
"""
The value by which \|y - X'w\| is scaled down when loss is "huber", otherwise 1.0.
"""
return self._call_java("scale")
@property
@since("2.0.0")
def summary(self):
"""
        Gets summary (e.g. residuals, mse, r-squared) of model on
training set. An exception is thrown if
`trainingSummary is None`.
"""
if self.hasSummary:
java_lrt_summary = self._call_java("summary")
return LinearRegressionTrainingSummary(java_lrt_summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@property
@since("2.0.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model
instance.
"""
return self._call_java("hasSummary")
@since("2.0.0")
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
:param dataset:
Test dataset to evaluate model on, where dataset is an
instance of :py:class:`pyspark.sql.DataFrame`
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_lr_summary = self._call_java("evaluate", dataset)
return LinearRegressionSummary(java_lr_summary)
class LinearRegressionSummary(JavaWrapper):
"""
.. note:: Experimental
Linear regression results evaluated on a dataset.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def predictions(self):
"""
        DataFrame produced by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.0.0")
def predictionCol(self):
"""
Field in "predictions" which gives the predicted value of
the label at each instance.
"""
return self._call_java("predictionCol")
@property
@since("2.0.0")
def labelCol(self):
"""
Field in "predictions" which gives the true label of each
instance.
"""
return self._call_java("labelCol")
@property
@since("2.0.0")
def featuresCol(self):
"""
Field in "predictions" which gives the features of each instance
as a vector.
"""
return self._call_java("featuresCol")
@property
@since("2.0.0")
def explainedVariance(self):
"""
Returns the explained variance regression score.
explainedVariance = 1 - variance(y - \hat{y}) / variance(y)
.. seealso:: `Wikipedia explain variation \
<http://en.wikipedia.org/wiki/Explained_variation>`_
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("explainedVariance")
@property
@since("2.0.0")
def meanAbsoluteError(self):
"""
Returns the mean absolute error, which is a risk function
corresponding to the expected value of the absolute error
loss or l1-norm loss.
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("meanAbsoluteError")
@property
@since("2.0.0")
def meanSquaredError(self):
"""
Returns the mean squared error, which is a risk function
corresponding to the expected value of the squared error
loss or quadratic loss.
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("meanSquaredError")
@property
@since("2.0.0")
def rootMeanSquaredError(self):
"""
Returns the root mean squared error, which is defined as the
square root of the mean squared error.
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("rootMeanSquaredError")
@property
@since("2.0.0")
def r2(self):
"""
Returns R^2, the coefficient of determination.
.. seealso:: `Wikipedia coefficient of determination \
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("r2")
@property
@since("2.4.0")
def r2adj(self):
"""
Returns Adjusted R^2, the adjusted coefficient of determination.
.. seealso:: `Wikipedia coefficient of determination, Adjusted R^2 \
<https://en.wikipedia.org/wiki/Coefficient_of_determination#Adjusted_R2>`_
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark versions.
"""
return self._call_java("r2adj")
@property
@since("2.0.0")
def residuals(self):
"""
Residuals (label - predicted value)
"""
return self._call_java("residuals")
@property
@since("2.0.0")
def numInstances(self):
"""
Number of instances in the predictions DataFrame.
"""
return self._call_java("numInstances")
@property
@since("2.2.0")
def degreesOfFreedom(self):
"""
Degrees of freedom.
"""
return self._call_java("degreesOfFreedom")
@property
@since("2.0.0")
def devianceResiduals(self):
"""
The weighted residuals, the usual residuals rescaled by the
square root of the instance weights.
"""
return self._call_java("devianceResiduals")
@property
@since("2.0.0")
def coefficientStandardErrors(self):
"""
Standard error of estimated coefficients and intercept.
This value is only available when using the "normal" solver.
If :py:attr:`LinearRegression.fitIntercept` is set to True,
then the last element returned corresponds to the intercept.
.. seealso:: :py:attr:`LinearRegression.solver`
"""
return self._call_java("coefficientStandardErrors")
@property
@since("2.0.0")
def tValues(self):
"""
T-statistic of estimated coefficients and intercept.
This value is only available when using the "normal" solver.
If :py:attr:`LinearRegression.fitIntercept` is set to True,
then the last element returned corresponds to the intercept.
.. seealso:: :py:attr:`LinearRegression.solver`
"""
return self._call_java("tValues")
@property
@since("2.0.0")
def pValues(self):
"""
Two-sided p-value of estimated coefficients and intercept.
This value is only available when using the "normal" solver.
If :py:attr:`LinearRegression.fitIntercept` is set to True,
then the last element returned corresponds to the intercept.
.. seealso:: :py:attr:`LinearRegression.solver`
"""
return self._call_java("pValues")
@inherit_doc
class LinearRegressionTrainingSummary(LinearRegressionSummary):
"""
.. note:: Experimental
Linear regression training results. Currently, the training summary ignores the
training weights except for the objective trace.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def objectiveHistory(self):
"""
Objective function (scaled loss + regularization) at each
iteration.
This value is only available when using the "l-bfgs" solver.
.. seealso:: :py:attr:`LinearRegression.solver`
"""
return self._call_java("objectiveHistory")
@property
@since("2.0.0")
def totalIterations(self):
"""
Number of training iterations until termination.
This value is only available when using the "l-bfgs" solver.
.. seealso:: :py:attr:`LinearRegression.solver`
"""
return self._call_java("totalIterations")
@inherit_doc
class IsotonicRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
HasWeightCol, JavaMLWritable, JavaMLReadable):
"""
Currently implemented using parallelized pool adjacent violators algorithm.
Only univariate (single feature) algorithm supported.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> ir = IsotonicRegression()
>>> model = ir.fit(df)
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.transform(test0).head().prediction
0.0
>>> model.boundaries
DenseVector([0.0, 1.0])
>>> ir_path = temp_path + "/ir"
>>> ir.save(ir_path)
>>> ir2 = IsotonicRegression.load(ir_path)
>>> ir2.getIsotonic()
True
>>> model_path = temp_path + "/ir_model"
>>> model.save(model_path)
>>> model2 = IsotonicRegressionModel.load(model_path)
>>> model.boundaries == model2.boundaries
True
>>> model.predictions == model2.predictions
True
.. versionadded:: 1.6.0
"""
isotonic = \
Param(Params._dummy(), "isotonic",
"whether the output sequence should be isotonic/increasing (true) or" +
"antitonic/decreasing (false).", typeConverter=TypeConverters.toBoolean)
featureIndex = \
Param(Params._dummy(), "featureIndex",
"The index of the feature if featuresCol is a vector column, no effect otherwise.",
typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
weightCol=None, isotonic=True, featureIndex=0):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
weightCol=None, isotonic=True, featureIndex=0)
"""
super(IsotonicRegression, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.IsotonicRegression", self.uid)
self._setDefault(isotonic=True, featureIndex=0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
weightCol=None, isotonic=True, featureIndex=0):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
weightCol=None, isotonic=True, featureIndex=0)
Set the params for IsotonicRegression.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return IsotonicRegressionModel(java_model)
def setIsotonic(self, value):
"""
Sets the value of :py:attr:`isotonic`.
"""
return self._set(isotonic=value)
def getIsotonic(self):
"""
Gets the value of isotonic or its default value.
"""
return self.getOrDefault(self.isotonic)
def setFeatureIndex(self, value):
"""
Sets the value of :py:attr:`featureIndex`.
"""
return self._set(featureIndex=value)
def getFeatureIndex(self):
"""
Gets the value of featureIndex or its default value.
"""
return self.getOrDefault(self.featureIndex)
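# For intuition, a minimal pure-Python pool-adjacent-violators sketch with
# equal weights; this is NOT Spark's parallelized implementation, only the
# sequential idea behind it:
#
#     def pav(y):
#         blocks = [[v] for v in y]                  # each block pools values
#         i = 0
#         while i < len(blocks) - 1:
#             if sum(blocks[i]) / len(blocks[i]) > \
#                     sum(blocks[i + 1]) / len(blocks[i + 1]):
#                 blocks[i] += blocks.pop(i + 1)     # merge the violating pair
#                 i = max(i - 1, 0)                  # re-check the previous pair
#             else:
#                 i += 1
#         out = []
#         for b in blocks:
#             out += [sum(b) / len(b)] * len(b)
#         return out                                 # pav([3, 1, 2]) -> [2.0, 2.0, 2.0]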
class IsotonicRegressionModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by :class:`IsotonicRegression`.
.. versionadded:: 1.6.0
"""
@property
@since("1.6.0")
def boundaries(self):
"""
Boundaries in increasing order for which predictions are known.
"""
return self._call_java("boundaries")
@property
@since("1.6.0")
def predictions(self):
"""
Predictions associated with the boundaries at the same index, monotone because of isotonic
regression.
"""
return self._call_java("predictions")
class TreeEnsembleParams(DecisionTreeParams):
"""
Mixin for parameters of Decision Tree-based ensemble algorithms.
"""
subsamplingRate = Param(Params._dummy(), "subsamplingRate", "Fraction of the training data " +
"used for learning each decision tree, in range (0, 1].",
typeConverter=TypeConverters.toFloat)
supportedFeatureSubsetStrategies = ["auto", "all", "onethird", "sqrt", "log2"]
featureSubsetStrategy = \
Param(Params._dummy(), "featureSubsetStrategy",
"The number of features to consider for splits at each tree node. Supported " +
"options: 'auto' (choose automatically for task: If numTrees == 1, set to " +
"'all'. If numTrees > 1 (forest), set to 'sqrt' for classification and to " +
"'onethird' for regression), 'all' (use all features), 'onethird' (use " +
"1/3 of the features), 'sqrt' (use sqrt(number of features)), 'log2' (use " +
"log2(number of features)), 'n' (when n is in the range (0, 1.0], use " +
"n * number of features. When n is in the range (1, number of features), use" +
" n features). default = 'auto'", typeConverter=TypeConverters.toString)
def __init__(self):
super(TreeEnsembleParams, self).__init__()
@since("1.4.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
"""
return self._set(subsamplingRate=value)
@since("1.4.0")
def getSubsamplingRate(self):
"""
Gets the value of subsamplingRate or its default value.
"""
return self.getOrDefault(self.subsamplingRate)
@since("1.4.0")
def setFeatureSubsetStrategy(self, value):
"""
Sets the value of :py:attr:`featureSubsetStrategy`.
.. note:: Deprecated in 2.4.0 and will be removed in 3.0.0.
"""
return self._set(featureSubsetStrategy=value)
@since("1.4.0")
def getFeatureSubsetStrategy(self):
"""
Gets the value of featureSubsetStrategy or its default value.
"""
return self.getOrDefault(self.featureSubsetStrategy)
class TreeRegressorParams(Params):
"""
Private class to track supported impurity measures.
"""
supportedImpurities = ["variance"]
impurity = Param(Params._dummy(), "impurity",
"Criterion used for information gain calculation (case-insensitive). " +
"Supported options: " +
", ".join(supportedImpurities), typeConverter=TypeConverters.toString)
def __init__(self):
super(TreeRegressorParams, self).__init__()
@since("1.4.0")
def setImpurity(self, value):
"""
Sets the value of :py:attr:`impurity`.
"""
return self._set(impurity=value)
@since("1.4.0")
def getImpurity(self):
"""
Gets the value of impurity or its default value.
"""
return self.getOrDefault(self.impurity)
class RandomForestParams(TreeEnsembleParams):
"""
Private class to track supported random forest parameters.
"""
numTrees = Param(Params._dummy(), "numTrees", "Number of trees to train (>= 1).",
typeConverter=TypeConverters.toInt)
def __init__(self):
super(RandomForestParams, self).__init__()
@since("1.4.0")
def setNumTrees(self, value):
"""
Sets the value of :py:attr:`numTrees`.
"""
return self._set(numTrees=value)
@since("1.4.0")
def getNumTrees(self):
"""
Gets the value of numTrees or its default value.
"""
return self.getOrDefault(self.numTrees)
class GBTParams(TreeEnsembleParams):
"""
Private class to track supported GBT params.
"""
supportedLossTypes = ["squared", "absolute"]
@inherit_doc
class DecisionTreeRegressor(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
DecisionTreeParams, TreeRegressorParams, HasCheckpointInterval,
HasSeed, JavaMLWritable, JavaMLReadable, HasVarianceCol):
"""
`Decision tree <http://en.wikipedia.org/wiki/Decision_tree_learning>`_
learning algorithm for regression.
It supports both continuous and categorical features.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> dt = DecisionTreeRegressor(maxDepth=2, varianceCol="variance")
>>> model = dt.fit(df)
>>> model.depth
1
>>> model.numNodes
3
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> model.numFeatures
1
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.transform(test0).head().prediction
0.0
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> dtr_path = temp_path + "/dtr"
>>> dt.save(dtr_path)
>>> dt2 = DecisionTreeRegressor.load(dtr_path)
>>> dt2.getMaxDepth()
2
>>> model_path = temp_path + "/dtr_model"
>>> model.save(model_path)
>>> model2 = DecisionTreeRegressionModel.load(model_path)
>>> model.numNodes == model2.numNodes
True
>>> model.depth == model2.depth
True
>>> model.transform(test1).head().variance
0.0
.. versionadded:: 1.4.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance",
seed=None, varianceCol=None):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
impurity="variance", seed=None, varianceCol=None)
"""
super(DecisionTreeRegressor, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.DecisionTreeRegressor", self.uid)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="variance")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="variance", seed=None, varianceCol=None):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
impurity="variance", seed=None, varianceCol=None)
Sets params for the DecisionTreeRegressor.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return DecisionTreeRegressionModel(java_model)
@inherit_doc
class DecisionTreeModel(JavaModel, JavaPredictionModel):
"""
Abstraction for Decision Tree models.
.. versionadded:: 1.5.0
"""
@property
@since("1.5.0")
def numNodes(self):
"""Return number of nodes of the decision tree."""
return self._call_java("numNodes")
@property
@since("1.5.0")
def depth(self):
"""Return depth of the decision tree."""
return self._call_java("depth")
@property
@since("2.0.0")
def toDebugString(self):
"""Full description of model."""
return self._call_java("toDebugString")
def __repr__(self):
return self._call_java("toString")
@inherit_doc
class TreeEnsembleModel(JavaModel):
"""
(private abstraction)
Represents a tree ensemble model.
"""
@property
@since("2.0.0")
def trees(self):
"""Trees in this ensemble. Warning: These have null parent Estimators."""
return [DecisionTreeModel(m) for m in list(self._call_java("trees"))]
@property
@since("2.0.0")
def getNumTrees(self):
"""Number of trees in ensemble."""
return self._call_java("getNumTrees")
@property
@since("1.5.0")
def treeWeights(self):
"""Return the weights for each tree"""
return list(self._call_java("javaTreeWeights"))
@property
@since("2.0.0")
def totalNumNodes(self):
"""Total number of nodes, summed over all trees in the ensemble."""
return self._call_java("totalNumNodes")
@property
@since("2.0.0")
def toDebugString(self):
"""Full description of model."""
return self._call_java("toDebugString")
def __repr__(self):
return self._call_java("toString")
@inherit_doc
class DecisionTreeRegressionModel(DecisionTreeModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by :class:`DecisionTreeRegressor`.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def featureImportances(self):
"""
Estimate of the importance of each feature.
This generalizes the idea of "Gini" importance to other losses,
following the explanation of Gini importance from "Random Forests" documentation
by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn.
This feature importance is calculated as follows:
- importance(feature j) = sum (over nodes which split on feature j) of the gain,
where the gain is scaled by the number of instances passing through the node
- Normalize the importances for the tree to sum to 1.
.. note:: Feature importance for single decision trees can have high variance due to
correlated predictor variables. Consider using a :py:class:`RandomForestRegressor`
to determine feature importance instead.
"""
return self._call_java("featureImportances")
@inherit_doc
class RandomForestRegressor(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasSeed,
RandomForestParams, TreeRegressorParams, HasCheckpointInterval,
JavaMLWritable, JavaMLReadable):
"""
`Random Forest <http://en.wikipedia.org/wiki/Random_forest>`_
learning algorithm for regression.
It supports both continuous and categorical features.
>>> from numpy import allclose
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> rf = RandomForestRegressor(numTrees=2, maxDepth=2, seed=42)
>>> model = rf.fit(df)
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> allclose(model.treeWeights, [1.0, 1.0])
True
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.transform(test0).head().prediction
0.0
>>> model.numFeatures
1
>>> model.trees
[DecisionTreeRegressionModel (uid=...) of depth..., DecisionTreeRegressionModel...]
>>> model.getNumTrees
2
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
0.5
>>> rfr_path = temp_path + "/rfr"
>>> rf.save(rfr_path)
>>> rf2 = RandomForestRegressor.load(rfr_path)
>>> rf2.getNumTrees()
2
>>> model_path = temp_path + "/rfr_model"
>>> model.save(model_path)
>>> model2 = RandomForestRegressionModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
.. versionadded:: 1.4.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20,
featureSubsetStrategy="auto"):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \
featureSubsetStrategy="auto")
"""
super(RandomForestRegressor, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.RandomForestRegressor", self.uid)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="variance", subsamplingRate=1.0, numTrees=20,
featureSubsetStrategy="auto")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20,
featureSubsetStrategy="auto"):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \
featureSubsetStrategy="auto")
Sets params for the RandomForestRegressor.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return RandomForestRegressionModel(java_model)
@since("2.4.0")
def setFeatureSubsetStrategy(self, value):
"""
Sets the value of :py:attr:`featureSubsetStrategy`.
"""
return self._set(featureSubsetStrategy=value)
class RandomForestRegressionModel(TreeEnsembleModel, JavaPredictionModel, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by :class:`RandomForestRegressor`.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def trees(self):
"""Trees in this ensemble. Warning: These have null parent Estimators."""
return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]
@property
@since("2.0.0")
def featureImportances(self):
"""
Estimate of the importance of each feature.
Each feature's importance is the average of its importance across all trees in the ensemble.
The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
(Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
and follows the implementation from scikit-learn.
.. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances`
"""
return self._call_java("featureImportances")
@inherit_doc
class GBTRegressor(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter,
GBTParams, HasCheckpointInterval, HasStepSize, HasSeed, JavaMLWritable,
JavaMLReadable, TreeRegressorParams):
"""
`Gradient-Boosted Trees (GBTs) <http://en.wikipedia.org/wiki/Gradient_boosting>`_
learning algorithm for regression.
It supports both continuous and categorical features.
>>> from numpy import allclose
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> gbt = GBTRegressor(maxIter=5, maxDepth=2, seed=42)
>>> print(gbt.getImpurity())
variance
>>> print(gbt.getFeatureSubsetStrategy())
all
>>> model = gbt.fit(df)
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> model.numFeatures
1
>>> allclose(model.treeWeights, [1.0, 0.1, 0.1, 0.1, 0.1])
True
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.transform(test0).head().prediction
0.0
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> gbtr_path = temp_path + "gbtr"
>>> gbt.save(gbtr_path)
>>> gbt2 = GBTRegressor.load(gbtr_path)
>>> gbt2.getMaxDepth()
2
>>> model_path = temp_path + "gbtr_model"
>>> model.save(model_path)
>>> model2 = GBTRegressionModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
>>> model.treeWeights == model2.treeWeights
True
>>> model.trees
[DecisionTreeRegressionModel (uid=...) of depth..., DecisionTreeRegressionModel...]
>>> validation = spark.createDataFrame([(0.0, Vectors.dense(-1.0))],
... ["label", "features"])
>>> model.evaluateEachIteration(validation, "squared")
[0.0, 0.0, 0.0, 0.0, 0.0]
.. versionadded:: 1.4.0
"""
lossType = Param(Params._dummy(), "lossType",
"Loss function which GBT tries to minimize (case-insensitive). " +
"Supported options: " + ", ".join(GBTParams.supportedLossTypes),
typeConverter=TypeConverters.toString)
stepSize = Param(Params._dummy(), "stepSize",
"Step size (a.k.a. learning rate) in interval (0, 1] for shrinking " +
"the contribution of each estimator.",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None,
impurity="variance", featureSubsetStrategy="all"):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \
checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \
impurity="variance", featureSubsetStrategy="all")
"""
super(GBTRegressor, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.regression.GBTRegressor", self.uid)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1,
impurity="variance", featureSubsetStrategy="all")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None,
impuriy="variance", featureSubsetStrategy="all"):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \
checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \
impurity="variance", featureSubsetStrategy="all")
Sets params for Gradient Boosted Tree Regression.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return GBTRegressionModel(java_model)
@since("1.4.0")
def setLossType(self, value):
"""
Sets the value of :py:attr:`lossType`.
"""
return self._set(lossType=value)
@since("1.4.0")
def getLossType(self):
"""
Gets the value of lossType or its default value.
"""
return self.getOrDefault(self.lossType)
@since("2.4.0")
def setFeatureSubsetStrategy(self, value):
"""
Sets the value of :py:attr:`featureSubsetStrategy`.
"""
return self._set(featureSubsetStrategy=value)
class GBTRegressionModel(TreeEnsembleModel, JavaPredictionModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by :class:`GBTRegressor`.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def featureImportances(self):
"""
Estimate of the importance of each feature.
Each feature's importance is the average of its importance across all trees in the ensemble.
The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
(Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
and follows the implementation from scikit-learn.
.. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances`
"""
return self._call_java("featureImportances")
@property
@since("2.0.0")
def trees(self):
"""Trees in this ensemble. Warning: These have null parent Estimators."""
return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]
@since("2.4.0")
def evaluateEachIteration(self, dataset, loss):
"""
Method to compute error or loss for every iteration of gradient boosting.
:param dataset:
Test dataset to evaluate model on, where dataset is an
instance of :py:class:`pyspark.sql.DataFrame`
:param loss:
The loss function used to compute error.
Supported options: squared, absolute
"""
return self._call_java("evaluateEachIteration", dataset, loss)
@inherit_doc
class AFTSurvivalRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
HasFitIntercept, HasMaxIter, HasTol, HasAggregationDepth,
JavaMLWritable, JavaMLReadable):
"""
.. note:: Experimental
Accelerated Failure Time (AFT) Model Survival Regression
Fit a parametric AFT survival regression model based on the Weibull distribution
of the survival time.
.. seealso:: `AFT Model <https://en.wikipedia.org/wiki/Accelerated_failure_time_model>`_
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0), 1.0),
... (1e-40, Vectors.sparse(1, [], []), 0.0)], ["label", "features", "censor"])
>>> aftsr = AFTSurvivalRegression()
>>> model = aftsr.fit(df)
>>> model.predict(Vectors.dense(6.3))
1.0
>>> model.predictQuantiles(Vectors.dense(6.3))
DenseVector([0.0101, 0.0513, 0.1054, 0.2877, 0.6931, 1.3863, 2.3026, 2.9957, 4.6052])
>>> model.transform(df).show()
+-------+---------+------+----------+
| label| features|censor|prediction|
+-------+---------+------+----------+
| 1.0| [1.0]| 1.0| 1.0|
|1.0E-40|(1,[],[])| 0.0| 1.0|
+-------+---------+------+----------+
...
>>> aftsr_path = temp_path + "/aftsr"
>>> aftsr.save(aftsr_path)
>>> aftsr2 = AFTSurvivalRegression.load(aftsr_path)
>>> aftsr2.getMaxIter()
100
>>> model_path = temp_path + "/aftsr_model"
>>> model.save(model_path)
>>> model2 = AFTSurvivalRegressionModel.load(model_path)
>>> model.coefficients == model2.coefficients
True
>>> model.intercept == model2.intercept
True
>>> model.scale == model2.scale
True
.. versionadded:: 1.6.0
"""
censorCol = Param(Params._dummy(), "censorCol",
"censor column name. The value of this column could be 0 or 1. " +
"If the value is 1, it means the event has occurred i.e. " +
"uncensored; otherwise censored.", typeConverter=TypeConverters.toString)
quantileProbabilities = \
Param(Params._dummy(), "quantileProbabilities",
"quantile probabilities array. Values of the quantile probabilities array " +
"should be in the range (0, 1) and the array should be non-empty.",
typeConverter=TypeConverters.toListFloat)
quantilesCol = Param(Params._dummy(), "quantilesCol",
"quantiles column name. This column will output quantiles of " +
"corresponding quantileProbabilities if it is set.",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor",
quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
quantilesCol=None, aggregationDepth=2):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \
quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \
quantilesCol=None, aggregationDepth=2)
"""
super(AFTSurvivalRegression, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.AFTSurvivalRegression", self.uid)
self._setDefault(censorCol="censor",
quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
maxIter=100, tol=1E-6)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor",
quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
quantilesCol=None, aggregationDepth=2):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \
quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \
quantilesCol=None, aggregationDepth=2)
Sets params for AFTSurvivalRegression.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return AFTSurvivalRegressionModel(java_model)
@since("1.6.0")
def setCensorCol(self, value):
"""
Sets the value of :py:attr:`censorCol`.
"""
return self._set(censorCol=value)
@since("1.6.0")
def getCensorCol(self):
"""
Gets the value of censorCol or its default value.
"""
return self.getOrDefault(self.censorCol)
@since("1.6.0")
def setQuantileProbabilities(self, value):
"""
Sets the value of :py:attr:`quantileProbabilities`.
"""
return self._set(quantileProbabilities=value)
@since("1.6.0")
def getQuantileProbabilities(self):
"""
Gets the value of quantileProbabilities or its default value.
"""
return self.getOrDefault(self.quantileProbabilities)
@since("1.6.0")
def setQuantilesCol(self, value):
"""
Sets the value of :py:attr:`quantilesCol`.
"""
return self._set(quantilesCol=value)
@since("1.6.0")
def getQuantilesCol(self):
"""
Gets the value of quantilesCol or its default value.
"""
return self.getOrDefault(self.quantilesCol)
class AFTSurvivalRegressionModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
.. note:: Experimental
Model fitted by :class:`AFTSurvivalRegression`.
.. versionadded:: 1.6.0
"""
@property
@since("2.0.0")
def coefficients(self):
"""
Model coefficients.
"""
return self._call_java("coefficients")
@property
@since("1.6.0")
def intercept(self):
"""
Model intercept.
"""
return self._call_java("intercept")
@property
@since("1.6.0")
def scale(self):
"""
Model scale parameter.
"""
return self._call_java("scale")
@since("2.0.0")
def predictQuantiles(self, features):
"""
Predicted quantiles.
"""
return self._call_java("predictQuantiles", features)
@since("2.0.0")
def predict(self, features):
"""
Predicted value.
"""
return self._call_java("predict", features)
@inherit_doc
class GeneralizedLinearRegression(JavaEstimator, HasLabelCol, HasFeaturesCol, HasPredictionCol,
HasFitIntercept, HasMaxIter, HasTol, HasRegParam, HasWeightCol,
HasSolver, JavaMLWritable, JavaMLReadable):
"""
.. note:: Experimental
Generalized Linear Regression.
Fit a Generalized Linear Model specified by giving a symbolic description of the linear
predictor (link function) and a description of the error distribution (family). It supports
"gaussian", "binomial", "poisson", "gamma" and "tweedie" as family. Valid link functions for
each family are listed below. The first link function of each family is the default one.
* "gaussian" -> "identity", "log", "inverse"
* "binomial" -> "logit", "probit", "cloglog"
* "poisson" -> "log", "identity", "sqrt"
* "gamma" -> "inverse", "identity", "log"
* "tweedie" -> power link function specified through "linkPower". \
The default link power in the tweedie family is 1 - variancePower.
.. seealso:: `GLM <https://en.wikipedia.org/wiki/Generalized_linear_model>`_
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(0.0, 0.0)),
... (1.0, Vectors.dense(1.0, 2.0)),
... (2.0, Vectors.dense(0.0, 0.0)),
... (2.0, Vectors.dense(1.0, 1.0)),], ["label", "features"])
>>> glr = GeneralizedLinearRegression(family="gaussian", link="identity", linkPredictionCol="p")
>>> model = glr.fit(df)
>>> transformed = model.transform(df)
>>> abs(transformed.head().prediction - 1.5) < 0.001
True
>>> abs(transformed.head().p - 1.5) < 0.001
True
>>> model.coefficients
DenseVector([1.5..., -1.0...])
>>> model.numFeatures
2
>>> abs(model.intercept - 1.5) < 0.001
True
>>> glr_path = temp_path + "/glr"
>>> glr.save(glr_path)
>>> glr2 = GeneralizedLinearRegression.load(glr_path)
>>> glr.getFamily() == glr2.getFamily()
True
>>> model_path = temp_path + "/glr_model"
>>> model.save(model_path)
>>> model2 = GeneralizedLinearRegressionModel.load(model_path)
>>> model.intercept == model2.intercept
True
>>> model.coefficients[0] == model2.coefficients[0]
True
.. versionadded:: 2.0.0
"""
family = Param(Params._dummy(), "family", "The name of family which is a description of " +
"the error distribution to be used in the model. Supported options: " +
"gaussian (default), binomial, poisson, gamma and tweedie.",
typeConverter=TypeConverters.toString)
link = Param(Params._dummy(), "link", "The name of link function which provides the " +
"relationship between the linear predictor and the mean of the distribution " +
"function. Supported options: identity, log, inverse, logit, probit, cloglog " +
"and sqrt.", typeConverter=TypeConverters.toString)
linkPredictionCol = Param(Params._dummy(), "linkPredictionCol", "link prediction (linear " +
"predictor) column name", typeConverter=TypeConverters.toString)
variancePower = Param(Params._dummy(), "variancePower", "The power in the variance function " +
"of the Tweedie distribution which characterizes the relationship " +
"between the variance and mean of the distribution. Only applicable " +
"for the Tweedie family. Supported values: 0 and [1, Inf).",
typeConverter=TypeConverters.toFloat)
linkPower = Param(Params._dummy(), "linkPower", "The index in the power link function. " +
"Only applicable to the Tweedie family.",
typeConverter=TypeConverters.toFloat)
solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
"options: irls.", typeConverter=TypeConverters.toString)
offsetCol = Param(Params._dummy(), "offsetCol", "The offset column name. If this is not set " +
"or empty, we treat all instance offsets as 0.0",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, labelCol="label", featuresCol="features", predictionCol="prediction",
family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6,
regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None,
variancePower=0.0, linkPower=None, offsetCol=None):
"""
__init__(self, labelCol="label", featuresCol="features", predictionCol="prediction", \
family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \
regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, \
variancePower=0.0, linkPower=None, offsetCol=None)
"""
super(GeneralizedLinearRegression, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.GeneralizedLinearRegression", self.uid)
self._setDefault(family="gaussian", maxIter=25, tol=1e-6, regParam=0.0, solver="irls",
variancePower=0.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, labelCol="label", featuresCol="features", predictionCol="prediction",
family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6,
regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None,
variancePower=0.0, linkPower=None, offsetCol=None):
"""
setParams(self, labelCol="label", featuresCol="features", predictionCol="prediction", \
family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \
regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, \
variancePower=0.0, linkPower=None, offsetCol=None)
Sets params for generalized linear regression.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return GeneralizedLinearRegressionModel(java_model)
@since("2.0.0")
def setFamily(self, value):
"""
Sets the value of :py:attr:`family`.
"""
return self._set(family=value)
@since("2.0.0")
def getFamily(self):
"""
Gets the value of family or its default value.
"""
return self.getOrDefault(self.family)
@since("2.0.0")
def setLinkPredictionCol(self, value):
"""
Sets the value of :py:attr:`linkPredictionCol`.
"""
return self._set(linkPredictionCol=value)
@since("2.0.0")
def getLinkPredictionCol(self):
"""
Gets the value of linkPredictionCol or its default value.
"""
return self.getOrDefault(self.linkPredictionCol)
@since("2.0.0")
def setLink(self, value):
"""
Sets the value of :py:attr:`link`.
"""
return self._set(link=value)
@since("2.0.0")
def getLink(self):
"""
Gets the value of link or its default value.
"""
return self.getOrDefault(self.link)
@since("2.2.0")
def setVariancePower(self, value):
"""
Sets the value of :py:attr:`variancePower`.
"""
return self._set(variancePower=value)
@since("2.2.0")
def getVariancePower(self):
"""
Gets the value of variancePower or its default value.
"""
return self.getOrDefault(self.variancePower)
@since("2.2.0")
def setLinkPower(self, value):
"""
Sets the value of :py:attr:`linkPower`.
"""
return self._set(linkPower=value)
@since("2.2.0")
def getLinkPower(self):
"""
Gets the value of linkPower or its default value.
"""
return self.getOrDefault(self.linkPower)
@since("2.3.0")
def setOffsetCol(self, value):
"""
Sets the value of :py:attr:`offsetCol`.
"""
return self._set(offsetCol=value)
@since("2.3.0")
def getOffsetCol(self):
"""
Gets the value of offsetCol or its default value.
"""
return self.getOrDefault(self.offsetCol)
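# A small configuration sketch for the tweedie case described in the class
# docstring (the default link power is 1 - variancePower unless set
# explicitly):
#
#     glr = GeneralizedLinearRegression(family="tweedie", variancePower=1.5)
#     # effective link power: 1 - 1.5 = -0.5; call glr.setLinkPower(0.0)
#     # to use a log link instead.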
class GeneralizedLinearRegressionModel(JavaModel, JavaPredictionModel, JavaMLWritable,
JavaMLReadable):
"""
.. note:: Experimental
Model fitted by :class:`GeneralizedLinearRegression`.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def coefficients(self):
"""
Model coefficients.
"""
return self._call_java("coefficients")
@property
@since("2.0.0")
def intercept(self):
"""
Model intercept.
"""
return self._call_java("intercept")
@property
@since("2.0.0")
def summary(self):
"""
Gets summary (e.g. residuals, deviance, pValues) of the model on
the training set. An exception is raised if
`trainingSummary is None`.
"""
if self.hasSummary:
java_glrt_summary = self._call_java("summary")
return GeneralizedLinearRegressionTrainingSummary(java_glrt_summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@property
@since("2.0.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model
instance.
"""
return self._call_java("hasSummary")
@since("2.0.0")
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
:param dataset:
Test dataset to evaluate model on, where dataset is an
instance of :py:class:`pyspark.sql.DataFrame`
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_glr_summary = self._call_java("evaluate", dataset)
return GeneralizedLinearRegressionSummary(java_glr_summary)
class GeneralizedLinearRegressionSummary(JavaWrapper):
"""
.. note:: Experimental
Generalized linear regression results evaluated on a dataset.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def predictions(self):
"""
Predictions output by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.0.0")
def predictionCol(self):
"""
Field in :py:attr:`predictions` which gives the predicted value of each instance.
This is set to a new column name if the original model's `predictionCol` is not set.
"""
return self._call_java("predictionCol")
@property
@since("2.2.0")
def numInstances(self):
"""
Number of instances in the predictions DataFrame.
"""
return self._call_java("numInstances")
@property
@since("2.0.0")
def rank(self):
"""
The numeric rank of the fitted linear model.
"""
return self._call_java("rank")
@property
@since("2.0.0")
def degreesOfFreedom(self):
"""
Degrees of freedom.
"""
return self._call_java("degreesOfFreedom")
@property
@since("2.0.0")
def residualDegreeOfFreedom(self):
"""
The residual degrees of freedom.
"""
return self._call_java("residualDegreeOfFreedom")
@property
@since("2.0.0")
def residualDegreeOfFreedomNull(self):
"""
The residual degrees of freedom for the null model.
"""
return self._call_java("residualDegreeOfFreedomNull")
@since("2.0.0")
def residuals(self, residualsType="deviance"):
"""
Get the residuals of the fitted model by type.
:param residualsType: The type of residuals which should be returned.
Supported options: deviance (default), pearson, working, and response.
"""
return self._call_java("residuals", residualsType)
@property
@since("2.0.0")
def nullDeviance(self):
"""
The deviance for the null model.
"""
return self._call_java("nullDeviance")
@property
@since("2.0.0")
def deviance(self):
"""
The deviance for the fitted model.
"""
return self._call_java("deviance")
@property
@since("2.0.0")
def dispersion(self):
"""
The dispersion of the fitted model.
It is taken as 1.0 for the "binomial" and "poisson" families, and otherwise
estimated by the residual Pearson's Chi-Squared statistic (which is defined as
sum of the squares of the Pearson residuals) divided by the residual degrees of freedom.
"""
return self._call_java("dispersion")
@property
@since("2.0.0")
def aic(self):
"""
Akaike's "An Information Criterion"(AIC) for the fitted model.
"""
return self._call_java("aic")
@inherit_doc
class GeneralizedLinearRegressionTrainingSummary(GeneralizedLinearRegressionSummary):
"""
.. note:: Experimental
Generalized linear regression training results.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def numIterations(self):
"""
Number of training iterations.
"""
return self._call_java("numIterations")
@property
@since("2.0.0")
def solver(self):
"""
The numeric solver used for training.
"""
return self._call_java("solver")
@property
@since("2.0.0")
def coefficientStandardErrors(self):
"""
Standard error of estimated coefficients and intercept.
If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
then the last element returned corresponds to the intercept.
"""
return self._call_java("coefficientStandardErrors")
@property
@since("2.0.0")
def tValues(self):
"""
T-statistic of estimated coefficients and intercept.
If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
then the last element returned corresponds to the intercept.
"""
return self._call_java("tValues")
@property
@since("2.0.0")
def pValues(self):
"""
Two-sided p-value of estimated coefficients and intercept.
If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
then the last element returned corresponds to the intercept.
"""
return self._call_java("pValues")
def __repr__(self):
return self._call_java("toString")
if __name__ == "__main__":
import doctest
import pyspark.ml.regression
from pyspark.sql import SparkSession
globs = pyspark.ml.regression.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.regression tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
|
rikima/spark
|
python/pyspark/ml/regression.py
|
Python
|
apache-2.0
| 66,566
|
[
"Gaussian"
] |
04df7ef02cc1b609bb6b57eef9abbe9e6265bbe8e3aee875c662ca43d69d7732
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to assist user in submitting feedback about gcloud.
Does one of two things:
1. If invoked in the context of a recent gcloud crash (i.e. an exception that
was not caught anywhere in the Cloud SDK), will direct the user to the Cloud SDK
bug tracker, with a partly pre-filled form.
2. Otherwise, directs the user to either the Cloud SDK bug tracker,
StackOverflow, or the Cloud SDK groups page.
"""
import datetime
import textwrap
from googlecloudsdk.api_lib import feedback_util
from googlecloudsdk.api_lib.sdktool import info_holder
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.util import text as text_util
STACKOVERFLOW_URL = 'http://stackoverflow.com/questions/tagged/gcloud'
GROUPS_PAGE_URL = ('https://groups.google.com/forum/?fromgroups#!forum/'
'google-cloud-sdk')
FEEDBACK_MESSAGE = """\
We appreciate your feedback.
If you have a question, post it on Stack Overflow using the "gcloud" tag at
[{0}].
For general feedback, use our groups page
[{1}],
send a mail to [google-cloud-sdk@googlegroups.com] or visit the [#gcloud] IRC
channel on freenode.
""".format(STACKOVERFLOW_URL, GROUPS_PAGE_URL)
FEEDBACK_PROMPT = """\
Would you like to file a bug using our issue tracker site at [{0}] \
(will open a new browser tab)?\
""".format(feedback_util.ISSUE_TRACKER_URL)
def _PrintQuiet(info_str, log_data):
"""Print message referring to various feedback resources for quiet execution.
Args:
info_str: str, the output of `gcloud info`
log_data: info_holder.LogData, log data for the provided log file
"""
if log_data:
if not log_data.traceback:
log.Print(('Please consider including the log file [{0}] in any '
'feedback you submit.').format(log_data.filename))
log.Print(textwrap.dedent("""\
If you have a question, post it on Stack Overflow using the "gcloud" tag
at [{0}].
For general feedback, use our groups page
[{1}],
send a mail to [google-cloud-sdk@googlegroups.com], or visit the [#gcloud]
IRC channel on freenode.
If you have found a bug, file it using our issue tracker site at
[{2}].
Please include the following information when filing a bug report:\
""").format(STACKOVERFLOW_URL, GROUPS_PAGE_URL,
feedback_util.ISSUE_TRACKER_URL))
divider = feedback_util.GetDivider()
log.Print(divider)
if log_data and log_data.traceback:
log.Print(log_data.traceback)
log.Print(info_str.strip())
log.Print(divider)
def _SuggestIncludeRecentLogs():
recent_runs = info_holder.LogsInfo().GetRecentRuns()
if recent_runs:
now = datetime.datetime.now()
def _FormatLogData(run):
crash = ' (crash detected)' if run.traceback else ''
time = 'Unknown time'
if run.date:
time = text_util.PrettyTimeDelta(now - run.date) + ' ago'
return '[{0}]{1}: {2}'.format(run.command, crash, time)
idx = console_io.PromptChoice(
map(_FormatLogData, recent_runs) + ['None of these'], default=0,
message=('Which recent gcloud invocation would you like to provide '
'feedback about? This will open a new browser tab.'))
if idx < len(recent_runs):
return recent_runs[idx]
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Feedback(base.Command):
"""Provide feedback to the Google Cloud SDK team.
The Google Cloud SDK team offers support through a number of channels:
* Google Cloud SDK Issue Tracker
* Stack Overflow "#gcloud" tag
* google-cloud-sdk Google group
This command lists the available channels and facilitates getting help through
one of them by opening a web browser to the relevant page, possibly with
information relevant to the current install and configuration pre-populated in
form fields on that page.
"""
@staticmethod
def Args(parser):
parser.add_argument(
'--log-file',
help='Path to the log file from a prior gcloud run.')
def Run(self, args):
info = info_holder.InfoHolder()
log_data = None
if args.log_file:
try:
log_data = info_holder.LogData.FromFile(args.log_file)
except IOError as err:
log.warn('Error reading the specified file [{0}]: '
'{1}\n'.format(args.log_file, err))
if args.quiet:
_PrintQuiet(str(info), log_data)
else:
log.status.Print(FEEDBACK_MESSAGE)
if not log_data:
log_data = _SuggestIncludeRecentLogs()
if log_data or console_io.PromptContinue(
prompt_string=('No invocation selected. Would you still like to file '
'a bug (will open a new browser tab)')):
feedback_util.OpenNewIssueInBrowser(info, log_data)
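# Example invocations (based on the --log-file flag defined in Args above;
# the path is hypothetical):
#
#     $ gcloud feedback
#     $ gcloud feedback --log-file=/path/to/gcloud.log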
|
flgiordano/netcash
|
+/google-cloud-sdk/lib/surface/feedback.py
|
Python
|
bsd-3-clause
| 5,403
|
[
"VisIt"
] |
78e6b6904eb6c64fed59384732a9efe84b332ae4ca83b4074a561eaea3b61920
|
#!/usr/bin/env python
#
# Urwid Palette Test. Showing off highcolor support
# Copyright (C) 2004-2009 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
"""
Palette test. Shows the available foreground and background settings
in monochrome, 16 color, 88 color and 256 color modes.
"""
import re
import sys
import urwid
import urwid.raw_display
CHART_256 = """
brown__ dark_red_ dark_magenta_ dark_blue_ dark_cyan_ dark_green_
yellow_ light_red light_magenta light_blue light_cyan light_green
#00f#06f#08f#0af#0df#0ff black_______ dark_gray___
#60f#00d#06d#08d#0ad#0dd#0fd light_gray__ white_______
#80f#60d#00a#06a#08a#0aa#0da#0fa
#a0f#80d#60a#008#068#088#0a8#0d8#0f8
#d0f#a0d#80d#608#006#066#086#0a6#0d6#0f6
#f0f#d0d#a0a#808#606#000#060#080#0a0#0d0#0f0#0f6#0f8#0fa#0fd#0ff
#f0d#d0a#a08#806#600#660#680#6a0#6d0#6f0#6f6#6f8#6fa#6fd#6ff#0df
#f0a#d08#a06#800#860#880#8a0#8d0#8f0#8f6#8f8#8fa#8fd#8ff#6df#0af
#f08#d06#a00#a60#a80#aa0#ad0#af0#af6#af8#afa#afd#aff#8df#6af#08f
#f06#d00#d60#d80#da0#dd0#df0#df6#df8#dfa#dfd#dff#adf#8af#68f#06f
#f00#f60#f80#fa0#fd0#ff0#ff6#ff8#ffa#ffd#fff#ddf#aaf#88f#66f#00f
#fd0#fd6#fd8#fda#fdd#fdf#daf#a8f#86f#60f
#66d#68d#6ad#6dd #fa0#fa6#fa8#faa#fad#faf#d8f#a6f#80f
#86d#66a#68a#6aa#6da #f80#f86#f88#f8a#f8d#f8f#d6f#a0f
#a6d#86a#668#688#6a8#6d8 #f60#f66#f68#f6a#f6d#f6f#d0f
#d6d#a6a#868#666#686#6a6#6d6#6d8#6da#6dd #f00#f06#f08#f0a#f0d#f0f
#d6a#a68#866#886#8a6#8d6#8d8#8da#8dd#6ad
#d68#a66#a86#aa6#ad6#ad8#ada#add#8ad#68d
#d66#d86#da6#dd6#dd8#dda#ddd#aad#88d#66d g78_g82_g85_g89_g93_g100
#da6#da8#daa#dad#a8d#86d g52_g58_g62_g66_g70_g74_
#88a#8aa #d86#d88#d8a#d8d#a6d g27_g31_g35_g38_g42_g46_g50_
#a8a#888#8a8#8aa #d66#d68#d6a#d6d g0__g3__g7__g11_g15_g19_g23_
#a88#aa8#aaa#88a
#a88#a8a
"""
CHART_88 = """
brown__ dark_red_ dark_magenta_ dark_blue_ dark_cyan_ dark_green_
yellow_ light_red light_magenta light_blue light_cyan light_green
#00f#08f#0cf#0ff black_______ dark_gray___
#80f#00c#08c#0cc#0fc light_gray__ white_______
#c0f#80c#008#088#0c8#0f8
#f0f#c0c#808#000#080#0c0#0f0#0f8#0fc#0ff #88c#8cc
#f0c#c08#800#880#8c0#8f0#8f8#8fc#8ff#0cf #c8c#888#8c8#8cc
#f08#c00#c80#cc0#cf0#cf8#cfc#cff#8cf#08f #c88#cc8#ccc#88c
#f00#f80#fc0#ff0#ff8#ffc#fff#ccf#88f#00f #c88#c8c
#fc0#fc8#fcc#fcf#c8f#80f
#f80#f88#f8c#f8f#c0f g62_g74_g82_g89_g100
#f00#f08#f0c#f0f g0__g19_g35_g46_g52
"""
CHART_16 = """
brown__ dark_red_ dark_magenta_ dark_blue_ dark_cyan_ dark_green_
yellow_ light_red light_magenta light_blue light_cyan light_green
black_______ dark_gray___ light_gray__ white_______
"""
ATTR_RE = re.compile("(?P<whitespace>[ \n]*)(?P<entry>[^ \n]+)")
SHORT_ATTR = 4 # length of short high-colour descriptions which may
# be packed one after the next
def parse_chart(chart, convert):
"""
Convert string chart into text markup with the correct attributes.
chart -- palette chart as a string
convert -- function that converts a single palette entry to an
(attr, text) tuple, or None if no match is found
"""
out = []
for match in re.finditer(ATTR_RE, chart):
if match.group('whitespace'):
out.append(match.group('whitespace'))
entry = match.group('entry')
entry = entry.replace("_", " ")
while entry:
# try the first four characters
attrtext = convert(entry[:SHORT_ATTR])
if attrtext:
elen = SHORT_ATTR
entry = entry[SHORT_ATTR:].strip()
else: # try the whole thing
attrtext = convert(entry.strip())
assert attrtext, "Invalid palette entry: %r" % entry
elen = len(entry)
entry = ""
attr, text = attrtext
out.append((attr, text.ljust(elen)))
return out
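# A tiny illustrative call (hypothetical `convert` that accepts every entry;
# the trailing newline has no entry after it, so it is dropped):
#
#     parse_chart("#f00 #0f0\n", lambda e: ('attr', e))
#     # -> [('attr', '#f00'), ' ', ('attr', '#0f0')]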
def foreground_chart(chart, background, colors):
"""
Create text markup for a foreground colour chart
chart -- palette chart as string
background -- colour to use for background of chart
colors -- number of colors (88 or 256)
"""
def convert_foreground(entry):
try:
attr = urwid.AttrSpec(entry, background, colors)
except urwid.AttrSpecError:
return None
return attr, entry
return parse_chart(chart, convert_foreground)
def background_chart(chart, foreground, colors):
"""
Create text markup for a background colour chart
chart -- palette chart as string
foreground -- colour to use for foreground of chart
colors -- number of colors (88 or 256)
This will remap 8 <= colour < 16 to high-colour versions
in the hopes of greater compatibility
"""
def convert_background(entry):
try:
attr = urwid.AttrSpec(foreground, entry, colors)
except urwid.AttrSpecError:
return None
# fix 8 <= colour < 16
if colors > 16 and attr.background_basic and \
attr.background_number >= 8:
# use high-colour with same number
entry = 'h%d'%attr.background_number
attr = urwid.AttrSpec(foreground, entry, colors)
return attr, entry
return parse_chart(chart, convert_background)
def main():
palette = [
('header', 'black,underline', 'light gray', 'standout,underline',
'black,underline', '#88a'),
('panel', 'light gray', 'dark blue', '',
'#ffd', '#00a'),
('focus', 'light gray', 'dark cyan', 'standout',
'#ff8', '#806'),
]
screen = urwid.raw_display.Screen()
screen.register_palette(palette)
lb = urwid.SimpleListWalker([])
chart_offset = None # offset of chart in lb list
mode_radio_buttons = []
chart_radio_buttons = []
def fcs(widget):
# wrap widgets that can take focus
return urwid.AttrMap(widget, None, 'focus')
def set_mode(colors, is_foreground_chart):
# set terminal mode and redraw chart
screen.set_terminal_properties(colors)
screen.reset_default_terminal_palette()
chart_fn = (background_chart, foreground_chart)[is_foreground_chart]
if colors == 1:
lb[chart_offset] = urwid.Divider()
else:
chart = {16: CHART_16, 88: CHART_88, 256: CHART_256}[colors]
txt = chart_fn(chart, 'default', colors)
lb[chart_offset] = urwid.Text(txt, wrap='clip')
def on_mode_change(rb, state, colors):
# if this radio button is checked
if state:
is_foreground_chart = chart_radio_buttons[0].state
set_mode(colors, is_foreground_chart)
def mode_rb(text, colors, state=False):
# mode radio buttons
rb = urwid.RadioButton(mode_radio_buttons, text, state)
urwid.connect_signal(rb, 'change', on_mode_change, colors)
return fcs(rb)
def on_chart_change(rb, state):
# handle foreground check box state change
set_mode(screen.colors, state)
def click_exit(button):
raise urwid.ExitMainLoop()
lb.extend([
urwid.AttrMap(urwid.Text("Urwid Palette Test"), 'header'),
urwid.AttrMap(urwid.Columns([
urwid.Pile([
mode_rb("Monochrome", 1),
mode_rb("16-Color", 16, True),
mode_rb("88-Color", 88),
mode_rb("256-Color", 256),]),
urwid.Pile([
fcs(urwid.RadioButton(chart_radio_buttons,
"Foreground Colors", True, on_chart_change)),
fcs(urwid.RadioButton(chart_radio_buttons,
"Background Colors")),
urwid.Divider(),
fcs(urwid.Button("Exit", click_exit)),
]),
]),'panel')
])
chart_offset = len(lb)
lb.extend([
urwid.Divider() # placeholder for the chart
])
set_mode(16, True) # displays the chart
def unhandled_input(key):
if key in ('Q','q','esc'):
raise urwid.ExitMainLoop()
urwid.MainLoop(urwid.ListBox(lb), screen=screen,
unhandled_input=unhandled_input).run()
if __name__ == "__main__":
main()
|
rndusr/urwid
|
examples/palette_test.py
|
Python
|
lgpl-2.1
| 9,400
|
[
"ADF"
] |
f7e299a941e5dd4d133c368c976346b1e80f3a0dbc13d30e565a1e35d9176e6e
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd
import espressomd.electrostatics
from espressomd import electrostatic_extensions
@utx.skipIfMissingFeatures(["P3M"])
class ELC_vs_MMM2D_neutral(ut.TestCase):
# Handle to espresso system
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
acc = 1e-6
elc_gap = 5.0
box_l = 10.0
bl2 = box_l * 0.5
system.time_step = 0.01
system.cell_system.skin = 0.1
def test_elc_vs_mmm2d(self):
elc_param_sets = {
"inert": {
"gap_size": self.elc_gap,
"maxPWerror": self.acc,
"neutralize": False,
"check_neutrality": False}
# "const_pot_0": {
# "gap_size": self.elc_gap,
# "maxPWerror": self.acc,
# "const_pot": True,
# "pot_diff": 0.0},
# "const_pot_1": {
# "gap_size": self.elc_gap,
# "maxPWerror": self.acc,
# "const_pot": True,
# "pot_diff": 1.0},
# "const_pot_m1": {
# "gap_size": self.elc_gap,
# "maxPWerror": self.acc,
# "const_pot": True,
# "pot_diff": -1.0}
}
mmm2d_param_sets = {
"inert": {
"prefactor": 1.0,
"maxPWerror": self.acc,
"check_neutrality": False}
# "const_pot_0": {
# "prefactor": 1.0,
# "maxPWerror": self.acc,
# "const_pot": True,
# "pot_diff": 0.0},
# "const_pot_1": {
# "prefactor": 1.0,
# "maxPWerror": self.acc,
# "const_pot": True,
# "pot_diff": 1.0},
# "const_pot_m1": {
# "prefactor": 1.0,
# "maxPWerror": self.acc,
# "const_pot": True,
# "pot_diff": -1.0}
}
self.system.box_l = 3 * [self.box_l]
buf_node_grid = self.system.cell_system.node_grid
self.system.cell_system.set_layered(
n_layers=10, use_verlet_lists=False)
self.system.periodicity = [1, 1, 0]
q = 1.0
self.system.part.add(id=0, pos=(5.0, 5.0, 5.0), q=-3.0 * q)
self.system.part.add(id=1, pos=(2.0, 2.0, 5.0), q=q / 3.0)
self.system.part.add(id=2, pos=(2.0, 5.0, 2.0), q=q / 3.0)
self.system.part.add(id=3, pos=(5.0, 2.0, 7.0), q=q / 3.0)
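        # Net charge is -3q + 3 * (q / 3) = -2q, i.e. the system is
        # deliberately non-neutral, which is why check_neutrality is
        # disabled in both parameter sets above.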
# MMM2D
mmm2d = espressomd.electrostatics.MMM2D(**mmm2d_param_sets["inert"])
self.system.actors.add(mmm2d)
mmm2d_res = {}
mmm2d_res["inert"] = self.scan()
# mmm2d.set_params(**mmm2d_param_sets["const_pot_0"])
# mmm2d_res["const_pot_0"] = self.scan()
# mmm2d.set_params(**mmm2d_param_sets["const_pot_1"])
# mmm2d_res["const_pot_1"] = self.scan()
# mmm2d.set_params(**mmm2d_param_sets["const_pot_m1"])
# mmm2d_res["const_pot_m1"] = self.scan()
self.system.actors.remove(mmm2d)
# ELC
self.system.box_l = [self.box_l, self.box_l, self.box_l + self.elc_gap]
self.system.cell_system.set_domain_decomposition(
use_verlet_lists=True)
self.system.cell_system.node_grid = buf_node_grid
self.system.periodicity = [1, 1, 1]
p3m = espressomd.electrostatics.P3M(prefactor=1.0, accuracy=self.acc,
mesh=[20, 20, 32], cao=7,
check_neutrality=False)
self.system.actors.add(p3m)
elc = electrostatic_extensions.ELC(**elc_param_sets["inert"])
self.system.actors.add(elc)
elc_res = {}
elc_res["inert"] = self.scan()
# elc.set_params(**elc_param_sets["const_pot_0"])
# elc_res["const_pot_0"] = self.scan()
# elc.set_params(**elc_param_sets["const_pot_1"])
# elc_res["const_pot_1"] = self.scan()
# elc.set_params(**elc_param_sets["const_pot_m1"])
# elc_res["const_pot_m1"] = self.scan()
        for run in elc_res:
            # np.testing.assert_allclose raises AssertionError on mismatch
            # and returns None otherwise, so it can be called directly
            np.testing.assert_allclose(
                mmm2d_res[run], elc_res[run], rtol=0, atol=1e-4)
def scan(self):
n = 10
d = 0.5
res = []
for i in range(n + 1):
z = self.box_l - d - 1.0 * i / n * (self.box_l - 2 * d)
self.system.part[0].pos = [self.bl2, self.bl2, z]
self.system.integrator.run(0)
energy = self.system.analysis.energy()
m = [z]
m.extend(self.system.part[0].f)
m.append(energy['coulomb'])
res.append(m)
return res
if __name__ == "__main__":
ut.main()
|
psci2195/espresso-ffans
|
testsuite/python/elc_vs_mmm2d_nonneutral.py
|
Python
|
gpl-3.0
| 5,864
|
[
"ESPResSo"
] |
5ea5582fe6b30dd7f57836617653f94312dd38497cefc2235767fc21b127d686
|
import time
import numpy as np
import matplotlib.pyplot as plt
import h5py
from ..doublyPeriodic import doublyPeriodicModel
from numpy import pi
class model(doublyPeriodicModel):
def __init__(self, name = None,
# Grid parameters
nx = 256, ny = None, Lx = 1e6, Ly = None,
# Solver parameters
t = 0.0,
dt = 1.0e-1, # Numerical timestep
step = 0,
timeStepper = "ETDRK4", # Time-stepping method
nThreads = 1, # Number of threads for FFTW
useFilter = False,
#
# Hydrostatic Wave Eqn params: rotating and gravitating Earth
f0 = 1.0,
sigma = np.sqrt(5),
kappa = 8.0,
# Friction: 4th order hyperviscosity
waveVisc = 1.0e-12,
meanVisc = 1.0e-8,
waveViscOrder = 4.0,
meanViscOrder = 4.0,
):
# Physical parameters specific to the Physical Problem
self.f0 = f0
self.sigma = sigma
self.kappa = kappa
self.meanVisc = meanVisc
self.waveVisc = waveVisc
self.meanViscOrder = meanViscOrder
self.waveViscOrder = waveViscOrder
# Initialize super-class.
doublyPeriodicModel.__init__(self, name = name,
physics = "two-dimensional turbulence and the" + \
" hydrostatic wave equation",
nVars = 2,
realVars = False,
# Persistent doublyPeriodic initialization arguments
nx = nx, ny = ny, Lx = Lx, Ly = Ly, t = t, dt = dt, step = step,
timeStepper = timeStepper, nThreads = nThreads, useFilter = useFilter,
)
# Default initial condition.
soln = np.zeros_like(self.soln)
## Default vorticity initial condition: Gaussian vortex
rVortex = self.Lx/10.0
x0, y0 = self.Lx/2.0, self.Ly/2.0
q0 = 0.05*self.f0 * np.exp( -( (self.x-x0)**2.0 + (self.y-y0)**2.0 ) \
/ (2*rVortex**2.0) \
)
soln[:, :, 0] = q0
## Default wave initial condition: plane wave. Find closest
## plane wave that satisfies specified dispersion relation.
kExact = np.sqrt(self.alpha)*self.kappa
kApprox = 2.0*pi/self.Lx*np.round(self.Lx*kExact/(2.0*pi))
# Set initial wave velocity to 1
A00 = -self.alpha*self.f0 / (1j*self.sigma*kApprox)
A0 = A00*np.exp(1j*kApprox*self.x)
soln[:, :, 1] = A0
self.set_physical_soln(soln)
self.update_state_variables()
# Initialize default diagnostics
self.add_diagnostic('CFL', lambda self: self._calc_CFL(),
description="Maximum CFL number")
self.add_diagnostic('Eq', lambda self: self._calc_Eq(),
description="Total mean energy")
# Methods - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def describe_physics(self):
print("""
This model solves the hydrostatic wave equation and the \n
        two-dimensional vorticity equation simultaneously. \n
Arbitrary-order hyperdissipation can be specified for both. \n
There are two prognostic variables: wave amplitude, and mean vorticity.
""")
def _init_linear_coeff(self):
""" Calculate the coefficient that multiplies the linear left hand
side of the equation """
# Two-dimensional turbulent part.
self.linearCoeff[:, :, 0] = -self.meanVisc \
* (self.k**2.0 + self.l**2.0)**(self.meanViscOrder/2.0)
waveDispersion = self.k**2.0 + self.l**2.0 - self.alpha*self.kappa**2.0
waveDissipation = -self.waveVisc \
* (self.k**2.0 + self.l**2.0)**(self.waveViscOrder/2.0)
self.linearCoeff[:, :, 1] = waveDissipation \
+ self._invE*1j*self.alpha*self.sigma*waveDispersion
def _calc_right_hand_side(self, soln, t):
""" Calculate the nonlinear right hand side of PDE """
qh = soln[:, :, 0]
Ah = soln[:, :, 1]
self.q = np.real(self.ifft2(qh))
# Derivatives of A in physical space
self.Ax = self.ifft2(self._jk*Ah)
self.Ay = self.ifft2(self._jl*Ah)
self.Axx = -self.ifft2(self.k**2.0*Ah)
self.Ayy = -self.ifft2(self.l**2.0*Ah)
self.Axy = -self.ifft2(self.l*self.k*Ah)
self.EA = -self.ifft2( self.alpha/2.0*Ah*( \
self.k**2.0 + self.l**2.0 \
+ (4.0+3.0*self.alpha)*self.kappa**2.0 ))
# Calculate streamfunction
self.psih = -qh / self._divSafeKsq
# Mean velocities
self.U = np.real(self.ifft2(-self._jl*self.psih))
self.V = np.real(self.ifft2( self._jk*self.psih))
# Views to clarify calculation of A's RHS
U = self.U
V = self.V
q = self.q
Ax = self.Ax
Ay = self.Ay
EA = self.EA
Axx = self.Axx
Ayy = self.Ayy
Axy = self.Axy
f0 = self.f0
sigma = self.sigma
kappa = self.kappa
# Right hand side for q
self.RHS[:, :, 0] = -self._jk*self.fft2(U*q) \
-self._jl*self.fft2(V*q)
# Right hand side for A, in steps:
## 1. Advection term,
self.RHS[:, :, 1] = -self._invE*( \
self._jk*self.fft2(U*EA) + self._jl*self.fft2(V*EA) )
## 2. Refraction term
self.RHS[:, :, 1] += -self._invE/f0*( \
self._jk*self.fft2( q * (1j*sigma*Ax - f0*Ay) ) \
+ self._jl*self.fft2( q * (1j*sigma*Ay + f0*Ax) ) \
)
## 3. 'Middling' difference Jacobian term.
self.RHS[:, :, 1] += self._invE*(2j*sigma/f0**2.0)*( \
self._jk*self.fft2( V*(1j*sigma*Axy - f0*Ayy) \
- U*(1j*sigma*Ayy + f0*Axy) ) \
+ self._jl*self.fft2( U*(1j*sigma*Axy + f0*Axx) \
- V*(1j*sigma*Axx - f0*Axy) ) \
)
self._dealias_RHS()
def _init_problem_parameters(self):
""" Pre-allocate parameters in memory """
# Frequency parameter
self.alpha = (self.sigma**2.0 - self.f0**2.0) / self.f0**2.0
# Wavenumbers and products
self._jk = 1j*self.k
self._jl = 1j*self.l
self._divSafeKsq = self.k**2.0 + self.l**2.0
self._divSafeKsq[0, 0] = float('Inf')
# Inversion of the operator E
E = -self.alpha/2.0 * \
( self.k**2.0 + self.l**2.0 + self.kappa**2.0*(4.0+3.0*self.alpha) )
self._invE = 1.0 / E
# Vorticity and wave-field amplitude
self.q = np.zeros(self.physVarShape, np.dtype('float64'))
self.A = np.zeros(self.physVarShape, np.dtype('complex128'))
# Streamfunction transform
self.psih = np.zeros(self.specVarShape, np.dtype('complex128'))
# Mean and wave velocity components
self.U = np.zeros(self.physVarShape, np.dtype('float64'))
self.V = np.zeros(self.physVarShape, np.dtype('float64'))
self.u = np.zeros(self.physVarShape, np.dtype('float64'))
self.v = np.zeros(self.physVarShape, np.dtype('float64'))
# Derivatives of wave field amplitude
self.Ax = np.zeros(self.physVarShape, np.dtype('complex128'))
self.Ay = np.zeros(self.physVarShape, np.dtype('complex128'))
self.EA = np.zeros(self.physVarShape, np.dtype('complex128'))
self.Axx = np.zeros(self.physVarShape, np.dtype('complex128'))
self.Ayy = np.zeros(self.physVarShape, np.dtype('complex128'))
self.Axy = np.zeros(self.physVarShape, np.dtype('complex128'))
def update_state_variables(self):
""" Update diagnostic variables to current model state """
qh = self.soln[:, :, 0]
Ah = self.soln[:, :, 1]
# Streamfunction
self.psih = -qh / self._divSafeKsq
# Physical-space PV and velocity components
self.A = self.ifft2(Ah)
self.q = np.real(self.ifft2(qh))
self.U = -np.real(self.ifft2(self._jl*self.psih))
self.V = np.real(self.ifft2(self._jk*self.psih))
# Wave velocities
uh = -1.0/(self.alpha*self.f0)*( \
1j*self.sigma*self._jk*Ah - self.f0*self._jl*Ah )
vh = -1.0/(self.alpha*self.f0)*( \
1j*self.sigma*self._jl*Ah + self.f0*self._jk*Ah )
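        # Adding the complex conjugate below yields twice the real part of
        # each field: the physical wave velocity is the complex amplitude
        # field plus its conjugate, not merely the real part of one component.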
self.u = np.real( self.ifft2(uh) + np.conj(self.ifft2(uh)) )
self.v = np.real( self.ifft2(vh) + np.conj(self.ifft2(vh)) )
def set_q(self, q):
""" Set model vorticity """
self.soln[:, :, 0] = self.fft2(q)
self._dealias_soln()
self.update_state_variables()
def set_A(self, A):
""" Set model wave-field amplitude """
self.soln[:, :, 1] = self.fft2(A)
self._dealias_soln()
self.update_state_variables()
def visualize_model_state(self, show=False):
""" Visualize the model state """
self.update_state_variables()
# Plot in kilometers
h = 1e-3
(qMax, c) = (np.max(np.abs(self.q)), 0.8)
(cmin, cmax) = (-c*qMax, c*qMax)
fig, axArr = plt.subplots(ncols=2, figsize=(8, 4), sharex=True, sharey=True)
fig.canvas.set_window_title("Waves and flow")
axArr[0].pcolormesh(h*self.x, h*self.y, self.q, cmap='RdBu_r',
vmin=cmin, vmax=cmax)
axArr[1].pcolormesh(h*self.x, h*self.y,
np.sqrt(self.u**2.0+self.v**2.0))
axArr[0].set_ylabel('$y$', labelpad=12.0)
axArr[0].set_xlabel('$x$', labelpad=5.0)
axArr[1].set_xlabel('$x$', labelpad=5.0)
message = '$t = {:03.1f}$ wave periods'.format(
self.t*self.sigma/(2.0*pi))
        titles = [r'$q$ ($\mathrm{s^{-1}}$)', r'$\sqrt{u^2+v^2}$ (m/s)']
#positions = [axArr[0].get_position(), axArr[1].get_position()]
plt.text(0.00, 1.03, message, transform=axArr[0].transAxes)
        plt.text(1.00, 1.03, titles[0], transform=axArr[0].transAxes,
                 horizontalalignment='right')
        plt.text(1.00, 1.03, titles[1], transform=axArr[1].transAxes,
                 horizontalalignment='right')
if show:
plt.pause(0.01)
else:
plt.savefig('{}/{}_{:09d}'.format(
self.plotDirectory, self.runName, self.step))
plt.close(fig)
def describe_model(self):
""" Describe the current model state """
print("\nThis is a doubly-periodic spectral model for \n"
+ "{:s} \n".format(self.physics)
+ "with the following attributes:\n\n"
+ " Domain : {:.2e} X {:.2e} m\n".format(
self.Lx, self.Ly)
+ " Grid : {:d} X {:d}\n".format(self.nx, self.ny)
+ " Wave hypervisc : {:.2e} m^{:d}/s\n".format(
self.waveVisc, int(self.waveViscOrder))
+ " Mean hypervisc : {:.2e} m^{:d}/s\n".format(
self.meanVisc, int(self.meanViscOrder))
+ " Frequency param : {:.2f}\n".format(self.alpha)
+ " Comp. threads : {:d} \n".format(self.nThreads)
)
# Diagnostic-calculating functions - - - - - - - - - - - - - - - - - - - -
def _calc_CFL(self):
""" Calculate the maximum CFL number in the model """
maxSpeed = (np.sqrt(self.U**2.0 + self.V**2.0)).max()
CFL = maxSpeed * self.dt * self.nx/self.Lx
return CFL
def _calc_Eq(self):
""" Calculate the total mean energy """
E = np.sum( self.Lx*self.Ly*(self.k**2.0+self.l**2.0)
* np.abs(self.psih)**2.0 )
return E
# External helper functions - - - - - - - - - - - - - - - - - - - - - - - - - -
def init_from_turb_endpoint(fileName, runName, **kwargs):
""" Initialize a hydrostatic wave eqn model from the saved endpoint of a
twoDimTurbulence run. """
dataFile = h5py.File(fileName, 'r', libver='latest')
if 'endpoint' not in dataFile[runName]:
raise ValueError("The run named {} in {}".format(runName, fileName)
+ " does not have a saved endpoint.")
# Get model input and re-initialize
    inputParams = {param: value
                   for param, value in dataFile[runName].attrs.items()}
# Change 'visc' to 'meanVisc'
inputParams['meanVisc'] = inputParams.pop('visc')
inputParams['meanViscOrder'] = inputParams.pop('viscOrder')
# Change default time-stepper
inputParams['timeStepper'] = 'ETDRK4'
# Re-initialize model, overwriting 2D turb params with keyword args.
inputParams.update(kwargs)
m = model(**inputParams)
# Initialize turbulence field
m.set_q(dataFile[runName]['endpoint']['q'][:])
return m
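# A hypothetical usage sketch (the file name, run name and timestep below are
# illustrative only): restart a hydrostatic wave model from the endpoint of a
# saved twoDimTurbulence run, then inspect it.
#
#   m = init_from_turb_endpoint('turbulence.hdf5', 'run000', dt=1.0e-2)
#   m.describe_model()
#   m.visualize_model_state(show=True)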
|
glwagner/py2Periodic
|
py2Periodic/physics/hydrostaticWaveEqn_xy.py
|
Python
|
mit
| 12,965
|
[
"Gaussian"
] |
bee5d3f580852fffa3c8ba8ae7ee48a55338fb8e469a4fca2e06cb21c077ac0e
|
#!/usr/bin/python
import os
import sys
import Bio
from Bio import AlignIO
"""Functions for parsing and manipulating sequence alignment files
Functions by Zach Zbinden and Tyler Chafin"""
#Function to parse a PHYLIP formatted file of SNPs
#Function returns a Bio.Align MultipleSeqAlignment object
def read_phylip(infile):
for aln in AlignIO.parse(infile, "phylip-relaxed"):
return (aln)
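#A minimal usage sketch (the file name is illustrative only):
#   aln = read_phylip("example.phy")
#   print(len(aln), "sequences of length", aln.get_alignment_length())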
#Write FASTA from pandas df where col1 is index, col2 is sequence
#seqs must be a pandas df
def writeFasta(seqs, fas):
file_object = open(fas, "w")
#Write seqs to FASTA first
#Assumes that a[0] is index, a[1] is id, and a[2] is sequence
for a in seqs.itertuples():
name = ">id_" + str(a[1]) + "\n"
seq = a[2] + "\n"
file_object.write(name)
file_object.write(seq)
file_object.close()
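#A hypothetical usage sketch (column names and sequences are illustrative):
#   import pandas as pd
#   seqs = pd.DataFrame({"id": [1, 2], "seq": ["ACGT", "TTGA"]})
#   writeFasta(seqs, "out.fasta")  # writes >id_1 / ACGT and >id_2 / TTGA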
#This is a GENERATOR function to read through a .loci file
#.loci is the RAD alignment output from the program pyRAD
#YIELDS: Biopython MultipleSeqAlignment object
def read_loci(infile):
    # make an empty alignment object
    loci = Bio.Align.MultipleSeqAlignment([])
# read file from command line
    try:
        f = open(infile)
    except IOError as err:
        print("I/O error({0}): {1}".format(err.errno, err.strerror))
        raise
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
        raise
    with f as file_object:
for line in file_object:
if line[0] == ">":
identifier = line.split()[0]
sequence = line.split()[1]
loci.add_sequence(identifier, sequence)
else:
yield(loci)
loci = Bio.Align.MultipleSeqAlignment([])
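#A minimal usage sketch (the file name is illustrative only): iterate over
#the loci in a pyRAD .loci file, one MultipleSeqAlignment at a time.
#   for locus in read_loci("example.loci"):
#       print(len(locus), "sequences in this locus")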
#Function by ZVZ to "chunk" a given MAF alignment file into n number of chunks
def maf_chunker(infile, chunks):
# maf_chunker creates specified number of files containing equal numbers
# of loci (unless there are remainder loci, which append to the last
# chunk.
# 1 to n '.maf_chunk' files will be created
# read file from command line
with open(infile) as file_object:
#count number of loci, loci_count = -1 so that header is not counted
loci_count = -1
        # the 'chunks' argument is used directly (no need to re-read sys.argv)
for line in file_object:
line = line.strip()
if len(line) > 0:
pass
else:
loci_count = loci_count+1
chunk_size = loci_count // chunks
#write .maf file into chunk files, with each chunk beginning with header
#first read header
with open(infile) as file_object:
        max_chunks = chunks  # honour the function argument rather than sys.argv
        chunks = 0
loci_number = 0
individual = 1
for line in file_object:
line = line.strip()
#isolate header chunk
if loci_number == 0:
if len(line) > 0:
print(line.strip(), file=open(str(chunks) + ".maf_chunk", "a"))
else:
loci_number = loci_number + 1
chunks = chunks + 1
#move to loci chunks
else:
if chunks < max_chunks:
if loci_number <= chunk_size:
#print contents of header before printing loci of individual 1
if individual == 1 and chunks == 1:
with open('0.maf_chunk') as header:
for var in header:
print(var.strip(), file=open(str(chunks) + ".maf_chunk", "a"))
print("", file=open(str(chunks) + ".maf_chunk", "a"))
print(line.strip(), file=open(str(chunks) + ".maf_chunk", "a"))
individual = individual + 1
else:
if len(line) > 0:
print(line.strip(), file=open(str(chunks) + ".maf_chunk", "a"))
                                individual = individual + 1
else:
loci_number = loci_number + 1
individual = 1
print("", file=open(str(chunks) + ".maf_chunk", "a"))
else:
loci_number = 1
chunks = chunks + 1
individual = 1
with open('0.maf_chunk') as header:
for var in header:
print(var.strip(), file=open(str(chunks) + ".maf_chunk", "a"))
print("", file=open(str(chunks) + ".maf_chunk", "a"))
print(line.strip(), file=open(str(chunks) + ".maf_chunk", "a"))
else:
chunks = max_chunks
print(line.strip(), file=open(str(chunks) + ".maf_chunk", "a"))
os.remove("0.maf_chunk")
#Function by ZDZ to split a given .loci file into n chunks
def loci_chunker(infile, chunks):
# read file from command line
with open(infile) as file_object:
#count number of loci
loci_count = 1
        # the 'chunks' argument is used directly (no need to re-read sys.argv)
for line in file_object:
if line[0] == ">":
pass
else:
loci_count = loci_count+1
chunk_size = loci_count // chunks
#write .loci file into chunk files
with open(infile) as file_object:
        max_chunks = chunks  # honour the function argument rather than sys.argv
        chunks = 1
loci_number = 1
for line in file_object:
if chunks < max_chunks:
if loci_number <= chunk_size:
if line[0] == ">":
print(line.strip(), file=open(str(chunks) + ".chunk", "a"))
else:
loci_number = loci_number + 1
print("", file=open(str(chunks) + ".chunk", "a"))
else:
loci_number = 1
chunks = chunks + 1
print(line.strip(), file=open(str(chunks) + ".chunk", "a"))
else:
chunks = max_chunks
print(line.strip(), file=open(str(chunks) + ".chunk", "a"))
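#A hypothetical command-line entry point (not part of the original module),
#matching the sys.argv convention the comments above refer to:
#   if __name__ == "__main__":
#       loci_chunker(sys.argv[1], int(sys.argv[2]))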
|
tkchafin/fst_filter.py
|
aln_file_tools.py
|
Python
|
gpl-3.0
| 4,867
|
[
"Biopython"
] |
e16e3d29ba3aa16d3cd819e2f58e5cb52c63b2b0f4a41a7a912f8fbb07b3264e
|
# coding: utf-8
import constance
from django.conf import settings
from hub.models import ConfigurationFile, PerUserSetting
from hub.utils.i18n import I18nUtils
def external_service_tokens(request):
out = {}
if settings.GOOGLE_ANALYTICS_TOKEN:
out['google_analytics_token'] = settings.GOOGLE_ANALYTICS_TOKEN
if settings.RAVEN_JS_DSN:
out['raven_js_dsn'] = settings.RAVEN_JS_DSN
try:
intercom_setting = PerUserSetting.objects.get(name='INTERCOM_APP_ID')
except PerUserSetting.DoesNotExist:
pass
else:
out['intercom_app_id'] = intercom_setting.get_for_user(request.user)
return out
def email(request):
out = {}
# 'kpi_protocol' used in the activation_email.txt template
out['kpi_protocol'] = request.META.get('wsgi.url_scheme', 'http')
return out
def sitewide_messages(request):
"""
required in the context for any pages that need to display
custom text in django templates
"""
if request.path_info.endswith("accounts/register/"):
sitewide_message = I18nUtils.get_sitewide_message()
if sitewide_message is not None:
return {"welcome_message": sitewide_message}
return {}
class CombinedConfig:
'''
An object that gets its attributes from both a dictionary (`extra_config`)
AND a django-constance LazyConfig object
'''
def __init__(self, constance_config, extra_config):
'''
constance_config: LazyConfig object
extra_config: dictionary
'''
self.constance_config = constance_config
self.extra_config = extra_config
def __getattr__(self, key):
try:
return self.extra_config[key]
except KeyError:
return getattr(self.constance_config, key)
def config(request):
'''
Merges django-constance configuration field names and values with
slugs and URLs for each hub.ConfigurationFile. Example use in a template:
Please visit our <a href="{{ config.SUPPORT_URL }}">help page</a>.
<img src="{{ config.logo }}">
'''
conf_files = {f.slug: f.url for f in ConfigurationFile.objects.all()}
return {'config': CombinedConfig(constance.config, conf_files)}
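# A minimal sketch (values are illustrative) of how CombinedConfig resolves
# attributes: dictionary entries win, django-constance is the fallback.
#
#   combined = CombinedConfig(constance.config, {'logo': '/media/logo.png'})
#   combined.logo         # '/media/logo.png', served from the dictionary
#   combined.SUPPORT_URL  # falls through to the constance LazyConfig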
|
kobotoolbox/kpi
|
kpi/context_processors.py
|
Python
|
agpl-3.0
| 2,236
|
[
"VisIt"
] |
017e9a5200411d06a9aaf91c0287d3c2e7ec5dee75b671a49be86a3ac4604b0a
|
# coding: utf-8
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import random
import re
import socket
import ssl
import sys
import time
import math
from ..compat import (
compat_cookiejar_Cookie,
compat_cookies,
compat_etree_Element,
compat_etree_fromstring,
compat_getpass,
compat_integer_types,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
compat_xml_parse_error,
)
from ..downloader.f4m import (
get_base_url,
remove_encrypted_media,
)
from ..utils import (
NO_DEFAULT,
age_restricted,
base_url,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
determine_protocol,
dict_get,
error_to_compat_str,
ExtractorError,
extract_attributes,
fix_xml_ampersands,
float_or_none,
GeoRestrictedError,
GeoUtils,
int_or_none,
js_to_json,
JSON_LD_RE,
mimetype2ext,
orderedSet,
parse_bitrate,
parse_codecs,
parse_duration,
parse_iso8601,
parse_m3u8_attributes,
parse_resolution,
RegexNotFoundError,
sanitized_Request,
sanitize_filename,
str_or_none,
str_to_int,
strip_or_none,
unescapeHTML,
unified_strdate,
unified_timestamp,
update_Request,
update_url_query,
urljoin,
url_basename,
url_or_none,
xpath_element,
xpath_text,
xpath_with_ns,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url The mandatory URL representing the media:
for plain file media - HTTP URL of this file,
for RTMP - RTMP URL,
for HLS - URL of the M3U8 media playlist,
for HDS - URL of the F4M manifest,
for DASH
- HTTP URL to plain file media (in case of
unfragmented media)
- URL of the MPD manifest or base URL
representing the media if MPD manifest
is parsed from a string (in case of
fragmented media)
for MSS - URL of the ISM manifest.
* manifest_url
The URL of the manifest file in case of
fragmented media:
for HLS - URL of the M3U8 master playlist,
for HDS - URL of the F4M manifest,
for DASH - URL of the MPD manifest,
for MSS - URL of the ISM manifest.
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
                                 Calculated from the format_id, width, height,
                                 and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", "m3u8_native" or "http_dash_segments".
* fragment_base_url
Base URL for fragments. Each fragment's path
value (if present) will be relative to
this URL.
* fragments A list of fragments of a fragmented media.
Each fragment entry must contain either an url
or a path. If an url is present it should be
considered by a client. Otherwise both path and
fragment_base_url must be present. Here is
the list of all potential fields:
* "url" - fragment's URL
* "path" - fragment's path relative to
fragment_base_url
* "duration" (optional, int or float)
* "filesize" (optional, int)
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
* downloader_options A dictionary of downloader options as
described in FileDownloader
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height}",
deprecated)
* "filesize" (optional, int)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
channel: Full name of the channel the video is uploaded on.
Note that channel fields may or may not repeat uploader
fields. This depends on a particular extractor.
channel_id: Id of the channel.
channel_url: Full URL to a channel webpage.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{tag: subformats}. "tag" is usually a language code, and
"subformats" is a list sorted from lower to higher
preference, each element is a dictionary with the "ext"
entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
    average_rating: Average rating given by users, the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to youtube-dl it
                    should allow getting the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
chapters: A list of dictionaries, with the following entries:
* "start_time" - The start time of the chapter in seconds
* "end_time" - The end time of the chapter in seconds
* "title" (optional, string)
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series, programme or podcast:
series: Title of the series or programme the video episode belongs to.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
album_artist: List of all artists appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "id", "title", "description", "uploader",
"uploader_id", "uploader_url" attributes with the same semantics as videos
(see above).
_type "multi_video" indicates that there are multiple videos that
    form a single show, for example multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
_GEO_BYPASS attribute may be set to False in order to disable
geo restriction bypass mechanisms for a particular extractor.
Though it won't disable explicit geo restriction bypass based on
country code provided with geo_bypass_country.
_GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
countries for this extractor. One of these countries will be used by
geo restriction bypass mechanism right away in order to bypass
geo restriction, of course, if the mechanism is not disabled.
_GEO_IP_BLOCKS attribute may contain a list of presumably geo unrestricted
IP blocks in CIDR notation for this extractor. One of these IP blocks
will be used by geo restriction bypass mechanism similarly
to _GEO_COUNTRIES.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_x_forwarded_for_ip = None
_GEO_BYPASS = True
_GEO_COUNTRIES = None
_GEO_IP_BLOCKS = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self._x_forwarded_for_ip = None
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return compat_str(m.group('id'))
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
self._initialize_geo_bypass({
'countries': self._GEO_COUNTRIES,
'ip_blocks': self._GEO_IP_BLOCKS,
})
if not self._ready:
self._real_initialize()
self._ready = True
def _initialize_geo_bypass(self, geo_bypass_context):
"""
Initialize geo restriction bypass mechanism.
This method is used to initialize geo bypass mechanism based on faking
X-Forwarded-For HTTP header. A random country from provided country list
is selected and a random IP belonging to this country is generated. This
IP will be passed as X-Forwarded-For HTTP header in all subsequent
HTTP requests.
This method will be used for initial geo bypass mechanism initialization
during the instance initialization with _GEO_COUNTRIES and
_GEO_IP_BLOCKS.
You may also manually call it from extractor's code if geo bypass
information is not available beforehand (e.g. obtained during
extraction) or due to some other reason. In this case you should pass
this information in geo bypass context passed as first argument. It may
contain following fields:
countries: List of geo unrestricted countries (similar
to _GEO_COUNTRIES)
ip_blocks: List of geo unrestricted IP blocks in CIDR notation
(similar to _GEO_IP_BLOCKS)
"""
if not self._x_forwarded_for_ip:
# Geo bypass mechanism is explicitly disabled by user
if not self._downloader.params.get('geo_bypass', True):
return
if not geo_bypass_context:
geo_bypass_context = {}
# Backward compatibility: previously _initialize_geo_bypass
# expected a list of countries, some 3rd party code may still use
# it this way
if isinstance(geo_bypass_context, (list, tuple)):
geo_bypass_context = {
'countries': geo_bypass_context,
}
# The whole point of geo bypass mechanism is to fake IP
# as X-Forwarded-For HTTP header based on some IP block or
# country code.
# Path 1: bypassing based on IP block in CIDR notation
# Explicit IP block specified by user, use it right away
# regardless of whether extractor is geo bypassable or not
ip_block = self._downloader.params.get('geo_bypass_ip_block', None)
# Otherwise use random IP block from geo bypass context but only
# if extractor is known as geo bypassable
if not ip_block:
ip_blocks = geo_bypass_context.get('ip_blocks')
if self._GEO_BYPASS and ip_blocks:
ip_block = random.choice(ip_blocks)
if ip_block:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
if self._downloader.params.get('verbose', False):
self._downloader.to_screen(
'[debug] Using fake IP %s as X-Forwarded-For.'
% self._x_forwarded_for_ip)
return
# Path 2: bypassing based on country code
# Explicit country code specified by user, use it right away
# regardless of whether extractor is geo bypassable or not
country = self._downloader.params.get('geo_bypass_country', None)
# Otherwise use random country code from geo bypass context but
# only if extractor is known as geo bypassable
if not country:
countries = geo_bypass_context.get('countries')
if self._GEO_BYPASS and countries:
country = random.choice(countries)
if country:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
if self._downloader.params.get('verbose', False):
self._downloader.to_screen(
'[debug] Using fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country.upper()))
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
try:
for _ in range(2):
try:
self.initialize()
ie_result = self._real_extract(url)
if self._x_forwarded_for_ip:
ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
return ie_result
except GeoRestrictedError as e:
if self.__maybe_fake_ip_and_retry(e.countries):
continue
raise
except ExtractorError:
raise
except compat_http_client.IncompleteRead as e:
raise ExtractorError('A network error has occurred.', cause=e, expected=True)
except (KeyError, StopIteration) as e:
raise ExtractorError('An extractor error has occurred.', cause=e)
def __maybe_fake_ip_and_retry(self, countries):
if (not self._downloader.params.get('geo_bypass_country', None)
and self._GEO_BYPASS
and self._downloader.params.get('geo_bypass', True)
and not self._x_forwarded_for_ip
and countries):
country_code = random.choice(countries)
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
if self._x_forwarded_for_ip:
self.report_warning(
'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country_code.upper()))
return True
return False
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return compat_str(cls.__name__[:-2])
@property
def IE_NAME(self):
return compat_str(type(self).__name__[:-2])
@staticmethod
def __can_accept_status_code(err, expected_status):
assert isinstance(err, compat_urllib_error.HTTPError)
if expected_status is None:
return False
if isinstance(expected_status, compat_integer_types):
return err.code == expected_status
elif isinstance(expected_status, (list, tuple)):
return err.code in expected_status
elif callable(expected_status):
return expected_status(err.code) is True
else:
assert False
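        # Illustrative only -- the three accepted shapes of expected_status:
        #   expected_status=404                        a single status code
        #   expected_status=(404, 410)                 any of several codes
        #   expected_status=lambda c: 400 <= c < 500   a predicate on the code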
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}, expected_status=None):
"""
Return the response handle.
See _download_webpage docstring for arguments specification.
"""
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
# Some sites check X-Forwarded-For HTTP header in order to figure out
# the origin of the client behind proxy. This allows bypassing geo
# restriction by faking this header's value to IP that belongs to some
# geo unrestricted country. We will do so once we encounter any
# geo restriction error.
if self._x_forwarded_for_ip:
if 'X-Forwarded-For' not in headers:
headers['X-Forwarded-For'] = self._x_forwarded_for_ip
if isinstance(url_or_request, compat_urllib_request.Request):
url_or_request = update_Request(
url_or_request, data=data, headers=headers, query=query)
else:
if query:
url_or_request = update_url_query(url_or_request, query)
if data is not None or headers:
url_or_request = sanitized_Request(url_or_request, data, headers)
exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
exceptions.append(ssl.CertificateError)
try:
return self._downloader.urlopen(url_or_request)
except tuple(exceptions) as err:
if isinstance(err, compat_urllib_error.HTTPError):
if self.__can_accept_status_code(err, expected_status):
# Retain reference to error to prevent file object from
# being closed before it can be read. Works around the
# effects of <https://bugs.python.org/issue15002>
# introduced in Python 3.4.1.
err.fp._error = err
return err.fp
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
"""
Return a tuple (page content as string, URL handle).
See _download_webpage docstring for arguments specification.
"""
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
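        # Illustrative inputs (not from the original source):
        #   ('text/html; charset=iso-8859-1', b'...')      -> 'iso-8859-1'
        #   ('text/html', b'<meta charset="utf-8" />')     -> 'utf-8'
        #   ('text/html', b'\xff\xfe...')                  -> 'utf-16'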
def __check_blocked(self, content):
first_block = content[:512]
if ('<title>Access to this site is blocked</title>' in content
and 'Websense' in first_block):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
'Websense information URL', default=None)
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
if '<title>The URL you requested has been blocked</title>' in first_block:
msg = (
'Access to this webpage has been blocked by Indian censorship. '
'Use a VPN or proxy server (with --proxy) to route around it.')
block_msg = self._html_search_regex(
r'</h1><p>(.*?)</p>',
content, 'block message', default=None)
if block_msg:
msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
raise ExtractorError(msg, expected=True)
if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content
and 'blocklist.rkn.gov.ru' in content):
raise ExtractorError(
'Access to this webpage has been blocked by decision of the Russian government. '
'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
expected=True)
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
if not encoding:
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
if self._downloader.params.get('dump_intermediate_pages', False):
self.to_screen('Dumping request to ' + urlh.geturl())
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
basen = '%s_%s' % (video_id, urlh.geturl())
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if compat_os_name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
self.__check_blocked(content)
return content
def _download_webpage(
self, url_or_request, video_id, note=None, errnote=None,
fatal=True, tries=1, timeout=5, encoding=None, data=None,
headers={}, query={}, expected_status=None):
"""
Return the data of the page as a string.
Arguments:
url_or_request -- plain text URL as a string or
                          a compat_urllib_request.Request object
video_id -- Video/playlist/item identifier (string)
Keyword arguments:
note -- note printed before downloading (string)
errnote -- note printed in case of an error (string)
fatal -- flag denoting whether error should be considered fatal,
i.e. whether it should cause ExtractionError to be raised,
otherwise a warning will be reported and extraction continued
tries -- number of tries
timeout -- sleep interval between tries
encoding -- encoding for a page content decoding, guessed automatically
when not explicitly specified
data -- POST data (bytes)
headers -- HTTP headers (dict)
query -- URL query (dict)
        expected_status -- allows accepting failed HTTP requests (non-2xx
status code) by explicitly specifying a set of accepted status
codes. Can be any of the following entities:
- an integer type specifying an exact failed status code to
accept
- a list or a tuple of integer types specifying a list of
failed status codes to accept
- a callable accepting an actual failed status code and
returning True if it should be accepted
Note that this argument does not affect success status codes (2xx)
which are always accepted.
"""
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
def _download_xml_handle(
self, url_or_request, video_id, note='Downloading XML',
errnote='Unable to download XML', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return a tuple (xml as an compat_etree_Element, URL handle).
See _download_webpage docstring for arguments specification.
"""
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
if res is False:
return res
xml_string, urlh = res
return self._parse_xml(
xml_string, video_id, transform_source=transform_source,
fatal=fatal), urlh
def _download_xml(
self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None,
data=None, headers={}, query={}, expected_status=None):
"""
Return the xml as an compat_etree_Element.
See _download_webpage docstring for arguments specification.
"""
res = self._download_xml_handle(
url_or_request, video_id, note=note, errnote=errnote,
transform_source=transform_source, fatal=fatal, encoding=encoding,
data=data, headers=headers, query=query,
expected_status=expected_status)
return res if res is False else res[0]
def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True):
if transform_source:
xml_string = transform_source(xml_string)
try:
return compat_etree_fromstring(xml_string.encode('utf-8'))
except compat_xml_parse_error as ve:
errmsg = '%s: Failed to parse XML ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def _download_json_handle(
self, url_or_request, video_id, note='Downloading JSON metadata',
errnote='Unable to download JSON metadata', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return a tuple (JSON object, URL handle).
See _download_webpage docstring for arguments specification.
"""
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
if res is False:
return res
json_string, urlh = res
return self._parse_json(
json_string, video_id, transform_source=transform_source,
fatal=fatal), urlh
def _download_json(
self, url_or_request, video_id, note='Downloading JSON metadata',
errnote='Unable to download JSON metadata', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return the JSON object as a dict.
See _download_webpage docstring for arguments specification.
"""
res = self._download_json_handle(
url_or_request, video_id, note=note, errnote=errnote,
transform_source=transform_source, fatal=fatal, encoding=encoding,
data=data, headers=headers, query=query,
expected_status=expected_status)
return res if res is False else res[0]
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
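        # A minimal usage sketch (video id and payload are illustrative):
        #   self._parse_json('{"title": "x"}', 'abc123')  ->  {'title': 'x'}
        # With fatal=False, malformed JSON is reported as a warning and the
        # method falls through, returning None instead of raising.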
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
@staticmethod
def raise_login_required(msg='This video is only available for registered users'):
raise ExtractorError(
'%s. Use --username and --password or --netrc to provide account credentials.' % msg,
expected=True)
@staticmethod
def raise_geo_restricted(msg='This video is not available from your location due to geo restriction', countries=None):
raise GeoRestrictedError(msg, countries=countries)
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
def playlist_from_matches(self, matches, playlist_id=None, playlist_title=None, getter=None, ie=None):
urls = orderedSet(
self.url_result(self._proto_relative_url(getter(m) if getter else m), ie)
for m in matches)
return self.playlist_result(
urls, playlist_id=playlist_id, playlist_title=playlist_title)
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
        Perform a regex search on the given string, using a single pattern or
        a list of patterns, returning the first matching group.
In case of failure return a default value or raise a WARNING or a
RegexNotFoundError, depending on fatal, specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
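# Illustrative sketch (hypothetical field and pattern): a typical call is
#   title = self._search_regex(
#       r'<h1[^>]*>([^<]+)</h1>', webpage, 'title', default=None)
# With default=None a failed match returns None instead of raising
# RegexNotFoundError; without a default and with fatal=True it raises.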
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_netrc_login_info(self, netrc_machine=None):
username = None
password = None
netrc_machine = netrc_machine or self._NETRC_MACHINE
if self._downloader.params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(netrc_machine)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError(
'No authenticators for %s' % netrc_machine)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning(
'parsing .netrc: %s' % error_to_compat_str(err))
return username, password
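# Illustrative sketch: with --netrc enabled, a ~/.netrc entry of the form
#   machine <netrc_machine> login myuser password mypass
# makes this helper return ('myuser', 'mypass'); the machine name is
# whatever _NETRC_MACHINE the concrete extractor defines.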
def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
"""
Get the login info as (username, password)
First look for the manually specified credentials using username_option
and password_option as keys in params dictionary. If no such credentials
available look in the netrc file using the netrc_machine or _NETRC_MACHINE
value.
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get(username_option) is not None:
username = downloader_params[username_option]
password = downloader_params[password_option]
else:
username, password = self._get_netrc_login_info(netrc_machine)
return username, password
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor') is not None:
return downloader_params['twofactor']
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
property_re = (r'(?:name|property)=(?:\'og[:-]%(prop)s\'|"og[:-]%(prop)s"|\s*og[:-]%(prop)s\b)'
% {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
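# Illustrative example: _og_regexes('title') yields two patterns so that
# both attribute orders match, e.g.
#   <meta property="og:title" content="Some title">
#   <meta content="Some title" property="og:title">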
@staticmethod
def _meta_regex(prop):
return r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
if not isinstance(prop, (list, tuple)):
prop = [prop]
if name is None:
name = 'OpenGraph %s' % prop[0]
og_regexes = []
for p in prop:
og_regexes.extend(self._og_regexes(p))
escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if not isinstance(name, (list, tuple)):
name = [name]
if display_name is None:
display_name = name[0]
return self._html_search_regex(
[self._meta_regex(n) for n in name],
html, display_name, fatal=fatal, group='content', **kwargs)
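# Illustrative sketch (hypothetical names): a call such as
#   self._html_search_meta(('description', 'abstract'), webpage)
# tries each meta name in order and returns the first matching
# content attribute value.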
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower())
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta(
'isFamilyFriendly', html, default=None)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower())
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
json_ld_list = list(re.finditer(JSON_LD_RE, html))
default = kwargs.get('default', NO_DEFAULT)
# JSON-LD may be malformed and thus `fatal` should be respected.
# At the same time `default` may be passed that assumes `fatal=False`
# for _search_regex. Let's simulate the same behavior here as well.
fatal = kwargs.get('fatal', True) if default is NO_DEFAULT else False
json_ld = []
for mobj in json_ld_list:
json_ld_item = self._parse_json(
mobj.group('json_ld'), video_id, fatal=fatal)
if not json_ld_item:
continue
if isinstance(json_ld_item, dict):
json_ld.append(json_ld_item)
elif isinstance(json_ld_item, (list, tuple)):
json_ld.extend(json_ld_item)
if json_ld:
json_ld = self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
if json_ld:
return json_ld
if default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract JSON-LD')
else:
self._downloader.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
return {}
def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
if isinstance(json_ld, compat_str):
json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
if not json_ld:
return {}
info = {}
if not isinstance(json_ld, (list, tuple, dict)):
return info
if isinstance(json_ld, dict):
json_ld = [json_ld]
INTERACTION_TYPE_MAP = {
'CommentAction': 'comment',
'AgreeAction': 'like',
'DisagreeAction': 'dislike',
'LikeAction': 'like',
'DislikeAction': 'dislike',
'ListenAction': 'view',
'WatchAction': 'view',
'ViewAction': 'view',
}
def extract_interaction_statistic(e):
interaction_statistic = e.get('interactionStatistic')
if not isinstance(interaction_statistic, list):
return
for is_e in interaction_statistic:
if not isinstance(is_e, dict):
continue
if is_e.get('@type') != 'InteractionCounter':
continue
interaction_type = is_e.get('interactionType')
if not isinstance(interaction_type, compat_str):
continue
# For the interaction count some sites provide a string with non-digit
# characters (e.g. ",") instead of an integer as required by the spec,
# so extract the count with the more relaxed str_to_int
interaction_count = str_to_int(is_e.get('userInteractionCount'))
if interaction_count is None:
continue
count_kind = INTERACTION_TYPE_MAP.get(interaction_type.split('/')[-1])
if not count_kind:
continue
count_key = '%s_count' % count_kind
if info.get(count_key) is not None:
continue
info[count_key] = interaction_count
def extract_video_object(e):
assert e['@type'] == 'VideoObject'
info.update({
'url': url_or_none(e.get('contentUrl')),
'title': unescapeHTML(e.get('name')),
'description': unescapeHTML(e.get('description')),
'thumbnail': url_or_none(e.get('thumbnailUrl') or e.get('thumbnailURL')),
'duration': parse_duration(e.get('duration')),
'timestamp': unified_timestamp(e.get('uploadDate')),
'uploader': str_or_none(e.get('author')),
'filesize': float_or_none(e.get('contentSize')),
'tbr': int_or_none(e.get('bitrate')),
'width': int_or_none(e.get('width')),
'height': int_or_none(e.get('height')),
'view_count': int_or_none(e.get('interactionCount')),
})
extract_interaction_statistic(e)
for e in json_ld:
if '@context' in e:
item_type = e.get('@type')
if expected_type is not None and expected_type != item_type:
continue
if item_type in ('TVEpisode', 'Episode'):
episode_name = unescapeHTML(e.get('name'))
info.update({
'episode': episode_name,
'episode_number': int_or_none(e.get('episodeNumber')),
'description': unescapeHTML(e.get('description')),
})
if not info.get('title') and episode_name:
info['title'] = episode_name
part_of_season = e.get('partOfSeason')
if isinstance(part_of_season, dict) and part_of_season.get('@type') in ('TVSeason', 'Season', 'CreativeWorkSeason'):
info.update({
'season': unescapeHTML(part_of_season.get('name')),
'season_number': int_or_none(part_of_season.get('seasonNumber')),
})
part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
if isinstance(part_of_series, dict) and part_of_series.get('@type') in ('TVSeries', 'Series', 'CreativeWorkSeries'):
info['series'] = unescapeHTML(part_of_series.get('name'))
elif item_type == 'Movie':
info.update({
'title': unescapeHTML(e.get('name')),
'description': unescapeHTML(e.get('description')),
'duration': parse_duration(e.get('duration')),
'timestamp': unified_timestamp(e.get('dateCreated')),
})
elif item_type in ('Article', 'NewsArticle'):
info.update({
'timestamp': parse_iso8601(e.get('datePublished')),
'title': unescapeHTML(e.get('headline')),
'description': unescapeHTML(e.get('articleBody')),
})
elif item_type == 'VideoObject':
extract_video_object(e)
if expected_type is None:
continue
else:
break
video = e.get('video')
if isinstance(video, dict) and video.get('@type') == 'VideoObject':
extract_video_object(video)
if expected_type is None:
continue
else:
break
return dict((k, v) for k, v in info.items() if v is not None)
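# Illustrative sketch: a minimal JSON-LD object such as
#   {"@context": "http://schema.org", "@type": "VideoObject",
#    "name": "Demo", "duration": "PT1M30S",
#    "interactionStatistic": [{"@type": "InteractionCounter",
#      "interactionType": "http://schema.org/WatchAction",
#      "userInteractionCount": "1,234"}]}
# would map to {'title': 'Demo', 'duration': 90.0, 'view_count': 1234};
# None-valued fields are dropped by the final dict comprehension.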
@staticmethod
def _hidden_inputs(html):
# strip HTML comments first so commented-out inputs are ignored
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
hidden_inputs = {}
for input in re.findall(r'(?i)(<input[^>]+>)', html):
attrs = extract_attributes(input)
# skip inputs whose attributes could not be parsed
if not attrs:
continue
if attrs.get('type') not in ('hidden', 'submit'):
continue
name = attrs.get('name') or attrs.get('id')
value = attrs.get('value')
if name and value is not None:
hidden_inputs[name] = value
return hidden_inputs
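# Illustrative example: for
#   <input type="hidden" name="csrf" value="abc"><input type="text" name="q">
# this returns {'csrf': 'abc'}; the visible text input is filtered out
# because its type is neither 'hidden' nor 'submit'.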
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
def _sort_formats(self, formats, field_preference=None):
if not formats:
raise ExtractorError('No video formats found')
for f in formats:
# Automatically determine tbr when missing based on abr and vbr (improves
# formats sorting in some cases)
if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
f['tbr'] = f['abr'] + f['vbr']
def _formats_key(f):
# TODO remove the following workaround
from ..utils import determine_ext
if not f.get('ext') and 'url' in f:
f['ext'] = determine_ext(f['url'])
if isinstance(field_preference, (list, tuple)):
return tuple(
f.get(field)
if f.get(field) is not None
else ('' if field == 'format_id' else -1)
for field in field_preference)
preference = f.get('preference')
if preference is None:
preference = 0
if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
preference -= 0.5
protocol = f.get('protocol') or determine_protocol(f)
proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)
if f.get('vcodec') == 'none': # audio only
preference -= 50
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
except ValueError:
audio_ext_preference = -1
else:
if f.get('acodec') == 'none': # video only
preference -= 40
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
ext_preference = -1
audio_ext_preference = 0
return (
preference,
f.get('language_preference') if f.get('language_preference') is not None else -1,
f.get('quality') if f.get('quality') is not None else -1,
f.get('tbr') if f.get('tbr') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
proto_preference,
ext_preference,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id') if f.get('format_id') is not None else '',
)
formats.sort(key=_formats_key)
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats
def _is_valid_url(self, url, video_id, item='video', headers={}):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
return True
except ExtractorError as e:
self.to_screen(
'%s: %s URL is invalid, skipping: %s'
% (video_id, item, error_to_compat_str(e.cause)))
return False
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
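# Illustrative example: _proto_relative_url('//cdn.example.com/v.mp4')
# returns 'https://cdn.example.com/v.mp4', or the 'http:' variant when
# the user set --prefer-insecure (see http_scheme above).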
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None, data=None, headers={}, query={}):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest',
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/ytdl-org/youtube-dl/issues/6215#issuecomment-121704244)
transform_source=transform_source,
fatal=fatal, data=data, headers=headers, query=query)
if manifest is False:
return []
return self._parse_f4m_formats(
manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
if not isinstance(manifest, compat_etree_Element) and not fatal:
return []
# currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
if akamai_pv is not None and ';' in akamai_pv.text:
playerVerificationChallenge = akamai_pv.text.split(';')[0]
if playerVerificationChallenge.strip() != '':
return []
formats = []
manifest_version = '1.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
if not media_nodes:
manifest_version = '2.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
# Remove unsupported DRM protected media from final formats
# rendition (see https://github.com/ytdl-org/youtube-dl/issues/8573).
media_nodes = remove_encrypted_media(media_nodes)
if not media_nodes:
return formats
manifest_base_url = get_base_url(manifest)
bootstrap_info = xpath_element(
manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
'bootstrap info', default=None)
vcodec = None
mime_type = xpath_text(
manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
'base URL', default=None)
if mime_type and mime_type.startswith('audio/'):
vcodec = 'none'
for i, media_el in enumerate(media_nodes):
tbr = int_or_none(media_el.attrib.get('bitrate'))
width = int_or_none(media_el.attrib.get('width'))
height = int_or_none(media_el.attrib.get('height'))
format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
# If <bootstrapInfo> is present, the specified f4m is a
# stream-level manifest, and only set-level manifests may refer to
# external resources. See section 11.4 and section 4 of F4M spec
if bootstrap_info is None:
media_url = None
# @href is introduced in 2.0, see section 11.6 of F4M spec
if manifest_version == '2.0':
media_url = media_el.attrib.get('href')
if media_url is None:
media_url = media_el.attrib.get('url')
if not media_url:
continue
manifest_url = (
media_url if media_url.startswith('http://') or media_url.startswith('https://')
else ((manifest_base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
# If media_url is itself a f4m manifest do the recursive extraction
# since bitrates in parent manifest (this one) and media_url manifest
# may differ leading to inability to resolve the format by requested
# bitrate in f4m downloader
ext = determine_ext(manifest_url)
if ext == 'f4m':
f4m_formats = self._extract_f4m_formats(
manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal)
# Sometimes a stream-level manifest contains a single media entry that
# does not carry any quality metadata (e.g. http://matchtv.ru/#live-player),
# while the parent's media entry in the set-level manifest may contain
# it; copy it from the parent in such cases.
if len(f4m_formats) == 1:
f = f4m_formats[0]
f.update({
'tbr': f.get('tbr') or tbr,
'width': f.get('width') or width,
'height': f.get('height') or height,
'format_id': f.get('format_id') if not tbr else format_id,
'vcodec': vcodec,
})
formats.extend(f4m_formats)
continue
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', preference=preference,
m3u8_id=m3u8_id, fatal=fatal))
continue
formats.append({
'format_id': format_id,
'url': manifest_url,
'manifest_url': manifest_url,
'ext': 'flv' if bootstrap_info is not None else None,
'protocol': 'f4m',
'tbr': tbr,
'width': width,
'height': height,
'vcodec': vcodec,
'preference': preference,
})
return formats
def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
return {
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 100 if preference else -100,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, note=None, errnote=None,
fatal=True, live=False, data=None, headers={},
query={}):
res = self._download_webpage_handle(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal, data=data, headers=headers, query=query)
if res is False:
return []
m3u8_doc, urlh = res
m3u8_url = urlh.geturl()
return self._parse_m3u8_formats(
m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol,
preference=preference, m3u8_id=m3u8_id, live=live)
def _parse_m3u8_formats(self, m3u8_doc, m3u8_url, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, live=False):
if '#EXT-X-FAXS-CM:' in m3u8_doc: # Adobe Flash Access
return []
if re.search(r'#EXT-X-SESSION-KEY:.*?URI="skd://', m3u8_doc): # Apple FairPlay
return []
formats = []
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
# References:
# 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
# 2. https://github.com/ytdl-org/youtube-dl/issues/12211
# 3. https://github.com/ytdl-org/youtube-dl/issues/18923
# We should try extracting formats only from master playlists [1, 4.3.4],
# i.e. playlists that describe available qualities. On the other hand
# media playlists [1, 4.3.3] should be returned as is since they contain
# just the media without qualities renditions.
# Fortunately, master playlist can be easily distinguished from media
# playlist based on particular tags availability. As of [1, 4.3.3, 4.3.4]
# master playlist tags MUST NOT appear in a media playlist and vice versa.
# As of [1, 4.3.3.1] #EXT-X-TARGETDURATION tag is REQUIRED for every
# media playlist and MUST NOT appear in master playlist thus we can
# clearly detect media playlist with this criterion.
if '#EXT-X-TARGETDURATION' in m3u8_doc: # media playlist, return as is
return [{
'url': m3u8_url,
'format_id': m3u8_id,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}]
groups = {}
last_stream_inf = {}
def extract_media(x_media_line):
media = parse_m3u8_attributes(x_media_line)
# As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
if not (media_type and group_id and name):
return
groups.setdefault(group_id, []).append(media)
if media_type not in ('VIDEO', 'AUDIO'):
return
media_url = media.get('URI')
if media_url:
format_id = []
for v in (m3u8_id, group_id, name):
if v:
format_id.append(v)
f = {
'format_id': '-'.join(format_id),
'url': format_url(media_url),
'manifest_url': m3u8_url,
'language': media.get('LANGUAGE'),
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}
if media_type == 'AUDIO':
f['vcodec'] = 'none'
formats.append(f)
def build_stream_name():
# Although the specification does not mention a NAME attribute for
# the EXT-X-STREAM-INF tag, it may still sometimes be present (see [1]
# or the vidio test in TestInfoExtractor.test_parse_m3u8_formats)
# 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
stream_name = last_stream_inf.get('NAME')
if stream_name:
return stream_name
# If there is no NAME in EXT-X-STREAM-INF, it will be obtained
# from the corresponding rendition group
stream_group_id = last_stream_inf.get('VIDEO')
if not stream_group_id:
return
stream_group = groups.get(stream_group_id)
if not stream_group:
return stream_group_id
rendition = stream_group[0]
return rendition.get('NAME') or stream_group_id
# parse EXT-X-MEDIA tags before EXT-X-STREAM-INF in order to have the
# chance to detect video only formats when EXT-X-STREAM-INF tags
# precede EXT-X-MEDIA tags in HLS manifest such as [3].
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-MEDIA:'):
extract_media(line)
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_stream_inf = parse_m3u8_attributes(line)
elif line.startswith('#') or not line.strip():
continue
else:
tbr = float_or_none(
last_stream_inf.get('AVERAGE-BANDWIDTH')
or last_stream_inf.get('BANDWIDTH'), scale=1000)
format_id = []
if m3u8_id:
format_id.append(m3u8_id)
stream_name = build_stream_name()
# The bandwidth of live streams may differ over time, making
# format_id unpredictable, so it is better to keep the provided
# format_id intact.
if not live:
format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
manifest_url = format_url(line.strip())
f = {
'format_id': '-'.join(format_id),
'url': manifest_url,
'manifest_url': m3u8_url,
'tbr': tbr,
'ext': ext,
'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
'protocol': entry_protocol,
'preference': preference,
}
resolution = last_stream_inf.get('RESOLUTION')
if resolution:
mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
if mobj:
f['width'] = int(mobj.group('width'))
f['height'] = int(mobj.group('height'))
# Unified Streaming Platform
mobj = re.search(
r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
if mobj:
abr, vbr = mobj.groups()
abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
f.update({
'vbr': vbr,
'abr': abr,
})
codecs = parse_codecs(last_stream_inf.get('CODECS'))
f.update(codecs)
audio_group_id = last_stream_inf.get('AUDIO')
# As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
# references a rendition group MUST have a CODECS attribute.
# However, this is not always respected, for example, [2]
# contains EXT-X-STREAM-INF tag which references AUDIO
# rendition group but does not have CODECS and despite
# referencing an audio group it represents a complete
# (with audio and video) format. So, for such cases we will
# ignore references to rendition groups and treat them
# as complete formats.
if audio_group_id and codecs and f.get('vcodec') != 'none':
audio_group = groups.get(audio_group_id)
if audio_group and audio_group[0].get('URI'):
# TODO: update acodec for audio only formats with
# the same GROUP-ID
f['acodec'] = 'none'
formats.append(f)
# for DailyMotion
progressive_uri = last_stream_inf.get('PROGRESSIVE-URI')
if progressive_uri:
http_f = f.copy()
del http_f['manifest_url']
http_f.update({
'format_id': f['format_id'].replace('hls-', 'http-'),
'protocol': 'http',
'url': progressive_uri,
})
formats.append(http_f)
last_stream_inf = {}
return formats
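# Illustrative sketch: a master playlist fragment such as
#   #EXT-X-STREAM-INF:BANDWIDTH=1280000,RESOLUTION=640x360
#   http://example.com/low.m3u8
# produces one format with tbr=1280.0, width=640 and height=360, while
# a media playlist (anything containing #EXT-X-TARGETDURATION) is
# returned early as a single format pointing at the playlist URL itself.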
@staticmethod
def _xpath_ns(path, namespace=None):
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
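# Illustrative example:
#   _xpath_ns('./head/meta', 'http://www.w3.org/ns/SMIL')
# returns './{http://www.w3.org/ns/SMIL}head/{http://www.w3.org/ns/SMIL}meta',
# the Clark-notation path ElementTree expects for namespaced lookups.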
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
if smil is False:
assert not fatal
return []
namespace = self._parse_smil_namespace(smil)
return self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
return {}
return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
return self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
namespace = self._parse_smil_namespace(smil)
formats = self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
video_id = os.path.splitext(url_basename(smil_url))[0]
title = None
description = None
upload_date = None
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
name = meta.attrib.get('name')
content = meta.attrib.get('content')
if not name or not content:
continue
if not title and name == 'title':
title = content
elif not description and name in ('description', 'abstract'):
description = content
elif not upload_date and name == 'date':
upload_date = unified_strdate(content)
thumbnails = [{
'id': image.get('type'),
'url': image.get('src'),
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
return {
'id': video_id,
'title': title or video_id,
'description': description,
'upload_date': upload_date,
'thumbnails': thumbnails,
'formats': formats,
'subtitles': subtitles,
}
def _parse_smil_namespace(self, smil):
return self._search_regex(
r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
base = smil_url
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
b = meta.get('base') or meta.get('httpBase')
if b:
base = b
break
formats = []
rtmp_count = 0
http_count = 0
m3u8_count = 0
srcs = []
media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
for medium in media:
src = medium.get('src')
if not src or src in srcs:
continue
srcs.append(src)
bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
width = int_or_none(medium.get('width'))
height = int_or_none(medium.get('height'))
proto = medium.get('proto')
ext = medium.get('ext')
src_ext = determine_ext(src)
streamer = medium.get('streamer') or base
if proto == 'rtmp' or streamer.startswith('rtmp'):
rtmp_count += 1
formats.append({
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
if transform_rtmp_url:
streamer, src = transform_rtmp_url(streamer, src)
formats[-1].update({
'url': streamer,
'play_path': src,
})
continue
src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
src_url = src_url.strip()
if proto == 'm3u8' or src_ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
if len(m3u8_formats) == 1:
m3u8_count += 1
m3u8_formats[0].update({
'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
'tbr': bitrate,
'width': width,
'height': height,
})
formats.extend(m3u8_formats)
elif src_ext == 'f4m':
f4m_url = src_url
if not f4m_params:
f4m_params = {
'hdcore': '3.2.0',
'plugin': 'flowplayer-3.2.0.1',
}
f4m_url += '&' if '?' in f4m_url else '?'
f4m_url += compat_urllib_parse_urlencode(f4m_params)
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
elif src_ext == 'mpd':
formats.extend(self._extract_mpd_formats(
src_url, video_id, mpd_id='dash', fatal=False))
elif re.search(r'\.ism/[Mm]anifest', src_url):
formats.extend(self._extract_ism_formats(
src_url, video_id, ism_id='mss', fatal=False))
# validate the resolved URL (src may be relative to the SMIL base)
elif src_url.startswith('http') and self._is_valid_url(src_url, video_id):
http_count += 1
formats.append({
'url': src_url,
'ext': ext or src_ext or 'flv',
'format_id': 'http-%d' % (bitrate or http_count),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
urls = []
subtitles = {}
for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
src = textstream.get('src')
if not src or src in urls:
continue
urls.append(src)
ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
subtitles.setdefault(lang, []).append({
'url': src,
'ext': ext,
})
return subtitles
def _extract_xspf_playlist(self, xspf_url, playlist_id, fatal=True):
xspf = self._download_xml(
xspf_url, playlist_id, 'Downloading xspf playlist',
'Unable to download xspf manifest', fatal=fatal)
if xspf is False:
return []
return self._parse_xspf(
xspf, playlist_id, xspf_url=xspf_url,
xspf_base_url=base_url(xspf_url))
def _parse_xspf(self, xspf_doc, playlist_id, xspf_url=None, xspf_base_url=None):
NS_MAP = {
'xspf': 'http://xspf.org/ns/0/',
's1': 'http://static.streamone.nl/player/ns/0',
}
entries = []
for track in xspf_doc.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
title = xpath_text(
track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
description = xpath_text(
track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
thumbnail = xpath_text(
track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
duration = float_or_none(
xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
formats = []
for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP)):
format_url = urljoin(xspf_base_url, location.text)
if not format_url:
continue
formats.append({
'url': format_url,
'manifest_url': xspf_url,
'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
})
self._sort_formats(formats)
entries.append({
'id': playlist_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
})
return entries
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}, data=None, headers={}, query={}):
res = self._download_xml_handle(
mpd_url, video_id,
note=note or 'Downloading MPD manifest',
errnote=errnote or 'Failed to download MPD manifest',
fatal=fatal, data=data, headers=headers, query=query)
if res is False:
return []
mpd_doc, urlh = res
if mpd_doc is None:
return []
mpd_base_url = base_url(urlh.geturl())
return self._parse_mpd_formats(
mpd_doc, mpd_id=mpd_id, mpd_base_url=mpd_base_url,
formats_dict=formats_dict, mpd_url=mpd_url)
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):
"""
Parse formats from MPD manifest.
References:
1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
"""
if mpd_doc.get('type') == 'dynamic':
return []
namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
def _add_ns(path):
return self._xpath_ns(path, namespace)
def is_drm_protected(element):
return element.find(_add_ns('ContentProtection')) is not None
def extract_multisegment_info(element, ms_parent_info):
ms_info = ms_parent_info.copy()
# As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
# common attributes and elements; we extract only those that are
# relevant for us.
def extract_common(source):
segment_timeline = source.find(_add_ns('SegmentTimeline'))
if segment_timeline is not None:
s_e = segment_timeline.findall(_add_ns('S'))
if s_e:
ms_info['total_number'] = 0
ms_info['s'] = []
for s in s_e:
r = int(s.get('r', 0))
ms_info['total_number'] += 1 + r
ms_info['s'].append({
't': int(s.get('t', 0)),
# @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
'd': int(s.attrib['d']),
'r': r,
})
start_number = source.get('startNumber')
if start_number:
ms_info['start_number'] = int(start_number)
timescale = source.get('timescale')
if timescale:
ms_info['timescale'] = int(timescale)
segment_duration = source.get('duration')
if segment_duration:
ms_info['segment_duration'] = float(segment_duration)
def extract_Initialization(source):
initialization = source.find(_add_ns('Initialization'))
if initialization is not None:
ms_info['initialization_url'] = initialization.attrib['sourceURL']
segment_list = element.find(_add_ns('SegmentList'))
if segment_list is not None:
extract_common(segment_list)
extract_Initialization(segment_list)
segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
if segment_urls_e:
ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
else:
segment_template = element.find(_add_ns('SegmentTemplate'))
if segment_template is not None:
extract_common(segment_template)
media = segment_template.get('media')
if media:
ms_info['media'] = media
initialization = segment_template.get('initialization')
if initialization:
ms_info['initialization'] = initialization
else:
extract_Initialization(segment_template)
return ms_info
mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats = []
for period in mpd_doc.findall(_add_ns('Period')):
period_duration = parse_duration(period.get('duration')) or mpd_duration
period_ms_info = extract_multisegment_info(period, {
'start_number': 1,
'timescale': 1,
})
for adaptation_set in period.findall(_add_ns('AdaptationSet')):
if is_drm_protected(adaptation_set):
continue
adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
for representation in adaptation_set.findall(_add_ns('Representation')):
if is_drm_protected(representation):
continue
representation_attrib = adaptation_set.attrib.copy()
representation_attrib.update(representation.attrib)
# According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
mime_type = representation_attrib['mimeType']
content_type = mime_type.split('/')[0]
if content_type == 'text':
# TODO implement WebVTT downloading
pass
elif content_type in ('video', 'audio'):
base_url = ''
for element in (representation, adaptation_set, period, mpd_doc):
base_url_e = element.find(_add_ns('BaseURL'))
if base_url_e is not None:
base_url = base_url_e.text + base_url
if re.match(r'^https?://', base_url):
break
if mpd_base_url and not re.match(r'^https?://', base_url):
if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
mpd_base_url += '/'
base_url = mpd_base_url + base_url
representation_id = representation_attrib.get('id')
lang = representation_attrib.get('lang')
url_el = representation.find(_add_ns('BaseURL'))
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
bandwidth = int_or_none(representation_attrib.get('bandwidth'))
f = {
'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
'manifest_url': mpd_url,
'ext': mimetype2ext(mime_type),
'width': int_or_none(representation_attrib.get('width')),
'height': int_or_none(representation_attrib.get('height')),
'tbr': float_or_none(bandwidth, 1000),
'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
'fps': int_or_none(representation_attrib.get('frameRate')),
'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
'format_note': 'DASH %s' % content_type,
'filesize': filesize,
'container': mimetype2ext(mime_type) + '_dash',
}
f.update(parse_codecs(representation_attrib.get('codecs')))
representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
def prepare_template(template_name, identifiers):
tmpl = representation_ms_info[template_name]
# First off, % characters outside $...$ templates
# must be escaped by doubling for proper processing
# by the % operator string formatting used further on (see
# https://github.com/ytdl-org/youtube-dl/issues/16867).
t = ''
in_template = False
for c in tmpl:
t += c
if c == '$':
in_template = not in_template
elif c == '%' and not in_template:
t += c
# Next, $...$ templates are translated to their
# %(...) counterparts to be used with % operator
t = t.replace('$RepresentationID$', representation_id)
t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
# str.replace returns a new string; the result must be assigned back
t = t.replace('$$', '$')
return t
# @initialization is a regular template like @media one
# so it should be handled just the same way (see
# https://github.com/ytdl-org/youtube-dl/issues/11605)
if 'initialization' in representation_ms_info:
initialization_template = prepare_template(
'initialization',
# As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
# $Time$ shall not be included for @initialization thus
# only $Bandwidth$ remains
('Bandwidth', ))
representation_ms_info['initialization_url'] = initialization_template % {
'Bandwidth': bandwidth,
}
def location_key(location):
return 'url' if re.match(r'^https?://', location) else 'path'
if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:
media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
media_location_key = location_key(media_template)
# As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
# can't be used at the same time
if '%(Number' in media_template and 's' not in representation_ms_info:
segment_duration = None
if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
representation_ms_info['fragments'] = [{
media_location_key: media_template % {
'Number': segment_number,
'Bandwidth': bandwidth,
},
'duration': segment_duration,
} for segment_number in range(
representation_ms_info['start_number'],
representation_ms_info['total_number'] + representation_ms_info['start_number'])]
else:
# $Number*$ or $Time$ in media template with S list available
# Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
# Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
representation_ms_info['fragments'] = []
segment_time = 0
segment_d = None
segment_number = representation_ms_info['start_number']
def add_segment_url():
segment_url = media_template % {
'Time': segment_time,
'Bandwidth': bandwidth,
'Number': segment_number,
}
representation_ms_info['fragments'].append({
media_location_key: segment_url,
'duration': float_or_none(segment_d, representation_ms_info['timescale']),
})
for num, s in enumerate(representation_ms_info['s']):
segment_time = s.get('t') or segment_time
segment_d = s['d']
add_segment_url()
segment_number += 1
for r in range(s.get('r', 0)):
segment_time += segment_d
add_segment_url()
segment_number += 1
segment_time += segment_d
elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
# No media template
# Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
# or any YouTube dashsegments video
fragments = []
segment_index = 0
timescale = representation_ms_info['timescale']
for s in representation_ms_info['s']:
duration = float_or_none(s['d'], timescale)
for r in range(s.get('r', 0) + 1):
segment_uri = representation_ms_info['segment_urls'][segment_index]
fragments.append({
location_key(segment_uri): segment_uri,
'duration': duration,
})
segment_index += 1
representation_ms_info['fragments'] = fragments
elif 'segment_urls' in representation_ms_info:
# Segment URLs with no SegmentTimeline
# Example: https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
# https://github.com/ytdl-org/youtube-dl/pull/14844
fragments = []
segment_duration = float_or_none(
representation_ms_info['segment_duration'],
representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None
for segment_url in representation_ms_info['segment_urls']:
fragment = {
location_key(segment_url): segment_url,
}
if segment_duration:
fragment['duration'] = segment_duration
fragments.append(fragment)
representation_ms_info['fragments'] = fragments
# If there is a fragments key available then we correctly recognized fragmented media.
# Otherwise we will assume unfragmented media with direct access. Technically, such
# assumption is not necessarily correct since we may simply have no support for
# some forms of fragmented media renditions yet, but for now we'll use this fallback.
if 'fragments' in representation_ms_info:
f.update({
# NB: mpd_url may be empty when MPD manifest is parsed from a string
'url': mpd_url or base_url,
'fragment_base_url': base_url,
'fragments': [],
'protocol': 'http_dash_segments',
})
if 'initialization_url' in representation_ms_info:
initialization_url = representation_ms_info['initialization_url']
if not f.get('url'):
f['url'] = initialization_url
f['fragments'].append({location_key(initialization_url): initialization_url})
f['fragments'].extend(representation_ms_info['fragments'])
else:
# Assuming direct URL to unfragmented media.
f['url'] = base_url
# According to [1, 5.3.5.2, Table 7, page 35] @id of Representation
# is not necessarily unique within a Period thus formats with
# the same `format_id` are quite possible. There are numerous examples
# of such manifests (see https://github.com/ytdl-org/youtube-dl/issues/15111,
# https://github.com/ytdl-org/youtube-dl/issues/13919)
full_info = formats_dict.get(representation_id, {}).copy()
full_info.update(f)
formats.append(full_info)
else:
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
return formats
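# Illustrative sketch of the SegmentTemplate handling above (hypothetical
# template): for representation id 'v1', a media template
#   'seg-$RepresentationID$-$Number%05d$.m4s'
# is rewritten by prepare_template to 'seg-v1-%(Number)05d.m4s', and each
# fragment URL is then produced with the % operator, e.g. segment number
# 3 becomes 'seg-v1-00003.m4s'.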
def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
res = self._download_xml_handle(
ism_url, video_id,
note=note or 'Downloading ISM manifest',
errnote=errnote or 'Failed to download ISM manifest',
fatal=fatal, data=data, headers=headers, query=query)
if res is False:
return []
ism_doc, urlh = res
if ism_doc is None:
return []
return self._parse_ism_formats(ism_doc, urlh.geturl(), ism_id)
def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
"""
Parse formats from ISM manifest.
References:
1. [MS-SSTR]: Smooth Streaming Protocol,
https://msdn.microsoft.com/en-us/library/ff469518.aspx
"""
if ism_doc.get('IsLive') == 'TRUE' or ism_doc.find('Protection') is not None:
return []
duration = int(ism_doc.attrib['Duration'])
timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000
formats = []
for stream in ism_doc.findall('StreamIndex'):
stream_type = stream.get('Type')
if stream_type not in ('video', 'audio'):
continue
url_pattern = stream.attrib['Url']
stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
stream_name = stream.get('Name')
for track in stream.findall('QualityLevel'):
fourcc = track.get('FourCC', 'AACL' if track.get('AudioTag') == '255' else None)
# TODO: add support for WVC1 and WMAP
if fourcc not in ('H264', 'AVC1', 'AACL'):
self.report_warning('%s is not a supported codec' % fourcc)
continue
tbr = int(track.attrib['Bitrate']) // 1000
# [1] does not mention Width and Height attributes. However,
# they're often present while MaxWidth and MaxHeight are
# missing, so should be used as fallbacks
width = int_or_none(track.get('MaxWidth') or track.get('Width'))
height = int_or_none(track.get('MaxHeight') or track.get('Height'))
sampling_rate = int_or_none(track.get('SamplingRate'))
track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
fragments = []
fragment_ctx = {
'time': 0,
}
stream_fragments = stream.findall('c')
for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
if not fragment_ctx['duration']:
try:
# index into the list of <c> elements, not into the element's children
next_fragment_time = int(stream_fragments[stream_fragment_index + 1].attrib['t'])
except IndexError:
next_fragment_time = duration
fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
for _ in range(fragment_repeat):
fragments.append({
'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
'duration': fragment_ctx['duration'] / stream_timescale,
})
fragment_ctx['time'] += fragment_ctx['duration']
format_id = []
if ism_id:
format_id.append(ism_id)
if stream_name:
format_id.append(stream_name)
format_id.append(compat_str(tbr))
formats.append({
'format_id': '-'.join(format_id),
'url': ism_url,
'manifest_url': ism_url,
'ext': 'ismv' if stream_type == 'video' else 'isma',
'width': width,
'height': height,
'tbr': tbr,
'asr': sampling_rate,
'vcodec': 'none' if stream_type == 'audio' else fourcc,
'acodec': 'none' if stream_type == 'video' else fourcc,
'protocol': 'ism',
'fragments': fragments,
'_download_params': {
'duration': duration,
'timescale': stream_timescale,
'width': width or 0,
'height': height or 0,
'fourcc': fourcc,
'codec_private_data': track.get('CodecPrivateData'),
'sampling_rate': sampling_rate,
'channels': int_or_none(track.get('Channels', 2)),
'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
},
})
return formats
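# Illustrative sketch of the fragment timing above: a chunk element
#   <c t="0" d="20000000" r="2"/>
# with the default TimeScale of 10000000 yields two fragments of 2.0
# seconds each, with {start time} substituted as 0 and 20000000 in the
# track URL pattern.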
def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None):
def absolute_url(item_url):
return urljoin(base_url, item_url)
def parse_content_type(content_type):
if not content_type:
return {}
ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
if ctr:
mimetype, codecs = ctr.groups()
f = parse_codecs(codecs)
f['ext'] = mimetype2ext(mimetype)
return f
return {}
def _media_formats(src, cur_media_type, type_info={}):
full_url = absolute_url(src)
ext = type_info.get('ext') or determine_ext(full_url)
if ext == 'm3u8':
is_plain_url = False
formats = self._extract_m3u8_formats(
full_url, video_id, ext='mp4',
entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
preference=preference, fatal=False)
elif ext == 'mpd':
is_plain_url = False
formats = self._extract_mpd_formats(
full_url, video_id, mpd_id=mpd_id, fatal=False)
else:
is_plain_url = True
formats = [{
'url': full_url,
'vcodec': 'none' if cur_media_type == 'audio' else None,
}]
return is_plain_url, formats
entries = []
# amp-video and amp-audio are very similar to their HTML5 counterparts
# so we will include them right here (see
# https://www.ampproject.org/docs/reference/components/amp-video)
# For dl8-* tags see https://delight-vr.com/documentation/dl8-video/
_MEDIA_TAG_NAME_RE = r'(?:(?:amp|dl8(?:-live)?)-)?(video|audio)'
media_tags = [(media_tag, media_tag_name, media_type, '')
for media_tag, media_tag_name, media_type
in re.findall(r'(?s)(<(%s)[^>]*/>)' % _MEDIA_TAG_NAME_RE, webpage)]
media_tags.extend(re.findall(
# We only allow video|audio followed by a whitespace or '>'.
# Allowing more characters may end up in significant slow down (see
# https://github.com/ytdl-org/youtube-dl/issues/11979, example URL:
# http://www.porntrex.com/maps/videositemap.xml).
r'(?s)(<(?P<tag>%s)(?:\s+[^>]*)?>)(.*?)</(?P=tag)>' % _MEDIA_TAG_NAME_RE, webpage))
for media_tag, _, media_type, media_content in media_tags:
media_info = {
'formats': [],
'subtitles': {},
}
media_attributes = extract_attributes(media_tag)
src = strip_or_none(media_attributes.get('src'))
if src:
_, formats = _media_formats(src, media_type)
media_info['formats'].extend(formats)
media_info['thumbnail'] = absolute_url(media_attributes.get('poster'))
if media_content:
for source_tag in re.findall(r'<source[^>]+>', media_content):
s_attr = extract_attributes(source_tag)
# data-video-src and data-src are non-standard but seen
# several times in the wild
src = strip_or_none(dict_get(s_attr, ('src', 'data-video-src', 'data-src')))
if not src:
continue
f = parse_content_type(s_attr.get('type'))
is_plain_url, formats = _media_formats(src, media_type, f)
if is_plain_url:
# width, height, res, label and title attributes are
# all non-standard but seen several times in the wild
labels = [
s_attr.get(lbl)
for lbl in ('label', 'title')
if str_or_none(s_attr.get(lbl))
]
width = int_or_none(s_attr.get('width'))
height = (int_or_none(s_attr.get('height'))
or int_or_none(s_attr.get('res')))
if not width or not height:
for lbl in labels:
resolution = parse_resolution(lbl)
if not resolution:
continue
width = width or resolution.get('width')
height = height or resolution.get('height')
for lbl in labels:
tbr = parse_bitrate(lbl)
if tbr:
break
else:
tbr = None
f.update({
'width': width,
'height': height,
'tbr': tbr,
'format_id': s_attr.get('label') or s_attr.get('title'),
})
f.update(formats[0])
media_info['formats'].append(f)
else:
media_info['formats'].extend(formats)
for track_tag in re.findall(r'<track[^>]+>', media_content):
track_attributes = extract_attributes(track_tag)
kind = track_attributes.get('kind')
if not kind or kind in ('subtitles', 'captions'):
src = strip_or_none(track_attributes.get('src'))
if not src:
continue
lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
media_info['subtitles'].setdefault(lang, []).append({
'url': absolute_url(src),
})
for f in media_info['formats']:
f.setdefault('http_headers', {})['Referer'] = base_url
if media_info['formats'] or media_info['subtitles']:
entries.append(media_info)
return entries
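# Illustrative sketch (hypothetical markup): for
#   <video poster="/p.jpg"><source src="/v.mp4" type="video/mp4" label="720p"></video>
# this returns one entry whose single format has ext='mp4',
# format_id='720p' and height=720 recovered from the label via
# parse_resolution, plus the absolutized poster as thumbnail.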
def _extract_akamai_formats(self, manifest_url, video_id, hosts={}):
formats = []
hdcore_sign = 'hdcore=3.7.0'
f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
hds_host = hosts.get('hds')
if hds_host:
f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
if 'hdcore=' not in f4m_url:
f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
f4m_formats = self._extract_f4m_formats(
f4m_url, video_id, f4m_id='hds', fatal=False)
for entry in f4m_formats:
entry.update({'extra_param_to_segment_url': hdcore_sign})
formats.extend(f4m_formats)
m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
hls_host = hosts.get('hls')
if hls_host:
m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
m3u8_formats = self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False)
formats.extend(m3u8_formats)
http_host = hosts.get('http')
if http_host and m3u8_formats and 'hdnea=' not in m3u8_url:
REPL_REGEX = r'https?://[^/]+/i/([^,]+),([^/]+),([^/]+)\.csmil/.+'
qualities = re.match(REPL_REGEX, m3u8_url).group(2).split(',')
qualities_length = len(qualities)
if len(m3u8_formats) in (qualities_length, qualities_length + 1):
i = 0
for f in m3u8_formats:
if f['vcodec'] != 'none':
for protocol in ('http', 'https'):
http_f = f.copy()
del http_f['manifest_url']
http_url = re.sub(
REPL_REGEX, protocol + r'://%s/\g<1>%s\3' % (http_host, qualities[i]), f['url'])
http_f.update({
'format_id': http_f['format_id'].replace('hls-', protocol + '-'),
'url': http_url,
'protocol': protocol,
})
formats.append(http_f)
i += 1
return formats
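# Illustrative sketch: for an Akamai manifest URL such as
#   https://example-vh.akamaihd.net/i/video/master.m3u8
# the HDS URL is derived by swapping /i/ for /z/ and master.m3u8 for
# manifest.f4m; the HLS URL is derived with the inverse substitution.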
def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
query = compat_urlparse.urlparse(url).query
url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
mobj = re.search(
r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
url_base = mobj.group('url')
http_base_url = '%s%s:%s' % ('http', mobj.group('s') or '', url_base)
formats = []
def manifest_url(manifest):
m_url = '%s/%s' % (http_base_url, manifest)
if query:
m_url += '?%s' % query
return m_url
if 'm3u8' not in skip_protocols:
formats.extend(self._extract_m3u8_formats(
manifest_url('playlist.m3u8'), video_id, 'mp4',
m3u8_entry_protocol, m3u8_id='hls', fatal=False))
if 'f4m' not in skip_protocols:
formats.extend(self._extract_f4m_formats(
manifest_url('manifest.f4m'),
video_id, f4m_id='hds', fatal=False))
if 'dash' not in skip_protocols:
formats.extend(self._extract_mpd_formats(
manifest_url('manifest.mpd'),
video_id, mpd_id='dash', fatal=False))
if re.search(r'(?:/smil:|\.smil)', url_base):
if 'smil' not in skip_protocols:
rtmp_formats = self._extract_smil_formats(
manifest_url('jwplayer.smil'),
video_id, fatal=False)
for rtmp_format in rtmp_formats:
rtsp_format = rtmp_format.copy()
rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
del rtsp_format['play_path']
del rtsp_format['ext']
rtsp_format.update({
'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
'protocol': 'rtsp',
})
formats.extend([rtmp_format, rtsp_format])
else:
for protocol in ('rtmp', 'rtsp'):
if protocol not in skip_protocols:
formats.append({
'url': '%s:%s' % (protocol, url_base),
'format_id': protocol,
'protocol': protocol,
})
return formats
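# Illustrative sketch: for a hypothetical Wowza URL
#   http://example.com/vod/mp4:clip.mp4/playlist.m3u8
# the trailing manifest name is stripped, then playlist.m3u8,
# manifest.f4m and manifest.mpd variants are probed over HTTP, with
# rtmp/rtsp fallbacks built from the protocol-less base URL, e.g.
#   rtmp://example.com/vod/mp4:clip.mp4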
def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
mobj = re.search(
r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)(?!</script>).*?\.setup\s*\((?P<options>[^)]+)\)',
webpage)
if mobj:
try:
jwplayer_data = self._parse_json(mobj.group('options'),
video_id=video_id,
transform_source=transform_source)
except ExtractorError:
pass
else:
if isinstance(jwplayer_data, dict):
return jwplayer_data
def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
jwplayer_data = self._find_jwplayer_data(
webpage, video_id, transform_source=js_to_json)
return self._parse_jwplayer_data(
jwplayer_data, video_id, *args, **kwargs)
def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
# JWPlayer backward compatibility: flattened playlists
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
if 'playlist' not in jwplayer_data:
jwplayer_data = {'playlist': [jwplayer_data]}
entries = []
# JWPlayer backward compatibility: single playlist item
# https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
if not isinstance(jwplayer_data['playlist'], list):
jwplayer_data['playlist'] = [jwplayer_data['playlist']]
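        # After the two normalizations above, jwplayer_data always has the
        # canonical shape {'playlist': [item, ...]}. Illustrative inputs
        # (hypothetical values) that all normalize to it:
        #   {'file': 'x.mp4'}                    (flattened config)
        #   {'playlist': {'file': 'x.mp4'}}      (single playlist item)
        #   {'playlist': [{'file': 'x.mp4'}]}    (canonical form)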
for video_data in jwplayer_data['playlist']:
# JWPlayer backward compatibility: flattened sources
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
if 'sources' not in video_data:
video_data['sources'] = [video_data]
this_video_id = video_id or video_data['mediaid']
formats = self._parse_jwplayer_formats(
video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)
subtitles = {}
tracks = video_data.get('tracks')
if tracks and isinstance(tracks, list):
for track in tracks:
if not isinstance(track, dict):
continue
track_kind = track.get('kind')
if not track_kind or not isinstance(track_kind, compat_str):
continue
if track_kind.lower() not in ('captions', 'subtitles'):
continue
track_url = urljoin(base_url, track.get('file'))
if not track_url:
continue
subtitles.setdefault(track.get('label') or 'en', []).append({
'url': self._proto_relative_url(track_url)
})
entry = {
'id': this_video_id,
'title': unescapeHTML(video_data['title'] if require_title else video_data.get('title')),
'description': clean_html(video_data.get('description')),
'thumbnail': urljoin(base_url, self._proto_relative_url(video_data.get('image'))),
'timestamp': int_or_none(video_data.get('pubdate')),
'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
'subtitles': subtitles,
}
# https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.js#L32
if len(formats) == 1 and re.search(r'^(?:http|//).*(?:youtube\.com|youtu\.be)/.+', formats[0]['url']):
entry.update({
'_type': 'url_transparent',
'url': formats[0]['url'],
})
else:
self._sort_formats(formats)
entry['formats'] = formats
entries.append(entry)
if len(entries) == 1:
return entries[0]
else:
return self.playlist_result(entries)
def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
urls = []
formats = []
for source in jwplayer_sources_data:
if not isinstance(source, dict):
continue
source_url = urljoin(
base_url, self._proto_relative_url(source.get('file')))
if not source_url or source_url in urls:
continue
urls.append(source_url)
source_type = source.get('type') or ''
ext = mimetype2ext(source_type) or determine_ext(source_url)
if source_type == 'hls' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
source_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id=m3u8_id, fatal=False))
elif source_type == 'dash' or ext == 'mpd':
formats.extend(self._extract_mpd_formats(
source_url, video_id, mpd_id=mpd_id, fatal=False))
elif ext == 'smil':
formats.extend(self._extract_smil_formats(
source_url, video_id, fatal=False))
# https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
elif source_type.startswith('audio') or ext in (
'oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
formats.append({
'url': source_url,
'vcodec': 'none',
'ext': ext,
})
else:
height = int_or_none(source.get('height'))
if height is None:
# Often no height is provided but there is a label in
# format like "1080p", "720p SD", or 1080.
height = int_or_none(self._search_regex(
r'^(\d{3,4})[pP]?(?:\b|$)', compat_str(source.get('label') or ''),
'height', default=None))
a_format = {
'url': source_url,
'width': int_or_none(source.get('width')),
'height': height,
'tbr': int_or_none(source.get('bitrate')),
'ext': ext,
}
if source_url.startswith('rtmp'):
a_format['ext'] = 'flv'
# See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
# of jwplayer.flash.swf
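                    # Illustrative split (hypothetical URL):
                    #   'rtmp://host/app/mp4:path/to/file'
                    # yields ['rtmp://host/app/', 'mp4:', 'path/to/file'],
                    # so play_path is re-joined as 'mp4:path/to/file'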
rtmp_url_parts = re.split(
r'((?:mp4|mp3|flv):)', source_url, 1)
if len(rtmp_url_parts) == 3:
rtmp_url, prefix, play_path = rtmp_url_parts
a_format.update({
'url': rtmp_url,
'play_path': prefix + play_path,
})
if rtmp_params:
a_format.update(rtmp_params)
formats.append(a_format)
return formats
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
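    # Illustrative example (the timestamp reflects the current local time):
    #   self._live_title('My stream') -> 'My stream 2016-01-31 15:04'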
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None, port=None,
                    path='/', secure=False, discard=False, rest=None, **kwargs):
        rest = rest or {}  # avoid a shared mutable default argument
cookie = compat_cookiejar_Cookie(
0, name, value, port, port is not None, domain, True,
domain.startswith('.'), path, True, secure, expire_time,
discard, None, None, rest)
self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
""" Return a compat_cookies.SimpleCookie with the cookies for the url """
req = sanitized_Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies.SimpleCookie(req.get_header('Cookie'))
def _apply_first_set_cookie_header(self, url_handle, cookie):
"""
Apply first Set-Cookie header instead of the last. Experimental.
Some sites (e.g. [1-3]) may serve two cookies under the same name
in Set-Cookie header and expect the first (old) one to be set rather
        than the second (new) one. However, per RFC 6265 the newer cookie
        should be stored, which is what actually happens.
        We work around this issue by manually resetting the cookie to
        the first one.
1. https://new.vk.com/
2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201
3. https://learning.oreilly.com/
"""
for header, cookies in url_handle.headers.items():
if header.lower() != 'set-cookie':
continue
if sys.version_info[0] >= 3:
cookies = cookies.encode('iso-8859-1')
cookies = cookies.decode('utf-8')
cookie_value = re.search(
r'%s=(.+?);.*?\b[Dd]omain=(.+?)(?:[,;]|$)' % cookie, cookies)
if cookie_value:
value, domain = cookie_value.groups()
self._set_cookie(domain, cookie, value)
break
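    # Illustrative example (hypothetical values): given the response header
    #   Set-Cookie: sid=old; Domain=.example.com, sid=new; Domain=.example.com
    # _apply_first_set_cookie_header(url_handle, 'sid') re-sets sid=old,
    # overriding the newer value the cookiejar kept per RFC 6265.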
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
assert not hasattr(self, '_TESTS'), \
'%s has _TEST and _TESTS' % type(self).__name__
tests = [t]
else:
tests = getattr(self, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(self).__name__[:-len('IE')]
yield t
def is_suitable(self, age_limit):
""" Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are) """
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if tc.get('playlist', []):
tc = tc['playlist'][0]
is_restricted = age_restricted(
tc.get('info_dict', {}).get('age_limit'), age_limit)
if not is_restricted:
return True
any_restricted = any_restricted or is_restricted
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False)
or self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs
will be dropped. """
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
return ret
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
""" Merge two subtitle dictionaries, language by language. """
ret = dict(subtitle_dict1)
for lang in subtitle_dict2:
ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
return ret
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False)
or self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def mark_watched(self, *args, **kwargs):
if (self._downloader.params.get('mark_watched', False)
and (self._get_login_info()[0] is not None
or self._downloader.params.get('cookiefile') is not None)):
self._mark_watched(*args, **kwargs)
def _mark_watched(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def geo_verification_headers(self):
headers = {}
geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
if geo_verification_proxy:
headers['Ytdl-request-proxy'] = geo_verification_proxy
return headers
def _generic_id(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
def _generic_title(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
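    # Illustrative examples (hypothetical URL):
    #   self._generic_id('http://example.com/media/clip%20one.mp4/') -> 'clip one'
    #   self._generic_title('http://example.com/media/clip%20one.mp4') -> 'clip one'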
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
    They accept URLs in the format _SEARCH_KEY(|all|[1-9][0-9]*):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
@classmethod
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
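    # Illustrative query forms (assuming _SEARCH_KEY = 'ytsearch'):
    #   'ytsearch:foo'    -> first result for "foo"
    #   'ytsearch5:foo'   -> first 5 results
    #   'ytsearchall:foo' -> up to _MAX_RESULTS results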
@classmethod
def suitable(cls, url):
return re.match(cls._make_valid_url(), url) is not None
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
return self._get_n_results(query, self._MAX_RESULTS)
else:
n = int(prefix)
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
raise NotImplementedError('This method must be implemented by subclasses')
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
|
spvkgn/youtube-dl
|
youtube_dl/extractor/common.py
|
Python
|
unlicense
| 143,174
|
[
"VisIt"
] |
a948277513fbf7c34a9be0d156d137630433f1b849caef8bc75dab80b90ace00
|
# -*- coding: utf-8 -*-
################################
######## Red - Discord bot #####
################################
# made by Twentysix
#
#
import discord
import logging
import time
import datetime
import requests
import aiohttp
import traceback
import re
import youtube_dl
import os
import asyncio
import glob
from os import path
from random import choice, randint, shuffle
import dataIO #IO settings, proverbs, etc
import economy #Credits
import youtubeparser
from sys import modules
#settings = {"PREFIX" : "!"} #prevents boot error
def loadHelp():
global help, audio_help, meme_help, admin_help, trivia_help
help = """**Commands list:**
{0}flip - Flip a coin
{0}rps [rock or paper or scissors] - Play rock paper scissors
{0}proverb
{0}choose option1 or option2 or option3 (...) - Random choice
{0}8 [question] - Ask 8 ball
{0}sw - Start/stop the stopwatch
{0}avatar [name or mention] - Shows user's avatar
{0}trivia [name] - Start a trivia session with the specified list
{0}trivia stop - Stop a trivia session
{0}twitch [stream] - Check if stream is online
{0}twitchalert [stream] - Whenever the stream is online the bot will send an alert in the channel (admin only)
{0}stoptwitchalert [stream] - Stop sending alerts about the specified stream in the channel (admin only)
{0}roll [number] - Random number between 0 and [number]
{0}gif [text] - GIF search
{0}imdb [title] - Retrieve a movie's information from IMDB
{0}urban [text] - Search definitions in the urban dictionary
{0}meme [ID;Text1;Text2] - Create a meme
{0}customcommands - Custom commands' list
{0}addcom [command] [text] - Add a custom command
{0}editcom [command] [text] - Edit a custom command
{0}delcom [command] - Delete a custom command
{0}meme help - Memes help
{0}audio help - Audio related commands
{0}economy - Economy explanation, if available
{0}trivia - Trivia commands and lists
""".format(settings["PREFIX"])
audio_help = """
**General audio help commands:**
{0}next or {0}skip - Next song
{0}prev - Previous song
{0}pause - Pause song
{0}resume - Resume song
{0}repeat or {0}replay - Replay current song
{0}title or {0}song - Current song's title + link
{0}playyoutube [link] - Play a youtube video in a voice channel
{0}sing - Make Red sing
{0}stop - Stop any voice channel activity
{0}volume [0-1] - Sets the volume
{0}downloadmode - Disables/enables download mode (admin only)
**Playlist commands:**
{0}play [playlist_name] - Play chosen playlist
{0}playlists - Playlists' list
{0}shuffle - Mix music list
{0}addplaylist [name] [link] - Add a youtube playlist. Link format example: https://www.youtube.com/playlist?list=PLe8jmEHFkvsaDOOWcREvkgFoj6MD0pXXX
{0}delplaylist [name] - Delete a youtube playlist. Limited to author and admins.
{0}getplaylist - Receive the current playlist through DM. This also works with favorites.
**Local commands:**
{0}local [playlist_name] - Play chosen local playlist
{0}locallist or {0}local or {0}locals - Local playlists' list
**Favorites:**
{0}addfavorite - Add song to your favorites
{0}delfavorite - Remove song from your favorites
{0}playfavorites - Play your favorites
**You can submit your own playlist by doing the following:**
1) Make a txt file. Name must be only letters, numbers and underscores. It will be your playlist's name, so choose wisely.
2) One youtube link each line.
3) Send me the txt. If any line is incorrect I will reject it.
4) Listen to it with {0}play [playlist_name]!
""".format(settings["PREFIX"])
meme_help = """
Usage example:
One-Does-Not-Simply Template ID: 61579
{0}meme 61579;Test;Test
Memes list:
ID Name
61579 One Does Not Simply
438680 Batman Slapping Robin
61532 The Most Interesting Man In The World
101470 Ancient Aliens
61520 Futurama Fry
347390 X, X Everywhere
5496396 Leonardo Dicaprio Cheers
61539 First World Problems
61546 Brace Yourselves X is Coming
16464531 But Thats None Of My Business
61582 Creepy Condescending Wonka
61585 Bad Luck Brian
563423 That Would Be Great
61544 Success Kid
405658 Grumpy Cat
101288 Third World Skeptical Kid
8072285 Doge
100947 Matrix Morpheus
1509839 Captain Picard Facepalm
61533 X All The Y
1035805 Boardroom Meeting Suggestion
245898 Picard Wtf
21735 The Rock Driving
259680 Am I The Only One Around Here
14230520 Black Girl Wat
40945639 Dr Evil Laser
235589 Evil Toddler
61580 Too Damn High
61516 Philosoraptor
6235864 Finding Neverland
9440985 Face You Make Robert Downey Jr
101287 Third World Success Kid
100955 Confession Bear
444501 The lie detector determined that was a lie. The fact that you X determined that was a lie. Maury Povich.
97984 Disaster Girl
442575 Aint Nobody Got Time For That
109765 Ill Just Wait Here
124212 Say That Again I Dare You
28251713 Oprah You Get A
61556 Grandma Finds The Internet
101440 10 Guy
101711 Skeptical Baby
101716 Yo Dawg Heard You
101511 Dont You Squidward
For more memes: `https://imgflip.com/memetemplates`
Choose a meme, click on "Blank Template" then add the ID
""".format(settings["PREFIX"])
admin_help = """
**Admin commands:**
{0}addwords [word1 word2 (...)] [phrase/with/many/words] - Add words to message filter
{0}removewords [word1 word2 (...)] [phrase/with/many/words] - Remove words from message filter
{0}addregex [regex] - Add regular expression to message filter
{0}removeregex [regex] - Remove regular expression from message filter
{0}shutdown - Shutdown the bot
{0}join [invite] - Join another server
{0}leaveserver - Leave server
{0}shush - Ignore the current channel
{0}talk - Stop ignoring the current channel
{0}reload - Reload most files. Useful in case of manual edits
{0}name [name] - Change the bot's name
{0}cleanup [number] - Delete the last [number] messages
{0}cleanup [name/mention] [number] - Delete the last [number] of messages by [name]
{0}blacklist [name/mention] - Add user to Red's blacklist
{0}forgive [name/mention] - Removes user from Red's blacklist
{0}setting [setting] [value] - Modify setting
""".format(settings["PREFIX"])
trivia_help = """
**Trivia commands:**
{0}trivia - Trivia questions lists and help
{0}trivia [name] - Starts trivia session with specified list
{0}trivia random - Starts trivia session with random list
{0}trivia stop - Stop trivia session
""".format(settings["PREFIX"])
youtube_dl_options = {
'format': 'bestaudio/best',
'extractaudio': True,
'audioformat': "mp3",
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': True,
'quiet': True,
'no_warnings': True,
'outtmpl': "cache/%(id)s"}
client = discord.Client()
if not discord.opus.is_loaded():
discord.opus.load_opus('libopus-0.dll')
@client.async_event
async def on_message(message):
global trivia_sessions
p = settings["PREFIX"]
await gameSwitcher.changeGame()
if message.author.id in blacklisted_users and not isMemberAdmin(message):
return False
if message.channel.is_private and message.attachments != []:
await transferPlaylist(message)
if not message.channel.is_private and message.author.id != client.user.id:
if settings["FILTER"] and not isMemberAdmin(message):
if await checkFilter(message) or await checkRegex(message):
return False #exits without checking for commands
if message.channel.id in shush_list and message.content == p + "talk":
await talk(message)
if message.channel.id not in shush_list:
if message.content == client.user.name.upper() or message.content == client.user.name.upper() + "?":
await client.send_message(message.channel, "`" + choice(greetings_caps) + "`")
elif message.content.lower() == client.user.name.lower() + "?":
await client.send_message(message.channel, "`" + choice(greetings) + "`")
elif message.content == client.user.mention + " ?" or message.content == client.user.mention + "?":
await client.send_message(message.channel, "`" + choice(greetings) + "`")
elif message.content == p + "flip":
await client.send_message(message.channel, "*flips a coin and... " + choice(["HEADS!*", "TAILS!*"]))
elif message.content.startswith(p + "rps"):
await rpsgame(message)
elif message.content == p + "proverb":
await client.send_message(message.channel, "`" + choice(proverbs) + "`")
elif message.content == p + "help":
await client.send_message(message.author, help)
await client.send_message(message.channel, "{} `Check your DMs for the command list.`".format(message.author.mention))
elif message.content.startswith(p + 'choose'):
await randomchoice(message)
elif message.content.startswith(p + '8 ') and message.content.endswith("?") and len(message.content) > 5:
await client.send_message(message.channel, "{}: ".format(message.author.mention) + "`" + choice(ball) + "`")
elif message.content.startswith(p + 'roll'):
await roll(message)
elif message.content.startswith(p + 'addcom'):
await addcom(message)
elif message.content.startswith(p + 'editcom'):
await editcom(message)
elif message.content.startswith(p + 'delcom'):
await delcom(message)
elif message.content == p + "customcommands":
await listCustomCommands(message)
elif message.content.startswith(p + 'sw'):
await stopwatch(message)
elif message.content.startswith(p + 'id'):
await client.send_message(message.channel, "{} `Your id is {}`".format(message.author.mention, message.author.id))
elif message.content.startswith(p + 'twitchalert'):
await addTwitchAlert(message)
elif message.content.startswith(p + 'stoptwitchalert'):
await removeTwitchAlert(message)
elif message.content.startswith(p + 'twitch'):
await twitchCheck(message)
elif message.content.startswith(p + 'image'):
#image(message)
pass
elif message.content.startswith(p + 'gif'):
await gif(message)
elif message.content.startswith(p + 'imdb'):
await imdb(message)
elif message.content.startswith(p + 'urban'):
await urban(message)
elif message.content.startswith(p + 'uptime'):
await uptime(message)
elif message.content.startswith(p + 'avatar'):
await avatar(message)
elif message.content == p + 'meme help' or message.content == p + 'memes':
await client.send_message(message.author, meme_help)
await client.send_message(message.channel, "{} `Check your DMs for " + p +"meme help.`".format(message.author.mention))
elif message.content.startswith (p + 'meme'):
await memes(message)
elif message.content.startswith (p + 'lmgtfy'):
await lmgtfy(message)
################## music #######################
elif message.content == p + "sing":
await playPlaylist(message, sing=True)
elif message.content.startswith(p + 'playyoutube'):
await playVideo(message)
elif message.content.startswith(p + 'play '):
await playPlaylist(message)
elif message.content.startswith(p + 'local '):
await playLocal(message)
elif message.content == p + "local" or message.content == p + "locallist" or message.content == p + "locals":
await listLocal(message)
await client.send_message(message.channel, "{} `Check your DMs for the local playlists list.`".format(message.author.mention))
elif message.content == p + "stop":
await leaveVoice()
elif message.content == p + "playlist" or message.content == p + "playlists":
await listPlaylists(message)
await client.send_message(message.channel, "{} `Check your DMs for the playlists list.`".format(message.author.mention))
elif message.content == p + "skip" or message.content == p + "next":
if currentPlaylist: currentPlaylist.nextSong(currentPlaylist.getNextSong())
elif message.content == p + "prev" or message.content == p + "previous":
if currentPlaylist: currentPlaylist.nextSong(currentPlaylist.getPreviousSong())
elif message.content == p + "repeat" or message.content == p + "replay":
if currentPlaylist: currentPlaylist.nextSong(currentPlaylist.current)
elif message.content == p + "pause":
if currentPlaylist: currentPlaylist.pause()
elif message.content == p + "resume":
if currentPlaylist: currentPlaylist.resume()
elif message.content == p + "shuffle":
if currentPlaylist: currentPlaylist.shuffle()
elif message.content == p + "song" or message.content == p + "title" :
if currentPlaylist: await getSongTitle(message)
elif message.content == p + "audio help":
await client.send_message(message.author, audio_help)
await client.send_message(message.channel, "{} `Check your DMs for the audio help.`".format(message.author.mention))
elif message.content.startswith(p + "addplaylist"):
await addPlaylist(message)
elif message.content.startswith(p + "delplaylist"):
await delPlaylist(message)
elif message.content == p + "addfavorite":
await addToFavorites(message)
elif message.content == p + "delfavorite":
await removeFromFavorites(message)
elif message.content == p + "playfavorites":
await playFavorites(message)
elif message.content == p + "getplaylist":
await sendPlaylist(message)
elif message.content.startswith(p + "volume"):
await setVolume(message)
elif message.content == p + "downloadmode":
await downloadMode(message)
elif message.content == p + "endpoll":
await endPoll(message)
elif message.content.startswith(p + "poll"):
await startPoll(message)
################################################
elif message.content == p + "trivia":
await triviaList(message)
elif message.content.startswith(p + "trivia"):
if checkAuth("Trivia", message, settings):
if message.content == p + "trivia stop":
if getTriviabyChannel(message.channel):
await getTriviabyChannel(message.channel).endGame()
await client.send_message(message.channel, "`Trivia stopped.`")
else:
await client.send_message(message.channel, "`There's no trivia session ongoing in this channel.`")
elif not getTriviabyChannel(message.channel):
t = Trivia(message)
trivia_sessions.append(t)
await t.loadQuestions(message.content)
else:
await client.send_message(message.channel, "`A trivia session is already ongoing in this channel.`")
else:
await client.send_message(message.channel, "`Trivia is currently admin-only.`")
######## Admin commands #######################
elif message.content.startswith(p + 'addwords'):
await addBadWords(message)
elif message.content.startswith(p + 'removewords'):
await removeBadWords(message)
elif message.content.startswith(p + 'addregex ') and len(message.content) > 11:
await addRegex(message)
elif message.content.startswith(p + 'removeregex ') and len(message.content) > 14:
await removeRegex(message)
elif message.content == p + "shutdown":
await shutdown(message)
elif message.content.startswith(p + 'join'):
await join(message)
elif message.content == p + "leaveserver":
await leave(message)
elif message.content == p + "shush":
await shush(message)
elif message.content == p + "talk": #prevents !talk custom command
pass
elif message.content == p + "reload":
await reloadSettings(message)
elif message.content.startswith(p + "name"):
await changeName(message)
elif message.content.startswith(p + "cleanup"):
await cleanup(message)
elif message.content == p + "admin help":
if isMemberAdmin(message):
await client.send_message(message.author, admin_help)
else:
await client.send_message(message.channel, "`Admin status required.`")
elif message.content.startswith(p + "debug"):
await debug(message)
elif message.content.startswith(p + "exec"):
await execFunc(message)
elif message.content.startswith(p + "blacklist"):
await blacklist(message, "add")
elif message.content.startswith(p + "forgive"):
await blacklist(message, "remove")
elif message.content.startswith(p + "setting"):
await modifySettings(message)
###################################
elif getTriviabyChannel(message.channel): #check if trivia is ongoing in the channel
trvsession = getTriviabyChannel(message.channel)
await trvsession.checkAnswer(message)
elif "economy" in modules:
await economy.checkCommands(message)
if getPollByChannel(message):
getPollByChannel(message).checkAnswer(message)
if message.content.startswith(p) and len(message.content) > 2 and settings["CUSTOMCOMMANDS"]:
await customCommand(message)
@client.async_event
async def on_ready():
logger.info("I'm online " + "(" + client.user.id + ")")
await gameSwitcher.changeGame(now=True)
# cns = threading.Thread(target=console, args=[])
# cns.start() # console, WIP
@client.async_event
def on_message_delete(message):
# WIP. Need to check for permissions
#await client.send_message(message.channel, "{} `I have deleted your message.`".format(message.author.mention))
pass
@client.async_event
async def on_message_edit(before, message):
if message.author.id != client.user.id and settings["FILTER"] and not isMemberAdmin(message) and not message.channel.is_private:
await checkFilter(message)
await checkRegex(message)
def loggerSetup():
#api wrapper
logger = logging.getLogger('discord')
logger.setLevel(logging.WARNING)
handler = logging.FileHandler(filename='wrapper.log', encoding='utf-8', mode='a')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s', datefmt="[%d/%m/%Y %H:%M]"))
logger.addHandler(handler)
#Red
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s', datefmt="[%d/%m/%Y %H:%M]"))
logger.addHandler(handler)
file_handler = logging.FileHandler(filename="red.log", mode='a')
file_formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s', datefmt="[%d/%m/%Y %H:%M]")
file_handler.setFormatter(file_formatter)
logger.addHandler(file_handler)
return logger
class Trivia():
def __init__(self, message):
self.gaveAnswer = ["I know this one! {}!", "Easy: {}.", "Oh really? It's {} of course."]
self.currentQ = None # {"QUESTION" : "String", "ANSWERS" : []}
self.questionList = ""
self.channel = message.channel
logger.info("Trivia started in channel " + self.channel.id)
self.scoreList = {}
self.status = None
self.timer = None
self.count = 0
async def loadQuestions(self, msg):
msg = msg.split(" ")
if len(msg) == 2:
_, qlist = msg
if qlist == "random":
chosenList = choice(glob.glob("trivia/*.txt"))
self.questionList = self.loadList(chosenList)
self.status = "new question"
self.timeout = time.perf_counter()
                if self.questionList:
                    await self.newQuestion()
                else:
                    await self.stopTrivia()
else:
if os.path.isfile("trivia/" + qlist + ".txt"):
self.questionList = self.loadList("trivia/" + qlist + ".txt")
self.status = "new question"
self.timeout = time.perf_counter()
                    if self.questionList:
                        await self.newQuestion()
                    else:
                        await self.stopTrivia()
else:
await client.send_message(self.channel, "`There is no list with that name.`")
await self.stopTrivia()
else:
await client.send_message(self.channel, "`" + settings["PREFIX"] + "trivia [list name]`")
async def stopTrivia(self):
global trivia_sessions
self.status = "stop"
trivia_sessions.remove(self)
logger.info("Trivia stopped in channel " + self.channel.id)
async def endGame(self):
global trivia_sessions
self.status = "stop"
if self.scoreList:
await self.sendTable()
trivia_sessions.remove(self)
logger.info("Trivia stopped in channel " + self.channel.id)
def loadList(self, qlist):
with open(qlist, "r", encoding="utf-8") as f:
qlist = f.readlines()
parsedList = []
for line in qlist:
if "`" in line and len(line) > 4:
line = line.replace("\n", "")
line = line.split("`")
question = line[0]
answers = []
for l in line[1:]:
answers.append(l.lower())
if len(line) >= 2:
line = {"QUESTION" : question, "ANSWERS": answers} #string, list
parsedList.append(line)
if parsedList != []:
return parsedList
        else:
            # stopTrivia is a coroutine and cannot be awaited from this
            # synchronous method; loadQuestions stops the session when
            # None is returned
            return None
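    # Expected trivia list format, as parsed above: one question per line,
    # answers separated by backticks (illustrative example):
    #   What is the capital of France?`paris`city of paris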
async def newQuestion(self):
for score in self.scoreList.values():
if score == settings["TRIVIA_MAX_SCORE"]:
await self.endGame()
return True
if self.questionList == []:
await self.endGame()
return True
self.currentQ = choice(self.questionList)
self.questionList.remove(self.currentQ)
self.status = "waiting for answer"
self.count += 1
self.timer = int(time.perf_counter())
await client.send_message(self.channel, "**Question number {}!**\n\n{}".format(str(self.count), self.currentQ["QUESTION"]))
while self.status != "correct answer" and abs(self.timer - int(time.perf_counter())) <= settings["TRIVIA_DELAY"]:
if abs(self.timeout - int(time.perf_counter())) >= settings["TRIVIA_TIMEOUT"]:
await client.send_message(self.channel, "Guys...? Well, I guess I'll stop then.")
await self.stopTrivia()
return True
await asyncio.sleep(1) #Waiting for an answer or for the time limit
if self.status == "correct answer":
self.status = "new question"
await asyncio.sleep(3)
if not self.status == "stop":
await self.newQuestion()
elif self.status == "stop":
return True
else:
msg = choice(self.gaveAnswer).format(self.currentQ["ANSWERS"][0])
if settings["TRIVIA_BOT_PLAYS"]:
msg += " **+1** for me!"
self.addPoint(client.user.name)
self.currentQ["ANSWERS"] = []
await client.send_message(self.channel, msg)
await client.send_typing(self.channel)
await asyncio.sleep(3)
if not self.status == "stop":
await self.newQuestion()
async def sendTable(self):
        self.scoreList = sorted(self.scoreList.items(), reverse=True, key=lambda x: x[1]) # orders scores from highest to lowest
t = "```Scores: \n\n"
for score in self.scoreList:
t += score[0] # name
t += "\t"
t += str(score[1]) # score
t += "\n"
t += "```"
await client.send_message(self.channel, t)
async def checkAnswer(self, message):
self.timeout = time.perf_counter()
for answer in self.currentQ["ANSWERS"]:
if answer in message.content.lower():
self.currentQ["ANSWERS"] = []
self.status = "correct answer"
self.addPoint(message.author.name)
await client.send_message(self.channel, "You got it {}! **+1** to you!".format(message.author.name))
await client.send_typing(self.channel)
return True
def addPoint(self, user):
if user in self.scoreList:
self.scoreList[user] += 1
else:
self.scoreList[user] = 1
def getTriviaQuestion(self):
q = choice(list(trivia_questions.keys()))
return q, trivia_questions[q] # question, answer
class botPlays():
def __init__(self):
self.games = dataIO.fileIO("json/games.json", "load")
self.lastChanged = int(time.perf_counter())
self.delay = 300
async def changeGame(self, now=False):
if abs(self.lastChanged - int(time.perf_counter())) >= self.delay or now:
self.lastChanged = int(time.perf_counter())
await client.change_status(discord.Game(name=choice(self.games)))
class Playlist():
def __init__(self, filename=None): #a playlist with a single song is just there to make !addfavorite work with !youtube command
self.filename = filename
self.current = 0
self.stop = False
self.lastAction = 999
self.currentTitle = ""
self.type = filename["type"]
if filename["type"] == "playlist":
self.playlist = dataIO.fileIO("playlists/" + filename["filename"] + ".txt", "load")["playlist"]
elif filename["type"] == "favorites":
self.playlist = dataIO.fileIO("favorites/" + filename["filename"] + ".txt", "load")
elif filename["type"] == "local":
self.playlist = filename["filename"]
elif filename["type"] == "singleSong":
self.playlist = [filename["filename"]]
self.playSingleSong(self.playlist[0])
else:
raise("Invalid playlist call.")
if filename["type"] != "singleSong":
self.nextSong(0)
def nextSong(self, nextTrack, lastError=False):
global musicPlayer
if not self.passedTime() < 1 and not self.stop: #direct control
if musicPlayer: musicPlayer.stop()
self.lastAction = int(time.perf_counter())
try:
if isPlaylistValid([self.playlist[nextTrack]]): #Checks if it's a valid youtube link
if settings["DOWNLOADMODE"]:
path = self.getVideo(self.playlist[nextTrack])
try:
logger.info("Starting track...")
musicPlayer = client.voice.create_ffmpeg_player("cache/" + path, options='''-filter:a "volume={}"'''.format(settings["VOLUME"]))
musicPlayer.start()
except:
logger.warning("Something went wrong with track " + self.playlist[self.current])
if not lastError: #prevents error loop
self.lastAction = 999
self.nextSong(self.getNextSong(), lastError=True)
else: #Stream mode. Buggy.
musicPlayer = client.voice.create_ytdl_player(self.playlist[nextTrack], options=youtube_dl_options)
musicPlayer.start()
else: # must be a local playlist then
musicPlayer = client.voice.create_ffmpeg_player(self.playlist[nextTrack], options='''-filter:a "volume={}"'''.format(settings["VOLUME"]))
musicPlayer.start()
except Exception as e:
logger.warning("Something went wrong with track " + self.playlist[self.current])
if not lastError: #prevents error loop
self.lastAction = 999
self.nextSong(self.getNextSong(), lastError=True)
def getVideo(self, url):
try:
yt = youtube_dl.YoutubeDL(youtube_dl_options)
v = yt.extract_info(url, download=False)
if not os.path.isfile("cache/" + v["id"]):
logger.info("Track not in cache, downloading...")
v = yt.extract_info(url, download=True)
self.currentTitle = v["title"]
return v["id"]
except Exception as e:
logger.error(e)
return False
def playSingleSong(self, url):
global musicPlayer
if settings["DOWNLOADMODE"]:
v = self.getVideo(url)
if musicPlayer:
if musicPlayer.is_playing():
musicPlayer.stop()
if v:
musicPlayer = client.voice.create_ffmpeg_player("cache/" + v, options='''-filter:a "volume={}"'''.format(settings["VOLUME"]))
musicPlayer.start()
else:
if musicPlayer:
if musicPlayer.is_playing():
musicPlayer.stop()
musicPlayer = client.voice.create_ytdl_player(self.playlist[0], options=youtube_dl_options)
musicPlayer.start()
async def songSwitcher(self):
while not self.stop:
if musicPlayer.is_done() and not self.stop:
self.nextSong(self.getNextSong())
await asyncio.sleep(0.5)
def passedTime(self):
return abs(self.lastAction - int(time.perf_counter()))
def getPreviousSong(self):
try:
song = self.playlist[self.current-1]
self.current -= 1
return self.current
        except IndexError: #if the current song was the first song, wrap around to the last in the playlist
            self.current = len(self.playlist) - 1
            return self.current
def getNextSong(self):
try:
song = self.playlist[self.current+1]
self.current += 1
return self.current
        except IndexError: #if the current song was the last song, wrap around to the first in the playlist
            self.current = 0
            return self.current
def pause(self):
if musicPlayer.is_playing() and not self.stop:
musicPlayer.pause()
def resume(self):
if not self.stop:
musicPlayer.resume()
def shuffle(self):
if not self.stop:
shuffle(self.playlist)
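# Illustrative Playlist construction (hypothetical playlist name); the
# constructor takes a dict describing the source:
#   currentPlaylist = Playlist({"filename": "rock", "type": "playlist"})
# valid types are "playlist", "favorites", "local" and "singleSong"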
class Poll():
def __init__(self, message):
self.channel = message.channel
self.author = message.author.id
msg = message.content[6:]
msg = msg.split(";")
if len(msg) < 2: # Needs at least one question and 2 choices
self.valid = False
return None
else:
self.valid = True
self.already_voted = []
self.question = msg[0]
msg.remove(self.question)
self.answers = {}
i = 1
for answer in msg: # {id : {answer, votes}}
self.answers[i] = {"ANSWER" : answer, "VOTES" : 0}
i += 1
async def start(self):
msg = "**POLL STARTED!**\n\n{}\n\n".format(self.question)
for id, data in self.answers.items():
msg += "{}. *{}*\n".format(id, data["ANSWER"])
msg += "\nType the number to vote!"
await client.send_message(self.channel, msg)
await asyncio.sleep(settings["POLL_DURATION"])
if self.valid:
await self.endPoll()
async def endPoll(self):
global poll_sessions
self.valid = False
msg = "**POLL ENDED!**\n\n{}\n\n".format(self.question)
for data in self.answers.values():
msg += "*{}* - {} votes\n".format(data["ANSWER"], str(data["VOTES"]))
await client.send_message(self.channel, msg)
poll_sessions.remove(self)
def checkAnswer(self, message):
try:
i = int(message.content)
if i in self.answers.keys():
if message.author.id not in self.already_voted:
data = self.answers[i]
data["VOTES"] += 1
self.answers[i] = data
self.already_voted.append(message.author.id)
except ValueError:
pass
async def startPoll(message):
global poll_sessions
if not getPollByChannel(message):
p = Poll(message)
if p.valid:
poll_sessions.append(p)
await p.start()
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "poll question;option1;option2 (...)`")
else:
await client.send_message(message.channel, "`A poll is already ongoing in this channel.`")
async def endPoll(message):
global poll_sessions
if getPollByChannel(message):
p = getPollByChannel(message)
if p.author == message.author.id or isMemberAdmin(message):
await getPollByChannel(message).endPoll()
else:
await client.send_message(message.channel, "`Only admins and the author can stop the poll.`")
else:
await client.send_message(message.channel, "`There's no poll ongoing in this channel.`")
def getPollByChannel(message):
for poll in poll_sessions:
if poll.channel == message.channel:
return poll
return False
async def addcom(message):
if checkAuth("ModifyCommands", message, settings):
msg = message.content.split()
if len(msg) > 2:
msg = message.content[8:] # removes !addcom
newcmd = msg[:msg.find(" ")] # extracts custom command
customtext = msg[msg.find(" ") + 1:] # extracts [text]
if len(newcmd) > 1 and newcmd.find(" ") == -1:
                if message.channel.server.id not in commands:
commands[message.channel.server.id] = {}
cmdlist = commands[message.channel.server.id]
if newcmd not in cmdlist:
cmdlist[newcmd] = customtext
commands[message.channel.server.id] = cmdlist
dataIO.fileIO("json/commands.json", "save", commands)
logger.info("Saved commands database.")
await client.send_message(message.channel, "`Custom command successfully added.`")
else:
await client.send_message(message.channel, "`This command already exists. Use " + settings["PREFIX"] + "editcom [command] [text]`")
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "addcom [command] [text]`")
else:
await client.send_message(message.channel, "`You don't have permissions to edit custom commands.`")
async def editcom(message):
if checkAuth("ModifyCommands", message, settings):
msg = message.content.split()
if len(msg) > 2:
msg = message.content[9:] # removes !editcom
cmd = msg[:msg.find(" ")] # extracts custom command
customtext = msg[msg.find(" ") + 1:] # extracts [text]
if message.channel.server.id in commands:
cmdlist = commands[message.channel.server.id]
if cmd in cmdlist:
cmdlist[cmd] = customtext
commands[message.channel.server.id] = cmdlist
dataIO.fileIO("json/commands.json", "save", commands)
logger.info("Saved commands database.")
await client.send_message(message.channel, "`Custom command successfully edited.`")
else:
await client.send_message(message.channel, "`That command doesn't exist. Use " + settings["PREFIX"] + "addcom [command] [text]`")
else:
await client.send_message(message.channel, "`There are no custom commands in this server. Use " + settings["PREFIX"] + "addcom [command] [text]`")
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "editcom [command] [text]`")
else:
await client.send_message(message.channel, "`You don't have permissions to edit custom commands.`")
async def delcom(message):
if checkAuth("ModifyCommands", message, settings):
msg = message.content.split()
if len(msg) == 2:
if message.channel.server.id in commands:
cmdlist = commands[message.channel.server.id]
if msg[1] in cmdlist:
cmdlist.pop(msg[1], None)
commands[message.channel.server.id] = cmdlist
dataIO.fileIO("json/commands.json", "save", commands)
logger.info("Saved commands database.")
await client.send_message(message.channel, "`Custom command successfully deleted.`")
else:
await client.send_message(message.channel, "`That command doesn't exist.`")
else:
await client.send_message(message.channel, "`There are no custom commands in this server. Use " + settings["PREFIX"] + "addcom [command] [text]`")
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "delcom [command]`")
else:
await client.send_message(message.channel, "`You don't have permissions to edit custom commands.`")
async def listCustomCommands(message):
msg = "Custom commands: \n\n```"
if message.channel.server.id in commands:
cmds = commands[message.channel.server.id].keys()
if cmds:
for i, d in enumerate(cmds):
if i % 4 == 0 and i != 0:
msg = msg + d + "\n"
else:
msg = msg + d + "\t"
msg += "```"
await client.send_message(message.author, msg)
else:
await client.send_message(message.author, "There are no custom commands.")
else:
await client.send_message(message.author, "There are no custom commands.")
def checkAuth(cmd, message, settings): #checks if those settings are on. If they are, it checks if the user is a owner
if cmd == "ModifyCommands":
if settings["EDIT_CC_ADMIN_ONLY"]:
if isMemberAdmin(message):
return True
else:
return False
else:
return True
elif cmd == "Trivia":
if settings["TRIVIA_ADMIN_ONLY"]:
if isMemberAdmin(message):
return True
else:
return False
else:
return True
else:
logger.error("Invalid call to checkAuth")
return False
async def rpsgame(message):
rps = {"rock" : ":moyai:",
"paper": ":page_facing_up:",
"scissors":":scissors:"
}
msg = message.content.lower().split(" ")
if len(msg) == 2:
_, userchoice = msg
if userchoice in rps.keys():
botchoice = choice(list(rps.keys()))
msgs = {
"win": " You win {}!".format(message.author.mention),
"square": " We're square {}!".format(message.author.mention),
"lose": " You lose {}!".format(message.author.mention)
}
if userchoice == botchoice:
await client.send_message(message.channel, rps[botchoice] + msgs["square"])
elif userchoice == "rock" and botchoice == "paper":
await client.send_message(message.channel, rps[botchoice] + msgs["lose"])
elif userchoice == "rock" and botchoice == "scissors":
await client.send_message(message.channel, rps[botchoice] + msgs["win"])
elif userchoice == "paper" and botchoice == "rock":
await client.send_message(message.channel, rps[botchoice] + msgs["win"])
elif userchoice == "paper" and botchoice == "scissors":
await client.send_message(message.channel, rps[botchoice] + msgs["lose"])
elif userchoice == "scissors" and botchoice == "rock":
await client.send_message(message.channel, rps[botchoice] + msgs["lose"])
elif userchoice == "scissors" and botchoice == "paper":
await client.send_message(message.channel, rps[botchoice] + msgs["win"])
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "rps [rock or paper or scissors]`")
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "rps [rock or paper or scissors]`")
async def randomchoice(message):
sentences = ["Mmm... I think I'll choose ", "I choose ", "I prefer ", "This one is best: ", "This: "]
msg = message.content[8:] # removes !choose
msg = msg.split(" or ")
if len(msg) == 1:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "choose option1 or option2 or option3 (...)`")
elif len(msg) >= 2:
await client.send_message(message.channel, "`" + choice(sentences) + choice(msg) + "`")
else:
await client.send_message(message.channel, "`The options must be at least two.`")
async def stopwatch(message):
global stopwatches
if message.author.id in stopwatches:
tmp = abs(stopwatches[message.author.id] - int(time.perf_counter()))
tmp = str(datetime.timedelta(seconds=tmp))
await client.send_message(message.channel, "`Stopwatch stopped! Time: " + str(tmp) + " `")
stopwatches.pop(message.author.id, None)
else:
stopwatches[message.author.id] = int(time.perf_counter())
await client.send_message(message.channel, "`Stopwatch started! Use " + settings["PREFIX"] + "sw to stop it.`")
"""
async def image(message): # API's dead.
msg = message.content.split()
if len(msg) > 1:
        if len(msg[1]) > 1 and len(msg[1]) < 20:
try:
msg.remove(msg[0])
msg = "+".join(msg)
search = "http://ajax.googleapis.com/ajax/services/search/images?v=1.0&q=" + msg + "&start=0"
result = requests.get(search).json()
url = result["responseData"]["results"][0]["url"]
await client.send_message(message.channel, url)
except:
await client.send_message(message.channel, "Error.")
else:
await client.send_message(message.channel, "Invalid search.")
else:
await client.send_message(message.channel, "!image [text]")
"""
async def imdb(message): # Method added by BananaWaffles.
msg = message.content.split()
if apis["MYAPIFILMS_TOKEN"] == "TOKENHERE":
await client.send_message(message.channel, "`This command wasn't configured properly. If you're the owner, edit json/apis.json`")
return False
if len(msg) > 1:
        if len(msg[1]) > 1 and len(msg[1]) < 20:
try:
msg.remove(msg[0])
msg = "+".join(msg)
search = "http://api.myapifilms.com/imdb/title?format=json&title=" + msg + "&token=" + apis["MYAPIFILMS_TOKEN"]
async with aiohttp.get(search) as r:
result = await r.json()
title = result['data']['movies'][0]['title']
year = result['data']['movies'][0]['year']
rating = result['data']['movies'][0]['rating']
url = result['data']['movies'][0]['urlIMDB']
msg = "Title: " + title + " | Released on: " + year + " | IMDB Rating: " + rating + ".\n" + url
await client.send_message(message.channel, msg)
except:
await client.send_message(message.channel, "`Error.`")
else:
await client.send_message(message.channel, "`Invalid search.`")
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "imdb [text]`")
async def memes(message):
msg = message.content[6:]
msg = msg.split(";")
if apis["IMGFLIP_USERNAME"] == "USERNAMEHERE" or apis["IMGFLIP_PASSWORD"] == "PASSWORDHERE":
await client.send_message(message.channel, "`This command wasn't configured properly. If you're the owner, edit json/apis.json`")
return False
if len(msg) == 3:
        if len(msg[0]) > 1 and len(msg[1]) < 20 and len(msg[2]) < 20:
try:
search = "https://api.imgflip.com/caption_image?template_id=" + msg[0] + "&username=" + apis["IMGFLIP_USERNAME"] + "&password=" + apis["IMGFLIP_PASSWORD"] + "&text0=" + msg[1] + "&text1=" + msg[2]
async with aiohttp.get(search) as r:
result = await r.json()
if result["data"] != []:
url = result["data"]["url"]
await client.send_message(message.channel, url)
except:
error = result["error_message"]
await client.send_message(message.channel, error)
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "meme id;text1;text2 | " + settings["PREFIX"] + "meme help for full list`")
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "meme id;text1;text2 | " + settings["PREFIX"] + "meme help for full list`")
async def urban(message):
msg = message.content.split()
if len(msg) > 1:
        if len(msg[1]) > 1 and len(msg[1]) < 20:
try:
msg.remove(msg[0])
msg = "+".join(msg)
search = "http://api.urbandictionary.com/v0/define?term=" + msg
async with aiohttp.get(search) as r:
result = await r.json()
if result["list"] != []:
definition = result['list'][0]['definition']
example = result['list'][0]['example']
await client.send_message(message.channel, "Definition: " + definition + "\n\n" + "Example: " + example )
else:
await client.send_message(message.channel, "`Your search terms gave no results.`")
except:
await client.send_message(message.channel, "`Error.`")
else:
await client.send_message(message.channel, "`Invalid search.`")
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "urban [text]`")
async def gif(message):
msg = message.content.split()
if len(msg) > 1:
        if len(msg[1]) > 1 and len(msg[1]) < 20:
try:
msg.remove(msg[0])
msg = "+".join(msg)
search = "http://api.giphy.com/v1/gifs/search?q=" + msg + "&api_key=dc6zaTOxFJmzC"
async with aiohttp.get(search) as r:
result = await r.json()
if result["data"] != []:
url = result["data"][0]["url"]
await client.send_message(message.channel, url)
else:
await client.send_message(message.channel, "`Your search terms gave no results.`")
except:
await client.send_message(message.channel, "`Error.`")
else:
await client.send_message(message.channel, "`Invalid search.`")
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "gif [text]`")
async def avatar(message):
if message.mentions:
m = message.mentions[0]
await client.send_message(message.channel, "{}'s avatar: {}".format(m.name, m.avatar_url))
else:
if len(message.content.split(" ")) >= 2:
name = message.content[8:]
member = discord.utils.get(message.server.members, name=name)
if member != None:
await client.send_message(message.channel, "{}'s avatar: {}".format(member.name, member.avatar_url))
else:
await client.send_message(message.channel, "`User not found.`")
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "avatar [name or mention]`")
async def lmgtfy(message):
msg = message.content.split()
if len(msg) >= 2:
msg = "+".join(msg[1:])
await client.send_message(message.channel, "http://lmgtfy.com/?q=" + msg)
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "lmgtfy [search terms]`")
def getTriviabyChannel(channel):
for t in trivia_sessions:
if t.channel == channel:
return t
return False
async def roll(message):
msg = message.content.split()
if len(msg) == 2:
if msg[1].isdigit():
msg[1] = int(msg[1])
if msg[1] < 99999 and msg[1] > 1:
await client.send_message(message.channel, "{} :game_die: `{}` :game_die:".format(message.author.mention, str(randint(1, msg[1]))))
else:
await client.send_message(message.channel, "{} `A number between 1 and 99999, maybe? :)`".format(message.author.mention))
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "roll [number]`")
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "roll [number]`")
async def checkFilter(message): #WIP
msg = message.content.lower()
if message.server.id in badwords:
for word in badwords[message.server.id]:
if msg.find(word.lower()) != -1:
if canDeleteMessages(message):
await client.delete_message(message)
logger.info("Message eliminated.")
return True
else:
logger.info("Couldn't delete message. I need permissions.")
return False
return False
async def checkRegex(message): #WIP
msg = message.content #.lower()?
if message.server.id in badwords_regex:
for pattern in badwords_regex[message.server.id]:
rr = re.search(pattern, msg, re.I | re.U)
if rr != None:
if canDeleteMessages(message):
await client.delete_message(message)
logger.info("Message eliminated. Regex: " + pattern)
return True
else:
logger.info("Couldn't delete message. I need permissions.")
return False
return False
async def twitchCheck(message):
msg = message.content.split()
if len(msg) == 2:
try:
url = "https://api.twitch.tv/kraken/streams/" + msg[1]
async with aiohttp.get(url) as r:
data = await r.json()
if "error" in data:
await client.send_message(message.channel, "{} `There is no streamer named {}`".format(message.author.mention, msg[1]))
elif "stream" in data:
if data["stream"] != None:
await client.send_message(message.channel, "{} `{} is online!` {}".format(message.author.mention, msg[1], "http://www.twitch.tv/" + msg[1]))
else:
await client.send_message(message.channel, "{} `{} is offline.`".format(message.author.mention, msg[1]))
else:
await client.send_message(message.channel, "{} `There is no streamer named {}`".format(message.author.mention, msg[1]))
except:
await client.send_message(message.channel, "{} `Error.`".format(message.author.mention))
else:
await client.send_message(message.channel, "{} `".format(message.author.mention) + settings["PREFIX"] + "twitch [name]`")
async def triviaList(message):
await client.send_message(message.author, trivia_help)
msg = "**Available trivia lists:** \n\n```"
lists = os.listdir("trivia/")
if lists:
clean_list = []
for txt in lists:
if txt.endswith(".txt") and " " not in txt:
txt = txt.replace(".txt", "")
clean_list.append(txt)
if clean_list:
for i, d in enumerate(clean_list):
if i % 4 == 0 and i != 0:
msg = msg + d + "\n"
else:
msg = msg + d + "\t"
msg += "```"
await client.send_message(message.author, msg)
else:
await client.send_message(message.author, "There are no trivia lists available.")
else:
await client.send_message(message.author, "There are no trivia lists available.")
async def uptime(message):
up = abs(uptime_timer - int(time.perf_counter()))
up = str(datetime.timedelta(seconds=up))
await client.send_message(message.channel, "`Uptime: {}`".format(up))
async def checkVoice(message):
if not client.is_voice_connected():
if message.author.voice_channel:
if message.author.voice_channel.permissions_for(message.server.me).connect:
await client.join_voice_channel(message.author.voice_channel)
else:
await client.send_message(message.channel, "{} `I need permissions to join that channel.`".format(message.author.mention))
return False
else:
await client.send_message(message.channel, "{} `You need to join a voice channel first.`".format(message.author.mention))
return False
return True
async def playVideo(message):
global musicPlayer, currentPlaylist
toDelete = None
if await checkVoice(message):
pattern = "(?:youtube\.com\/watch\?v=)(.*)|(?:youtu.be/)(.*)"
rr = re.search(pattern, message.content, re.I | re.U)
        if rr is None:
            await client.send_message(message.channel, "{} `Invalid link.`".format(message.author.mention))
            return False
        if rr.group(1) != None:
id = rr.group(1)
elif rr.group(2) != None:
id = rr.group(2)
else:
await client.send_message(message.channel, "{} `Invalid link.`".format(message.author.mention))
return False
stopMusic()
if settings["DOWNLOADMODE"]:
toDelete = await client.send_message(message.channel, "`I'm in download mode. It might take a bit for me to start. I'll delete this message as soon as I'm ready.`".format(id, message.author.name))
data = {"filename" : 'https://www.youtube.com/watch?v=' + id, "type" : "singleSong"}
currentPlaylist = Playlist(data)
if canDeleteMessages(message):
await client.send_message(message.channel, "`Playing youtube video {} requested by {}`".format(await youtubeparser.getTitle(currentPlaylist.playlist[currentPlaylist.current]), message.author.name))
await client.delete_message(message)
if toDelete:
await client.delete_message(toDelete)
# currentPlaylist.playlist = ['https://www.youtube.com/watch?v=' + id]
# musicPlayer = client.voice.create_ytdl_player('https://www.youtube.com/watch?v=' + id, options=youtube_dl_options)
# musicPlayer.start()
#!addfavorite compatibility stuff
async def playPlaylist(message, sing=False):
global musicPlayer, currentPlaylist
p = settings["PREFIX"]
msg = message.content
toDelete = None
if not sing:
if msg != p + "play" or msg != "play ":
if await checkVoice(message):
msg = message.content[6:]
if dataIO.fileIO("playlists/" + msg + ".txt", "check"):
stopMusic()
data = {"filename" : msg, "type" : "playlist"}
if settings["DOWNLOADMODE"]:
                        toDelete = await client.send_message(message.channel, "`I'm in download mode. It might take a bit for me to start and switch between tracks. I'll delete this message as soon as the current playlist stops.`")
currentPlaylist = Playlist(data)
await asyncio.sleep(2)
await currentPlaylist.songSwitcher()
if toDelete:
await client.delete_message(toDelete)
else:
await client.send_message(message.channel, "{} `That playlist doesn't exist.`".format(message.author.mention))
else:
if await checkVoice(message):
stopMusic()
msg = ["*Oh Daisy..*"]
playlist = ["https://www.youtube.com/watch?v=E7WQ1tdxSqI"]
song = choice(playlist)
data = {"filename" : song, "type" : "singleSong"}
if settings["DOWNLOADMODE"]:
                toDelete = await client.send_message(message.channel, "`I'm in download mode. It might take a bit for me to start. I'll delete this message as soon as I'm ready.`")
currentPlaylist = Playlist(data)
# currentPlaylist.playlist = [song]
# musicPlayer = client.voice.create_ytdl_player(song, options=youtube_dl_options)
# musicPlayer.start()
if toDelete:
await client.delete_message(toDelete)
await client.send_message(message.channel, choice(msg))
async def playLocal(message):
global currentPlaylist
p = settings["PREFIX"]
msg = message.content.split(" ")
if await checkVoice(message):
if len(msg) == 2:
localplaylists = getLocalPlaylists()
if localplaylists and ("/" not in msg[1] and "\\" not in msg[1]):
if msg[1] in localplaylists:
files = []
if glob.glob("localtracks/" + msg[1] + "/*.mp3"):
files.extend(glob.glob("localtracks/" + msg[1] + "/*.mp3"))
if glob.glob("localtracks/" + msg[1] + "/*.flac"):
files.extend(glob.glob("localtracks/" + msg[1] + "/*.flac"))
stopMusic()
data = {"filename" : files, "type" : "local"}
currentPlaylist = Playlist(data)
await asyncio.sleep(2)
await currentPlaylist.songSwitcher()
else:
                    await client.send_message(message.channel, "`There is no local playlist called {}. {}local or {}locallist to receive the list.`".format(msg[1], p, p))
else:
await client.send_message(message.channel, "`There are no valid playlists in the localtracks folder.`")
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "local [playlist]`")
def getLocalPlaylists():
dirs = []
files = os.listdir("localtracks/")
for f in files:
if os.path.isdir("localtracks/" + f) and " " not in f:
if glob.glob("localtracks/" + f + "/*.mp3") != []:
dirs.append(f)
elif glob.glob("localtracks/" + f + "/*.flac") != []:
dirs.append(f)
if dirs != []:
return dirs
else:
return False
async def leaveVoice():
if client.is_voice_connected():
stopMusic()
await client.voice.disconnect()
async def listPlaylists(message):
msg = "Available playlists: \n\n```"
files = os.listdir("playlists/")
if files:
for i, f in enumerate(files):
if f.endswith(".txt"):
if i % 4 == 0 and i != 0:
msg = msg + f.replace(".txt", "") + "\n"
else:
msg = msg + f.replace(".txt", "") + "\t"
msg += "```"
await client.send_message(message.author, msg)
else:
await client.send_message(message.author, "There are no playlists.")
async def listLocal(message):
msg = "Available local playlists: \n\n```"
dirs = getLocalPlaylists()
if dirs:
for i, d in enumerate(dirs):
if i % 4 == 0 and i != 0:
msg = msg + d + "\n"
else:
msg = msg + d + "\t"
msg += "```"
await client.send_message(message.author, msg)
else:
await client.send_message(message.author, "There are no local playlists.")
def stopMusic():
global musicPlayer, currentPlaylist
if currentPlaylist != None:
currentPlaylist.stop = True
if musicPlayer != None:
musicPlayer.stop()
async def transferPlaylist(message):
msg = message.attachments[0]
if msg["filename"].endswith(".txt"):
        if not dataIO.fileIO("playlists/" + msg["filename"], "check"): # "check" returns True if the file already exists
r = await aiohttp.get(msg["url"])
r = await r.text()
data = r.replace("\r", "")
data = data.split()
if isPlaylistValid(data) and isPlaylistNameValid(msg["filename"].replace(".txt", "")):
data = { "author" : message.author.id,
"playlist": data}
dataIO.fileIO("playlists/" + msg["filename"], "save", data)
await client.send_message(message.channel, "`Playlist added. Name: {}`".format(msg["filename"].replace(".txt", "")))
else:
await client.send_message(message.channel, "`Something is wrong with the playlist or its filename. Type " + settings["PREFIX"] + "audio help to read how to format it properly.`")
else:
await client.send_message(message.channel, "`A playlist with that name already exists. Change the filename and resubmit it.`")
def isPlaylistValid(data):
data = [y for y in data if y != ""] # removes all empty elements
data = [y for y in data if y != "\n"]
for link in data:
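        # The run of dots in each alternative below requires at least as many further characters as a YouTube video ID (11).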
pattern = "^(https:\/\/www\.youtube\.com\/watch\?v=...........*)|^(https:\/\/youtu.be\/...........*)|^(https:\/\/youtube\.com\/watch\?v=...........*)"
rr = re.search(pattern, link, re.I | re.U)
if rr == None:
return False
return True
def isPlaylistNameValid(name):
for l in name:
if l.isdigit() or l.isalpha() or l == "_":
pass
else:
return False
return True
def isPlaylistLinkValid(link):
pattern = "^https:\/\/www.youtube.com\/playlist\?list=(.[^:/]*)"
rr = re.search(pattern, link, re.I | re.U)
if not rr == None:
return rr.group(1)
else:
return False
async def addPlaylist(message):
msg = message.content.split(" ")
if len(msg) == 3:
_, name, link = msg
if isPlaylistNameValid(name) and len(name) < 25 and isPlaylistLinkValid(link):
if dataIO.fileIO("playlists/" + name + ".txt", "check"):
await client.send_message(message.channel, "`A playlist with that name already exists.`")
return False
links = await youtubeparser.parsePlaylist(link)
if links:
data = { "author" : message.author.id,
"playlist": links}
dataIO.fileIO("playlists/" + name + ".txt", "save", data)
await client.send_message(message.channel, "`Playlist added. Name: {}`".format(name))
else:
await client.send_message(message.channel, "`Something went wrong. Either the link was incorrect or I was unable to retrieve the page.`")
else:
await client.send_message(message.channel, "`Something is wrong with the playlist's link or its filename. Remember, the name must be with only numbers, letters and underscores. Link must be this format: https://www.youtube.com/playlist?list=PLe8jmEHFkvsaDOOWcREvkgFoj6MD0pXXX`")
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "addplaylist [name] [link]`")
async def delPlaylist(message):
msg = message.content.split(" ")
if len(msg) == 2:
_, filename = msg
if dataIO.fileIO("playlists/" + filename + ".txt", "check"):
authorid = dataIO.fileIO("playlists/" + filename + ".txt", "load")["author"]
if message.author.id == authorid or isMemberAdmin(message):
os.remove("playlists/" + filename + ".txt")
await client.send_message(message.channel, "`Playlist {} removed.`".format(filename))
else:
await client.send_message(message.channel, "`Only the playlist's author and admins can do that.`")
else:
await client.send_message(message.channel, "`There is no playlist with that name.`")
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "delplaylist [name]`")
async def getSongTitle(message):
title = await youtubeparser.getTitle(currentPlaylist.playlist[currentPlaylist.current])
if title:
await client.send_message(message.channel, "`Current song: {}\n{}`".format(title, currentPlaylist.playlist[currentPlaylist.current]))
else:
await client.send_message(message.channel, "`I couldn't retrieve the current song's title.`")
async def addToFavorites(message):
if currentPlaylist:
if dataIO.fileIO("favorites/" + message.author.id + ".txt", "check"):
data = dataIO.fileIO("favorites/" + message.author.id + ".txt", "load")
else:
data = []
data.append(currentPlaylist.playlist[currentPlaylist.current])
dataIO.fileIO("favorites/" + message.author.id + ".txt", "save", data)
await client.send_message(message.channel, "{} `This song has been added to your favorites.`".format(message.author.mention))
else:
await client.send_message(message.channel, "{} `No song is being played`".format(message.author.mention))
async def removeFromFavorites(message):
if currentPlaylist:
if dataIO.fileIO("favorites/" + message.author.id + ".txt", "check"):
data = dataIO.fileIO("favorites/" + message.author.id + ".txt", "load")
if currentPlaylist.playlist[currentPlaylist.current] in data:
data.remove(currentPlaylist.playlist[currentPlaylist.current])
dataIO.fileIO("favorites/" + message.author.id + ".txt", "save", data)
await client.send_message(message.channel, "{} `This song has been removed from your favorites.`".format(message.author.mention))
else:
await client.send_message(message.channel, "{} `This song isn't in your favorites.`".format(message.author.mention))
else:
            await client.send_message(message.channel, "{} `You don't have any favorites yet. Start adding them with {}addfavorite`".format(message.author.mention, settings["PREFIX"]))
else:
await client.send_message(message.channel, "{} `No song is being played`".format(message.author.mention))
async def playFavorites(message):
global musicPlayer, currentPlaylist
if await checkVoice(message):
if dataIO.fileIO("favorites/" + message.author.id + ".txt", "check"):
data = {"filename" : message.author.id, "type" : "favorites"}
stopMusic()
currentPlaylist = Playlist(data)
await asyncio.sleep(2)
await currentPlaylist.songSwitcher()
else:
            await client.send_message(message.channel, "{} `You don't have any favorites yet. Start adding them with {}addfavorite`".format(message.author.mention, settings["PREFIX"]))
async def sendPlaylist(message):
if currentPlaylist:
msg = "Here's the current playlist:\n```"
for track in currentPlaylist.playlist:
msg += track
msg += "\n"
if len(msg) >= 1900:
msg += "```"
await client.send_message(message.author, msg)
msg = "```"
if msg != "```":
msg += "```"
await client.send_message(message.author, msg)
async def setVolume(message):
global settings
p = settings["PREFIX"]
msg = message.content
if len(msg.split(" ")) == 2:
msg = msg.split(" ")
try:
vol = float(msg[1])
if vol >= 0 and vol <= 1:
settings["VOLUME"] = vol
await(client.send_message(message.channel, "`Volume set. Next track will have the desired volume.`"))
dataIO.fileIO("json/settings.json", "save", settings)
else:
await(client.send_message(message.channel, "`Volume must be between 0 and 1. Example: " + p + "volume 0.50`"))
except:
await(client.send_message(message.channel, "`Volume must be between 0 and 1. Example: " + p + "volume 0.15`"))
else:
await(client.send_message(message.channel, "`Volume must be between 0 and 1. Example: " + p + "volume 0.15`"))
async def downloadMode(message):
if isMemberAdmin(message):
if settings["DOWNLOADMODE"]:
settings["DOWNLOADMODE"] = False
await(client.send_message(message.channel, "`Download mode disabled. This mode is unstable and tracks might interrupt. Also, the volume settings will not have any effect.`"))
else:
settings["DOWNLOADMODE"] = True
await(client.send_message(message.channel, "`Download mode enabled.`"))
dataIO.fileIO("json/settings.json", "save", settings)
else:
        await(client.send_message(message.channel, "`I'm sorry {} I'm afraid I can't do that.`".format(message.author.name)))
############## ADMIN COMMANDS ###################
async def shutdown(message):
if isMemberAdmin(message):
await client.send_message(message.channel, "Daisy, Daisy, give me *your answer do...* ***Shutting down*** ")
await client.logout()
try:
exit(1)
except SystemExit: #clean exit
logger.info("Shutting down as requested by " + message.author.id + "...")
pass
else:
        await client.send_message(message.channel, "`I'm sorry {} I'm afraid I can't do that.`".format(message.author.name))
async def join(message):
if isMemberAdmin(message):
msg = message.content.split()
if len(msg) > 1:
await client.accept_invite(msg[1])
else:
logger.warning("Join: missing parameters")
else:
        await client.send_message(message.channel, "`I'm sorry {} I'm afraid I can't do that.`".format(message.author.name))
async def leave(message):
if isMemberAdmin(message):
await client.send_message(message.channel, "`Bye.`")
await client.leave_server(message.channel.server)
else:
        await client.send_message(message.channel, "`I'm sorry {} I'm afraid I can't do that.`".format(message.author.name))
async def shush(message):
global shush_list
if isMemberAdmin(message):
await client.send_message(message.channel, "`Ok, I'll ignore this channel.`")
shush_list.append(message.channel.id)
dataIO.fileIO("json/shushlist.json", "save", shush_list)
logger.info("Saved silenced channels database.")
else:
        await client.send_message(message.channel, "`I'm sorry {} I'm afraid I can't do that.`".format(message.author.name))
async def talk(message):
if isMemberAdmin(message):
if message.channel.id in shush_list:
shush_list.remove(message.channel.id)
dataIO.fileIO("json/shushlist.json", "save", shush_list)
logger.info("Saved silenced channels database.")
await client.send_message(message.channel, "`Aaand I'm back.`")
else:
        await client.send_message(message.channel, "`I'm sorry {} I'm afraid I can't do that.`".format(message.author.name))
async def addBadWords(message):
global badwords
if isMemberAdmin(message):
msg = message.content.split()
if len(msg) >= 2:
del msg[0]
if not message.server.id in badwords:
badwords[message.server.id] = []
for word in msg:
if word.find("/") != -1:
word = word.replace("/", " ")
badwords[message.server.id].append(word)
await client.send_message(message.channel, "`Updated banned words database.`")
dataIO.fileIO("json/filter.json", "save", badwords)
logger.info("Saved filter words.")
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "addwords [word1] [word2] [phrase/with/many/words] (...)`")
else:
        await client.send_message(message.channel, "`I'm sorry {} I'm afraid I can't do that.`".format(message.author.name))
async def removeBadWords(message):
global badwords
if isMemberAdmin(message):
msg = message.content.split()
if len(msg) >= 2:
del msg[0]
if message.server.id in badwords:
for w in msg:
try:
if w.find("/") != -1:
w = w.replace("/", " ")
badwords[message.server.id].remove(w)
except:
pass
await client.send_message(message.channel, "`Updated banned words database.`")
dataIO.fileIO("json/filter.json", "save", badwords)
logger.info("Saved filter words.")
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "removewords [word1] [word2] [phrase/with/many/words](...)`")
else:
        await client.send_message(message.channel, "`I'm sorry {} I'm afraid I can't do that.`".format(message.author.name))
async def changeName(message):
global settings
if isMemberAdmin(message):
msg = message.content.split()
if len(msg) == 2:
try:
await client.edit_profile(settings["PASSWORD"], username=msg[1])
except Exception as e:
logger.error(e)
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "name [new name]`")
else:
        await client.send_message(message.channel, "`I'm sorry {} I'm afraid I can't do that.`".format(message.author.name))
async def addRegex(message):
global badwords_regex
if isMemberAdmin(message):
msg = message.content
msg = msg[10:]
if not message.server.id in badwords_regex:
badwords_regex[message.server.id] = []
badwords_regex[message.server.id].append(msg)
await client.send_message(message.channel, "`Updated regex filter database.`")
dataIO.fileIO("json/regex_filter.json", "save", badwords_regex)
logger.info("Saved regex filter database.")
else:
        await client.send_message(message.channel, "`I'm sorry {} I'm afraid I can't do that.`".format(message.author.name))
async def removeRegex(message):
global badwords_regex
if isMemberAdmin(message):
msg = message.content
msg = msg[13:]
if message.server.id in badwords_regex:
if msg in badwords_regex[message.server.id]:
badwords_regex[message.server.id].remove(msg)
await client.send_message(message.channel, "`Updated regex filter database.`")
dataIO.fileIO("json/regex_filter.json", "save", badwords_regex)
logger.info("Saved regex filter database.")
else:
await client.send_message(message.channel, "`No match.`")
else:
        await client.send_message(message.channel, "`I'm sorry {} I'm afraid I can't do that.`".format(message.author.name))
async def reloadSettings(message):
if isMemberAdmin(message):
loadDataFromFiles(True)
await client.send_message(message.channel, "`Settings and files reloaded.`")
else:
        await client.send_message(message.channel, "`I'm sorry {} I'm afraid I can't do that.`".format(message.author.name))
async def cleanup(message):
errorMsg = "`" + settings["PREFIX"] + "cleanup [number] " + settings["PREFIX"] + "cleanup [name/mention] [number]`"
if isMemberAdmin(message):
if canDeleteMessages(message):
try:
async for x in client.logs_from(message.channel, limit=1):
pass
except TypeError:
logger.error("Your discord.py is outdated. Update it to use cleanup.")
return False
msg = message.content.split()
if len(msg) == 2:
if msg[1].isdigit():
n = int(msg[1])
async for x in client.logs_from(message.channel, limit=n+1):
await client.delete_message(x)
else:
await client.send_message(message.channel, errorMsg)
elif len(msg) == 3:
_, name, limit = msg
try:
limit = int(limit)
except:
await client.send_message(message.channel, errorMsg)
return False
if message.mentions:
m = message.mentions[0]
else:
m = discord.utils.get(message.server.members, name=name)
if m and limit != 0:
checksLeft = 5
await client.delete_message(message)
while checksLeft != 0 and limit != 0:
async for x in client.logs_from(message.channel, limit=100):
if x.author == m and limit != 0:
await client.delete_message(x)
limit -= 1
checksLeft -= 1
else:
await client.send_message(message.channel, errorMsg)
else:
await client.send_message(message.channel, errorMsg)
else:
await client.send_message(message.channel, "`I need permissions to delete messages.`")
else:
        await client.send_message(message.channel, "`I'm sorry {} I'm afraid I can't do that.`".format(message.author.name))
def isMemberAdmin(message):
if not message.channel.is_private:
if discord.utils.get(message.author.roles, name=settings["ADMINROLE"]) != None:
return True
else:
return False
else:
return False
def canDeleteMessages(message):
return message.channel.permissions_for(message.server.me).manage_messages
async def addTwitchAlert(message):
global twitchStreams
added = False
if isMemberAdmin(message):
msg = message.content.split(" ")
if len(msg) == 2:
if "twitch.tv/" in msg[1]:
await client.send_message(message.channel, "`Enter the name of the stream, not the URL.`")
return False
for i, stream in enumerate(twitchStreams):
if stream["NAME"] == msg[1] and message.channel.id in stream["CHANNELS"]:
await client.send_message(message.channel, "`I'm already monitoring that stream in this channel.`")
return False
            for i, stream in enumerate(twitchStreams):
if stream["NAME"] == msg[1] and message.channel.id not in stream["CHANNELS"]: # twitchAlert is already monitoring this streamer but not in this channel
twitchStreams[i]["CHANNELS"].append(message.channel.id)
added = True
if not added: # twitchAlert wasn't monitoring this streamer
twitchStreams.append({"CHANNELS" : [message.channel.id], "NAME" : msg[1], "ALREADY_ONLINE" : False})
dataIO.fileIO("json/twitch.json", "save", twitchStreams)
            await client.send_message(message.channel, "`I will always send an alert in this channel whenever {}'s stream is online. Use {}stoptwitchalert [name] to stop it.`".format(msg[1], settings["PREFIX"]))
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "twitchalert [name]`")
else:
        await client.send_message(message.channel, "`I'm sorry {} I'm afraid I can't do that.`".format(message.author.name))
async def removeTwitchAlert(message):
global twitchStreams
if isMemberAdmin(message):
msg = message.content.split(" ")
if len(msg) == 2:
for i, stream in enumerate(twitchStreams):
if stream["NAME"] == msg[1] and message.channel.id in stream["CHANNELS"]:
if len(stream["CHANNELS"]) == 1:
twitchStreams.remove(stream)
else:
twitchStreams[i]["CHANNELS"].remove(message.channel.id)
dataIO.fileIO("json/twitch.json", "save", twitchStreams)
await client.send_message(message.channel, "`I will stop sending alerts about {}'s stream in this channel.`".format(msg[1]))
return True
await client.send_message(message.channel, "`There's no alert for {}'s stream in this channel.`".format(msg[1]))
else:
await client.send_message(message.channel, "`" + settings["PREFIX"] + "stoptwitchalert [name]`")
else:
        await client.send_message(message.channel, "`I'm sorry {} I'm afraid I can't do that.`".format(message.author.name))
async def blacklist(message, mode):
global blacklisted_users
p = settings["PREFIX"]
if isMemberAdmin(message):
if message.mentions:
m = message.mentions[0]
else:
if len(message.content.split(" ")) >= 2:
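                # Slice off the command itself (prefix + "blacklist " is 11 characters; the shorter removal command is 9) to leave just the username.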
if message.content.startswith(p + "blacklist"):
name = message.content[11:]
else:
name = message.content[9:]
m = discord.utils.get(message.server.members, name=name)
if m == None:
await client.send_message(message.channel, "`User not found.`")
return False
else:
return False
if mode == "add":
blacklisted_users.append(m.id)
await client.send_message(message.channel, "`{} is now in blacklist.`".format(m.name))
else:
if m.id in blacklisted_users:
blacklisted_users.remove(m.id)
await client.send_message(message.channel, "`{} has been removed from blacklist.`".format(m.name))
else:
await client.send_message(message.channel, "`User not in blacklist.`")
return False
dataIO.fileIO("json/blacklist.json", "save", blacklisted_users)
else:
        await client.send_message(message.channel, "`I'm sorry {} I'm afraid I can't do that.`".format(message.author.name))
async def modifySettings(message):
global settings
if isMemberAdmin(message):
msg = message.content.split(" ")
if len(msg) == 3:
_, key, value = msg
if key.lower() == "password" or key.lower() == "email" or key.lower() == "debug_id":
await client.send_message(message.channel, "`You cannot modify EMAIL, PASSWORD or DEBUG_ID`")
return False
if key.lower() == "prefix" and len(value) != 1:
await client.send_message(message.channel, "`Prefix cannot be more than one character.`")
return False
if key in settings.keys():
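                # Coerce chat input: "true"/"false" become booleans, digit strings become ints, anything else stays a string.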
if value.lower() == "true": value = True
elif value.lower() == "false": value = False
else:
try:
value = int(value)
except:
pass
settings[key] = value
dataIO.fileIO("json/settings.json", "save", settings)
loadHelp()
if "economy" in modules:
economy.settings = settings
economy.loadHelp()
await client.send_message(message.channel, "`'{}' set to '{}'`".format(key, str(value)))
else:
await client.send_message(message.channel, "`That setting doesn't exist`")
else:
msg = "```"
for k, v in settings.items():
if k != "EMAIL" and k != "PASSWORD":
msg += k + ": " + str(v) + "\n"
msg += "```\n"
msg += settings["PREFIX"] + "setting [setting] [value]"
await client.send_message(message.channel, msg)
else:
        await client.send_message(message.channel, "`I'm sorry {} I'm afraid I can't do that.`".format(message.author.name))
################################################
async def twitchAlert():
global twitchStreams
CHECK_DELAY = 10
while True:
if twitchStreams and client.is_logged_in:
to_delete = []
save = False
consistency_check = twitchStreams
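            # NOTE: consistency_check aliases twitchStreams, so the equality test below only catches the global being rebound (e.g. by a settings reload), not in-place edits to the list.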
for i, stream in enumerate(twitchStreams):
if twitchStreams == consistency_check: #prevents buggy behavior if twitchStreams gets modified during the iteration
try:
url = "https://api.twitch.tv/kraken/streams/" + stream["NAME"]
async with aiohttp.get(url) as r:
data = await r.json()
if "status" in data:
if data["status"] == 404: #Stream doesn't exist, remove from list
to_delete.append(stream)
elif "stream" in data:
if data["stream"] != None:
if not stream["ALREADY_ONLINE"]:
for channel in stream["CHANNELS"]:
try:
await client.send_message(client.get_channel(channel), "`{} is online!` {}".format(stream["NAME"], "http://www.twitch.tv/" + stream["NAME"]))
except: #In case of missing permissions
pass
twitchStreams[i]["ALREADY_ONLINE"] = True
save = True
else:
if stream["ALREADY_ONLINE"]:
twitchStreams[i]["ALREADY_ONLINE"] = False
save = True
except Exception as e:
logger.warning(e)
if save: #Saves online status, in case the bot needs to be restarted it can prevent message spam
dataIO.fileIO("json/twitch.json", "save", twitchStreams)
save = False
await asyncio.sleep(CHECK_DELAY)
else:
break
if to_delete:
for invalid_stream in to_delete:
twitchStreams.remove(invalid_stream)
dataIO.fileIO("json/twitch.json", "save", twitchStreams)
else:
await asyncio.sleep(5)
async def customCommand(message):
msg = message.content[1:]
if message.channel.server.id in commands:
cmdlist = commands[message.channel.server.id]
if msg in cmdlist:
await client.send_message(message.channel, cmdlist[msg] )
async def debug(message): # If you don't know what this is, *leave it alone*
if message.author.id == settings["DEBUG_ID"]: # Never assign DEBUG_ID to someone other than you
msg = message.content.split("`") # Example: !debug `message.author.id`
if len(msg) == 3:
_, cmd, _ = msg
try:
result = str(eval(cmd))
if settings["PASSWORD"].lower() not in result.lower() and settings["EMAIL"].lower() not in result.lower():
await client.send_message(message.channel, "```" + result + "```")
else:
await client.send_message(message.author, "`Are you trying to send my credentials in chat? Because that's how you send my credentials in chat.`")
except Exception as e:
await client.send_message(message.channel, "```" + str(e) + "```")
async def execFunc(message): #same warning as the other function ^
if message.author.id == settings["DEBUG_ID"]:
msg = message.content.split("`") # Example: !exec `import this`
if len(msg) == 3:
_, cmd, _ = msg
try:
result = exec(cmd)
#await client.send_message(message.channel, "```" + str(result) + "```")
except Exception as e:
await client.send_message(message.channel, "```" + str(e) + "```")
def console():
while True:
try:
exec(input(""))
except Exception:
traceback.print_exc()
print("\n")
def loadDataFromFiles(loadsettings=False):
global proverbs, commands, trivia_questions, badwords, badwords_regex, shush_list, twitchStreams, blacklisted_users, apis
proverbs = dataIO.loadProverbs()
logger.info("Loaded " + str(len(proverbs)) + " proverbs.")
commands = dataIO.fileIO("json/commands.json", "load")
logger.info("Loaded " + str(len(commands)) + " lists of custom commands.")
badwords = dataIO.fileIO("json/filter.json", "load")
logger.info("Loaded " + str(len(badwords)) + " lists of filtered words.")
blacklisted_users = dataIO.fileIO("json/blacklist.json", "load")
logger.info("Loaded " + str(len(blacklisted_users)) + " blacklisted users.")
badwords_regex = dataIO.fileIO("json/regex_filter.json", "load")
logger.info("Loaded " + str(len(badwords_regex)) + " regex lists.")
shush_list = dataIO.fileIO("json/shushlist.json", "load")
logger.info("Loaded " + str(len(shush_list)) + " silenced channels.")
twitchStreams = dataIO.fileIO("json/twitch.json", "load")
logger.info("Loaded " + str(len(twitchStreams)) + " streams to monitor.")
apis = dataIO.fileIO("json/apis.json", "load")
logger.info("Loaded APIs configuration.")
if loadsettings:
global settings
settings = dataIO.fileIO("json/settings.json", "load")
loadHelp()
if "economy" in modules:
economy.settings = settings
economy.loadHelp()
def main():
global ball, greetings, greetings_caps, stopwatches, trivia_sessions, message, gameSwitcher, uptime_timer, musicPlayer, currentPlaylist
global logger, settings, poll_sessions
logger = loggerSetup()
dataIO.logger = logger
dataIO.migration()
dataIO.createEmptyFiles()
settings = dataIO.loadAndCheckSettings()
loadDataFromFiles()
ball = ["As I see it, yes", "It is certain", "It is decidedly so", "Most likely", "Outlook good",
"Signs point to yes", "Without a doubt", "Yes", "Yes – definitely", "You may rely on it", "Reply hazy, try again",
"Ask again later", "Better not tell you now", "Cannot predict now", "Concentrate and ask again",
"Don't count on it", "My reply is no", "My sources say no", "Outlook not so good", "Very doubtful"]
greetings = ["Hey.", "Yes?", "Hi.", "I'm listening.", "Hello.", "I'm here."]
greetings_caps = ["DON'T SCREAM", "WHAT", "WHAT IS IT?!", "ì_ì", "NO CAPS LOCK"]
stopwatches = {}
trivia_sessions = []
poll_sessions = []
message = ""
gameSwitcher = botPlays()
if "economy" in modules:
economy.client = client
economy.initialize()
uptime_timer = int(time.perf_counter())
musicPlayer = None
currentPlaylist = None
loop.create_task(twitchAlert())
#client.run(settings["EMAIL"], settings["PASSWORD"])
yield from client.login(settings["EMAIL"], settings["PASSWORD"])
yield from client.connect()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(main())
except discord.LoginFailure:
logger.error("The credentials you put in settings.json are wrong. Take a look.")
except Exception as e:
logger.error(e)
loop.run_until_complete(client.logout())
finally:
loop.close()
|
AnsonRS/HAL-9000
|
hal.py
|
Python
|
gpl-3.0
| 81,472
|
[
"Brian"
] |
de58cce232282129e9447bd8a944e443ca1fb270c4869dbec2da87ca1214c572
|
# _ __
# | |/ /___ ___ _ __ ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# Keeper Commander
# Copyright 2021 Keeper Security Inc.
# Contact: ops@keepersecurity.com
#
import argparse
import base64
import getpass
import logging
from typing import Optional
from .. import api, crypto, utils
from .base import GroupCommand, Command, dump_report_data
from ..params import KeeperParams
from ..error import CommandError
from ..proto import client_pb2 as client_proto, breachwatch_pb2 as breachwatch_proto
breachwatch_list_parser = argparse.ArgumentParser(prog='breachwatch-list')
breachwatch_list_parser.add_argument('--all', '-a', dest='all', action='store_true', help='Display all breached records')
#breachwatch_list_parser.add_argument('--ignored', '-i', dest='ignored', action='store_true', help='Display ignored records')
breachwatch_password_parser = argparse.ArgumentParser(prog='breachwatch-password')
breachwatch_password_parser.add_argument('passwords', type=str, nargs='*', help='Password')
breachwatch_scan_parser = argparse.ArgumentParser(prog='breachwatch-scan')
breachwatch_ignore_parser = argparse.ArgumentParser(prog='breachwatch-ignore')
breachwatch_ignore_parser.add_argument('records', type=str, nargs='+', help='Record UID to ignore')
def register_commands(commands):
commands['breachwatch'] = BreachWatchCommand()
def register_command_info(aliases, command_info):
aliases['bw'] = 'breachwatch'
command_info['breachwatch'] = 'Breach Watch.'
class BreachWatchCommand(GroupCommand):
def __init__(self):
super(BreachWatchCommand, self).__init__()
self.register_command('list', BreachWatchListCommand(), 'Displays a list of breached passwords.')
self.register_command('ignore', BreachWatchIgnoreCommand(), 'Ignores breached passwords.')
self.register_command('password', BreachWatchPasswordCommand(),
'Check a password against our database of breached accounts.')
self.register_command('scan', BreachWatchScanCommand(), 'Scan vault passwords.')
self.default_verb = 'list'
def validate(self, params): # type: (KeeperParams) -> None
if not params.breach_watch:
raise CommandError('breachwatch',
'BreachWatch is not active. Please visit the Web Vault at https://keepersecurity.com/vault')
class BreachWatchListCommand(Command):
def get_parser(self):
return breachwatch_list_parser
def execute(self, params, **kwargs): # type: (KeeperParams, ...) -> None
table = []
for record, _ in params.breach_watch.get_records_by_status(params, ['WEAK', 'BREACHED']):
row = [record.record_uid, record.title, record.login]
table.append(row)
if table:
table.sort(key=lambda x: x[1].casefold())
total = len(table)
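            # Without --all, cap the output at the first 30 rows once more than 32 are detected.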
if not kwargs.get('all', False) and total > 32:
table = table[:30]
dump_report_data(table, ['Record UID', 'Title', 'Login'], title='Detected High-Risk Password(s)')
if len(table) < total:
logging.info('')
logging.info('%d records skipped.', total - len(table))
else:
logging.info('No breached records detected')
has_records_to_scan = any(params.breach_watch.get_records_to_scan(params))
if has_records_to_scan:
            logging.info('Some passwords in your vault have not been scanned.\n'
'Use "breachwatch scan" command to scan your passwords against our database '
'of breached accounts on the Dark Web.')
class BreachWatchPasswordCommand(Command):
def get_parser(self): # type: () -> Optional[argparse.ArgumentParser]
return breachwatch_password_parser
def execute(self, params, **kwargs): # type: (KeeperParams, **any) -> any
passwords = kwargs.get('passwords')
echo_password = True
if not passwords:
echo_password = False
passwords = []
try:
password = getpass.getpass(prompt='Password to Check: ', stream=None)
if not password:
return
passwords.append(password)
except KeyboardInterrupt:
print('')
euids = []
for result in params.breach_watch.scan_passwords(params, passwords):
if result[1].euid:
euids.append(result[1].euid)
pwd = result[0] if echo_password else "".rjust(len(result[0]), "*")
print(f'{pwd:>16s}: {"WEAK" if result[1].breachDetected else "GOOD" }')
if euids:
params.breach_watch.delete_euids(params, euids)
class BreachWatchScanCommand(Command):
def get_parser(self): # type: () -> Optional[argparse.ArgumentParser]
return breachwatch_scan_parser
def execute(self, params, **kwargs): # type: (KeeperParams, any) -> any
records = [x[0] for x in params.breach_watch.get_records_to_scan(params)]
passwords = set((x.password for x in records if x.password))
if len(passwords):
euid_to_delete = []
bw_requests = []
scans = {x[0]: x[1] for x in params.breach_watch.scan_passwords(params, passwords)}
for record in records:
if params.breach_watch_records:
if record.record_uid in params.breach_watch_records:
bwr = params.breach_watch_records[record.record_uid]
if 'data_unencrypted' in bwr:
                            record_passwords = bwr['data_unencrypted'].get('passwords', [])
                            for password in record_passwords:
euid = password.get('euid')
if euid:
euid_to_delete.append(base64.b64decode(euid))
if record.password in scans:
bwrq = breachwatch_proto.BreachWatchRecordRequest()
bwrq.recordUid = utils.base64_url_decode(record.record_uid)
bwrq.breachWatchInfoType = breachwatch_proto.RECORD
bwrq.updateUserWhoScanned = True
hash_status = scans[record.password]
bw_password = client_proto.BWPassword()
bw_password.value = record.password
bw_password.status = client_proto.WEAK if hash_status.breachDetected else client_proto.GOOD
bw_password.euid = hash_status.euid
bw_data = client_proto.BreachWatchData()
bw_data.passwords.append(bw_password)
data = bw_data.SerializeToString()
try:
record_key = params.record_cache[record.record_uid]['record_key_unencrypted']
bwrq.encryptedData = crypto.encrypt_aes_v2(data, record_key)
except:
continue
bw_requests.append(bwrq)
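            # Flush the updates in batches of 999 records per request (presumably a server-side cap on request size).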
while bw_requests:
chunk = bw_requests[0:999]
bw_requests = bw_requests[999:]
rq = breachwatch_proto.BreachWatchUpdateRequest()
rq.breachWatchRecordRequest.extend(chunk)
rs = api.communicate_rest(params, rq, 'breachwatch/update_record_data',
rs_type=breachwatch_proto.BreachWatchUpdateResponse)
params.sync_data = True
if euid_to_delete:
params.breach_watch.delete_euids(params, euid_to_delete)
logging.info(f'Scanned {len(passwords)} passwords.')
class BreachWatchIgnoreCommand(Command):
def get_parser(self): # type: () -> Optional[argparse.ArgumentParser]
return breachwatch_ignore_parser
def execute(self, params, **kwargs): # type: (KeeperParams, any) -> any
if not params.record_cache:
return
if not params.breach_watch_records:
return
records = kwargs.get('records')
if not records:
return
record_uids = set()
for record_uid in records:
if record_uid in record_uids:
continue
record_uids.add(record_uid)
if record_uid not in params.record_cache:
logging.warning(f'Record UID "{record_uid}" not found. Skipping.')
continue
if record_uid not in params.breach_watch_records:
logging.warning(f'Record UID "{record_uid}": Breach Watch information not found')
continue
if len(record_uids) == 0:
return
bw_requests = []
for record, password in params.breach_watch.get_records_by_status(params, ['WEAK', 'BREACHED']):
if record.record_uid not in record_uids:
continue
record_uids.remove(record.record_uid)
bwrq = breachwatch_proto.BreachWatchRecordRequest()
bwrq.recordUid = utils.base64_url_decode(record.record_uid)
bwrq.breachWatchInfoType = breachwatch_proto.RECORD
bwrq.updateUserWhoScanned = False
bw_password = client_proto.BWPassword()
bw_password.value = password.get('value')
bw_password.resolved = utils.current_milli_time()
bw_password.status = client_proto.IGNORE
euid = password.get('euid')
if euid:
bw_password.euid = base64.b64decode(euid)
bw_data = client_proto.BreachWatchData()
bw_data.passwords.append(bw_password)
data = bw_data.SerializeToString()
try:
record_key = params.record_cache[record.record_uid]['record_key_unencrypted']
bwrq.encryptedData = crypto.encrypt_aes_v2(data, record_key)
except:
logging.warning(f'Record UID "{record.record_uid}" encryption error. Skipping.')
continue
bw_requests.append(bwrq)
for record_uid in record_uids:
            logging.warning(f'Record UID "{record_uid}" cannot be ignored. Skipping.')
if bw_requests:
params.sync_data = True
if params.breach_watch.send_audit_events:
params.queue_audit_event('bw_record_ignored')
while bw_requests:
chunk = bw_requests[0:999]
bw_requests = bw_requests[999:]
rq = breachwatch_proto.BreachWatchUpdateRequest()
rq.breachWatchRecordRequest.extend(chunk)
rs = api.communicate_rest(params, rq, 'breachwatch/update_record_data',
rs_type=breachwatch_proto.BreachWatchUpdateResponse)
for status in rs.breachWatchRecordStatus:
logging.info(f'{utils.base64_url_encode(status.recordUid)}: {status.status} {status.reason}')
|
Keeper-Security/Commander
|
keepercommander/commands/breachwatch.py
|
Python
|
mit
| 11,030
|
[
"VisIt"
] |
5eccdebceb1fe76902f7c9892f4ab4c130cad46bd63f1bccbe8cd12fff28ad74
|
# Copyright (C) 2012,2013,2015
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*************************
**espressopp.VerletList**
*************************
.. function:: espressopp.VerletList(system, cutoff, exclusionlist)
:param system:
:param cutoff:
:param exclusionlist: (default: [])
:type system:
:type cutoff:
:type exclusionlist:
.. function:: espressopp.VerletList.exclude(exclusionlist)
:param exclusionlist:
:type exclusionlist:
:rtype:
.. function:: espressopp.VerletList.getAllPairs()
:rtype:
.. function:: espressopp.VerletList.localSize()
:rtype:
.. function:: espressopp.VerletList.totalSize()
:rtype:
"""
from espressopp import pmi
import _espressopp
import espressopp
from espressopp.esutil import cxxinit
class VerletListLocal(_espressopp.VerletList):
def __init__(self, system, cutoff, exclusionlist=[]):
if pmi.workerIsActive():
if (exclusionlist == []):
# rebuild list in constructor
cxxinit(self, _espressopp.VerletList, system, cutoff, True)
else:
# do not rebuild list in constructor
cxxinit(self, _espressopp.VerletList, system, cutoff, False)
# add exclusions
for pair in exclusionlist:
pid1, pid2 = pair
self.cxxclass.exclude(self, pid1, pid2)
# now rebuild list with exclusions
self.cxxclass.rebuild(self)
def totalSize(self):
if pmi.workerIsActive():
return self.cxxclass.totalSize(self)
def localSize(self):
if pmi.workerIsActive():
return self.cxxclass.localSize(self)
def exclude(self, exclusionlist):
"""
Each processor takes the broadcasted exclusion list
and adds it to its list.
"""
if pmi.workerIsActive():
for pair in exclusionlist:
pid1, pid2 = pair
self.cxxclass.exclude(self, pid1, pid2)
# rebuild list with exclusions
self.cxxclass.rebuild(self)
def getAllPairs(self):
if pmi.workerIsActive():
pairs=[]
npairs=self.localSize()
for i in range(npairs):
pair=self.cxxclass.getPair(self, i+1)
pairs.append(pair)
return pairs
if pmi.isController:
class VerletList(object):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.VerletListLocal',
pmiproperty = [ 'builds' ],
pmicall = [ 'totalSize', 'exclude', 'connect', 'disconnect', 'getVerletCutoff' ],
pmiinvoke = [ 'getAllPairs' ]
)
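

# Minimal usage sketch (hypothetical setup, mirroring the signatures listed in
# the module docstring above; `system` would come from an espressopp.System()
# configured elsewhere):
#
#   vl = espressopp.VerletList(system, cutoff=2.5, exclusionlist=[(1, 2)])
#   print(vl.totalSize())   # total number of pairs across all workers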
|
capoe/espressopp.soap
|
src/VerletList.py
|
Python
|
gpl-3.0
| 3,547
|
[
"ESPResSo"
] |
89b1e790be51eb31cccce4ca97b68299a2214b67ebc640aea1ac46dd43d5eaed
|
from scipy.ndimage.filters import gaussian_filter
def smoothvoxels(data_4d, fwhm, time):
    """
    Return a 'smoothed' version of data_4d.
    Parameters
    ----------
    data_4d : numpy array of 4 dimensions
        The image data of one subject
    fwhm : width of the gaussian kernel (gaussian_filter interprets this value
        as sigma, the standard deviation, despite the FWHM-style name)
    time : time slice (4th dimension)
    Returns
    -------
    smooth_results : 3d array of the smoothed data from data_4d at the
        indicated time slice.
    """
    time_slice = data_4d[..., time]
    smooth_results = gaussian_filter(time_slice, fwhm)
    return smooth_results
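

# Minimal usage sketch (hypothetical shapes, not part of the original module):
# smooth one time point of a random 4D volume. gaussian_filter treats the
# second argument as sigma (a standard deviation), not a true FWHM.
if __name__ == "__main__":
    import numpy as np
    demo = np.random.rand(64, 64, 30, 100)    # (x, y, z, time)
    smoothed = smoothvoxels(demo, fwhm=2, time=0)
    print(smoothed.shape)                     # -> (64, 64, 30)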
|
reychil/project-alpha-1
|
code/utils/functions/smooth.py
|
Python
|
bsd-3-clause
| 707
|
[
"Gaussian"
] |
a8a0bece4ce37fd5a64fabcda55f772cb2db67f0827b3403c0a0c2cfda3c6937
|
import math
from ..libmp.backend import xrange
class QuadratureRule(object):
"""
Quadrature rules are implemented using this class, in order to
simplify the code and provide a common infrastructure
for tasks such as error estimation and node caching.
You can implement a custom quadrature rule by subclassing
:class:`QuadratureRule` and implementing the appropriate
methods. The subclass can then be used by :func:`~mpmath.quad` by
passing it as the *method* argument.
:class:`QuadratureRule` instances are supposed to be singletons.
:class:`QuadratureRule` therefore implements instance caching
in :func:`~mpmath.__new__`.
"""
def __init__(self, ctx):
self.ctx = ctx
self.standard_cache = {}
self.transformed_cache = {}
self.interval_count = {}
def clear(self):
"""
Delete cached node data.
"""
self.standard_cache = {}
self.transformed_cache = {}
self.interval_count = {}
def calc_nodes(self, degree, prec, verbose=False):
r"""
Compute nodes for the standard interval `[-1, 1]`. Subclasses
should probably implement only this method, and use
:func:`~mpmath.get_nodes` method to retrieve the nodes.
"""
raise NotImplementedError
def get_nodes(self, a, b, degree, prec, verbose=False):
"""
Return nodes for given interval, degree and precision. The
nodes are retrieved from a cache if already computed;
otherwise they are computed by calling :func:`~mpmath.calc_nodes`
and are then cached.
Subclasses should probably not implement this method,
but just implement :func:`~mpmath.calc_nodes` for the actual
node computation.
"""
key = (a, b, degree, prec)
if key in self.transformed_cache:
return self.transformed_cache[key]
orig = self.ctx.prec
try:
self.ctx.prec = prec+20
# Get nodes on standard interval
if (degree, prec) in self.standard_cache:
nodes = self.standard_cache[degree, prec]
else:
nodes = self.calc_nodes(degree, prec, verbose)
self.standard_cache[degree, prec] = nodes
# Transform to general interval
nodes = self.transform_nodes(nodes, a, b, verbose)
if key in self.interval_count:
self.transformed_cache[key] = nodes
else:
self.interval_count[key] = True
finally:
self.ctx.prec = orig
return nodes
def transform_nodes(self, nodes, a, b, verbose=False):
r"""
Rescale standardized nodes (for `[-1, 1]`) to a general
interval `[a, b]`. For a finite interval, a simple linear
change of variables is used. Otherwise, the following
transformations are used:
.. math ::
\lbrack a, \infty \rbrack : t = \frac{1}{x} + (a-1)
\lbrack -\infty, b \rbrack : t = (b+1) - \frac{1}{x}
\lbrack -\infty, \infty \rbrack : t = \frac{x}{\sqrt{1-x^2}}
"""
ctx = self.ctx
a = ctx.convert(a)
b = ctx.convert(b)
one = ctx.one
if (a, b) == (-one, one):
return nodes
half = ctx.mpf(0.5)
new_nodes = []
if ctx.isinf(a) or ctx.isinf(b):
if (a, b) == (ctx.ninf, ctx.inf):
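                # t = x/sqrt(1-x**2); the weight picks up dt/dx = (1-x**2)**(-3/2)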
p05 = -half
for x, w in nodes:
x2 = x*x
px1 = one-x2
spx1 = px1**p05
x = x*spx1
w *= spx1/px1
new_nodes.append((x, w))
elif a == ctx.ninf:
b1 = b+1
for x, w in nodes:
u = 2/(x+one)
x = b1-u
w *= half*u**2
new_nodes.append((x, w))
elif b == ctx.inf:
a1 = a-1
for x, w in nodes:
u = 2/(x+one)
x = a1+u
w *= half*u**2
new_nodes.append((x, w))
elif a == ctx.inf or b == ctx.ninf:
return [(x,-w) for (x,w) in self.transform_nodes(nodes, b, a, verbose)]
else:
raise NotImplementedError
else:
# Simple linear change of variables
C = (b-a)/2
D = (b+a)/2
for x, w in nodes:
new_nodes.append((D+C*x, C*w))
return new_nodes
def guess_degree(self, prec):
"""
Given a desired precision `p` in bits, estimate the degree `m`
of the quadrature required to accomplish full accuracy for
typical integrals. By default, :func:`~mpmath.quad` will perform up
to `m` iterations. The value of `m` should be a slight
overestimate, so that "slightly bad" integrals can be dealt
with automatically using a few extra iterations. On the
other hand, it should not be too big, so :func:`~mpmath.quad` can
quit within a reasonable amount of time when it is given
an "unsolvable" integral.
The default formula used by :func:`~mpmath.guess_degree` is tuned
for both :class:`TanhSinh` and :class:`GaussLegendre`.
The output is roughly as follows:
+---------+---------+
| `p` | `m` |
+=========+=========+
| 50 | 6 |
+---------+---------+
| 100 | 7 |
+---------+---------+
| 500 | 10 |
+---------+---------+
| 3000 | 12 |
+---------+---------+
This formula is based purely on a limited amount of
experimentation and will sometimes be wrong.
"""
# Expected degree
# XXX: use mag
g = int(4 + max(0, self.ctx.log(prec/30.0, 2)))
# Reasonable "worst case"
g += 2
return g
def estimate_error(self, results, prec, epsilon):
r"""
Given results from integrations `[I_1, I_2, \ldots, I_k]` done
with a quadrature of rule of degree `1, 2, \ldots, k`, estimate
the error of `I_k`.
For `k = 2`, we estimate `|I_{\infty}-I_2|` as `|I_2-I_1|`.
For `k > 2`, we extrapolate `|I_{\infty}-I_k| \approx |I_{k+1}-I_k|`
from `|I_k-I_{k-1}|` and `|I_k-I_{k-2}|` under the assumption
that each degree increment roughly doubles the accuracy of
the quadrature rule (this is true for both :class:`TanhSinh`
and :class:`GaussLegendre`). The extrapolation formula is given
by Borwein, Bailey & Girgensohn. Although not very conservative,
this method seems to be very robust in practice.
"""
if len(results) == 2:
return abs(results[0]-results[1])
try:
if results[-1] == results[-2] == results[-3]:
return self.ctx.zero
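            # D1 = log10|I_k - I_(k-1)| and D2 = log10|I_k - I_(k-2)|; the exponent built from them below is the Borwein, Bailey & Girgensohn extrapolation heuristic.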
D1 = self.ctx.log(abs(results[-1]-results[-2]), 10)
D2 = self.ctx.log(abs(results[-1]-results[-3]), 10)
except ValueError:
return epsilon
D3 = -prec
D4 = min(0, max(D1**2/D2, 2*D1, D3))
return self.ctx.mpf(10) ** int(D4)
def summation(self, f, points, prec, epsilon, max_degree, verbose=False):
"""
Main integration function. Computes the 1D integral over
the interval specified by *points*. For each subinterval,
performs quadrature of degree from 1 up to *max_degree*
until :func:`~mpmath.estimate_error` signals convergence.
:func:`~mpmath.summation` transforms each subintegration to
the standard interval and then calls :func:`~mpmath.sum_next`.
"""
ctx = self.ctx
I = err = ctx.zero
for i in xrange(len(points)-1):
a, b = points[i], points[i+1]
if a == b:
continue
# XXX: we could use a single variable transformation,
# but this is not good in practice. We get better accuracy
# by having 0 as an endpoint.
if (a, b) == (ctx.ninf, ctx.inf):
_f = f
f = lambda x: _f(-x) + _f(x)
a, b = (ctx.zero, ctx.inf)
results = []
for degree in xrange(1, max_degree+1):
nodes = self.get_nodes(a, b, degree, prec, verbose)
if verbose:
print("Integrating from %s to %s (degree %s of %s)" % \
(ctx.nstr(a), ctx.nstr(b), degree, max_degree))
results.append(self.sum_next(f, nodes, degree, prec, results, verbose))
if degree > 1:
err = self.estimate_error(results, prec, epsilon)
if err <= epsilon:
break
if verbose:
print("Estimated error:", ctx.nstr(err))
I += results[-1]
if err > epsilon:
if verbose:
print("Failed to reach full accuracy. Estimated error:", ctx.nstr(err))
return I, err
def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
r"""
Evaluates the step sum `\sum w_k f(x_k)` where the *nodes* list
contains the `(w_k, x_k)` pairs.
:func:`~mpmath.summation` will supply the list *results* of
values computed by :func:`~mpmath.sum_next` at previous degrees, in
case the quadrature rule is able to reuse them.
"""
return self.ctx.fdot((w, f(x)) for (x,w) in nodes)
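

# Illustrative sketch (not part of mpmath): per the QuadratureRule docstring, a
# custom rule only needs to override calc_nodes(). This hypothetical
# MidpointRule returns (x, w) pairs for the composite midpoint rule on [-1, 1],
# doubling the node count each time the degree is incremented.
class MidpointRule(QuadratureRule):
    def calc_nodes(self, degree, prec, verbose=False):
        ctx = self.ctx
        n = 2**degree
        h = ctx.mpf(2)/n                   # width of each subinterval
        return [(-ctx.one + (k + ctx.mpf(0.5))*h, h) for k in xrange(n)]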
class TanhSinh(QuadratureRule):
r"""
This class implements "tanh-sinh" or "doubly exponential"
quadrature. This quadrature rule is based on the Euler-Maclaurin
integral formula. By performing a change of variables involving
nested exponentials / hyperbolic functions (hence the name), the
derivatives at the endpoints vanish rapidly. Since the error term
in the Euler-Maclaurin formula depends on the derivatives at the
endpoints, a simple step sum becomes extremely accurate. In
practice, this means that doubling the number of evaluation
points roughly doubles the number of accurate digits.
Comparison to Gauss-Legendre:
* Initial computation of nodes is usually faster
* Handles endpoint singularities better
* Handles infinite integration intervals better
* Is slower for smooth integrands once nodes have been computed
The implementation of the tanh-sinh algorithm is based on the
description given in Borwein, Bailey & Girgensohn, "Experimentation
in Mathematics - Computational Paths to Discovery", A K Peters,
2003, pages 312-313. In the present implementation, a few
improvements have been made:
* A more efficient scheme is used to compute nodes (exploiting
recurrence for the exponential function)
* The nodes are computed successively instead of all at once
Various documents describing the algorithm are available online, e.g.:
* http://crd.lbl.gov/~dhbailey/dhbpapers/dhb-tanh-sinh.pdf
* http://users.cs.dal.ca/~jborwein/tanh-sinh.pdf
"""
def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
"""
Step sum for tanh-sinh quadrature of degree `m`. We exploit the
fact that half of the abscissas at degree `m` are precisely the
abscissas from degree `m-1`. Thus reusing the result from
the previous level allows a 2x speedup.
"""
h = self.ctx.mpf(2)**(-degree)
# Abscissas overlap, so reusing saves half of the time
if previous:
S = previous[-1]/(h*2)
else:
S = self.ctx.zero
S += self.ctx.fdot((w,f(x)) for (x,w) in nodes)
return h*S
def calc_nodes(self, degree, prec, verbose=False):
r"""
The abscissas and weights for tanh-sinh quadrature of degree
`m` are given by
.. math::
x_k = \tanh(\pi/2 \sinh(t_k))
w_k = \pi/2 \cosh(t_k) / \cosh(\pi/2 \sinh(t_k))^2
where `t_k = t_0 + hk` for a step length `h \sim 2^{-m}`. The
list of nodes is actually infinite, but the weights die off so
rapidly that only a few are needed.
"""
ctx = self.ctx
nodes = []
extra = 20
ctx.prec += extra
tol = ctx.ldexp(1, -prec-10)
pi4 = ctx.pi/4
# For simplicity, we work in steps h = 1/2^n, with the first point
# offset so that we can reuse the sum from the previous degree
# We define degree 1 to include the "degree 0" steps, including
# the point x = 0. (It doesn't work well otherwise; not sure why.)
t0 = ctx.ldexp(1, -degree)
if degree == 1:
#nodes.append((mpf(0), pi4))
#nodes.append((-mpf(0), pi4))
nodes.append((ctx.zero, ctx.pi/2))
h = t0
else:
h = t0*2
# Since h is fixed, we can compute the next exponential
# by simply multiplying by exp(h)
expt0 = ctx.exp(t0)
a = pi4 * expt0
b = pi4 / expt0
udelta = ctx.exp(h)
urdelta = 1/udelta
for k in xrange(0, 20*2**degree+1):
# Reference implementation:
# t = t0 + k*h
# x = tanh(pi/2 * sinh(t))
# w = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2
# Fast implementation. Note that c = exp(pi/2 * sinh(t))
c = ctx.exp(a-b)
d = 1/c
co = (c+d)/2
si = (c-d)/2
x = si / co
w = (a+b) / co**2
diff = abs(x-1)
if diff <= tol:
break
nodes.append((x, w))
nodes.append((-x, w))
a *= udelta
b *= urdelta
if verbose and k % 300 == 150:
# Note: the number displayed is rather arbitrary. Should
# figure out how to print something that looks more like a
# percentage
print("Calculating nodes:", ctx.nstr(-ctx.log(diff, 10) / prec))
ctx.prec -= extra
return nodes
class GaussLegendre(QuadratureRule):
r"""
This class implements Gauss-Legendre quadrature, which is
exceptionally efficient for polynomials and polynomial-like (i.e.
very smooth) integrands.
The abscissas and weights are given by roots and values of
Legendre polynomials, which are the orthogonal polynomials
on `[-1, 1]` with respect to the unit weight
(see :func:`~mpmath.legendre`).
In this implementation, we take the "degree" `m` of the quadrature
to denote a Gauss-Legendre rule of degree `3 \cdot 2^m` (following
Borwein, Bailey & Girgensohn). This way we get quadratic, rather
than linear, convergence as the degree is incremented.
Comparison to tanh-sinh quadrature:
* Is faster for smooth integrands once nodes have been computed
* Initial computation of nodes is usually slower
* Handles endpoint singularities worse
* Handles infinite integration intervals worse
"""
def calc_nodes(self, degree, prec, verbose=False):
r"""
Calculates the abscissas and weights for Gauss-Legendre
quadrature of degree of given degree (actually `3 \cdot 2^m`).
"""
ctx = self.ctx
# It is important that the epsilon is set lower than the
# "real" epsilon
epsilon = ctx.ldexp(1, -prec-8)
# Fairly high precision might be required for accurate
# evaluation of the roots
orig = ctx.prec
ctx.prec = int(prec*1.5)
if degree == 1:
x = ctx.sqrt(ctx.mpf(3)/5)
w = ctx.mpf(5)/9
nodes = [(-x,w),(ctx.zero,ctx.mpf(8)/9),(x,w)]
ctx.prec = orig
return nodes
nodes = []
n = 3*2**(degree-1)
upto = n//2 + 1
for j in xrange(1, upto):
# Asymptotic formula for the roots
r = ctx.mpf(math.cos(math.pi*(j-0.25)/(n+0.5)))
# Newton iteration
while 1:
t1, t2 = 1, 0
# Evaluates the Legendre polynomial using its defining
# recurrence relation
for j1 in xrange(1,n+1):
t3, t2, t1 = t2, t1, ((2*j1-1)*r*t1 - (j1-1)*t2)/j1
t4 = n*(r*t1-t2)/(r**2-1)
a = t1/t4
r = r - a
if abs(a) < epsilon:
break
x = r
w = 2/((1-r**2)*t4**2)
if verbose and j % 30 == 15:
print("Computing nodes (%i of %i)" % (j, upto))
nodes.append((x, w))
nodes.append((-x, w))
ctx.prec = orig
return nodes
class QuadratureMethods(object):
def __init__(ctx, *args, **kwargs):
ctx._gauss_legendre = GaussLegendre(ctx)
ctx._tanh_sinh = TanhSinh(ctx)
def quad(ctx, f, *points, **kwargs):
r"""
Computes a single, double or triple integral over a given
1D interval, 2D rectangle, or 3D cuboid. A basic example::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> quad(sin, [0, pi])
2.0
A basic 2D integral::
>>> f = lambda x, y: cos(x+y/2)
>>> quad(f, [-pi/2, pi/2], [0, pi])
4.0
**Interval format**
The integration range for each dimension may be specified
using a list or tuple. Arguments are interpreted as follows:
``quad(f, [x1, x2])`` -- calculates
`\int_{x_1}^{x_2} f(x) \, dx`
``quad(f, [x1, x2], [y1, y2])`` -- calculates
`\int_{x_1}^{x_2} \int_{y_1}^{y_2} f(x,y) \, dy \, dx`
``quad(f, [x1, x2], [y1, y2], [z1, z2])`` -- calculates
`\int_{x_1}^{x_2} \int_{y_1}^{y_2} \int_{z_1}^{z_2} f(x,y,z)
\, dz \, dy \, dx`
Endpoints may be finite or infinite. An interval descriptor
may also contain more than two points. In this
case, the integration is split into subintervals, between
each pair of consecutive points. This is useful for
dealing with mid-interval discontinuities, or integrating
over large intervals where the function is irregular or
oscillates.
**Options**
:func:`~mpmath.quad` recognizes the following keyword arguments:
*method*
Chooses integration algorithm (described below).
*error*
If set to true, :func:`~mpmath.quad` returns `(v, e)` where `v` is the
integral and `e` is the estimated error.
*maxdegree*
Maximum degree of the quadrature rule to try before
quitting.
*verbose*
Print details about progress.
**Algorithms**
Mpmath presently implements two integration algorithms: tanh-sinh
quadrature and Gauss-Legendre quadrature. These can be selected
using *method='tanh-sinh'* or *method='gauss-legendre'* or by
passing the classes *method=TanhSinh*, *method=GaussLegendre*.
The functions :func:`~mpmath.quadts` and :func:`~mpmath.quadgl` are also available
as shortcuts.
Both algorithms have the property that doubling the number of
evaluation points roughly doubles the accuracy, so both are ideal
for high precision quadrature (hundreds or thousands of digits).
At high precision, computing the nodes and weights for the
integration can be expensive (more expensive than computing the
function values). To make repeated integrations fast, nodes
are automatically cached.
The advantages of the tanh-sinh algorithm are that it tends to
handle endpoint singularities well, and that the nodes are cheap
to compute on the first run. For these reasons, it is used by
:func:`~mpmath.quad` as the default algorithm.
Gauss-Legendre quadrature often requires fewer function
evaluations, and is therefore often faster for repeated use, but
the algorithm does not handle endpoint singularities as well and
the nodes are more expensive to compute. Gauss-Legendre quadrature
can be a better choice if the integrand is smooth and repeated
integrations are required (e.g. for multiple integrals).
See the documentation for :class:`TanhSinh` and
:class:`GaussLegendre` for additional details.
**Examples of 1D integrals**
Intervals may be infinite or half-infinite. The following two
examples evaluate the limits of the inverse tangent function
(`\int 1/(1+x^2) = \tan^{-1} x`), and the Gaussian integral
`\int_{-\infty}^{\infty} \exp(-x^2)\,dx = \sqrt{\pi}`::
>>> mp.dps = 15
>>> quad(lambda x: 2/(x**2+1), [0, inf])
3.14159265358979
>>> quad(lambda x: exp(-x**2), [-inf, inf])**2
3.14159265358979
Integrals can typically be resolved to high precision.
The following computes 50 digits of `\pi` by integrating the
area of the half-circle defined by `x^2 + y^2 \le 1`,
`-1 \le x \le 1`, `y \ge 0`::
>>> mp.dps = 50
>>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1])
3.1415926535897932384626433832795028841971693993751
One can just as well compute 1000 digits (output truncated)::
>>> mp.dps = 1000
>>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1]) #doctest:+ELLIPSIS
3.141592653589793238462643383279502884...216420199
Complex integrals are supported. The following computes
a residue at `z = 0` by integrating counterclockwise along the
diamond-shaped path from `1` to `+i` to `-1` to `-i` to `1`::
>>> mp.dps = 15
>>> chop(quad(lambda z: 1/z, [1,j,-1,-j,1]))
(0.0 + 6.28318530717959j)
**Examples of 2D and 3D integrals**
Here are several nice examples of analytically solvable
2D integrals (taken from MathWorld [1]) that can be evaluated
to high precision fairly rapidly by :func:`~mpmath.quad`::
>>> mp.dps = 30
>>> f = lambda x, y: (x-1)/((1-x*y)*log(x*y))
>>> quad(f, [0, 1], [0, 1])
0.577215664901532860606512090082
>>> +euler
0.577215664901532860606512090082
>>> f = lambda x, y: 1/sqrt(1+x**2+y**2)
>>> quad(f, [-1, 1], [-1, 1])
3.17343648530607134219175646705
>>> 4*log(2+sqrt(3))-2*pi/3
3.17343648530607134219175646705
>>> f = lambda x, y: 1/(1-x**2 * y**2)
>>> quad(f, [0, 1], [0, 1])
1.23370055013616982735431137498
>>> pi**2 / 8
1.23370055013616982735431137498
>>> quad(lambda x, y: 1/(1-x*y), [0, 1], [0, 1])
1.64493406684822643647241516665
>>> pi**2 / 6
1.64493406684822643647241516665
Multiple integrals may be done over infinite ranges::
>>> mp.dps = 15
>>> print(quad(lambda x,y: exp(-x-y), [0, inf], [1, inf]))
0.367879441171442
>>> print(1/e)
0.367879441171442
For nonrectangular areas, one can call :func:`~mpmath.quad` recursively.
For example, we can replicate the earlier computation of `\pi`, this time
integrating over the unit circle and measuring its area with a genuine
double quadrature::
>>> f = lambda x: quad(lambda y: 1, [-sqrt(1-x**2), sqrt(1-x**2)])
>>> quad(f, [-1, 1])
3.14159265358979
Here is a simple triple integral::
>>> mp.dps = 15
>>> f = lambda x,y,z: x*y/(1+z)
>>> quad(f, [0,1], [0,1], [1,2], method='gauss-legendre')
0.101366277027041
>>> (log(3)-log(2))/4
0.101366277027041
**Singularities**
Both tanh-sinh and Gauss-Legendre quadrature are designed to
integrate smooth (infinitely differentiable) functions. Neither
algorithm copes well with mid-interval singularities (such as
mid-interval discontinuities in `f(x)` or `f'(x)`).
The best solution is to split the integral into parts::
>>> mp.dps = 15
>>> quad(lambda x: abs(sin(x)), [0, 2*pi]) # Bad
3.99900894176779
>>> quad(lambda x: abs(sin(x)), [0, pi, 2*pi]) # Good
4.0
The tanh-sinh rule often works well for integrands having a
singularity at one or both endpoints::
>>> mp.dps = 15
>>> quad(log, [0, 1], method='tanh-sinh') # Good
-1.0
>>> quad(log, [0, 1], method='gauss-legendre') # Bad
-0.999932197413801
However, the result may still be inaccurate for some functions::
>>> quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
1.99999999946942
This problem is not due to the quadrature rule per se, but to
numerical amplification of errors in the nodes. The problem can be
circumvented by temporarily increasing the precision::
>>> mp.dps = 30
>>> a = quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
>>> mp.dps = 15
>>> +a
2.0
**Highly variable functions**
For functions that are smooth (in the sense of being infinitely
differentiable) but contain sharp mid-interval peaks or many
"bumps", :func:`~mpmath.quad` may fail to provide full accuracy. For
example, with default settings, :func:`~mpmath.quad` is able to integrate
`\sin(x)` accurately over an interval of length 100 but not over
length 1000::
>>> quad(sin, [0, 100]); 1-cos(100) # Good
0.137681127712316
0.137681127712316
>>> quad(sin, [0, 1000]); 1-cos(1000) # Bad
-37.8587612408485
0.437620923709297
One solution is to break the integration into 10 intervals of
length 100::
>>> quad(sin, linspace(0, 1000, 10)) # Good
0.437620923709297
Another is to increase the degree of the quadrature::
>>> quad(sin, [0, 1000], maxdegree=10) # Also good
0.437620923709297
Whether splitting the interval or increasing the degree is
more efficient differs from case to case. Another example is the
function `1/(1+x^2)`, which has a sharp peak centered around
`x = 0`::
>>> f = lambda x: 1/(1+x**2)
>>> quad(f, [-100, 100]) # Bad
3.64804647105268
>>> quad(f, [-100, 100], maxdegree=10) # Good
3.12159332021646
>>> quad(f, [-100, 0, 100]) # Also good
3.12159332021646
**References**
1. http://mathworld.wolfram.com/DoubleIntegral.html
"""
rule = kwargs.get('method', 'tanh-sinh')
if type(rule) is str:
if rule == 'tanh-sinh':
rule = ctx._tanh_sinh
elif rule == 'gauss-legendre':
rule = ctx._gauss_legendre
else:
raise ValueError("unknown quadrature rule: %s" % rule)
else:
rule = rule(ctx)
verbose = kwargs.get('verbose')
dim = len(points)
orig = prec = ctx.prec
epsilon = ctx.eps/8
m = kwargs.get('maxdegree') or rule.guess_degree(prec)
points = [ctx._as_points(p) for p in points]
try:
ctx.prec += 20
if dim == 1:
v, err = rule.summation(f, points[0], prec, epsilon, m, verbose)
elif dim == 2:
v, err = rule.summation(lambda x: \
rule.summation(lambda y: f(x,y), \
points[1], prec, epsilon, m)[0],
points[0], prec, epsilon, m, verbose)
elif dim == 3:
v, err = rule.summation(lambda x: \
rule.summation(lambda y: \
rule.summation(lambda z: f(x,y,z), \
points[2], prec, epsilon, m)[0],
points[1], prec, epsilon, m)[0],
points[0], prec, epsilon, m, verbose)
else:
raise NotImplementedError("quadrature must have dim 1, 2 or 3")
finally:
ctx.prec = orig
if kwargs.get("error"):
return +v, err
return +v
def quadts(ctx, *args, **kwargs):
"""
Performs tanh-sinh quadrature. The call
quadts(func, *points, ...)
is simply a shortcut for:
quad(func, *points, ..., method=TanhSinh)
For example, a single integral and a double integral:
quadts(lambda x: exp(cos(x)), [0, 1])
quadts(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
See the documentation for quad for information about how points
arguments and keyword arguments are parsed.
See documentation for TanhSinh for algorithmic information about
tanh-sinh quadrature.
"""
kwargs['method'] = 'tanh-sinh'
return ctx.quad(*args, **kwargs)
def quadgl(ctx, *args, **kwargs):
"""
Performs Gauss-Legendre quadrature. The call
quadgl(func, *points, ...)
is simply a shortcut for:
quad(func, *points, ..., method=GaussLegendre)
For example, a single integral and a double integral:
quadgl(lambda x: exp(cos(x)), [0, 1])
quadgl(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
See the documentation for quad for information about how points
arguments and keyword arguments are parsed.
See documentation for TanhSinh for algorithmic information about
tanh-sinh quadrature.
"""
kwargs['method'] = 'gauss-legendre'
return ctx.quad(*args, **kwargs)
def quadosc(ctx, f, interval, omega=None, period=None, zeros=None):
r"""
Calculates
.. math ::
I = \int_a^b f(x) dx
where at least one of `a` and `b` is infinite and where
`f(x) = g(x) \cos(\omega x + \phi)` for some slowly
decreasing function `g(x)`. With proper input, :func:`~mpmath.quadosc`
can also handle oscillatory integrals where the oscillation
rate is different from a pure sine or cosine wave.
In the standard case when `|a| < \infty, b = \infty`,
:func:`~mpmath.quadosc` works by evaluating the infinite series
.. math ::
I = \int_a^{x_1} f(x) dx +
\sum_{k=1}^{\infty} \int_{x_k}^{x_{k+1}} f(x) dx
where `x_k` are consecutive zeros (alternatively
some other periodic reference point) of `f(x)`.
Accordingly, :func:`~mpmath.quadosc` requires information about the
zeros of `f(x)`. For a periodic function, you can specify
the zeros by either providing the angular frequency `\omega`
(*omega*) or the *period* `2 \pi/\omega`. In general, you can
specify the `n`-th zero by providing the *zeros* argument.
Below is an example of each::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> f = lambda x: sin(3*x)/(x**2+1)
>>> quadosc(f, [0,inf], omega=3)
0.37833007080198
>>> quadosc(f, [0,inf], period=2*pi/3)
0.37833007080198
>>> quadosc(f, [0,inf], zeros=lambda n: pi*n/3)
0.37833007080198
>>> (ei(3)*exp(-3)-exp(3)*ei(-3))/2 # Computed by Mathematica
0.37833007080198
Note that *zeros* was specified to multiply `n` by the
*half-period*, not the full period. In theory, it does not matter
whether each partial integral is done over a half period or a full
period. However, if done over half-periods, the infinite series
passed to :func:`~mpmath.nsum` becomes an *alternating series* and this
typically makes the extrapolation much more efficient.
Here is an example of an integration over the entire real line,
and a half-infinite integration starting at `-\infty`::
>>> quadosc(lambda x: cos(x)/(1+x**2), [-inf, inf], omega=1)
1.15572734979092
>>> pi/e
1.15572734979092
>>> quadosc(lambda x: cos(x)/x**2, [-inf, -1], period=2*pi)
-0.0844109505595739
>>> cos(1)+si(1)-pi/2
-0.0844109505595738
Of course, the integrand may contain a complex exponential just as
well as a real sine or cosine::
>>> quadosc(lambda x: exp(3*j*x)/(1+x**2), [-inf,inf], omega=3)
(0.156410688228254 + 0.0j)
>>> pi/e**3
0.156410688228254
>>> quadosc(lambda x: exp(3*j*x)/(2+x+x**2), [-inf,inf], omega=3)
(0.00317486988463794 - 0.0447701735209082j)
>>> 2*pi/sqrt(7)/exp(3*(j+sqrt(7))/2)
(0.00317486988463794 - 0.0447701735209082j)
**Non-periodic functions**
If `f(x) = g(x) h(x)` for some function `h(x)` that is not
strictly periodic, *omega* or *period* might not work, and it might
be necessary to use *zeros*.
A notable exception can be made for Bessel functions which, though not
periodic, are "asymptotically periodic" in a sufficiently strong sense
that the sum extrapolation will work out::
>>> quadosc(j0, [0, inf], period=2*pi)
1.0
>>> quadosc(j1, [0, inf], period=2*pi)
1.0
More properly, one should provide the exact Bessel function zeros::
>>> j0zero = lambda n: findroot(j0, pi*(n-0.25))
>>> quadosc(j0, [0, inf], zeros=j0zero)
1.0
For an example where *zeros* becomes necessary, consider the
complete Fresnel integrals
.. math ::
\int_0^{\infty} \cos x^2\,dx = \int_0^{\infty} \sin x^2\,dx
= \sqrt{\frac{\pi}{8}}.
Although the integrands do not decrease in magnitude as
`x \to \infty`, the integrals are convergent since the oscillation
rate increases (causing consecutive periods to asymptotically
cancel out). These integrals are virtually impossible to calculate
to any kind of accuracy using standard quadrature rules. However,
if one provides the correct asymptotic distribution of zeros
(`x_n \sim \sqrt{n}`), :func:`~mpmath.quadosc` works::
>>> mp.dps = 30
>>> f = lambda x: cos(x**2)
>>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
0.626657068657750125603941321203
>>> f = lambda x: sin(x**2)
>>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
0.626657068657750125603941321203
>>> sqrt(pi/8)
0.626657068657750125603941321203
(Interestingly, these integrals can still be evaluated if one
places some constant other than `\pi` under the square root.)
In general, if `f(x) \sim g(x) \cos(h(x))`, the zeros follow
the inverse-function distribution `h^{-1}(x)`::
>>> mp.dps = 15
>>> f = lambda x: sin(exp(x))
>>> quadosc(f, [1,inf], zeros=lambda n: log(n))
-0.25024394235267
>>> pi/2-si(e)
-0.250243942352671
**Non-alternating functions**
If the integrand oscillates around a positive value, without
alternating signs, the extrapolation might fail. A simple trick
that sometimes works is to multiply or divide the frequency by 2::
>>> f = lambda x: 1/x**2+sin(x)/x**4
>>> quadosc(f, [1,inf], omega=1) # Bad
1.28642190869861
>>> quadosc(f, [1,inf], omega=0.5) # Perfect
1.28652953559617
>>> 1+(cos(1)+ci(1)+sin(1))/6
1.28652953559617
**Fast decay**
:func:`~mpmath.quadosc` is primarily useful for slowly decaying
integrands. If the integrand decreases exponentially or faster,
:func:`~mpmath.quad` will likely handle it without trouble (and generally be
much faster than :func:`~mpmath.quadosc`)::
>>> quadosc(lambda x: cos(x)/exp(x), [0, inf], omega=1)
0.5
>>> quad(lambda x: cos(x)/exp(x), [0, inf])
0.5
"""
a, b = ctx._as_points(interval)
a = ctx.convert(a)
b = ctx.convert(b)
if [omega, period, zeros].count(None) != 2:
raise ValueError( \
"must specify exactly one of omega, period, zeros")
if a == ctx.ninf and b == ctx.inf:
s1 = ctx.quadosc(f, [a, 0], omega=omega, zeros=zeros, period=period)
s2 = ctx.quadosc(f, [0, b], omega=omega, zeros=zeros, period=period)
return s1 + s2
if a == ctx.ninf:
if zeros:
return ctx.quadosc(lambda x:f(-x), [-b,-a], zeros=lambda n: zeros(-n))
else:
return ctx.quadosc(lambda x:f(-x), [-b,-a], omega=omega, period=period)
if b != ctx.inf:
raise ValueError("quadosc requires an infinite integration interval")
if not zeros:
if omega:
period = 2*ctx.pi/omega
zeros = lambda n: n*period/2
#for n in range(1,10):
# p = zeros(n)
# if p > a:
# break
#if n >= 9:
# raise ValueError("zeros do not appear to be correctly indexed")
n = 1
s = ctx.quadgl(f, [a, zeros(n)])
def term(k):
return ctx.quadgl(f, [zeros(k), zeros(k+1)])
s += ctx.nsum(term, [n, ctx.inf])
return s
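# ---------------------------------------------------------------------
# Illustrative sketch (not part of mpmath): quadosc() above boils down to
# a head integral plus a half-period tail whose alternating terms nsum()
# can extrapolate. Compactly, for a zero-indexing function `zeros`:
def _quadosc_sketch(ctx, f, a, zeros):
    head = ctx.quadgl(f, [a, zeros(1)])
    tail = ctx.nsum(lambda k: ctx.quadgl(f, [zeros(k), zeros(k + 1)]),
                    [1, ctx.inf])
    return head + tail
# Hypothetical check: _quadosc_sketch(mp, lambda x: sin(3*x)/(x**2+1), 0,
# lambda n: pi*n/3) should agree with the quadosc() doctest value above.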
if __name__ == '__main__':
import doctest
doctest.testmod()
|
JensGrabner/mpmath
|
mpmath/calculus/quadrature.py
|
Python
|
bsd-3-clause
| 38,312
|
[
"Gaussian"
] |
30e971b37565594a2c27a7d0ea8d6866c37864de844b6345f3352c9a3965384c
|
""" The CountryMapping module performs the necessary CS gymnastics to resolve country codes """
__RCSID__ = "$Id$"
from DIRAC import gConfig, S_OK, S_ERROR
def getCountryMapping(country):
""" Determines the associated country from the country code"""
mappedCountries = [country]
while True:
mappedCountry = gConfig.getValue('/Resources/Countries/%s/AssignedTo' % country, country)
if mappedCountry == country:
break
elif mappedCountry in mappedCountries:
return S_ERROR('Circular mapping detected for %s' % country)
else:
country = mappedCountry
mappedCountries.append(mappedCountry)
return S_OK(mappedCountry)
def getCountryMappingTier1(country):
""" Returns the Tier1 site mapped to a country code """
res = getCountryMapping(country)
if not res['OK']:
return res
mappedCountry = res['Value']
tier1 = gConfig.getValue('/Resources/Countries/%s/Tier1' % mappedCountry, '')
if not tier1:
return S_ERROR("No Tier1 assigned to %s" % mappedCountry)
return S_OK(tier1)
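if __name__ == '__main__':
  # Minimal manual check (assumes a reachable DIRAC Configuration Service;
  # 'ie' is a hypothetical country code):
  result = getCountryMappingTier1( 'ie' )
  print( result['Value'] if result['OK'] else result['Message'] )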
|
fstagni/DIRAC
|
Core/Utilities/CountryMapping.py
|
Python
|
gpl-3.0
| 1,042
|
[
"DIRAC"
] |
375dd289e353ac1b69ed7d6aa869df9e0869727251b3c596dd27b00e5282024d
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides templates which allow variable sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import traceback
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.deprecation import deprecated
__all__ = ["make_template"]
def make_template(name_, func_, create_scope_now_=False, unique_name_=None,
custom_getter_=None, **kwargs):
"""Given an arbitrary function, wrap it so that it does variable sharing.
This wraps `func_` in a Template and partially evaluates it. Templates are
functions that create variables the first time they are called and reuse them
thereafter. In order for `func_` to be compatible with a `Template` it must
have the following properties:
* The function should create all trainable variables and any variables that
should be reused by calling `tf.get_variable`. If a trainable variable is
created using `tf.Variable`, then a ValueError will be thrown. Variables
that are intended to be locals can be created by specifying
`tf.Variable(..., trainable=false)`.
* The function may use variable scopes and other templates internally to
create and reuse variables, but it shouldn't use `tf.global_variables` to
capture variables that are defined outside of the scope of the function.
* Internal scopes and variable names should not depend on any arguments that
are not supplied to `make_template`. In general you will get a ValueError
telling you that you are trying to reuse a variable that doesn't exist
if you make a mistake.
In the following example, both `z` and `w` will be scaled by the same `y`. It
is important to note that if we didn't assign `scalar_name` and used a
different name for z and w that a `ValueError` would be thrown because it
couldn't reuse the variable.
```python
def my_op(x, scalar_name):
var1 = tf.get_variable(scalar_name,
shape=[],
initializer=tf.constant_initializer(1))
return x * var1
scale_by_y = tf.make_template('scale_by_y', my_op, scalar_name='y')
z = scale_by_y(input1)
w = scale_by_y(input2)
```
As a safe-guard, the returned function will raise a `ValueError` after the
first call if trainable variables are created by calling `tf.Variable`.
If all of these are true, then 2 properties are enforced by the template:
1. Calling the same template multiple times will share all non-local
variables.
2. Two different templates are guaranteed to be unique, unless you reenter the
same variable scope as the initial definition of a template and redefine
it. An examples of this exception:
```python
def my_op(x, scalar_name):
var1 = tf.get_variable(scalar_name,
shape=[],
initializer=tf.constant_initializer(1))
return x * var1
with tf.variable_scope('scope') as vs:
scale_by_y = tf.make_template('scale_by_y', my_op, scalar_name='y')
z = scale_by_y(input1)
w = scale_by_y(input2)
# Creates a template that reuses the variables above.
with tf.variable_scope(vs, reuse=True):
scale_by_y2 = tf.make_template('scale_by_y', my_op, scalar_name='y')
z2 = scale_by_y2(input1)
w2 = scale_by_y2(input2)
```
Depending on the value of `create_scope_now_`, the full variable scope may be
captured either at the time of first call or at the time of construction. If
this option is set to True, then all Tensors created by repeated calls to the
template will carry an extra trailing `_N+1` in their names, because the first
time the scope is entered (in the Template constructor) no Tensors are created.
Note: `name_`, `func_` and `create_scope_now_` have a trailing underscore to
reduce the likelihood of collisions with kwargs.
Args:
name_: A name for the scope created by this template. If necessary, the name
will be made unique by appending `_N` to the name.
func_: The function to wrap.
create_scope_now_: Boolean controlling whether the scope should be created
when the template is constructed or when the template is called. Default
is False, meaning the scope is created when the template is called.
unique_name_: When used, it overrides name_ and is not made unique. If a
template of the same scope/unique_name already exists and reuse is false,
an error is raised. Defaults to None.
custom_getter_: Optional custom getter for variables used in `func_`. See
the @{tf.get_variable} `custom_getter` documentation for
more information.
**kwargs: Keyword arguments to apply to `func_`.
Returns:
A function to encapsulate a set of variables which should be created once
and reused. An enclosing scope will be created, either where `make_template`
is called, or wherever the result is called, depending on the value of
`create_scope_now_`. Regardless of the value, the first time the template
is called it will enter the scope with no reuse, and call `func_` to create
variables, which are guaranteed to be unique. All subsequent calls will
re-enter the scope and reuse those variables.
Raises:
ValueError: if the name is None.
"""
if kwargs:
func_ = functools.partial(func_, **kwargs)
return Template(
name_, func_, create_scope_now=create_scope_now_,
unique_name=unique_name_, custom_getter=custom_getter_)
def _skip_common_stack_elements(stacktrace, base_case):
"""Skips items that the target stacktrace shares with the base stacktrace."""
for i, (trace, base) in enumerate(zip(stacktrace, base_case)):
if trace != base:
return stacktrace[i:]
return stacktrace[-1:]
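# A concrete view of the helper above with plain lists (hypothetical demo,
# not used by TensorFlow itself): the shared prefix is dropped, everything
# from the first divergence onward is kept.
#   _skip_common_stack_elements(['a', 'b', 'x', 'y'], ['a', 'b', 'c'])
#   => ['x', 'y']
#   _skip_common_stack_elements(['a', 'b'], ['a', 'b'])
#   => ['b']   # identical traces collapse to the last frame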
class Template(object):
"""Wrap a function to aid in variable sharing.
Templates are functions that create variables the first time they are called
and reuse them thereafter. See `make_template` for full documentation.
Note: By default, the full variable scope is captured at the time of first
call. If `create_scope_now_` is passed as True to the constructor, the full
scope will be captured there, but no variables will created until the first
call.
"""
def __init__(self, name, func, create_scope_now=False, unique_name=None,
custom_getter=None):
"""Creates a template for the given function.
Args:
name: A name for the scope created by this template. The
name will be made unique by appending `_N` to it (see how
`tf.variable_scope` treats the `default_name` for details).
func: The function to apply each time.
create_scope_now: Whether to create the scope at Template construction
time, rather than first call. Defaults to false. Creating the scope at
construction time may be more convenient if the template is to be passed
through much lower level code, and you want to be sure of the scope
name without knowing exactly where it will be first called. If set to
True, the scope will be created in the constructor, and all subsequent
times in __call__, leading to a trailing numeral being added to the
names of all created Tensors. If set to False, the scope will be created
at the first call location.
unique_name: When used, it overrides name_ and is not made unique. If a
template of the same scope/unique_name already exists and reuse is
false, an error is raised. Defaults to None.
custom_getter: optional custom getter to pass to variable_scope()
Raises:
ValueError: if the name is None.
"""
self._func = func
self._stacktrace = traceback.format_stack()[:-2]
self._name = name
self._unique_name = unique_name
self._custom_getter = custom_getter
if name is None:
raise ValueError("name cannot be None.")
if create_scope_now:
with variable_scope._pure_variable_scope( # pylint:disable=protected-access
(self._unique_name or
variable_scope._get_unique_variable_scope(self._name)), # pylint:disable=protected-access
custom_getter=self._custom_getter) as vs:
self._variable_scope = vs
else:
self._variable_scope = None
# This variable keeps track of whether the template has been called yet,
# which is not the same as whether the scope has been created.
self._variables_created = False
def _call_func(self, args, kwargs, check_for_new_variables):
try:
vars_at_start = len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
trainable_at_start = len(
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
result = self._func(*args, **kwargs)
if check_for_new_variables:
trainable_variables = ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
# If a variable that we intend to train is created as a side effect
# of creating a template, then that is almost certainly an error.
if trainable_at_start != len(trainable_variables):
raise ValueError("Trainable variable created when calling a template "
"after the first time, perhaps you used tf.Variable "
"when you meant tf.get_variable: %s" %
(trainable_variables[trainable_at_start:],))
# Non-trainable tracking variables are a legitimate reason why a new
# variable would be created, but it is a relatively advanced use-case,
# so log it.
variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
if vars_at_start != len(variables):
logging.info("New variables created when calling a template after "
"the first time, perhaps you used tf.Variable when you "
"meant tf.get_variable: %s",
variables[vars_at_start:])
return result
except Exception as exc:
# Reraise the exception, but append the original definition to the
# trace.
args = exc.args
if not args:
arg0 = ""
else:
arg0 = args[0]
trace = "".join(_skip_common_stack_elements(self._stacktrace,
traceback.format_stack()))
arg0 = "%s\n\noriginally defined at:\n%s" % (arg0, trace)
new_args = [arg0]
new_args.extend(args[1:])
exc.args = tuple(new_args)
raise
def __call__(self, *args, **kwargs):
if self._variable_scope:
if self._variables_created:
# This is not the first visit to __call__, so variables have already
# been created, and we want to reuse them.
with variable_scope.variable_scope(self._variable_scope, reuse=True):
return self._call_func(args, kwargs, check_for_new_variables=True)
else:
# This is the first visit to __call__, but the scope has already been
# created in the constructor. Set _variables_created after the inner
# function is successfully called so that subsequent calls take the if
# branch above.
with variable_scope.variable_scope(self._variable_scope):
result = self._call_func(args, kwargs, check_for_new_variables=False)
self._variables_created = True
return result
else:
# The scope was not created at construction time, so create it here.
# Subsequent calls should reuse variables.
with variable_scope.variable_scope(
self._unique_name, self._name,
custom_getter=self._custom_getter) as vs:
self._variable_scope = vs
result = self._call_func(args, kwargs, check_for_new_variables=False)
self._variables_created = True
return result
@property
def name(self):
"""Returns the name given to this Template."""
return self._name
@property
def func(self):
"""Returns the func given to this Template."""
return self._func
@property
def variable_scope(self):
"""Returns the variable scope object created by this Template."""
return self._variable_scope
@property
def variable_scope_name(self):
"""Returns the variable scope name created by this Template."""
if self._variable_scope:
name = self._variable_scope.name
# To prevent partial matches on the scope_name, we add '/' at the end.
return name if name[-1] == "/" else name + "/"
@property
def trainable_variables(self):
"""Returns the list of trainable variables created by the Template."""
if self._variables_created:
return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES,
self.variable_scope_name)
else:
return []
@property
def global_variables(self):
"""Returns the list of global variables created by the Template."""
if self._variables_created:
return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
self.variable_scope_name)
else:
return []
@property
def local_variables(self):
"""Returns the list of global variables created by the Template."""
if self._variables_created:
return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES,
self.variable_scope_name)
else:
return []
@property
@deprecated(
"2017-02-21", "The .var_scope property is deprecated. Please change your "
"code to use the .variable_scope property")
def var_scope(self):
"""Returns the variable scope object created by this Template."""
return self._variable_scope
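# Illustrative use of the machinery above (graph-mode sketch; `input1` and
# `input2` stand in for real tensors and are not defined here):
#
#   import tensorflow as tf
#
#   def scale(x):
#     w = tf.get_variable('w', shape=[], initializer=tf.ones_initializer())
#     return x * w
#
#   scale_by_w = tf.make_template('scale', scale)
#   y1 = scale_by_w(input1)   # first call creates 'scale/w'
#   y2 = scale_by_w(input2)   # second call reuses 'scale/w'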
|
dyoung418/tensorflow
|
tensorflow/python/ops/template.py
|
Python
|
apache-2.0
| 14,441
|
[
"VisIt"
] |
c282ad847120309f5fe3d063708ade118245f729a7e887d8f4e7d1c2cab1c84c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module is used for converting data from pkl format to raw.
"""
import argparse
import numpy as np
import misc
import qmisc
def main():
parser = argparse.ArgumentParser(description=__doc__) # 'Simple VTK Viewer')
parser.add_argument('-i', '--inputfile', default=None,
                    help='File as .pkl')
parser.add_argument('-o', '--outputfile', default=None,
                    help='Output file. Filetype is given by extension.')
parser.add_argument('-k', '--key', default='data3d',
                    help='Which key should be written to output file. \
Default is "data3d". You can use "segmentation"')
args = parser.parse_args()
data = misc.obj_from_file(args.inputfile, filetype='pickle')
data3d_uncrop = qmisc.uncrop(data[args.key], data['crinfo'], data['orig_shape'])
#import ipdb; ipdb.set_trace() # BREAKPOINT
import SimpleITK as sitk
sitk_img = sitk.GetImageFromArray(data3d_uncrop.astype(np.uint16), isVector=True)
sitk.WriteImage(sitk_img, args.outputfile)
print("Warning: .mhd and .raw format has corupted metadta. You can edit it manually.")
if __name__ == "__main__":
main()
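# Example invocation (hypothetical file names; the key defaults to "data3d"):
#   python convert_pkl.py -i liver_crop.pkl -o liver_crop.mhd -k segmentation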
|
mjirik/lisa
|
lisa/convert_pkl.py
|
Python
|
bsd-3-clause
| 1,231
|
[
"VTK"
] |
4563b77e05fc263b343085acbcde9d198a4cdc0d9eb56337c1cdcccb37e9df12
|
#!/usr/bin/env python
#pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import chigger
reader = chigger.exodus.ExodusReader('../input/mug_blocks_out.e')
mug = chigger.exodus.ExodusResult(reader, variable='diffused', cmap='viridis')
cbar = chigger.exodus.ExodusColorBar(mug)
window = chigger.RenderWindow(mug, cbar, size=[300,300], test=True)
# Render the results and write a file
for i in range(2):
reader.setOptions(timestep=i)
window.write('none_' + str(i) + '.png')
window.start()
|
liuwenf/moose
|
python/chigger/tests/range/none.py
|
Python
|
lgpl-2.1
| 1,336
|
[
"MOOSE"
] |
88f93906a6386166a7cbf702acfbcaf0d239801ea4616a2acff29cd281efdb6e
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyscf.geomopt.addons import as_pyscf_method
def optimize(method, *args, **kwargs):
    # imported here so the geometry-optimizer dependency is only needed
    # when optimize() is actually called
    from pyscf.pbc.geomopt import geometric_solver as geom
    return geom.optimize(method, *args, **kwargs)
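# Hypothetical usage (mean-field construction omitted; `mf` stands in for
# a periodic SCF object built on a Cell):
#   cell_opt = optimize(mf)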
|
sunqm/pyscf
|
pyscf/pbc/geomopt/__init__.py
|
Python
|
apache-2.0
| 871
|
[
"PySCF"
] |
c93200da92497eeb6d017fac3bc398a4acbbf9f742882ea90e7bf40fb2ff9566
|
"""
The B{0install show} command-line interface.
"""
# Copyright (C) 2012, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
from zeroinstall import _
from zeroinstall.cmd import select, UsageError
from zeroinstall.injector import qdom, selections
syntax = "APP | SELECTIONS"
def add_options(parser):
parser.add_option("-r", "--root-uri", help=_("display just the root interface URI"), action='store_true')
parser.add_option("", "--xml", help=_("print selections as XML"), action='store_true')
def handle(config, options, args):
if len(args) != 1:
raise UsageError()
app = config.app_mgr.lookup_app(args[0], missing_ok = True)
if app is not None:
sels = app.get_selections()
r = app.get_requirements()
if r.extra_restrictions and not options.xml:
print("User-provided restrictions in force:")
for uri, expr in r.extra_restrictions.items():
print(" {uri}: {expr}".format(uri = uri, expr = expr))
print()
else:
with open(args[0], 'rb') as stream:
sels = selections.Selections(qdom.parse(stream))
if options.root_uri:
print(sels.interface)
elif options.xml:
select.show_xml(sels)
else:
select.show_human(sels, config.stores)
def complete(completion, args, cword):
if len(args) != 1: return
completion.expand_apps()
completion.expand_files()
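# Typical invocations of this subcommand (hypothetical URI/file names):
#   0install show http://example.com/prog.xml        # human-readable
#   0install show my-app.selections --xml            # raw selections XML
#   0install show my-app.selections --root-uri       # just the root URI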
|
dsqmoore/0install
|
zeroinstall/cmd/show.py
|
Python
|
lgpl-2.1
| 1,362
|
[
"VisIt"
] |
41ae820b59c25f933a5af3197a274bbba48000a451abb59b52165f5621187b64
|
# $HeadURL$
"""
SystemLoggingHandler is the implementation of the Logging service
in the DISET framework
The following methods are available in the Service interface
addMessages()
"""
__RCSID__ = "$Id$"
from types import ListType, StringTypes
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.FrameworkSystem.private.logging.Message import tupleToMessage
from DIRAC.FrameworkSystem.DB.SystemLoggingDB import SystemLoggingDB
# This is a global instance of the SystemLoggingDB class
gLogDB = False
def initializeSystemLoggingHandler( serviceInfo ):
""" Check that we can connect to the DB and that the tables are properly created or updated
"""
global gLogDB
gLogDB = SystemLoggingDB()
res = gLogDB._connect()
if not res['OK']:
return res
res = gLogDB._checkTable()
if not res['OK'] and not res['Message'] == 'The requested table already exist':
return res
return S_OK()
class SystemLoggingHandler( RequestHandler ):
""" This is server
"""
def __addMessage( self, messageObject, site, nodeFQDN ):
""" This is the function that actually adds the Message to
the log Database
"""
credentials = self.getRemoteCredentials()
userDN = credentials.get( 'DN', 'unknown' )
userGroup = credentials.get( 'group', 'unknown' )
remoteAddress = self.getRemoteAddress()[0]
return gLogDB.insertMessage( messageObject, site, nodeFQDN, userDN, userGroup, remoteAddress )
types_addMessages = [ ListType, StringTypes, StringTypes ]
#A normal exported function (begins with export_)
def export_addMessages( self, messagesList, site, nodeFQDN ):
""" This is the interface to the service
inputs:
msgList contains a list of Message Objects.
outputs:
S_OK if no exception was raised
S_ERROR if an exception was raised
"""
for messageTuple in messagesList:
messageObject = tupleToMessage( messageTuple )
result = self.__addMessage( messageObject, site, nodeFQDN )
if not result['OK']:
gLogger.error( 'The Log Message could not be inserted into the DB',
'because: "%s"' % result['Message'] )
return S_ERROR( result['Message'] )
return S_OK()
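# Client-side sketch (assumes a running Framework/SystemLogging service;
# messageTuples, siteName and nodeFQDN are placeholders):
#
#   from DIRAC.Core.DISET.RPCClient import RPCClient
#   logClient = RPCClient( 'Framework/SystemLogging' )
#   result = logClient.addMessages( messageTuples, siteName, nodeFQDN )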
|
avedaee/DIRAC
|
FrameworkSystem/Service/SystemLoggingHandler.py
|
Python
|
gpl-3.0
| 2,500
|
[
"DIRAC"
] |
50123aecf9929570c5b3ca2074930ed88bc5fab46bb90aff30c366d269fb2553
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import subprocess
from TestHarnessTestCase import TestHarnessTestCase
class TestHarnessTester(TestHarnessTestCase):
def testMissingGold(self):
"""
Test for Missing Gold file
"""
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.runTests('-i', 'missing_gold')
e = cm.exception
self.assertRegexpMatches(e.output.decode('utf-8'), 'test_harness\.exodiff.*?FAILED \(MISSING GOLD FILE\)')
self.assertRegexpMatches(e.output.decode('utf-8'), 'test_harness\.csvdiff.*?FAILED \(MISSING GOLD FILE\)')
# Verify return code is a general failure related (0x80)
self.assertIs(0x80, e.returncode)
|
nuclear-wizard/moose
|
python/TestHarness/tests/test_MissingGold.py
|
Python
|
lgpl-2.1
| 997
|
[
"MOOSE"
] |
92639b0657f54fc60541329006186b5f5989ffcc65366e07cd8239c88e44f942
|
"""Minimal Python 2 & 3 shim around all Qt bindings
DOCUMENTATION
Qt.py was born in the film and visual effects industry to address
the growing need for the development of software capable of running
with more than one flavour of the Qt bindings for Python - PySide,
PySide2, PyQt4 and PyQt5.
1. Build for one, run with all
2. Explicit is better than implicit
3. Support co-existence
Default resolution order:
- PySide2
- PyQt5
- PySide
- PyQt4
Usage:
>> import sys
>> from Qt import QtWidgets
>> app = QtWidgets.QApplication(sys.argv)
>> button = QtWidgets.QPushButton("Hello World")
>> button.show()
>> app.exec_()
All members of PySide2 are mapped from other bindings, should they exist.
If no equivalent member exists, it is excluded from Qt.py and inaccessible.
The idea is to highlight members that exist across all supported bindings,
and guarantee that code that runs on one binding runs on all others.
For more details, visit https://github.com/mottosso/Qt.py
LICENSE
See end of file for license (MIT, BSD) information.
"""
import os
import sys
import types
import shutil
import importlib
import json
__version__ = "1.3.5"
# Enable support for `from Qt import *`
__all__ = []
# Flags from environment variables
QT_VERBOSE = bool(os.getenv("QT_VERBOSE"))
QT_PREFERRED_BINDING_JSON = os.getenv("QT_PREFERRED_BINDING_JSON", "")
QT_PREFERRED_BINDING = os.getenv("QT_PREFERRED_BINDING", "")
QT_SIP_API_HINT = os.getenv("QT_SIP_API_HINT")
# Reference to Qt.py
Qt = sys.modules[__name__]
Qt.QtCompat = types.ModuleType("QtCompat")
try:
long
except NameError:
# Python 3 compatibility
long = int
"""Common members of all bindings
This is where each member of Qt.py is explicitly defined.
It is based on a "lowest common denominator" of all bindings;
including members found in each of the 4 bindings.
The "_common_members" dictionary is generated using the
build_membership.sh script.
"""
_common_members = {
"QtCore": [
"QAbstractAnimation",
"QAbstractEventDispatcher",
"QAbstractItemModel",
"QAbstractListModel",
"QAbstractState",
"QAbstractTableModel",
"QAbstractTransition",
"QAnimationGroup",
"QBasicTimer",
"QBitArray",
"QBuffer",
"QByteArray",
"QByteArrayMatcher",
"QChildEvent",
"QCoreApplication",
"QCryptographicHash",
"QDataStream",
"QDate",
"QDateTime",
"QDir",
"QDirIterator",
"QDynamicPropertyChangeEvent",
"QEasingCurve",
"QElapsedTimer",
"QEvent",
"QEventLoop",
"QEventTransition",
"QFile",
"QFileInfo",
"QFileSystemWatcher",
"QFinalState",
"QGenericArgument",
"QGenericReturnArgument",
"QHistoryState",
"QItemSelectionRange",
"QIODevice",
"QLibraryInfo",
"QLine",
"QLineF",
"QLocale",
"QMargins",
"QMetaClassInfo",
"QMetaEnum",
"QMetaMethod",
"QMetaObject",
"QMetaProperty",
"QMimeData",
"QModelIndex",
"QMutex",
"QMutexLocker",
"QObject",
"QParallelAnimationGroup",
"QPauseAnimation",
"QPersistentModelIndex",
"QPluginLoader",
"QPoint",
"QPointF",
"QProcess",
"QProcessEnvironment",
"QPropertyAnimation",
"QReadLocker",
"QReadWriteLock",
"QRect",
"QRectF",
"QRegExp",
"QResource",
"QRunnable",
"QSemaphore",
"QSequentialAnimationGroup",
"QSettings",
"QSignalMapper",
"QSignalTransition",
"QSize",
"QSizeF",
"QSocketNotifier",
"QState",
"QStateMachine",
"QSysInfo",
"QSystemSemaphore",
"QT_TRANSLATE_NOOP",
"QT_TR_NOOP",
"QT_TR_NOOP_UTF8",
"QTemporaryFile",
"QTextBoundaryFinder",
"QTextCodec",
"QTextDecoder",
"QTextEncoder",
"QTextStream",
"QTextStreamManipulator",
"QThread",
"QThreadPool",
"QTime",
"QTimeLine",
"QTimer",
"QTimerEvent",
"QTranslator",
"QUrl",
"QVariantAnimation",
"QWaitCondition",
"QWriteLocker",
"QXmlStreamAttribute",
"QXmlStreamAttributes",
"QXmlStreamEntityDeclaration",
"QXmlStreamEntityResolver",
"QXmlStreamNamespaceDeclaration",
"QXmlStreamNotationDeclaration",
"QXmlStreamReader",
"QXmlStreamWriter",
"Qt",
"QtCriticalMsg",
"QtDebugMsg",
"QtFatalMsg",
"QtMsgType",
"QtSystemMsg",
"QtWarningMsg",
"qAbs",
"qAddPostRoutine",
"qChecksum",
"qCritical",
"qDebug",
"qFatal",
"qFuzzyCompare",
"qIsFinite",
"qIsInf",
"qIsNaN",
"qIsNull",
"qRegisterResourceData",
"qUnregisterResourceData",
"qVersion",
"qWarning",
"qrand",
"qsrand"
],
"QtGui": [
"QAbstractTextDocumentLayout",
"QActionEvent",
"QBitmap",
"QBrush",
"QClipboard",
"QCloseEvent",
"QColor",
"QConicalGradient",
"QContextMenuEvent",
"QCursor",
"QDesktopServices",
"QDoubleValidator",
"QDrag",
"QDragEnterEvent",
"QDragLeaveEvent",
"QDragMoveEvent",
"QDropEvent",
"QFileOpenEvent",
"QFocusEvent",
"QFont",
"QFontDatabase",
"QFontInfo",
"QFontMetrics",
"QFontMetricsF",
"QGradient",
"QHelpEvent",
"QHideEvent",
"QHoverEvent",
"QIcon",
"QIconDragEvent",
"QIconEngine",
"QImage",
"QImageIOHandler",
"QImageReader",
"QImageWriter",
"QInputEvent",
"QInputMethodEvent",
"QIntValidator",
"QKeyEvent",
"QKeySequence",
"QLinearGradient",
"QMatrix2x2",
"QMatrix2x3",
"QMatrix2x4",
"QMatrix3x2",
"QMatrix3x3",
"QMatrix3x4",
"QMatrix4x2",
"QMatrix4x3",
"QMatrix4x4",
"QMouseEvent",
"QMoveEvent",
"QMovie",
"QPaintDevice",
"QPaintEngine",
"QPaintEngineState",
"QPaintEvent",
"QPainter",
"QPainterPath",
"QPainterPathStroker",
"QPalette",
"QPen",
"QPicture",
"QPictureIO",
"QPixmap",
"QPixmapCache",
"QPolygon",
"QPolygonF",
"QQuaternion",
"QRadialGradient",
"QRegExpValidator",
"QRegion",
"QResizeEvent",
"QSessionManager",
"QShortcutEvent",
"QShowEvent",
"QStandardItem",
"QStandardItemModel",
"QStatusTipEvent",
"QSyntaxHighlighter",
"QTabletEvent",
"QTextBlock",
"QTextBlockFormat",
"QTextBlockGroup",
"QTextBlockUserData",
"QTextCharFormat",
"QTextCursor",
"QTextDocument",
"QTextDocumentFragment",
"QTextFormat",
"QTextFragment",
"QTextFrame",
"QTextFrameFormat",
"QTextImageFormat",
"QTextInlineObject",
"QTextItem",
"QTextLayout",
"QTextLength",
"QTextLine",
"QTextList",
"QTextListFormat",
"QTextObject",
"QTextObjectInterface",
"QTextOption",
"QTextTable",
"QTextTableCell",
"QTextTableCellFormat",
"QTextTableFormat",
"QTouchEvent",
"QTransform",
"QValidator",
"QVector2D",
"QVector3D",
"QVector4D",
"QWhatsThisClickedEvent",
"QWheelEvent",
"QWindowStateChangeEvent",
"qAlpha",
"qBlue",
"qGray",
"qGreen",
"qIsGray",
"qRed",
"qRgb",
"qRgba"
],
"QtHelp": [
"QHelpContentItem",
"QHelpContentModel",
"QHelpContentWidget",
"QHelpEngine",
"QHelpEngineCore",
"QHelpIndexModel",
"QHelpIndexWidget",
"QHelpSearchEngine",
"QHelpSearchQuery",
"QHelpSearchQueryWidget",
"QHelpSearchResultWidget"
],
"QtMultimedia": [
"QAbstractVideoBuffer",
"QAbstractVideoSurface",
"QAudio",
"QAudioDeviceInfo",
"QAudioFormat",
"QAudioInput",
"QAudioOutput",
"QVideoFrame",
"QVideoSurfaceFormat"
],
"QtNetwork": [
"QAbstractNetworkCache",
"QAbstractSocket",
"QAuthenticator",
"QHostAddress",
"QHostInfo",
"QLocalServer",
"QLocalSocket",
"QNetworkAccessManager",
"QNetworkAddressEntry",
"QNetworkCacheMetaData",
"QNetworkConfiguration",
"QNetworkConfigurationManager",
"QNetworkCookie",
"QNetworkCookieJar",
"QNetworkDiskCache",
"QNetworkInterface",
"QNetworkProxy",
"QNetworkProxyFactory",
"QNetworkProxyQuery",
"QNetworkReply",
"QNetworkRequest",
"QNetworkSession",
"QSsl",
"QTcpServer",
"QTcpSocket",
"QUdpSocket"
],
"QtOpenGL": [
"QGL",
"QGLContext",
"QGLFormat",
"QGLWidget"
],
"QtPrintSupport": [
"QAbstractPrintDialog",
"QPageSetupDialog",
"QPrintDialog",
"QPrintEngine",
"QPrintPreviewDialog",
"QPrintPreviewWidget",
"QPrinter",
"QPrinterInfo"
],
"QtSql": [
"QSql",
"QSqlDatabase",
"QSqlDriver",
"QSqlDriverCreatorBase",
"QSqlError",
"QSqlField",
"QSqlIndex",
"QSqlQuery",
"QSqlQueryModel",
"QSqlRecord",
"QSqlRelation",
"QSqlRelationalDelegate",
"QSqlRelationalTableModel",
"QSqlResult",
"QSqlTableModel"
],
"QtSvg": [
"QGraphicsSvgItem",
"QSvgGenerator",
"QSvgRenderer",
"QSvgWidget"
],
"QtTest": [
"QTest"
],
"QtWidgets": [
"QAbstractButton",
"QAbstractGraphicsShapeItem",
"QAbstractItemDelegate",
"QAbstractItemView",
"QAbstractScrollArea",
"QAbstractSlider",
"QAbstractSpinBox",
"QAction",
"QActionGroup",
"QApplication",
"QBoxLayout",
"QButtonGroup",
"QCalendarWidget",
"QCheckBox",
"QColorDialog",
"QColumnView",
"QComboBox",
"QCommandLinkButton",
"QCommonStyle",
"QCompleter",
"QDataWidgetMapper",
"QDateEdit",
"QDateTimeEdit",
"QDesktopWidget",
"QDial",
"QDialog",
"QDialogButtonBox",
"QDirModel",
"QDockWidget",
"QDoubleSpinBox",
"QErrorMessage",
"QFileDialog",
"QFileIconProvider",
"QFileSystemModel",
"QFocusFrame",
"QFontComboBox",
"QFontDialog",
"QFormLayout",
"QFrame",
"QGesture",
"QGestureEvent",
"QGestureRecognizer",
"QGraphicsAnchor",
"QGraphicsAnchorLayout",
"QGraphicsBlurEffect",
"QGraphicsColorizeEffect",
"QGraphicsDropShadowEffect",
"QGraphicsEffect",
"QGraphicsEllipseItem",
"QGraphicsGridLayout",
"QGraphicsItem",
"QGraphicsItemGroup",
"QGraphicsLayout",
"QGraphicsLayoutItem",
"QGraphicsLineItem",
"QGraphicsLinearLayout",
"QGraphicsObject",
"QGraphicsOpacityEffect",
"QGraphicsPathItem",
"QGraphicsPixmapItem",
"QGraphicsPolygonItem",
"QGraphicsProxyWidget",
"QGraphicsRectItem",
"QGraphicsRotation",
"QGraphicsScale",
"QGraphicsScene",
"QGraphicsSceneContextMenuEvent",
"QGraphicsSceneDragDropEvent",
"QGraphicsSceneEvent",
"QGraphicsSceneHelpEvent",
"QGraphicsSceneHoverEvent",
"QGraphicsSceneMouseEvent",
"QGraphicsSceneMoveEvent",
"QGraphicsSceneResizeEvent",
"QGraphicsSceneWheelEvent",
"QGraphicsSimpleTextItem",
"QGraphicsTextItem",
"QGraphicsTransform",
"QGraphicsView",
"QGraphicsWidget",
"QGridLayout",
"QGroupBox",
"QHBoxLayout",
"QHeaderView",
"QInputDialog",
"QItemDelegate",
"QItemEditorCreatorBase",
"QItemEditorFactory",
"QKeyEventTransition",
"QLCDNumber",
"QLabel",
"QLayout",
"QLayoutItem",
"QLineEdit",
"QListView",
"QListWidget",
"QListWidgetItem",
"QMainWindow",
"QMdiArea",
"QMdiSubWindow",
"QMenu",
"QMenuBar",
"QMessageBox",
"QMouseEventTransition",
"QPanGesture",
"QPinchGesture",
"QPlainTextDocumentLayout",
"QPlainTextEdit",
"QProgressBar",
"QProgressDialog",
"QPushButton",
"QRadioButton",
"QRubberBand",
"QScrollArea",
"QScrollBar",
"QShortcut",
"QSizeGrip",
"QSizePolicy",
"QSlider",
"QSpacerItem",
"QSpinBox",
"QSplashScreen",
"QSplitter",
"QSplitterHandle",
"QStackedLayout",
"QStackedWidget",
"QStatusBar",
"QStyle",
"QStyleFactory",
"QStyleHintReturn",
"QStyleHintReturnMask",
"QStyleHintReturnVariant",
"QStyleOption",
"QStyleOptionButton",
"QStyleOptionComboBox",
"QStyleOptionComplex",
"QStyleOptionDockWidget",
"QStyleOptionFocusRect",
"QStyleOptionFrame",
"QStyleOptionGraphicsItem",
"QStyleOptionGroupBox",
"QStyleOptionHeader",
"QStyleOptionMenuItem",
"QStyleOptionProgressBar",
"QStyleOptionRubberBand",
"QStyleOptionSizeGrip",
"QStyleOptionSlider",
"QStyleOptionSpinBox",
"QStyleOptionTab",
"QStyleOptionTabBarBase",
"QStyleOptionTabWidgetFrame",
"QStyleOptionTitleBar",
"QStyleOptionToolBar",
"QStyleOptionToolBox",
"QStyleOptionToolButton",
"QStyleOptionViewItem",
"QStylePainter",
"QStyledItemDelegate",
"QSwipeGesture",
"QSystemTrayIcon",
"QTabBar",
"QTabWidget",
"QTableView",
"QTableWidget",
"QTableWidgetItem",
"QTableWidgetSelectionRange",
"QTapAndHoldGesture",
"QTapGesture",
"QTextBrowser",
"QTextEdit",
"QTimeEdit",
"QToolBar",
"QToolBox",
"QToolButton",
"QToolTip",
"QTreeView",
"QTreeWidget",
"QTreeWidgetItem",
"QTreeWidgetItemIterator",
"QUndoCommand",
"QUndoGroup",
"QUndoStack",
"QUndoView",
"QVBoxLayout",
"QWhatsThis",
"QWidget",
"QWidgetAction",
"QWidgetItem",
"QWizard",
"QWizardPage"
],
"QtX11Extras": [
"QX11Info"
],
"QtXml": [
"QDomAttr",
"QDomCDATASection",
"QDomCharacterData",
"QDomComment",
"QDomDocument",
"QDomDocumentFragment",
"QDomDocumentType",
"QDomElement",
"QDomEntity",
"QDomEntityReference",
"QDomImplementation",
"QDomNamedNodeMap",
"QDomNode",
"QDomNodeList",
"QDomNotation",
"QDomProcessingInstruction",
"QDomText",
"QXmlAttributes",
"QXmlContentHandler",
"QXmlDTDHandler",
"QXmlDeclHandler",
"QXmlDefaultHandler",
"QXmlEntityResolver",
"QXmlErrorHandler",
"QXmlInputSource",
"QXmlLexicalHandler",
"QXmlLocator",
"QXmlNamespaceSupport",
"QXmlParseException",
"QXmlReader",
"QXmlSimpleReader"
],
"QtXmlPatterns": [
"QAbstractMessageHandler",
"QAbstractUriResolver",
"QAbstractXmlNodeModel",
"QAbstractXmlReceiver",
"QSourceLocation",
"QXmlFormatter",
"QXmlItem",
"QXmlName",
"QXmlNamePool",
"QXmlNodeModelIndex",
"QXmlQuery",
"QXmlResultItems",
"QXmlSchema",
"QXmlSchemaValidator",
"QXmlSerializer"
]
}
""" Missing members
This mapping describes members that have been deprecated
in one or more bindings and have been left out of the
_common_members mapping.
The member can provide an extra details string to be
included in exceptions and warnings.
"""
_missing_members = {
"QtGui": {
"QMatrix": "Deprecated in PyQt5",
},
}
def _qInstallMessageHandler(handler):
"""Install a message handler that works in all bindings
Args:
handler: A function that takes 3 arguments, or None
"""
def messageOutputHandler(*args):
# In Qt4 bindings, message handlers are passed 2 arguments
# In Qt5 bindings, message handlers are passed 3 arguments
# The first argument is a QtMsgType
# The last argument is the message to be printed
# The middle argument (if passed) is a QMessageLogContext
if len(args) == 3:
msgType, logContext, msg = args
elif len(args) == 2:
msgType, msg = args
logContext = None
else:
raise TypeError(
"handler expected 2 or 3 arguments, got {0}".format(len(args)))
if isinstance(msg, bytes):
# In python 3, some bindings pass a bytestring, which cannot be
# used elsewhere. Decoding a python 2 or 3 bytestring object will
# consistently return a unicode object.
msg = msg.decode()
handler(msgType, logContext, msg)
passObject = messageOutputHandler if handler else handler
if Qt.IsPySide or Qt.IsPyQt4:
return Qt._QtCore.qInstallMsgHandler(passObject)
elif Qt.IsPySide2 or Qt.IsPyQt5:
return Qt._QtCore.qInstallMessageHandler(passObject)
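# Illustrative handler for the shim above (hypothetical names; note the
# unified 3-argument signature regardless of which binding is active):
#
#   def my_handler(msg_type, context, message):
#       print(message)
#
#   from Qt import QtCompat
#   QtCompat.qInstallMessageHandler(my_handler)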
def _getcpppointer(object):
if hasattr(Qt, "_shiboken2"):
return getattr(Qt, "_shiboken2").getCppPointer(object)[0]
elif hasattr(Qt, "_shiboken"):
return getattr(Qt, "_shiboken").getCppPointer(object)[0]
elif hasattr(Qt, "_sip"):
return getattr(Qt, "_sip").unwrapinstance(object)
raise AttributeError("'module' has no attribute 'getCppPointer'")
def _wrapinstance(ptr, base=None):
"""Enable implicit cast of pointer to most suitable class
This behaviour is available in sip by default.
Based on http://nathanhorne.com/pyqtpyside-wrap-instance
Usage:
This mechanism kicks in under these circumstances.
1. Qt.py is using PySide 1 or 2.
2. A `base` argument is not provided.
See :func:`QtCompat.wrapInstance()`
Arguments:
ptr (long): Pointer to QObject in memory
base (QObject, optional): Base class to wrap with. Defaults to QObject,
which should handle anything.
"""
assert isinstance(ptr, long), "Argument 'ptr' must be of type <long>"
assert (base is None) or issubclass(base, Qt.QtCore.QObject), (
"Argument 'base' must be of type <QObject>")
if Qt.IsPyQt4 or Qt.IsPyQt5:
func = getattr(Qt, "_sip").wrapinstance
elif Qt.IsPySide2:
func = getattr(Qt, "_shiboken2").wrapInstance
elif Qt.IsPySide:
func = getattr(Qt, "_shiboken").wrapInstance
else:
raise AttributeError("'module' has no attribute 'wrapInstance'")
if base is None:
if Qt.IsPyQt4 or Qt.IsPyQt5:
base = Qt.QtCore.QObject
else:
q_object = func(long(ptr), Qt.QtCore.QObject)
meta_object = q_object.metaObject()
while True:
class_name = meta_object.className()
try:
base = getattr(Qt.QtWidgets, class_name)
except AttributeError:
try:
base = getattr(Qt.QtCore, class_name)
except AttributeError:
meta_object = meta_object.superClass()
continue
break
return func(long(ptr), base)
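# Typical use of the shim above, e.g. wrapping a pointer obtained from a
# host application (`get_main_window_pointer` is hypothetical; on Python 3
# a plain int works, since `long` is aliased to `int` earlier in this file):
#
#   from Qt import QtWidgets, QtCompat
#   ptr = get_main_window_pointer()  # host-specific, not provided by Qt.py
#   window = QtCompat.wrapInstance(long(ptr), QtWidgets.QMainWindow)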
def _isvalid(object):
"""Check if the object is valid to use in Python runtime.
Usage:
See :func:`QtCompat.isValid()`
Arguments:
object (QObject): QObject to check the validity of.
"""
assert isinstance(object, Qt.QtCore.QObject)
if hasattr(Qt, "_shiboken2"):
return getattr(Qt, "_shiboken2").isValid(object)
elif hasattr(Qt, "_shiboken"):
return getattr(Qt, "_shiboken").isValid(object)
elif hasattr(Qt, "_sip"):
return not getattr(Qt, "_sip").isdeleted(object)
else:
raise AttributeError("'module' has no attribute isValid")
def _translate(context, sourceText, *args):
# In Qt4 bindings, translate can be passed 2 or 3 arguments
# In Qt5 bindings, translate can be passed 2 arguments
# The first argument is disambiguation[str]
# The last argument is n[int]
# The middle argument can be encoding[QtCore.QCoreApplication.Encoding]
if len(args) == 3:
disambiguation, encoding, n = args
elif len(args) == 2:
disambiguation, n = args
encoding = None
else:
raise TypeError(
"Expected 4 or 5 arguments, got {0}.".format(len(args) + 2))
if hasattr(Qt.QtCore, "QCoreApplication"):
app = getattr(Qt.QtCore, "QCoreApplication")
else:
raise NotImplementedError(
"Missing QCoreApplication implementation for {binding}".format(
binding=Qt.__binding__,
)
)
if Qt.__binding__ in ("PySide2", "PyQt5"):
sanitized_args = [context, sourceText, disambiguation, n]
else:
sanitized_args = [
context,
sourceText,
disambiguation,
encoding or app.CodecForTr,
n
]
return app.translate(*sanitized_args)
def _loadUi(uifile, baseinstance=None):
"""Dynamically load a user interface from the given `uifile`
This function calls `uic.loadUi` if using PyQt bindings,
else it implements a comparable binding for PySide.
Documentation:
http://pyqt.sourceforge.net/Docs/PyQt5/designer.html#PyQt5.uic.loadUi
Arguments:
uifile (str): Absolute path to Qt Designer file.
baseinstance (QWidget): Instantiated QWidget or subclass thereof
Return:
baseinstance if `baseinstance` is not `None`. Otherwise
return the newly created instance of the user interface.
"""
if hasattr(Qt, "_uic"):
return Qt._uic.loadUi(uifile, baseinstance)
elif hasattr(Qt, "_QtUiTools"):
# Implement `PyQt5.uic.loadUi` for PySide(2)
class _UiLoader(Qt._QtUiTools.QUiLoader):
"""Create the user interface in a base instance.
Unlike `Qt._QtUiTools.QUiLoader` itself this class does not
create a new instance of the top-level widget, but creates the user
interface in an existing instance of the top-level class if needed.
This mimics the behaviour of `PyQt5.uic.loadUi`.
"""
def __init__(self, baseinstance):
super(_UiLoader, self).__init__(baseinstance)
self.baseinstance = baseinstance
self.custom_widgets = {}
def _loadCustomWidgets(self, etree):
"""
Workaround for the pyside-77 bug.
From QUiLoader doc we should use registerCustomWidget method.
But this causes a segfault on some platforms.
Instead we fetch from customwidgets DOM node the python class
objects. Then we can directly use them in createWidget method.
"""
def headerToModule(header):
"""
Translate a header file to python module path
foo/bar.h => foo.bar
"""
# Remove header extension
module = os.path.splitext(header)[0]
# Replace os separator by python module separator
return module.replace("/", ".").replace("\\", ".")
custom_widgets = etree.find("customwidgets")
if custom_widgets is None:
return
for custom_widget in custom_widgets:
class_name = custom_widget.find("class").text
header = custom_widget.find("header").text
module = importlib.import_module(headerToModule(header))
self.custom_widgets[class_name] = getattr(module,
class_name)
def load(self, uifile, *args, **kwargs):
from xml.etree.ElementTree import ElementTree
# For whatever reason, if this doesn't happen then
# reading an invalid or non-existing .ui file throws
# a RuntimeError.
etree = ElementTree()
etree.parse(uifile)
self._loadCustomWidgets(etree)
widget = Qt._QtUiTools.QUiLoader.load(
self, uifile, *args, **kwargs)
# Workaround for PySide 1.0.9, see issue #208
widget.parentWidget()
return widget
def createWidget(self, class_name, parent=None, name=""):
"""Called for each widget defined in ui file
Overridden here to populate `baseinstance` instead.
"""
if parent is None and self.baseinstance:
# Supposed to create the top-level widget,
# return the base instance instead
return self.baseinstance
# For some reason, Line is not in the list of available
# widgets, but works fine, so we have to special case it here.
if class_name in self.availableWidgets() + ["Line"]:
# Create a new widget for child widgets
widget = Qt._QtUiTools.QUiLoader.createWidget(self,
class_name,
parent,
name)
elif class_name in self.custom_widgets:
widget = self.custom_widgets[class_name](parent=parent)
else:
raise Exception("Custom widget '%s' not supported"
% class_name)
if self.baseinstance:
# Set an attribute for the new child widget on the base
# instance, just like PyQt5.uic.loadUi does.
setattr(self.baseinstance, name, widget)
return widget
widget = _UiLoader(baseinstance).load(uifile)
Qt.QtCore.QMetaObject.connectSlotsByName(widget)
return widget
else:
raise NotImplementedError("No implementation available for loadUi")
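# A minimal usage sketch for `_loadUi`, exposed publicly as `QtCompat.loadUi`
# ("dialog.ui" is a hypothetical Qt Designer file; a binding is assumed to be
# installed):
#
#     from Qt import QtWidgets, QtCompat
#
#     app = QtWidgets.QApplication([])
#     window = QtCompat.loadUi("dialog.ui")  # returns the new top-level widget
#     window.show()
#     app.exec_()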
"""Misplaced members
These members from the original submodule are misplaced relative PySide2
"""
_misplaced_members = {
"PySide2": {
"QtCore.QStringListModel": "QtCore.QStringListModel",
"QtGui.QStringListModel": "QtCore.QStringListModel",
"QtCore.Property": "QtCore.Property",
"QtCore.Signal": "QtCore.Signal",
"QtCore.Slot": "QtCore.Slot",
"QtCore.QAbstractProxyModel": "QtCore.QAbstractProxyModel",
"QtCore.QSortFilterProxyModel": "QtCore.QSortFilterProxyModel",
"QtCore.QItemSelection": "QtCore.QItemSelection",
"QtCore.QItemSelectionModel": "QtCore.QItemSelectionModel",
"QtCore.QItemSelectionRange": "QtCore.QItemSelectionRange",
"QtUiTools.QUiLoader": ["QtCompat.loadUi", _loadUi],
"shiboken2.wrapInstance": ["QtCompat.wrapInstance", _wrapinstance],
"shiboken2.getCppPointer": ["QtCompat.getCppPointer", _getcpppointer],
"shiboken2.isValid": ["QtCompat.isValid", _isvalid],
"QtWidgets.qApp": "QtWidgets.QApplication.instance()",
"QtCore.QCoreApplication.translate": [
"QtCompat.translate", _translate
],
"QtWidgets.QApplication.translate": [
"QtCompat.translate", _translate
],
"QtCore.qInstallMessageHandler": [
"QtCompat.qInstallMessageHandler", _qInstallMessageHandler
],
"QtWidgets.QStyleOptionViewItem": "QtCompat.QStyleOptionViewItemV4",
},
"PyQt5": {
"QtCore.pyqtProperty": "QtCore.Property",
"QtCore.pyqtSignal": "QtCore.Signal",
"QtCore.pyqtSlot": "QtCore.Slot",
"QtCore.QAbstractProxyModel": "QtCore.QAbstractProxyModel",
"QtCore.QSortFilterProxyModel": "QtCore.QSortFilterProxyModel",
"QtCore.QStringListModel": "QtCore.QStringListModel",
"QtCore.QItemSelection": "QtCore.QItemSelection",
"QtCore.QItemSelectionModel": "QtCore.QItemSelectionModel",
"QtCore.QItemSelectionRange": "QtCore.QItemSelectionRange",
"uic.loadUi": ["QtCompat.loadUi", _loadUi],
"sip.wrapinstance": ["QtCompat.wrapInstance", _wrapinstance],
"sip.unwrapinstance": ["QtCompat.getCppPointer", _getcpppointer],
"sip.isdeleted": ["QtCompat.isValid", _isvalid],
"QtWidgets.qApp": "QtWidgets.QApplication.instance()",
"QtCore.QCoreApplication.translate": [
"QtCompat.translate", _translate
],
"QtWidgets.QApplication.translate": [
"QtCompat.translate", _translate
],
"QtCore.qInstallMessageHandler": [
"QtCompat.qInstallMessageHandler", _qInstallMessageHandler
],
"QtWidgets.QStyleOptionViewItem": "QtCompat.QStyleOptionViewItemV4",
},
"PySide": {
"QtGui.QAbstractProxyModel": "QtCore.QAbstractProxyModel",
"QtGui.QSortFilterProxyModel": "QtCore.QSortFilterProxyModel",
"QtGui.QStringListModel": "QtCore.QStringListModel",
"QtGui.QItemSelection": "QtCore.QItemSelection",
"QtGui.QItemSelectionModel": "QtCore.QItemSelectionModel",
"QtCore.Property": "QtCore.Property",
"QtCore.Signal": "QtCore.Signal",
"QtCore.Slot": "QtCore.Slot",
"QtGui.QItemSelectionRange": "QtCore.QItemSelectionRange",
"QtGui.QAbstractPrintDialog": "QtPrintSupport.QAbstractPrintDialog",
"QtGui.QPageSetupDialog": "QtPrintSupport.QPageSetupDialog",
"QtGui.QPrintDialog": "QtPrintSupport.QPrintDialog",
"QtGui.QPrintEngine": "QtPrintSupport.QPrintEngine",
"QtGui.QPrintPreviewDialog": "QtPrintSupport.QPrintPreviewDialog",
"QtGui.QPrintPreviewWidget": "QtPrintSupport.QPrintPreviewWidget",
"QtGui.QPrinter": "QtPrintSupport.QPrinter",
"QtGui.QPrinterInfo": "QtPrintSupport.QPrinterInfo",
"QtUiTools.QUiLoader": ["QtCompat.loadUi", _loadUi],
"shiboken.wrapInstance": ["QtCompat.wrapInstance", _wrapinstance],
"shiboken.unwrapInstance": ["QtCompat.getCppPointer", _getcpppointer],
"shiboken.isValid": ["QtCompat.isValid", _isvalid],
"QtGui.qApp": "QtWidgets.QApplication.instance()",
"QtCore.QCoreApplication.translate": [
"QtCompat.translate", _translate
],
"QtGui.QApplication.translate": [
"QtCompat.translate", _translate
],
"QtCore.qInstallMsgHandler": [
"QtCompat.qInstallMessageHandler", _qInstallMessageHandler
],
"QtGui.QStyleOptionViewItemV4": "QtCompat.QStyleOptionViewItemV4",
},
"PyQt4": {
"QtGui.QAbstractProxyModel": "QtCore.QAbstractProxyModel",
"QtGui.QSortFilterProxyModel": "QtCore.QSortFilterProxyModel",
"QtGui.QItemSelection": "QtCore.QItemSelection",
"QtGui.QStringListModel": "QtCore.QStringListModel",
"QtGui.QItemSelectionModel": "QtCore.QItemSelectionModel",
"QtCore.pyqtProperty": "QtCore.Property",
"QtCore.pyqtSignal": "QtCore.Signal",
"QtCore.pyqtSlot": "QtCore.Slot",
"QtGui.QItemSelectionRange": "QtCore.QItemSelectionRange",
"QtGui.QAbstractPrintDialog": "QtPrintSupport.QAbstractPrintDialog",
"QtGui.QPageSetupDialog": "QtPrintSupport.QPageSetupDialog",
"QtGui.QPrintDialog": "QtPrintSupport.QPrintDialog",
"QtGui.QPrintEngine": "QtPrintSupport.QPrintEngine",
"QtGui.QPrintPreviewDialog": "QtPrintSupport.QPrintPreviewDialog",
"QtGui.QPrintPreviewWidget": "QtPrintSupport.QPrintPreviewWidget",
"QtGui.QPrinter": "QtPrintSupport.QPrinter",
"QtGui.QPrinterInfo": "QtPrintSupport.QPrinterInfo",
# "QtCore.pyqtSignature": "QtCore.Slot",
"uic.loadUi": ["QtCompat.loadUi", _loadUi],
"sip.wrapinstance": ["QtCompat.wrapInstance", _wrapinstance],
"sip.unwrapinstance": ["QtCompat.getCppPointer", _getcpppointer],
"sip.isdeleted": ["QtCompat.isValid", _isvalid],
"QtCore.QString": "str",
"QtGui.qApp": "QtWidgets.QApplication.instance()",
"QtCore.QCoreApplication.translate": [
"QtCompat.translate", _translate
],
"QtGui.QApplication.translate": [
"QtCompat.translate", _translate
],
"QtCore.qInstallMsgHandler": [
"QtCompat.qInstallMessageHandler", _qInstallMessageHandler
],
"QtGui.QStyleOptionViewItemV4": "QtCompat.QStyleOptionViewItemV4",
}
}
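# Reading the table above: keys are member paths as found in the active
# binding, values are where Qt.py re-exposes them. For example, PyQt4's
# "QtCore.pyqtSignal" is republished as Qt.QtCore.Signal, so user code can be
# written once for all four bindings (a sketch, not part of this module):
#
#     from Qt import QtCore
#
#     class Emitter(QtCore.QObject):
#         valueChanged = QtCore.Signal(int)  # resolves per-binding at import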
""" Compatibility Members
This dictionary is used to build Qt.QtCompat objects that provide a consistent
interface for obsolete members, and differences in binding return values.
{
"binding": {
"classname": {
"targetname": "binding_namespace",
}
}
}
"""
_compatibility_members = {
"PySide2": {
"QWidget": {
"grab": "QtWidgets.QWidget.grab",
},
"QHeaderView": {
"sectionsClickable": "QtWidgets.QHeaderView.sectionsClickable",
"setSectionsClickable":
"QtWidgets.QHeaderView.setSectionsClickable",
"sectionResizeMode": "QtWidgets.QHeaderView.sectionResizeMode",
"setSectionResizeMode":
"QtWidgets.QHeaderView.setSectionResizeMode",
"sectionsMovable": "QtWidgets.QHeaderView.sectionsMovable",
"setSectionsMovable": "QtWidgets.QHeaderView.setSectionsMovable",
},
"QFileDialog": {
"getOpenFileName": "QtWidgets.QFileDialog.getOpenFileName",
"getOpenFileNames": "QtWidgets.QFileDialog.getOpenFileNames",
"getSaveFileName": "QtWidgets.QFileDialog.getSaveFileName",
},
},
"PyQt5": {
"QWidget": {
"grab": "QtWidgets.QWidget.grab",
},
"QHeaderView": {
"sectionsClickable": "QtWidgets.QHeaderView.sectionsClickable",
"setSectionsClickable":
"QtWidgets.QHeaderView.setSectionsClickable",
"sectionResizeMode": "QtWidgets.QHeaderView.sectionResizeMode",
"setSectionResizeMode":
"QtWidgets.QHeaderView.setSectionResizeMode",
"sectionsMovable": "QtWidgets.QHeaderView.sectionsMovable",
"setSectionsMovable": "QtWidgets.QHeaderView.setSectionsMovable",
},
"QFileDialog": {
"getOpenFileName": "QtWidgets.QFileDialog.getOpenFileName",
"getOpenFileNames": "QtWidgets.QFileDialog.getOpenFileNames",
"getSaveFileName": "QtWidgets.QFileDialog.getSaveFileName",
},
},
"PySide": {
"QWidget": {
"grab": "QtWidgets.QPixmap.grabWidget",
},
"QHeaderView": {
"sectionsClickable": "QtWidgets.QHeaderView.isClickable",
"setSectionsClickable": "QtWidgets.QHeaderView.setClickable",
"sectionResizeMode": "QtWidgets.QHeaderView.resizeMode",
"setSectionResizeMode": "QtWidgets.QHeaderView.setResizeMode",
"sectionsMovable": "QtWidgets.QHeaderView.isMovable",
"setSectionsMovable": "QtWidgets.QHeaderView.setMovable",
},
"QFileDialog": {
"getOpenFileName": "QtWidgets.QFileDialog.getOpenFileName",
"getOpenFileNames": "QtWidgets.QFileDialog.getOpenFileNames",
"getSaveFileName": "QtWidgets.QFileDialog.getSaveFileName",
},
},
"PyQt4": {
"QWidget": {
"grab": "QtWidgets.QPixmap.grabWidget",
},
"QHeaderView": {
"sectionsClickable": "QtWidgets.QHeaderView.isClickable",
"setSectionsClickable": "QtWidgets.QHeaderView.setClickable",
"sectionResizeMode": "QtWidgets.QHeaderView.resizeMode",
"setSectionResizeMode": "QtWidgets.QHeaderView.setResizeMode",
"sectionsMovable": "QtWidgets.QHeaderView.isMovable",
"setSectionsMovable": "QtWidgets.QHeaderView.setMovable",
},
"QFileDialog": {
"getOpenFileName": "QtWidgets.QFileDialog.getOpenFileName",
"getOpenFileNames": "QtWidgets.QFileDialog.getOpenFileNames",
"getSaveFileName": "QtWidgets.QFileDialog.getSaveFileName",
},
},
}
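# Usage sketch for the compatibility table above (`header` is a hypothetical
# QHeaderView instance; the wrappers dispatch to the binding-specific names):
#
#     from Qt import QtCompat
#
#     QtCompat.QHeaderView.setSectionsClickable(header, True)
#     movable = QtCompat.QHeaderView.sectionsMovable(header)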
def _apply_site_config():
try:
import QtSiteConfig
except ImportError:
# If no QtSiteConfig module found, no modifications
# to _common_members are needed.
pass
else:
# Provide the ability to modify the dicts used to build Qt.py
if hasattr(QtSiteConfig, 'update_members'):
QtSiteConfig.update_members(_common_members)
if hasattr(QtSiteConfig, 'update_misplaced_members'):
QtSiteConfig.update_misplaced_members(members=_misplaced_members)
if hasattr(QtSiteConfig, 'update_compatibility_members'):
QtSiteConfig.update_compatibility_members(
members=_compatibility_members)
def _new_module(name):
return types.ModuleType(__name__ + "." + name)
def _import_sub_module(module, name):
"""import_sub_module will mimic the function of importlib.import_module"""
module = __import__(module.__name__ + "." + name)
for level in name.split("."):
module = getattr(module, level)
return module
def _setup(module, extras):
"""Install common submodules"""
Qt.__binding__ = module.__name__
def _warn_import_error(exc, module):
msg = str(exc)
if "No module named" in msg:
return
_warn("ImportError(%s): %s" % (module, msg))
for name in list(_common_members) + extras:
try:
submodule = _import_sub_module(
module, name)
except ImportError as e:
try:
# For extra modules like sip and shiboken that may not be
# children of the binding.
submodule = __import__(name)
except ImportError as e2:
_warn_import_error(e, name)
_warn_import_error(e2, name)
continue
setattr(Qt, "_" + name, submodule)
if name not in extras:
# Store reference to original binding,
# but don't store speciality modules
# such as uic or QtUiTools
setattr(Qt, name, _new_module(name))
def _reassign_misplaced_members(binding):
"""Apply misplaced members from `binding` to Qt.py
Arguments:
binding (dict): Misplaced members
"""
for src, dst in _misplaced_members[binding].items():
dst_value = None
src_parts = src.split(".")
src_module = src_parts[0]
src_member = None
if len(src_parts) > 1:
src_member = src_parts[1:]
if isinstance(dst, (list, tuple)):
dst, dst_value = dst
dst_parts = dst.split(".")
dst_module = dst_parts[0]
dst_member = None
if len(dst_parts) > 1:
dst_member = dst_parts[1]
# Get the member we want to store in the namespace.
if not dst_value:
try:
_part = getattr(Qt, "_" + src_module)
while src_member:
member = src_member.pop(0)
_part = getattr(_part, member)
dst_value = _part
except AttributeError:
# If the member we want to store in the namespace does not
# exist, there is no need to continue. This can happen if a
# request was made to rename a member that didn't exist, for
# example if QtWidgets isn't available on the target platform.
_log("Misplaced member has no source: {0}".format(src))
continue
try:
src_object = getattr(Qt, dst_module)
except AttributeError:
if dst_module not in _common_members:
# Only create the Qt parent module if its listed in
# _common_members. Without this check, if you remove QtCore
# from _common_members, the default _misplaced_members will add
# Qt.QtCore so it can add Signal, Slot, etc.
msg = 'Not creating missing member module "{m}" for "{c}"'
_log(msg.format(m=dst_module, c=dst_member))
continue
# If the dst is valid but the Qt parent module does not exist
# then go ahead and create a new module to contain the member.
setattr(Qt, dst_module, _new_module(dst_module))
src_object = getattr(Qt, dst_module)
# Enable direct import of the new module
sys.modules[__name__ + "." + dst_module] = src_object
if not dst_value:
dst_value = getattr(Qt, "_" + src_module)
if src_member:
dst_value = getattr(dst_value, src_member)
setattr(
src_object,
dst_member or dst_module,
dst_value
)
def _build_compatibility_members(binding, decorators=None):
"""Apply `binding` to QtCompat
Arguments:
binding (str): Top level binding in _compatibility_members.
decorators (dict, optional): Provides the ability to decorate the
original Qt methods when needed by a binding. This can be used
to change the returned value to a standard value. The key should
be the classname, the value is a dict where the keys are the
target method names, and the values are the decorator functions.
"""
decorators = decorators or dict()
# Allow optional site-level customization of the compatibility members.
# This method does not need to be implemented in QtSiteConfig.
try:
import QtSiteConfig
except ImportError:
pass
else:
if hasattr(QtSiteConfig, 'update_compatibility_decorators'):
QtSiteConfig.update_compatibility_decorators(binding, decorators)
_QtCompat = type("QtCompat", (object,), {})
for classname, bindings in _compatibility_members[binding].items():
attrs = {}
for target, binding in bindings.items():
namespaces = binding.split('.')
try:
src_object = getattr(Qt, "_" + namespaces[0])
except AttributeError as e:
_log("QtCompat: AttributeError: %s" % e)
# Skip reassignment of non-existing members.
# This can happen if a request was made to
# rename a member that didn't exist, for example
# if QtWidgets isn't available on the target platform.
continue
# Walk down any remaining namespace getting the object assuming
# that if the first namespace exists the rest will exist.
for namespace in namespaces[1:]:
src_object = getattr(src_object, namespace)
# decorate the Qt method if a decorator was provided.
if target in decorators.get(classname, []):
# staticmethod must be called on the decorated method to
# prevent a TypeError being raised when the decorated method
# is called.
src_object = staticmethod(
decorators[classname][target](src_object))
attrs[target] = src_object
# Create the QtCompat class and install it into the namespace
compat_class = type(classname, (_QtCompat,), attrs)
setattr(Qt.QtCompat, classname, compat_class)
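# Sketch of the site-level decorator hook consumed above. QtSiteConfig is a
# user-provided module (not part of Qt.py); the decorator below is purely
# illustrative:
#
#     # QtSiteConfig.py
#     def update_compatibility_decorators(binding, decorators):
#         if binding != "PySide":
#             return
#
#         def as_text(func):
#             def wrapper(*args, **kwargs):
#                 return str(func(*args, **kwargs))
#             return wrapper
#
#         decorators.setdefault("QFileDialog", {})["getSaveFileName"] = as_text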
def _pyside2():
"""Initialise PySide2
These functions serve to test the existence of a binding
along with set it up in such a way that it aligns with
the final step; adding members from the original binding
to Qt.py
"""
import PySide2 as module
extras = ["QtUiTools"]
try:
try:
# Before merge of PySide and shiboken
import shiboken2
except ImportError:
# After merge of PySide and shiboken, May 2017
from PySide2 import shiboken2
extras.append("shiboken2")
except ImportError:
pass
_setup(module, extras)
Qt.__binding_version__ = module.__version__
if hasattr(Qt, "_shiboken2"):
Qt.QtCompat.wrapInstance = _wrapinstance
Qt.QtCompat.getCppPointer = _getcpppointer
Qt.QtCompat.delete = shiboken2.delete
if hasattr(Qt, "_QtUiTools"):
Qt.QtCompat.loadUi = _loadUi
if hasattr(Qt, "_QtCore"):
Qt.__qt_version__ = Qt._QtCore.qVersion()
Qt.QtCompat.dataChanged = (
lambda self, topleft, bottomright, roles=None:
self.dataChanged.emit(topleft, bottomright, roles or [])
)
if hasattr(Qt, "_QtWidgets"):
Qt.QtCompat.setSectionResizeMode = \
Qt._QtWidgets.QHeaderView.setSectionResizeMode
_reassign_misplaced_members("PySide2")
_build_compatibility_members("PySide2")
def _pyside():
"""Initialise PySide"""
import PySide as module
extras = ["QtUiTools"]
try:
try:
# Before merge of PySide and shiboken
import shiboken
except ImportError:
# After merge of PySide and shiboken, May 2017
from PySide import shiboken
extras.append("shiboken")
except ImportError:
pass
_setup(module, extras)
Qt.__binding_version__ = module.__version__
if hasattr(Qt, "_shiboken"):
Qt.QtCompat.wrapInstance = _wrapinstance
Qt.QtCompat.getCppPointer = _getcpppointer
Qt.QtCompat.delete = shiboken.delete
if hasattr(Qt, "_QtUiTools"):
Qt.QtCompat.loadUi = _loadUi
if hasattr(Qt, "_QtGui"):
setattr(Qt, "QtWidgets", _new_module("QtWidgets"))
setattr(Qt, "_QtWidgets", Qt._QtGui)
if hasattr(Qt._QtGui, "QX11Info"):
setattr(Qt, "QtX11Extras", _new_module("QtX11Extras"))
Qt.QtX11Extras.QX11Info = Qt._QtGui.QX11Info
Qt.QtCompat.setSectionResizeMode = Qt._QtGui.QHeaderView.setResizeMode
if hasattr(Qt, "_QtCore"):
Qt.__qt_version__ = Qt._QtCore.qVersion()
Qt.QtCompat.dataChanged = (
lambda self, topleft, bottomright, roles=None:
self.dataChanged.emit(topleft, bottomright)
)
_reassign_misplaced_members("PySide")
_build_compatibility_members("PySide")
def _pyqt5():
"""Initialise PyQt5"""
import PyQt5 as module
extras = ["uic"]
try:
# Relevant to PyQt5 5.11 and above
from PyQt5 import sip
extras += ["sip"]
except ImportError:
try:
import sip
extras += ["sip"]
except ImportError:
sip = None
_setup(module, extras)
if hasattr(Qt, "_sip"):
Qt.QtCompat.wrapInstance = _wrapinstance
Qt.QtCompat.getCppPointer = _getcpppointer
Qt.QtCompat.delete = sip.delete
if hasattr(Qt, "_uic"):
Qt.QtCompat.loadUi = _loadUi
if hasattr(Qt, "_QtCore"):
Qt.__binding_version__ = Qt._QtCore.PYQT_VERSION_STR
Qt.__qt_version__ = Qt._QtCore.QT_VERSION_STR
Qt.QtCompat.dataChanged = (
lambda self, topleft, bottomright, roles=None:
self.dataChanged.emit(topleft, bottomright, roles or [])
)
if hasattr(Qt, "_QtWidgets"):
Qt.QtCompat.setSectionResizeMode = \
Qt._QtWidgets.QHeaderView.setSectionResizeMode
_reassign_misplaced_members("PyQt5")
_build_compatibility_members('PyQt5')
def _pyqt4():
"""Initialise PyQt4"""
import sip
# Validate the environment variable. Prevents an error if
# the variable is invalid since it's just a hint.
try:
hint = int(QT_SIP_API_HINT)
except TypeError:
hint = None # Variable was None, i.e. not set.
except ValueError:
raise ImportError("QT_SIP_API_HINT=%s must be a 1 or 2")
for api in ("QString",
"QVariant",
"QDate",
"QDateTime",
"QTextStream",
"QTime",
"QUrl"):
try:
sip.setapi(api, hint or 2)
except AttributeError:
raise ImportError("PyQt4 < 4.6 isn't supported by Qt.py")
except ValueError:
actual = sip.getapi(api)
if not hint:
raise ImportError("API version already set to %d" % actual)
else:
# Having provided a hint indicates a soft constraint, one
# that doesn't throw an exception.
sys.stderr.write(
"Warning: API '%s' has already been set to %d.\n"
% (api, actual)
)
import PyQt4 as module
extras = ["uic"]
try:
import sip
extras.append(sip.__name__)
except ImportError:
sip = None
_setup(module, extras)
if hasattr(Qt, "_sip"):
Qt.QtCompat.wrapInstance = _wrapinstance
Qt.QtCompat.getCppPointer = _getcpppointer
Qt.QtCompat.delete = sip.delete
if hasattr(Qt, "_uic"):
Qt.QtCompat.loadUi = _loadUi
if hasattr(Qt, "_QtGui"):
setattr(Qt, "QtWidgets", _new_module("QtWidgets"))
setattr(Qt, "_QtWidgets", Qt._QtGui)
if hasattr(Qt._QtGui, "QX11Info"):
setattr(Qt, "QtX11Extras", _new_module("QtX11Extras"))
Qt.QtX11Extras.QX11Info = Qt._QtGui.QX11Info
Qt.QtCompat.setSectionResizeMode = \
Qt._QtGui.QHeaderView.setResizeMode
if hasattr(Qt, "_QtCore"):
Qt.__binding_version__ = Qt._QtCore.PYQT_VERSION_STR
Qt.__qt_version__ = Qt._QtCore.QT_VERSION_STR
Qt.QtCompat.dataChanged = (
lambda self, topleft, bottomright, roles=None:
self.dataChanged.emit(topleft, bottomright)
)
_reassign_misplaced_members("PyQt4")
# QFileDialog QtCompat decorator
def _standardizeQFileDialog(some_function):
"""Decorator that makes PyQt4 return conform to other bindings"""
def wrapper(*args, **kwargs):
ret = (some_function(*args, **kwargs))
# PyQt4 only returns the selected filename, force it to a
# standard return of the selected filename, and an empty string
# for the selected filter
return ret, ''
wrapper.__doc__ = some_function.__doc__
wrapper.__name__ = some_function.__name__
return wrapper
decorators = {
"QFileDialog": {
"getOpenFileName": _standardizeQFileDialog,
"getOpenFileNames": _standardizeQFileDialog,
"getSaveFileName": _standardizeQFileDialog,
}
}
_build_compatibility_members('PyQt4', decorators)
def _none():
"""Internal option (used in installer)"""
Mock = type("Mock", (), {"__getattr__": lambda Qt, attr: None})
Qt.__binding__ = "None"
Qt.__qt_version__ = "0.0.0"
Qt.__binding_version__ = "0.0.0"
Qt.QtCompat.loadUi = lambda uifile, baseinstance=None: None
Qt.QtCompat.setSectionResizeMode = lambda *args, **kwargs: None
for submodule in _common_members.keys():
setattr(Qt, submodule, Mock())
setattr(Qt, "_" + submodule, Mock())
def _log(text):
if QT_VERBOSE:
sys.stdout.write("Qt.py [info]: %s\n" % text)
def _warn(text):
try:
sys.stderr.write("Qt.py [warning]: %s\n" % text)
except UnicodeDecodeError:
import locale
encoding = locale.getpreferredencoding()
sys.stderr.write("Qt.py [warning]: %s\n" % text.decode(encoding))
def _convert(lines):
"""Convert compiled .ui file from PySide2 to Qt.py
Arguments:
lines (list): Each line of the .ui file
Usage:
>> with open("myui.py") as f:
.. lines = _convert(f.readlines())
"""
def parse(line):
line = line.replace("from PySide2 import", "from Qt import QtCompat,")
line = line.replace("QtWidgets.QApplication.translate",
"QtCompat.translate")
if "QtCore.SIGNAL" in line:
raise NotImplementedError("QtCore.SIGNAL is missing from PyQt5 "
"and so Qt.py does not support it: you "
"should avoid defining signals inside "
"your ui files.")
return line
parsed = list()
for line in lines:
line = parse(line)
parsed.append(line)
return parsed
def _cli(args):
"""Qt.py command-line interface"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--convert",
help="Path to compiled Python module, e.g. my_ui.py")
parser.add_argument("--compile",
help="Accept raw .ui file and compile with native "
"PySide2 compiler.")
parser.add_argument("--stdout",
help="Write to stdout instead of file",
action="store_true")
parser.add_argument("--stdin",
help="Read from stdin instead of file",
action="store_true")
args = parser.parse_args(args)
if args.stdout:
raise NotImplementedError("--stdout")
if args.stdin:
raise NotImplementedError("--stdin")
if args.compile:
raise NotImplementedError("--compile")
if args.convert:
sys.stdout.write("#\n"
"# WARNING: --convert is an ALPHA feature.\n#\n"
"# See https://github.com/mottosso/Qt.py/pull/132\n"
"# for details.\n"
"#\n")
#
# ------> Read
#
with open(args.convert) as f:
lines = _convert(f.readlines())
backup = "%s_backup%s" % os.path.splitext(args.convert)
sys.stdout.write("Creating \"%s\"..\n" % backup)
shutil.copy(args.convert, backup)
#
# <------ Write
#
with open(args.convert, "w") as f:
f.write("".join(lines))
sys.stdout.write("Successfully converted \"%s\"\n" % args.convert)
class MissingMember(object):
"""
A placeholder type for a missing Qt object not
included in Qt.py
Args:
name (str): The name of the missing type
details (str): An optional custom error message
"""
ERR_TMPL = ("{} is not a common object across PySide2 "
"and the other Qt bindings. It is not included "
"as a common member in the Qt.py layer")
def __init__(self, name, details=''):
self.__name = name
self.__err = self.ERR_TMPL.format(name)
if details:
self.__err = "{}: {}".format(self.__err, details)
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self.__name)
def __getattr__(self, name):
raise NotImplementedError(self.__err)
def __call__(self, *a, **kw):
raise NotImplementedError(self.__err)
def _install():
# Default order (customize order and content via QT_PREFERRED_BINDING)
default_order = ("PySide2", "PyQt5", "PySide", "PyQt4")
preferred_order = None
if QT_PREFERRED_BINDING_JSON:
# A per-vendor preferred binding customization was defined.
# This should be a dictionary keyed by the full Qt.py module namespace,
# mapping to the binding order to apply. The "default" key can be used to
# apply custom bindings to all modules not explicitly defined. If the
# JSON data is invalid this will raise an exception.
# Example:
# {"mylibrary.vendor.Qt": ["PySide2"], "default":["PyQt5","PyQt4"]}
try:
preferred_bindings = json.loads(QT_PREFERRED_BINDING_JSON)
except ValueError:
# Python 2 raises ValueError, Python 3 raises json.JSONDecodeError
# a subclass of ValueError
_warn("Failed to parse QT_PREFERRED_BINDING_JSON='%s'"
% QT_PREFERRED_BINDING_JSON)
_warn("Falling back to default preferred order")
else:
preferred_order = preferred_bindings.get(__name__)
# If no matching binding was used, optionally apply a default.
if preferred_order is None:
preferred_order = preferred_bindings.get("default", None)
if preferred_order is None:
# If a JSON preferred binding was not used, respect the
# QT_PREFERRED_BINDING environment variable if defined.
preferred_order = list(
b for b in QT_PREFERRED_BINDING.split(os.pathsep) if b
)
order = preferred_order or default_order
available = {
"PySide2": _pyside2,
"PyQt5": _pyqt5,
"PySide": _pyside,
"PyQt4": _pyqt4,
"None": _none
}
_log("Order: '%s'" % "', '".join(order))
# Allow site-level customization of the available modules.
_apply_site_config()
found_binding = False
for name in order:
_log("Trying %s" % name)
try:
available[name]()
found_binding = True
break
except ImportError as e:
_log("ImportError: %s" % e)
except KeyError:
_log("ImportError: Preferred binding '%s' not found." % name)
if not found_binding:
# If no binding was found, raise this error
raise ImportError("No Qt binding was found.")
# Install individual members
for name, members in _common_members.items():
try:
their_submodule = getattr(Qt, "_%s" % name)
except AttributeError:
continue
our_submodule = getattr(Qt, name)
# Enable import *
__all__.append(name)
# Enable direct import of submodule,
# e.g. import Qt.QtCore
sys.modules[__name__ + "." + name] = our_submodule
for member in members:
# Accept that a submodule may miss certain members.
try:
their_member = getattr(their_submodule, member)
except AttributeError:
_log("'%s.%s' was missing." % (name, member))
continue
setattr(our_submodule, member, their_member)
# Install missing member placeholders
for name, members in _missing_members.items():
our_submodule = getattr(Qt, name)
for member in members:
# If the submodule already has this member installed,
# either by the common members, or the site config,
# then skip installing this one over it.
if hasattr(our_submodule, member):
continue
placeholder = MissingMember("{}.{}".format(name, member),
details=members[member])
setattr(our_submodule, member, placeholder)
# Enable direct import of QtCompat
sys.modules[__name__ + ".QtCompat"] = Qt.QtCompat
# Backwards compatibility
if hasattr(Qt.QtCompat, 'loadUi'):
Qt.QtCompat.load_ui = Qt.QtCompat.loadUi
_install()
# Setup Binding Enum states
Qt.IsPySide2 = Qt.__binding__ == 'PySide2'
Qt.IsPyQt5 = Qt.__binding__ == 'PyQt5'
Qt.IsPySide = Qt.__binding__ == 'PySide'
Qt.IsPyQt4 = Qt.__binding__ == 'PyQt4'
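# These booleans allow cheap binding-specific branching in user code, e.g.
# (a sketch; assumes this module is importable as "Qt"):
#
#     import Qt
#
#     if Qt.IsPySide2 or Qt.IsPyQt5:
#         pass  # Qt 5 code path
#     else:
#         pass  # Qt 4 code path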
"""Augment QtCompat
QtCompat contains wrappers and added functionality
to the original bindings, such as the CLI interface
and otherwise incompatible members between bindings,
such as `QHeaderView.setSectionResizeMode`.
"""
Qt.QtCompat._cli = _cli
Qt.QtCompat._convert = _convert
# Enable command-line interface
if __name__ == "__main__":
_cli(sys.argv[1:])
# The MIT License (MIT)
#
# Copyright (c) 2016-2017 Marcus Ottosson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# In PySide(2), loadUi does not exist, so we implement it
#
# `_UiLoader` is adapted from the qtpy project, which was further influenced
# by qt-helpers which was released under a 3-clause BSD license which in turn
# is based on a solution at:
#
# - https://gist.github.com/cpbotha/1b42a20c8f3eb9bb7cb8
#
# The License for this code is as follows:
#
# qt-helpers - a common front-end to various Qt modules
#
# Copyright (c) 2015, Chris Beaumont and Thomas Robitaille
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Glue project nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Which itself was based on the solution at
#
# https://gist.github.com/cpbotha/1b42a20c8f3eb9bb7cb8
#
# which was released under the MIT license:
#
# Copyright (c) 2011 Sebastian Wiesner <lunaryorn@gmail.com>
# Modifications by Charl Botha <cpbotha@vxlabs.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files
# (the "Software"),to deal in the Software without restriction,
# including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
bohdon/maya-pulse
|
src/pulse/scripts/pulse/vendor/Qt/__init__.py
|
Python
|
mit
| 66,075
|
[
"VisIt"
] |
23844db5bb99e6265c91aa4951a95eb24021ee576fe806912c032b84cf1bc4f9
|
from PyOpenWorm.neuron import Neuron
from PyOpenWorm.data import DataUser
import neuroml as N
class NeuroML(DataUser):
@classmethod
def generate(cls, o, t=2):
"""
Get a NeuroML object that represents the given object. The ``type``
determines what content is included in the NeuroML object:
:param o: The object to generate neuroml from
:param t: What kind of content should be included in the document
- 0=full morphology+biophysics
- 1=cell body only+biophysics
- 2=full morphology only
:returns: A NeuroML object that represents the given object.
:rtype: NeuroMLDocument
"""
if isinstance(o, Neuron):
# read in the morphology data
d = N.NeuroMLDocument(id=o.name())
c = N.Cell(id=o.name())
c.morphology = o.morphology()
d.cells.append(c)
return d
else:
raise "Not a valid object for conversion to neuroml"
@classmethod
def write(cls, o, n):
"""
Write the given neuroml document object out to a file
:param o: The NeuroMLDocument to write
:param n: The name of the file to write to
"""
N.writers.NeuroMLWriter.write(o, n)
@classmethod
def validate(cls, o):
pass
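# Usage sketch (hypothetical; assumes `n` is a PyOpenWorm Neuron whose
# morphology is available):
#
#     doc = NeuroML.generate(n, t=2)
#     NeuroML.write(doc, "neuron.nml")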
|
gsarma/PyOpenWorm
|
PyOpenWorm/my_neuroml.py
|
Python
|
mit
| 1,377
|
[
"NEURON"
] |
626036e29a8987e66a8393c3c0c9015286c5332639a550334f3cf34eef13f72e
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import ctypes
import copy
import numpy
from pyscf import lib
from pyscf import gto
import pyscf.df
from pyscf.scf import _vhf
from pyscf.pbc.gto import _pbcintor
from pyscf.pbc.lib.kpts_helper import is_zero, gamma_point, unique, KPT_DIFF_TOL
libpbc = lib.load_library('libpbc')
def make_auxmol(cell, auxbasis=None):
'''
See pyscf.df.addons.make_auxmol
'''
auxcell = pyscf.df.addons.make_auxmol(cell, auxbasis)
auxcell.rcut = max([auxcell.bas_rcut(ib, cell.precision)
for ib in range(auxcell.nbas)])
return auxcell
make_auxcell = make_auxmol
def format_aux_basis(cell, auxbasis='weigend+etb'):
'''For backward compatibility'''
return make_auxmol(cell, auxbasis)
def aux_e2(cell, auxcell_or_auxbasis, intor='int3c2e', aosym='s1', comp=None,
kptij_lst=numpy.zeros((1,2,3)), shls_slice=None, **kwargs):
r'''3-center AO integrals (ij|L) with double lattice sum:
\sum_{lm} (i[l]j[m]|L[0]), where L is the auxiliary basis.
Returns:
(nao_pair, naux) array
'''
if isinstance(auxcell_or_auxbasis, gto.Mole):
auxcell = auxcell_or_auxbasis
else:
auxcell = make_auxcell(cell, auxcell_or_auxbasis)
# For some unknown reason, the pre-decontracted basis is slower than the
# original basis, so the block below is left disabled:
# if shls_slice is None and cell.nao_nr() < 200:
## Slightly decontract basis. The decontracted basis has better locality.
## The locality can be used in the lattice sum to reduce cost.
# cell, contr_coeff = pbcgto.cell._split_basis(cell)
# else:
# contr_coeff = None
intor, comp = gto.moleintor._get_intor_and_comp(cell._add_suffix(intor), comp)
if shls_slice is None:
shls_slice = (0, cell.nbas, 0, cell.nbas, 0, auxcell.nbas)
ao_loc = cell.ao_loc_nr()
aux_loc = auxcell.ao_loc_nr(auxcell.cart or 'ssc' in intor)[:shls_slice[5]+1]
ni = ao_loc[shls_slice[1]] - ao_loc[shls_slice[0]]
nj = ao_loc[shls_slice[3]] - ao_loc[shls_slice[2]]
naux = aux_loc[shls_slice[5]] - aux_loc[shls_slice[4]]
nkptij = len(kptij_lst)
kpti = kptij_lst[:,0]
kptj = kptij_lst[:,1]
j_only = is_zero(kpti-kptj)
if j_only and aosym[:2] == 's2':
assert(shls_slice[2] == 0)
nao_pair = (ao_loc[shls_slice[1]]*(ao_loc[shls_slice[1]]+1)//2 -
ao_loc[shls_slice[0]]*(ao_loc[shls_slice[0]]+1)//2)
else:
nao_pair = ni * nj
if gamma_point(kptij_lst):
dtype = numpy.double
else:
dtype = numpy.complex128
int3c = wrap_int3c(cell, auxcell, intor, aosym, comp, kptij_lst, **kwargs)
out = numpy.empty((nkptij,comp,nao_pair,naux), dtype=dtype)
out = int3c(shls_slice, out)
# if contr_coeff is not None:
# if aosym == 's2':
# tmp = out.reshape(nkptij,comp,ni,ni,naux)
# idx, idy = numpy.tril_indices(ni)
# tmp[:,:,idy,idx] = out.conj()
# tmp[:,:,idx,idy] = out
# out, tmp = tmp, None
# out = lib.einsum('kcpql,pi->kciql', out, contr_coeff)
# out = lib.einsum('kciql,qj->kcijl', out, contr_coeff)
# idx, idy = numpy.tril_indices(contr_coeff.shape[1])
# out = out[:,:,idx,idy]
# else:
# out = out.reshape(nkptij,comp,ni,nj,naux)
# out = lib.einsum('kcpql,pi->kciql', out, contr_coeff)
# out = lib.einsum('kciql,qj->kcijl', out, contr_coeff)
# out = out.reshape(nkptij,comp,-1,naux)
if comp == 1:
out = out[:,0]
if nkptij == 1:
out = out[0]
return out
def wrap_int3c(cell, auxcell, intor='int3c2e', aosym='s1', comp=1,
kptij_lst=numpy.zeros((1,2,3)), cintopt=None, pbcopt=None):
intor = cell._add_suffix(intor)
pcell = copy.copy(cell)
pcell._atm, pcell._bas, pcell._env = \
atm, bas, env = gto.conc_env(cell._atm, cell._bas, cell._env,
cell._atm, cell._bas, cell._env)
ao_loc = gto.moleintor.make_loc(bas, intor)
aux_loc = auxcell.ao_loc_nr(auxcell.cart or 'ssc' in intor)
ao_loc = numpy.asarray(numpy.hstack([ao_loc, ao_loc[-1]+aux_loc[1:]]),
dtype=numpy.int32)
atm, bas, env = gto.conc_env(atm, bas, env,
auxcell._atm, auxcell._bas, auxcell._env)
rcut = max(cell.rcut, auxcell.rcut)
Ls = cell.get_lattice_Ls(rcut=rcut)
nimgs = len(Ls)
nbas = cell.nbas
kpti = kptij_lst[:,0]
kptj = kptij_lst[:,1]
if gamma_point(kptij_lst):
kk_type = 'g'
nkpts = nkptij = 1
kptij_idx = numpy.array([0], dtype=numpy.int32)
expkL = numpy.ones(1)
elif is_zero(kpti-kptj): # j_only
kk_type = 'k'
kpts = kptij_idx = numpy.asarray(kpti, order='C')
expkL = numpy.exp(1j * numpy.dot(kpts, Ls.T))
nkpts = nkptij = len(kpts)
else:
kk_type = 'kk'
kpts = unique(numpy.vstack([kpti,kptj]))[0]
expkL = numpy.exp(1j * numpy.dot(kpts, Ls.T))
wherei = numpy.where(abs(kpti.reshape(-1,1,3)-kpts).sum(axis=2) < KPT_DIFF_TOL)[1]
wherej = numpy.where(abs(kptj.reshape(-1,1,3)-kpts).sum(axis=2) < KPT_DIFF_TOL)[1]
nkpts = len(kpts)
kptij_idx = numpy.asarray(wherei*nkpts+wherej, dtype=numpy.int32)
nkptij = len(kptij_lst)
fill = 'PBCnr3c_fill_%s%s' % (kk_type, aosym[:2])
drv = libpbc.PBCnr3c_drv
if cintopt is None:
if nbas > 0:
cintopt = _vhf.make_cintopt(atm, bas, env, intor)
else:
cintopt = lib.c_null_ptr()
# Remove the precomputed pair data because the pair data corresponds to the
# integral of cell #0 while the lattice sum moves shls to all repeated images.
if intor[:3] != 'ECP':
libpbc.CINTdel_pairdata_optimizer(cintopt)
if pbcopt is None:
pbcopt = _pbcintor.PBCOpt(pcell).init_rcut_cond(pcell)
if isinstance(pbcopt, _pbcintor.PBCOpt):
cpbcopt = pbcopt._this
else:
cpbcopt = lib.c_null_ptr()
def int3c(shls_slice, out):
shls_slice = (shls_slice[0], shls_slice[1],
nbas+shls_slice[2], nbas+shls_slice[3],
nbas*2+shls_slice[4], nbas*2+shls_slice[5])
drv(getattr(libpbc, intor), getattr(libpbc, fill),
out.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(nkptij), ctypes.c_int(nkpts),
ctypes.c_int(comp), ctypes.c_int(nimgs),
Ls.ctypes.data_as(ctypes.c_void_p),
expkL.ctypes.data_as(ctypes.c_void_p),
kptij_idx.ctypes.data_as(ctypes.c_void_p),
(ctypes.c_int*6)(*shls_slice),
ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt, cpbcopt,
atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(cell.natm),
bas.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(nbas), # need to pass cell.nbas to libpbc.PBCnr3c_drv
env.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(env.size))
return out
return int3c
def fill_2c2e(cell, auxcell_or_auxbasis, intor='int2c2e', hermi=0, kpt=numpy.zeros(3)):
'''2-center 2-electron AO integrals (L|ij), where L is the auxiliary basis.
'''
if isinstance(auxcell_or_auxbasis, gto.Mole):
auxcell = auxcell_or_auxbasis
else:
auxcell = make_auxcell(cell, auxcell_or_auxbasis)
if hermi != 0:
hermi = pyscf.lib.HERMITIAN
# pbcopt uses the value of the AO pair to prescreen PBC integrals in the
# lattice summation. Pass a NULL pointer as pbcopt to disable the prescreening.
return auxcell.pbc_intor(intor, 1, hermi, kpt, pbcopt=lib.c_null_ptr())
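# Minimal usage sketch for the routines above. The toy cell below is an
# illustrative assumption (geometry, basis, and auxbasis choices are not
# prescriptive):
#
#     from pyscf.pbc import gto as pbcgto
#
#     cell = pbcgto.Cell()
#     cell.atom = 'He 0 0 0'
#     cell.basis = 'gth-szv'
#     cell.pseudo = 'gth-pade'
#     cell.a = numpy.eye(3) * 3.5
#     cell.build()
#
#     j3c = aux_e2(cell, 'weigend')     # (nao_pair, naux) at the Gamma point
#     j2c = fill_2c2e(cell, 'weigend')  # (naux, naux)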
|
sunqm/pyscf
|
pyscf/pbc/df/incore.py
|
Python
|
apache-2.0
| 8,293
|
[
"PySCF"
] |
71b169a413ba38823afdeda367322b37fef3a94805c4716cfe3019424d33e954
|
# encoding: utf-8
"""
An application for IPython.
All top-level applications should use the classes in this module for
handling configuration and creating components.
The job of an :class:`Application` is to create the master configuration
object and then create the configurable objects, passing the config to them.
Authors:
* Brian Granger
* Fernando Perez
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import atexit
import glob
import logging
import os
import shutil
import sys
from IPython.config.application import Application, catch_config_error
from IPython.config.loader import ConfigFileNotFound
from IPython.core import release, crashhandler
from IPython.core.profiledir import ProfileDir, ProfileDirError
from IPython.utils import py3compat
from IPython.utils.path import get_ipython_dir, get_ipython_package_dir
from IPython.utils.traitlets import List, Unicode, Type, Bool, Dict, Set, Instance
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Base Application Class
#-----------------------------------------------------------------------------
# aliases and flags
base_aliases = {
'profile-dir' : 'ProfileDir.location',
'profile' : 'BaseIPythonApplication.profile',
'ipython-dir' : 'BaseIPythonApplication.ipython_dir',
'log-level' : 'Application.log_level',
'config' : 'BaseIPythonApplication.extra_config_file',
}
base_flags = dict(
debug = ({'Application' : {'log_level' : logging.DEBUG}},
"set log level to logging.DEBUG (maximize logging output)"),
quiet = ({'Application' : {'log_level' : logging.CRITICAL}},
"set log level to logging.CRITICAL (minimize logging output)"),
init = ({'BaseIPythonApplication' : {
'copy_config_files' : True,
'auto_create' : True}
}, """Initialize profile with default config files. This is equivalent
to running `ipython profile create <profile>` prior to startup.
""")
)
class BaseIPythonApplication(Application):
name = Unicode(u'ipython')
description = Unicode(u'IPython: an enhanced interactive Python shell.')
version = Unicode(release.version)
aliases = Dict(base_aliases)
flags = Dict(base_flags)
classes = List([ProfileDir])
# Track whether the config_file has changed,
# because some logic happens only if we aren't using the default.
config_file_specified = Set()
config_file_name = Unicode()
def _config_file_name_default(self):
return self.name.replace('-','_') + u'_config.py'
def _config_file_name_changed(self, name, old, new):
if new != old:
self.config_file_specified.add(new)
# The directory that contains IPython's builtin profiles.
builtin_profile_dir = Unicode(
os.path.join(get_ipython_package_dir(), u'config', u'profile', u'default')
)
config_file_paths = List(Unicode)
def _config_file_paths_default(self):
return [os.getcwdu()]
extra_config_file = Unicode(config=True,
help="""Path to an extra config file to load.
If specified, load this config file in addition to any other IPython config.
""")
def _extra_config_file_changed(self, name, old, new):
try:
self.config_files.remove(old)
except ValueError:
pass
self.config_file_specified.add(new)
self.config_files.append(new)
profile = Unicode(u'default', config=True,
help="""The IPython profile to use."""
)
def _profile_changed(self, name, old, new):
self.builtin_profile_dir = os.path.join(
get_ipython_package_dir(), u'config', u'profile', new
)
ipython_dir = Unicode(get_ipython_dir(), config=True,
help="""
The name of the IPython directory. This directory is used for logging
configuration (through profiles), history storage, etc. The default
is usually $HOME/.ipython. This option can also be specified through
the environment variable IPYTHONDIR.
"""
)
_in_init_profile_dir = False
profile_dir = Instance(ProfileDir)
def _profile_dir_default(self):
# avoid recursion
if self._in_init_profile_dir:
return
# profile_dir requested early, force initialization
self.init_profile_dir()
return self.profile_dir
overwrite = Bool(False, config=True,
help="""Whether to overwrite existing config files when copying""")
auto_create = Bool(False, config=True,
help="""Whether to create profile dir if it doesn't exist""")
config_files = List(Unicode)
def _config_files_default(self):
return [self.config_file_name]
copy_config_files = Bool(False, config=True,
help="""Whether to install the default config files into the profile dir.
If a new profile is being created, and IPython contains config files for that
profile, then they will be staged into the new directory. Otherwise,
default config files will be automatically generated.
""")
verbose_crash = Bool(False, config=True,
help="""Create a massive crash report when IPython encounters what may be an
internal error. The default is to append a short message to the
usual traceback""")
# The class to use as the crash handler.
crash_handler_class = Type(crashhandler.CrashHandler)
@catch_config_error
def __init__(self, **kwargs):
super(BaseIPythonApplication, self).__init__(**kwargs)
# ensure current working directory exists
try:
directory = os.getcwdu()
except:
# raise exception
self.log.error("Current working directory doesn't exist.")
raise
# ensure even default IPYTHONDIR exists
if not os.path.exists(self.ipython_dir):
self._ipython_dir_changed('ipython_dir', self.ipython_dir, self.ipython_dir)
#-------------------------------------------------------------------------
# Various stages of Application creation
#-------------------------------------------------------------------------
def init_crash_handler(self):
"""Create a crash handler, typically setting sys.excepthook to it."""
self.crash_handler = self.crash_handler_class(self)
sys.excepthook = self.excepthook
def unset_crashhandler():
sys.excepthook = sys.__excepthook__
atexit.register(unset_crashhandler)
def excepthook(self, etype, evalue, tb):
"""this is sys.excepthook after init_crashhandler
set self.verbose_crash=True to use our full crashhandler, instead of
a regular traceback with a short message (crash_handler_lite)
"""
if self.verbose_crash:
return self.crash_handler(etype, evalue, tb)
else:
return crashhandler.crash_handler_lite(etype, evalue, tb)
def _ipython_dir_changed(self, name, old, new):
str_old = py3compat.cast_bytes_py2(os.path.abspath(old),
sys.getfilesystemencoding()
)
if str_old in sys.path:
sys.path.remove(str_old)
str_path = py3compat.cast_bytes_py2(os.path.abspath(new),
sys.getfilesystemencoding()
)
sys.path.append(str_path)
if not os.path.isdir(new):
os.makedirs(new, mode=0o777)
readme = os.path.join(new, 'README')
if not os.path.exists(readme):
path = os.path.join(get_ipython_package_dir(), u'config', u'profile')
shutil.copy(os.path.join(path, 'README'), readme)
self.log.debug("IPYTHONDIR set to: %s" % new)
def load_config_file(self, suppress_errors=True):
"""Load the config file.
By default, errors in loading config are handled, and a warning
printed on screen. For testing, the suppress_errors option is set
to False, so errors will make tests fail.
"""
self.log.debug("Searching path %s for config files", self.config_file_paths)
base_config = 'ipython_config.py'
self.log.debug("Attempting to load config file: %s" %
base_config)
try:
Application.load_config_file(
self,
base_config,
path=self.config_file_paths
)
except ConfigFileNotFound:
# ignore errors loading parent
self.log.debug("Config file %s not found", base_config)
pass
for config_file_name in self.config_files:
if not config_file_name or config_file_name == base_config:
continue
self.log.debug("Attempting to load config file: %s" %
self.config_file_name)
try:
Application.load_config_file(
self,
config_file_name,
path=self.config_file_paths
)
except ConfigFileNotFound:
# Only warn if the default config file was NOT being used.
if config_file_name in self.config_file_specified:
msg = self.log.warn
else:
msg = self.log.debug
msg("Config file not found, skipping: %s", config_file_name)
except:
# For testing purposes.
if not suppress_errors:
raise
self.log.warn("Error loading config file: %s" %
self.config_file_name, exc_info=True)
def init_profile_dir(self):
"""initialize the profile dir"""
self._in_init_profile_dir = True
if self.profile_dir is not None:
# already ran
return
try:
# location explicitly specified:
location = self.config.ProfileDir.location
except AttributeError:
# location not specified, find by profile name
try:
p = ProfileDir.find_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
except ProfileDirError:
# not found, maybe create it (always create default profile)
if self.auto_create or self.profile == 'default':
try:
p = ProfileDir.create_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
except ProfileDirError:
self.log.fatal("Could not create profile: %r"%self.profile)
self.exit(1)
else:
self.log.info("Created profile dir: %r"%p.location)
else:
self.log.fatal("Profile %r not found."%self.profile)
self.exit(1)
else:
self.log.info("Using existing profile dir: %r"%p.location)
else:
# location is fully specified
try:
p = ProfileDir.find_profile_dir(location, self.config)
except ProfileDirError:
# not found, maybe create it
if self.auto_create:
try:
p = ProfileDir.create_profile_dir(location, self.config)
except ProfileDirError:
self.log.fatal("Could not create profile directory: %r"%location)
self.exit(1)
else:
self.log.info("Creating new profile dir: %r"%location)
else:
self.log.fatal("Profile directory %r not found."%location)
self.exit(1)
else:
self.log.info("Using existing profile dir: %r"%location)
# if profile_dir is specified explicitly, set profile name
dir_name = os.path.basename(p.location)
if dir_name.startswith('profile_'):
self.profile = dir_name[8:]
self.profile_dir = p
self.config_file_paths.append(p.location)
self._in_init_profile_dir = False
def init_config_files(self):
"""[optionally] copy default config files into profile dir."""
# copy config files
path = self.builtin_profile_dir
if self.copy_config_files:
src = self.profile
cfg = self.config_file_name
if path and os.path.exists(os.path.join(path, cfg)):
self.log.warn("Staging %r from %s into %r [overwrite=%s]"%(
cfg, src, self.profile_dir.location, self.overwrite)
)
self.profile_dir.copy_config_file(cfg, path=path, overwrite=self.overwrite)
else:
self.stage_default_config_file()
else:
# Still stage *bundled* config files, but not generated ones
# This is necessary for `ipython profile=sympy` to load the profile
# on the first go
files = glob.glob(os.path.join(path, '*.py'))
for fullpath in files:
cfg = os.path.basename(fullpath)
if self.profile_dir.copy_config_file(cfg, path=path, overwrite=False):
# file was copied
self.log.warn("Staging bundled %s from %s into %r"%(
cfg, self.profile, self.profile_dir.location)
)
def stage_default_config_file(self):
"""auto generate default config file, and stage it into the profile."""
s = self.generate_config_file()
fname = os.path.join(self.profile_dir.location, self.config_file_name)
if self.overwrite or not os.path.exists(fname):
self.log.warn("Generating default config file: %r"%(fname))
with open(fname, 'w') as f:
f.write(s)
@catch_config_error
def initialize(self, argv=None):
# don't hook up crash handler before parsing command-line
self.parse_command_line(argv)
self.init_crash_handler()
if self.subapp is not None:
# stop here if subapp is taking over
return
cl_config = self.config
self.init_profile_dir()
self.init_config_files()
self.load_config_file()
# enforce cl-opts override configfile opts:
self.update_config(cl_config)
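# Sketch of how a concrete application wires into this base class (names are
# illustrative, not an IPython API):
#
#     class MyApp(BaseIPythonApplication):
#         name = Unicode(u'myapp')
#
#     if __name__ == '__main__':
#         app = MyApp()
#         app.initialize()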
|
noslenfa/tdjangorest
|
uw/lib/python2.7/site-packages/IPython/core/application.py
|
Python
|
apache-2.0
| 15,149
|
[
"Brian"
] |
b3b5cc9502b0922f13cf95d33b4b9ffb9b98b5f04335ab29f87fb2eca272d6f3
|
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Window')
from data_400ms import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
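# Usage sketch for pca() on random data (illustrative only; a numpy matrix is
# assumed so that the column-mean subtraction above broadcasts correctly):
#
#     X = np.matrix(np.random.randn(10, 50))
#     vec, val, mu, M, Mcov = pca(X)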
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
# Reduced eigenvector matrix according to the highest eigenvalues (keeping the first 18, cf. the cumulative-variance figure above)
W = eigvec_total[:,0:18]
m_W, n_W = np.shape(W)
print 'Reduced Dimension Eigenvector Shape:',m_W, n_W
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
print 'Z-Score Shape:', m_Z, n_Z
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
print 'Transposed Projected Data Shape:', m_Y, n_Y
#Using PYMVPA
PCA_data = np.array(Y.T)
PCA_label_2 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Plush-Toy-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=1)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
print ds1.samples.shape
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
print error
print cvterr.confusion.asstring(description=False)
figure(1)
cvterr.confusion.plot(numbers='True',numbers_alpha=2)
#show()
# Variances
figure(2)
title('Variances of PCs')
stem(range(len(perc_total)),perc_total,'--b')
axis([-0.3,130.3,0,1.2])
grid('True')
show()
|
tapomayukh/projects_in_python
|
classification/Classification_with_kNN/Single_Contact_Classification/Time_Window/test10_cross_validate_objects_400ms.py
|
Python
|
mit
| 4,258
|
[
"Mayavi"
] |
c237c591be793cf87aab14899ae87c6f21f78d5e0cd80cc55fe7229ad7468a16
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import zip
from builtins import range
from builtins import object
import itertools as it
import warnings
import numpy as np
from scipy.special import gammaln
try:
from bottleneck import nansum, nanmedian
except ImportError:
from numpy import nansum
try:
from numpy import nanmedian
except ImportError:
from scipy.stats import nanmedian
from scipy.stats.mstats import mquantiles
from . import _motion as mc
import sima.motion.frame_align
import sima.misc
from sima.motion import MotionEstimationStrategy
np.seterr(invalid='ignore', divide='ignore')
def _parse_granularity(granularity):
if isinstance(granularity, int):
return (granularity, 1)
elif isinstance(granularity, str):
return {'frame': (0, 1),
'plane': (1, 1),
'row': (2, 1),
'column': (3, 1)}[granularity]
elif isinstance(granularity, tuple):
return granularity
else:
raise TypeError(
'granularity must be of type str, int, or tuple of int')
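# Examples (illustrative):
#   _parse_granularity('row')  -> (2, 1)   # one displacement per row
#   _parse_granularity(1)      -> (1, 1)   # one displacement per plane
#   _parse_granularity((2, 8)) -> (2, 8)   # one displacement per 8 rows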
def _pixel_distribution(dataset, tolerance=0.001, min_frames=1000):
"""Estimate the distribution of pixel intensities for each channel.
Parameters
----------
tolerance : float
The maximum relative error in the estimates that must be
achieved for termination.
min_frames: int
The minimum number of frames that must be evaluated before
termination.
Returns
-------
mean_est : array
Mean intensities of each channel.
    var_est : array
Variances of the intensity of each channel.
"""
# TODO: separate distributions for each plane
sums = np.zeros(dataset.frame_shape[-1]).astype(float)
sum_squares = np.zeros_like(sums)
counts = np.zeros_like(sums)
t = 0
for frame in it.chain.from_iterable(dataset):
for plane in frame:
if t > 0:
mean_est = sums / counts
var_est = (sum_squares / counts) - (mean_est ** 2)
if t > min_frames and np.all(
np.sqrt(var_est / counts) / mean_est < tolerance):
break
sums += np.nan_to_num(nansum(nansum(plane, axis=0), axis=0))
sum_squares += np.nan_to_num(
nansum(nansum(plane ** 2, axis=0), axis=0))
counts += np.isfinite(plane).sum(axis=0).sum(axis=0)
t += 1
assert np.all(mean_est > 0)
assert np.all(var_est > 0)
return mean_est, var_est
def _whole_frame_shifting(dataset, shifts):
"""Line up the data by the frame-shift estimates
Parameters
----------
shifts : array
DxT or DxTxP array with the estimated shifts for each frame/plane.
Returns
-------
reference : array
Time average of each channel after frame-by-frame alignment.
Size: (num_channels, num_rows, num_columns).
variances : array
Variance of each channel after frame-by-frame alignment.
Size: (num_channels, num_rows, num_columns)
offset : array
The displacement to add to each shift to align the minimal shift
with the edge of the corrected image.
"""
min_shifts = np.nanmin([np.nanmin(s.reshape(-1, s.shape[-1]), 0)
for s in shifts], 0)
assert np.all(min_shifts == 0)
max_shifts = np.nanmax([np.nanmax(s.reshape(-1, s.shape[-1]), 0)
for s in shifts], 0)
out_shape = list(dataset.frame_shape)
if len(min_shifts) == 2:
out_shape[1] += max_shifts[0] - min_shifts[0]
out_shape[2] += max_shifts[1] - min_shifts[1]
elif len(min_shifts) == 3:
for i in range(3):
out_shape[i] += max_shifts[i] - min_shifts[i]
else:
raise Exception
reference = np.zeros(out_shape)
sum_squares = np.zeros_like(reference)
count = np.zeros_like(reference)
for frame, shift in zip(it.chain.from_iterable(dataset),
it.chain.from_iterable(shifts)):
if shift.ndim == 1: # single shift for the whole volume
if any(x is np.ma.masked for x in shift):
continue
l = shift - min_shifts
h = shift + frame.shape[:-1]
reference[l[0]:h[0], l[1]:h[1], l[2]:h[2]] += np.nan_to_num(frame)
sum_squares[l[0]:h[0], l[1]:h[1], l[2]:h[2]] += np.nan_to_num(
frame ** 2)
count[l[0]:h[0], l[1]:h[1], l[2]:h[2]] += np.isfinite(frame)
else: # plane-specific shifts
for plane, p_shifts, ref, ssq, cnt in zip(
frame, shift, reference, sum_squares, count):
if any(x is np.ma.masked for x in p_shifts):
continue
                low = p_shifts - min_shifts  # TODO: NaN considerations
high = low + plane.shape[:-1]
ref[low[0]:high[0], low[1]:high[1]] += np.nan_to_num(plane)
ssq[low[0]:high[0], low[1]:high[1]] += np.nan_to_num(
plane ** 2)
cnt[low[0]:high[0], low[1]:high[1]] += np.isfinite(plane)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
reference /= count
assert np.all(np.isnan(reference[np.equal(count, 0)]))
variances = (sum_squares / count) - reference ** 2
assert not np.any(variances < 0)
return reference, variances
def _discrete_transition_prob(r, log_transition_probs, n):
"""Calculate the transition probability between two discrete position
states.
Parameters
----------
r : array
The location being transitioned to.
    log_transition_probs : function
        The continuous log transition probability function.
n : int
The number of partitions along each axis.
Returns
-------
float
The discrete transition probability between the two states.
"""
def _log_add(a, b):
"""Add two log probabilities to get a new log probability.
Returns log(exp(a) + exp(b))
"""
m = min(a, b)
M = max(a, b)
if M == -np.inf:
return -np.inf
return M + np.log(1. + np.exp(m - M))
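    # _log_add(a, b) is a numerically stable log(exp(a) + exp(b)); it is
    # equivalent to np.logaddexp(a, b).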
logp = - np.inf
for x in np.linspace(-1, 1, n + 2)[1:-1]:
for y in np.linspace(-1, 1, n + 2)[1:-1]:
if len(r) == 2:
logp = _log_add(log_transition_probs(r + np.array([y, x])) +
np.log(1 - abs(y)) + np.log(1 - abs(x)), logp)
else:
for z in np.linspace(-1, 1, n + 2)[1:-1]:
new_logp = _log_add(
log_transition_probs(r + np.array([z, y, x])) +
np.log(1 - abs(z)) + np.log(1 - abs(y)) +
np.log(1 - abs(x)), logp)
if not np.isnan(new_logp):
logp = new_logp
else:
raise Exception
return logp - len(r) * np.log(n)
def _threshold_gradient(im):
"""Indicate pixel locations with gradient below the bottom 10th percentile
Parameters
----------
im : array
The mean intensity images for each channel.
Size: (num_channels, num_rows, num_columns).
Returns
-------
array
Binary values indicating whether the magnitude of the gradient is below
the 10th percentile. Same size as im.
"""
if im.shape[0] > 1:
# Calculate directional relative derivatives
_, g_x, g_y = np.gradient(np.log(im))
else:
# Calculate directional relative derivatives
g_x, g_y = np.gradient(np.log(im[0]))
g_x = g_x.reshape([1, g_x.shape[0], g_x.shape[1]])
g_y = g_y.reshape([1, g_y.shape[0], g_y.shape[1]])
gradient_magnitudes = np.sqrt((g_x ** 2) + (g_y ** 2))
below_threshold = []
for chan in gradient_magnitudes:
threshold = mquantiles(chan[np.isfinite(chan)].flatten(), [0.1])[0]
below_threshold.append(chan < threshold)
return np.array(below_threshold)
def _initial_distribution(decay, noise_cov, mean_shift):
"""Get the initial distribution of the displacements."""
initial_cov = np.linalg.solve(np.diag([1, 1]) - decay * decay.T,
noise_cov.newbyteorder('>').byteswap())
for _ in range(1000):
initial_cov = decay * initial_cov * decay.T + noise_cov
# don't let C be singular
initial_cov[0, 0] = max(initial_cov[0, 0], 0.1)
initial_cov[1, 1] = max(initial_cov[1, 1], 0.1)
return lambda x: np.exp(
-0.5 * np.dot(
x - mean_shift, np.linalg.solve(initial_cov, x - mean_shift))
) / np.sqrt(2.0 * np.pi * np.linalg.det(initial_cov))
def _lookup_tables(position_bounds, log_markov_matrix):
"""Generate lookup tables to speed up the algorithm performance.
Parameters
----------
position_bounds : array of int
The minimum and maximum (+1) allowable coordinates.
    log_markov_matrix : array
        The log transition probabilities.
The log transition probabilities.
Returns
-------
position_tbl : array
Lookup table used to index each possible displacement.
transition_tbl : array
Lookup table used to find the indices of displacements to which
transitions can occur from the position.
log_markov_matrix_tbl : array
Lookup table used to find the transition probability of the transitions
from transition_tbl.
"""
position_tbl = np.array(
list(it.product(*[list(range(m, M))
for m, M in zip(*position_bounds)])),
dtype=int)
position_dict = {tuple(position): i
for i, position in enumerate(position_tbl)}
# create transition lookup and create lookup for transition probability
transition_tbl = []
log_markov_matrix_tbl = []
for step in it.product(
*[list(range(-s + 1, s)) for s in log_markov_matrix.shape]):
if len(step) == 2:
step = (0,) + step
tmp_tbl = []
for pos in position_tbl:
new_position = tuple(pos + np.array(step))
try:
tmp_tbl.append(position_dict[new_position])
except KeyError:
tmp_tbl.append(-1)
transition_tbl.append(tmp_tbl)
log_markov_matrix_tbl.append(
log_markov_matrix[tuple(abs(s) for s in step)])
transition_tbl = np.array(transition_tbl, dtype=int)
log_markov_matrix_tbl = np.fromiter(log_markov_matrix_tbl, dtype=float)
return position_tbl, transition_tbl, log_markov_matrix_tbl
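# Example (illustrative): with position_bounds = ([0, 0], [2, 3]), position_tbl
# enumerates the six displacements (0,0)..(1,2); transition_tbl[k][i] is the
# index of the state reached from state i by the k-th step, or -1 if that step
# leaves the allowed region, and log_markov_matrix_tbl[k] is its log probability.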
def _backtrace(start_idx, backpointer, states, position_tbl):
"""Perform the backtracing stop of the Viterbi algorithm.
Parameters
----------
start_idx : int
...
    Returns
    -------
trajectory : array
        The maximum a posteriori trajectory of displacements.
        Shape: (len(states), dim)
"""
T = len(states)
dim = len(position_tbl[0])
i = start_idx
trajectory = np.zeros([T, dim], dtype=int)
trajectory[-1] = position_tbl[states[-1][i]]
for t in range(T - 2, -1, -1):
# NOTE: backpointer index 0 corresponds to second timestep
i = backpointer[t][i]
trajectory[t] = position_tbl[states[t][i]]
return trajectory
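# Illustrative walk: states[t] holds the retained state indices at time t, and
# backpointer[t][i] gives the predecessor (at time t) of retained state i at
# time t + 1, so the loop above recovers the most probable displacement
# sequence from the chosen final state backwards.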
class _HiddenMarkov(MotionEstimationStrategy):
def __init__(self, granularity=2, num_states_retained=50,
max_displacement=None, n_processes=1, restarts=None,
verbose=True):
if isinstance(granularity, int) or isinstance(granularity, str):
granularity = (granularity, 1)
elif not isinstance(granularity, tuple):
raise TypeError(
'granularity must be of type str, int, or tuple')
if isinstance(granularity[0], str):
granularity = ({'frame': 0,
'plane': 1,
'row': 2,
'column': 3}[granularity[0]], granularity[1])
self._params = dict(locals())
del self._params['self']
def _neighbor_viterbi(
self, dataset, references, gains, movement_model,
min_displacements, max_displacements, pixel_means, pixel_variances,
max_step=1):
"""Estimate the MAP trajectory with the Viterbi Algorithm."""
assert references.ndim == 4
granularity = self._params['granularity']
scaled_refs = references / gains
displacement_tbl, transition_tbl, log_markov_tbl, = _lookup_tables(
[min_displacements, max_displacements + 1],
movement_model.log_transition_matrix(
max_distance=max_step,
dt=granularity[1] / np.prod(references.shape[:granularity[0]]))
)
assert displacement_tbl.dtype == int
tmp_states, log_p = movement_model.initial_probs(
displacement_tbl, min_displacements, max_displacements)
displacements = []
for i, sequence in enumerate(dataset):
if self._params['verbose']:
print('Estimating displacements for cycle ', i)
imdata = NormalizedIterator(sequence, gains, pixel_means,
pixel_variances, granularity)
positions = PositionIterator(sequence.shape[:-1], granularity)
restarts = self._params['restarts']
if restarts is not None:
restart_period = np.prod(
sequence.shape[(restarts+1):(granularity[0]+1)]
) // granularity[1]
else:
restart_period = None
disp = _beam_search(
imdata, positions,
it.repeat((transition_tbl, log_markov_tbl)), scaled_refs,
displacement_tbl, (tmp_states, log_p),
self._params['num_states_retained'], restart_period)
new_shape = sequence.shape[:granularity[0]] + \
(sequence.shape[granularity[0]] // granularity[1],) + \
(disp.shape[-1],)
displacements.append(np.repeat(disp.reshape(new_shape),
repeats=granularity[1],
axis=granularity[0]))
return displacements
def _estimate(self, dataset):
"""Estimate and save the displacements for the time series.
Parameters
----------
        dataset : sima.ImagingDataset
            The dataset for which to estimate the displacements.
Returns
-------
dict
The estimated displacements and partial results of motion
correction.
"""
params = self._params
if params['verbose']:
print('Estimating model parameters.')
shifts = self._estimate_shifts(dataset)
references, variances = _whole_frame_shifting(dataset, shifts)
if params['max_displacement'] is None:
max_displacement = np.array(dataset.frame_shape[:3]) // 2
else:
max_displacement = np.array(params['max_displacement'])
gains = nanmedian(
(variances / references).reshape(-1, references.shape[-1]))
if not (np.all(np.isfinite(gains)) and np.all(gains > 0)):
raise Exception('Failed to estimate positive gains')
pixel_means, pixel_variances = _pixel_distribution(dataset)
movement_model = MovementModel.estimate(shifts)
if shifts[0].shape[-1] == 2:
shifts = [np.concatenate([np.zeros(s.shape[:-1] + (1,), dtype=int),
s], axis=-1) for s in shifts]
min_shifts = np.nanmin([np.nanmin(s.reshape(-1, s.shape[-1]), 0)
for s in shifts], 0)
max_shifts = np.nanmax([np.nanmax(s.reshape(-1, s.shape[-1]), 0)
for s in shifts], 0)
# add a bit of extra room to move around
if max_displacement.size == 2:
max_displacement = np.hstack(([0], max_displacement))
extra_buffer = ((max_displacement - max_shifts + min_shifts) // 2
).astype(int)
min_displacements = min_shifts - extra_buffer
max_displacements = max_shifts + extra_buffer
displacements = self._neighbor_viterbi(
dataset, references, gains, movement_model, min_displacements,
max_displacements, pixel_means, pixel_variances)
return self._post_process(displacements)
def _post_process(self, displacements):
return displacements
class HiddenMarkov2D(_HiddenMarkov):
"""
Hidden Markov model (HMM) in two dimensions.
Parameters
----------
granularity : int, str, or tuple, optional
The granularity of the calculated displacements. A separate
displacement can be calculated for each frame (granularity=0
or granularity='frame'), each plane (1 or 'plane'), each
        row (2 or 'row'), or column (3 or 'column'). Additionally, a separate
displacement can be calculated for every n consecutive elements
(e.g. granularity=('row', 8) for every 8 rows).
Defaults to one displacement per row.
num_states_retained : int, optional
Number of states to retain at each time step of the HMM.
Defaults to 50.
max_displacement : array of int, optional
The maximum allowed displacement magnitudes in [y,x]. By
default, arbitrarily large displacements are allowed.
n_processes : int, optional
Number of pool processes to spawn to parallelize frame alignment.
Defaults to 1.
restarts : int, optional
How often to reinitialize the hidden Markov model. This can be useful
if there are long breaks between frames or planes. Parameter values of
0 or 1 reinitialize the hidden states every frame or plane,
respectively. By default, the hidden distribution of positions is
never reinitialized during the sequence.
verbose : bool, optional
Whether to print information about progress.
References
----------
* Dombeck et al. 2007. Neuron. 56(1): 43-57.
* Kaifosh et al. 2013. Nature Neuroscience. 16(9): 1182-4.
"""
def _estimate_shifts(self, dataset):
return sima.motion.frame_align.PlaneTranslation2D(
self._params['max_displacement'],
n_processes=self._params['n_processes']).estimate(dataset)
def _post_process(self, displacements):
return [d[..., 1:] for d in displacements]
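# A minimal usage sketch (illustrative; file names are hypothetical). It assumes
# the standard SIMA workflow in which a strategy's ``correct`` method is applied
# to a list of sequences:
#
#     import sima
#     import sima.motion
#     seq = sima.Sequence.create('TIFF', 'example.tif')
#     strategy = sima.motion.HiddenMarkov2D(granularity='row',
#                                           max_displacement=[20, 30])
#     dataset = strategy.correct([seq], 'example_mc.sima')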
class MovementModel(object):
"""
Attributes
----------
mean_shift : array of int
The mean of the whole-frame displacement estimates
"""
def __init__(self, cov_matrix, U, s, mean_shift):
if not np.all(np.isfinite(cov_matrix)):
raise ValueError
assert np.linalg.det(cov_matrix) > 0
self._cov_matrix = cov_matrix
self._U = U
self._s = s
self.mean_shift = mean_shift
@classmethod
def estimate(cls, shifts, times=None):
"""Estimate the movement model from displacements.
Parameters
----------
shifts : list of ndarray
The shape of the ndarray may vary depending on whether
displacements are estimated per volume, per plane, per row, etc.
"""
# TODO: add mean value at boundaries to eliminate boundary effects
# between cycles
shifts = np.concatenate(shifts).reshape(-1, shifts[0].shape[-1])
        if shifts.shape[1] not in (2, 3):
raise ValueError
mean_shift = np.nanmean(shifts, axis=0)
assert len(mean_shift) == shifts.shape[1]
centered_shifts = np.nan_to_num(shifts - mean_shift)
past = centered_shifts[:-1]
future = centered_shifts[1:]
past_future = np.dot(past.T, future)
past_past = np.dot(past.T, past)
idx = 0
D = shifts.shape[1]
n = D * (D + 1) // 2
y = np.zeros(n)
M = np.zeros((n, n))
for i in range(D): # loop over the dimensions of motion
for j in range(i + 1): # loop over all pairs of dimension
y[idx] = past_future[i, j] + past_future[j, i]
idx_2 = 0
for k in range(D):
for l in range(k + 1):
if k == i:
M[idx, idx_2] += past_past[j, l]
elif l == i:
M[idx, idx_2] += past_past[j, k]
if k == j:
M[idx, idx_2] += past_past[i, l]
elif l == j:
M[idx, idx_2] += past_past[i, k]
idx_2 += 1
idx += 1
coefficients = np.dot(np.linalg.pinv(M), y)
if D == 2:
A = np.array([[coefficients[0], coefficients[1]],
[coefficients[1], coefficients[2]]])
if D == 3:
A = np.array([[coefficients[0], coefficients[1], coefficients[3]],
[coefficients[1], coefficients[2], coefficients[4]],
[coefficients[3], coefficients[4], coefficients[5]]])
cov_matrix = np.cov(future.T - np.dot(A, past.T))
# make cov_matrix non-singular
Uc, sc, _ = np.linalg.svd(cov_matrix) # NOTE: U == V
sc = np.maximum(sc, 1. / len(shifts))
cov_matrix = np.dot(Uc, np.dot(np.diag(sc), Uc))
assert np.linalg.det(cov_matrix) > 0
U, s, _ = np.linalg.svd(A) # NOTE: U == V for positive definite A
s = np.minimum(s, 1.) # Don't allow negative decay, i.e. growth
return cls(cov_matrix, U, s, mean_shift)
def decay_matrix(self, dt=1.):
"""
Parameters
        ----------
dt : float
Returns
-------
mov_decay : array
The per-line decay-term in the AR(1) motion model
"""
        # reconstruct the decay matrix from its SVD factors, scaling the
        # singular values by the time step
        decay_matrix = np.dot(self._U, np.dot(np.diag(self._s ** dt), self._U))
if not np.all(np.isfinite(decay_matrix)):
raise Exception
return decay_matrix
def cov_matrix(self, dt=1.):
"""
Parameters
        ----------
dt : float
Returns
-------
mov_cov : array
The per-line covariance-term in the AR(1) motion model
"""
return self._cov_matrix * dt
def log_transition_matrix(self, max_distance=1, dt=1.):
"""
Gaussian Transition Probabilities
Parameters
----------
max_distance : int
dt : float
"""
cov_matrix = self.cov_matrix(dt)
assert np.linalg.det(cov_matrix) > 0
def log_transition_probs(x):
return -0.5 * (np.log(2 * np.pi * np.linalg.det(cov_matrix)) +
np.dot(x, np.linalg.solve(cov_matrix, x)))
log_transition_matrix = -np.inf * np.ones(
[max_distance + 1] * len(cov_matrix))
for disp in it.product(
*([list(range(max_distance + 1))] * len(cov_matrix))):
log_transition_matrix[disp] = _discrete_transition_prob(
disp, log_transition_probs, 20)
assert np.all(np.isfinite(log_transition_matrix))
if log_transition_matrix.ndim == 2:
log_transition_matrix = np.expand_dims(log_transition_matrix, 0)
return log_transition_matrix
def _initial_distribution(self):
"""Get the initial distribution of the displacements."""
decay = self.decay_matrix()
noise_cov = self.cov_matrix()
initial_cov = np.linalg.solve(
np.diag(np.ones(len(decay))) - decay * decay.T,
noise_cov.newbyteorder('>').byteswap())
for _ in range(1000):
initial_cov = decay * initial_cov * decay.T + noise_cov
# don't let C be singular
for i in range(len(initial_cov)):
initial_cov[i, i] = max(initial_cov[i, i], 0.1)
def idist(x):
if len(x) == 3 and len(initial_cov) == 2:
x = x[1:]
return np.exp(
-0.5 * np.dot(x - self.mean_shift,
np.linalg.solve(initial_cov, x - self.mean_shift)
)
) / np.sqrt(2.0 * np.pi * np.linalg.det(initial_cov))
assert np.isfinite(idist(self.mean_shift))
return idist
def initial_probs(self, displacement_tbl, min_displacements,
max_displacements):
"""Give the initial probabilities for a displacement table"""
initial_dist = self._initial_distribution()
states = []
log_p = []
for index, position in enumerate(displacement_tbl): # TODO parallelize
# check that the displacement is allowable
if np.all(min_displacements <= position) and np.all(
position <= max_displacements):
states.append(index)
# probability of initial displacement
log_p.append(np.log(initial_dist(position)))
if not np.any(np.isfinite(log_p)):
raise Exception
return np.array(states, dtype='int'), np.array(log_p)
class PositionIterator(object):
"""Position iterator
Parameters
----------
shape : tuple of int
(times, planes, rows, columns)
granularity
offset : tuple of int
(z, y, x) or (y, x)
Examples
--------
>>> from sima.motion.hmm import PositionIterator
>>> pi = PositionIterator((100, 5, 128, 256), 'frame')
>>> positions = next(iter(pi))
>>> positions.shape == (163840, 3)
True
>>> pi = PositionIterator((100, 5, 128, 256), 'plane')
>>> positions = next(iter(pi))
>>> positions.shape == (32768, 3)
True
Group two rows at a time
>>> pi = PositionIterator((100, 5, 128, 256), (2, 2), [10, 12])
>>> positions = next(iter(pi))
>>> positions.shape == (512, 3)
True
>>> pi = PositionIterator((100, 5, 128, 256), 'column', [3, 10, 12])
>>> positions = next(iter(pi))
"""
def __init__(self, shape, granularity, offset=None):
self.granularity = _parse_granularity(granularity)
self.shape = shape
if self.shape[self.granularity[0]] % self.granularity[1] != 0:
raise ValueError('granularity[1] must divide the frame shape '
'along dimension granularity[0]')
if offset is None:
self.offset = [0, 0, 0, 0]
else:
self.offset = ([0, 0, 0, 0] + list(offset))[-4:]
def __iter__(self):
shape = self.shape
granularity = self.granularity
offset = self.offset
def out(group):
"""Calculate a single iteration output"""
return np.array(list(it.chain.from_iterable(
(base + s for s in it.product(
*[range(o, o + x) for x, o in
zip(shape[(granularity[0] + 1):],
offset[(granularity[0] + 1):])]))
for base in group)))
if granularity[0] > 0 or granularity[1] == 1:
def cycle():
"""Iterator that produces one period/period of the output."""
base_iter = it.product(*[list(range(o, x + o)) for x, o in
zip(shape[1:(granularity[0] + 1)],
offset[1:(granularity[0] + 1)])])
for group in zip(*[base_iter] * granularity[1]):
yield out(group)
for positions in it.cycle(cycle()):
yield positions
else:
base_iter = it.product(*[list(range(o, x + o)) for x, o in
zip(shape[:(granularity[0] + 1)],
offset[:(granularity[0] + 1)])])
for group in zip(*[base_iter] * granularity[1]):
yield out([b[1:] for b in group])
def _beam_search(imdata, positions, transitions, references, state_table,
initial_dist, num_retained=50, restart_period=None):
"""Perform a beam search (modified Viterbi algorithm).
Parameters
----------
imdata : iterator of ndarray
The imaging data for each time step.
positions : iterator
The acquisition positions (e.g. position of scan-head) corresponding
to the imdata.
transitions : iterator of tuple ()
references : ndarray
state_table : ndarray
initial_dist : tuple
num_retained : int
"""
if state_table.shape[1] != 3:
raise ValueError
log_references = np.log(references)
backpointer = []
states = []
states.append(initial_dist[0])
log_p_old = initial_dist[1]
estimates = []
assert np.any(np.isfinite(log_p_old))
t = 0
for data, pos, trans in zip(imdata, positions, transitions):
transition_table, log_transition_probs = trans
tmp_states, log_p, tmp_backpointer = mc.transitions(
states[-1], log_transition_probs, log_p_old, state_table,
transition_table)
obs, log_obs_fac, log_obs_p = data
assert len(obs) == len(pos)
mc.log_observation_probabilities_generalized(
log_p, tmp_states, obs, log_obs_p, log_obs_fac,
references, log_references, pos, state_table)
if np.any(np.isfinite(log_p)):
log_p[np.isnan(log_p)] = -np.Inf # Remove NaNs to sort.
ix = np.argsort(-log_p)[0:num_retained] # Keep likely states.
states.append(tmp_states[ix])
log_p_old = log_p[ix] - log_p[ix[0]]
backpointer.append(tmp_backpointer[ix])
else:
# If none of the observation probabilities are finite,
# then use states from the previous timestep.
warnings.warn('No finite observation probabilities.')
states.append(states[-1])
backpointer.append(np.arange(num_retained))
# reinitialize if necessary
t += 1
if restart_period is not None and (t % restart_period) == 0:
end_state_idx = np.argmax(log_p_old)
estimates.append(_backtrace(end_state_idx, backpointer[1:],
states[1:], state_table))
states = [initial_dist[0]]
log_p_old = initial_dist[1]
if len(states) > 1:
end_state_idx = np.argmax(log_p_old)
estimates.append(_backtrace(end_state_idx, backpointer[1:],
states[1:], state_table))
return np.concatenate(estimates, axis=0)
class HiddenMarkov3D(_HiddenMarkov):
"""
Hidden Markov model (HMM) with displacements in three dimensions.
Parameters
----------
granularity : int, str, or tuple, optional
The granularity of the calculated displacements. A separate
displacement can be calculated for each frame (granularity=0
or granularity='frame'), each plane (1 or 'plane'), each
        row (2 or 'row'), or column (3 or 'column'). Additionally, a separate
displacement can be calculated for every n consecutive elements
        (e.g. granularity=('row', 8) for every 8 rows).
Defaults to one displacement per row.
num_states_retained : int, optional
Number of states to retain at each time step of the HMM.
Defaults to 50.
max_displacement : array of int, optional
        The maximum allowed displacement magnitudes in [z, y, x]. By
default, arbitrarily large displacements are allowed.
n_processes : int, optional
Number of pool processes to spawn to parallelize frame alignment.
Defaults to 1.
restarts : int, optional
How often to reinitialize the hidden Markov model. This can be useful
if there are long breaks between frames or planes. Parameter values of
0 or 1 reinitialize the hidden states every frame or plane,
        respectively. By default, the hidden distribution of positions is never
reinitialized during the sequence.
verbose : bool, optional
Whether to print information about progress.
References
----------
* Dombeck et al. 2007. Neuron. 56(1): 43-57.
* Kaifosh et al. 2013. Nature Neuroscience. 16(9): 1182-4.
"""
def _estimate_shifts(self, dataset):
shifts = sima.motion.frame_align.VolumeTranslation(
self._params['max_displacement'], criterion=2.5).estimate(dataset)
        assert all(np.all(s >= 0) for s in shifts)
return shifts
class NormalizedIterator(object):
"""Generator of preprocessed frames for efficient computation.
Parameters
----------
sequence : sima.Sequence
gains : array
The photon-to-intensity gains for each channel.
pixel_means : array
The mean pixel intensities for each channel.
pixel_variances : array
The pixel intensity variance for each channel.
granularity : tuple of int
Yields
------
im : list of array
The estimated photon counts for each channel.
log_im_fac : list of array
The logarithm of the factorial of the photon counts in im.
log_im_p: list of array
The log likelihood of observing each pixel intensity (without
spatial information).
Examples
--------
Plane-wise iteration
>>> from sima.motion.hmm import NormalizedIterator
>>> it = NormalizedIterator(
... np.ones((100, 10, 6, 5, 2)), np.ones(2), np.ones(2),
... np.ones(2), 'plane')
>>> next(iter(it))[0].shape == (30, 2)
True
Row-wise iteration:
>>> it = NormalizedIterator(
... np.ones((100, 10, 6, 5, 2)), np.ones(2), np.ones(2),
... np.ones(2), 'row')
>>> next(iter(it))[0].shape == (5, 2)
True
"""
def __init__(self, sequence, gains, pixel_means, pixel_variances,
granularity):
self.sequence = sequence
self.gains = gains
self.pixel_means = pixel_means
self.pixel_variances = pixel_variances
self.granularity = _parse_granularity(granularity)
def __iter__(self):
means = self.pixel_means / self.gains
variances = self.pixel_variances / self.gains ** 2
for frame in self.sequence:
frame = frame.reshape(
int(np.prod(frame.shape[:self.granularity[0]])),
-1, frame.shape[-1])
for chunk in zip(*[iter(frame)] * self.granularity[1]):
im = np.concatenate(chunk, axis=0) / self.gains
# replace NaN pixels with the mean value for the channel
for ch_idx, ch_mean in enumerate(means):
im_nans = np.isnan(im[..., ch_idx])
im[..., ch_idx][im_nans] = ch_mean
                assert np.all(np.isfinite(im))
log_im_fac = gammaln(im + 1) # take the log of the factorial
# probability of observing the pixels (ignoring reference)
log_im_p = -(im - means) ** 2 / (2 * variances) \
- 0.5 * np.log(2. * np.pi * variances)
                assert np.all(np.isfinite(log_im_fac))
                assert np.all(np.isfinite(log_im_p))
yield im, log_im_fac, log_im_p
|
vjlbym/sima
|
sima/motion/hmm.py
|
Python
|
gpl-2.0
| 35,739
|
[
"Gaussian",
"NEURON"
] |
305fc0b149d64c327cbc144697f5434161295bf2cda0fc3fce96a150b2509286
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Renaming column for 'DenominatorPart.data' to match new field type.
db.rename_column(u'profiles_denominatorpart', 'data', 'data_id')
# Changing field 'DenominatorPart.data'
db.alter_column(u'profiles_denominatorpart', 'data_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.DataFile'], null=True))
# Adding index on 'DenominatorPart', fields ['data']
db.create_index(u'profiles_denominatorpart', ['data_id'])
# Renaming column for 'IndicatorPart.data' to match new field type.
db.rename_column(u'profiles_indicatorpart', 'data', 'data_id')
# Changing field 'IndicatorPart.data'
db.alter_column(u'profiles_indicatorpart', 'data_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.DataFile'], null=True))
# Adding index on 'IndicatorPart', fields ['data']
db.create_index(u'profiles_indicatorpart', ['data_id'])
def backwards(self, orm):
# Removing index on 'IndicatorPart', fields ['data']
db.delete_index(u'profiles_indicatorpart', ['data_id'])
# Removing index on 'DenominatorPart', fields ['data']
db.delete_index(u'profiles_denominatorpart', ['data_id'])
# Renaming column for 'DenominatorPart.data' to match new field type.
db.rename_column(u'profiles_denominatorpart', 'data_id', 'data')
# Changing field 'DenominatorPart.data'
db.alter_column(u'profiles_denominatorpart', 'data', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
# Renaming column for 'IndicatorPart.data' to match new field type.
db.rename_column(u'profiles_indicatorpart', 'data_id', 'data')
# Changing field 'IndicatorPart.data'
db.alter_column(u'profiles_indicatorpart', 'data', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 5, 8, 16, 2, 40, 999229)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 5, 8, 16, 2, 40, 998388)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'maps.shapefile': {
'Meta': {'object_name': 'ShapeFile'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'geo_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geo_meta_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'geom_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'shape_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'zoom_threshold': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
u'profiles.customvalue': {
'Meta': {'object_name': 'CustomValue'},
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_value': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'supress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_operator': ('django.db.models.fields.CharField', [], {'max_length': "'255'"})
},
u'profiles.datadomain': {
'Meta': {'ordering': "['weight']", 'object_name': 'DataDomain'},
'group': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Group']", 'through': u"orm['profiles.DataDomainIndex']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Indicator']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'subdomain_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subdomains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'symmetrical': 'False', 'blank': 'True'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.datadomainindex': {
'Meta': {'ordering': "['order']", 'object_name': 'DataDomainIndex'},
'dataDomain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'profiles.datafile': {
'Meta': {'object_name': 'DataFile'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'profiles.datapoint': {
'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'DataPoint'},
'change_from_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_from'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
'change_to_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_to'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']", 'null': 'True'})
},
u'profiles.datasource': {
'Meta': {'object_name': 'DataSource'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'profiles.denominator': {
'Meta': {'object_name': 'Denominator'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'multiplier': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'sort': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'table_label': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'profiles.denominatorpart': {
'Meta': {'object_name': 'DenominatorPart'},
'data': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataFile']", 'null': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.IndicatorPart']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'profiles.flatvalue': {
'Meta': {'object_name': 'FlatValue'},
'display_title': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'f_moe': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_number': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_numerator': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_numerator_moe': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_percent': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'geography': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'geography_geo_key': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': "'255'", 'db_index': 'True'}),
'geography_name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'geography_slug': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'geometry_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'indicator_slug': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'numerator': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'numerator_moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'time_key': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'value_type': ('django.db.models.fields.CharField', [], {'max_length': "'100'"})
},
u'profiles.geolevel': {
'Meta': {'ordering': "['summary_level']", 'object_name': 'GeoLevel'},
'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataSource']", 'symmetrical': 'False', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'shapefile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maps.ShapeFile']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'}),
'summary_level': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'profiles.georecord': {
'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geo_id_segments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geo_searchable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']"}),
'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'})
},
u'profiles.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'domain': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'domain_index'", 'symmetrical': 'False', 'through': u"orm['profiles.DataDomainIndex']", 'to': u"orm['profiles.DataDomain']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Indicator']", 'through': u"orm['profiles.GroupIndex']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'profiles.groupindex': {
'Meta': {'ordering': "['name']", 'object_name': 'GroupIndex'},
'groups': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups'", 'to': u"orm['profiles.Indicator']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'profiles.indicator': {
'Meta': {'ordering': "['name']", 'object_name': 'Indicator'},
'data_as_of': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'data_domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Group']", 'through': u"orm['profiles.GroupIndex']", 'symmetrical': 'False'}),
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'display_distribution': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator_tasks': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'ind_tasks'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['profiles.IndicatorTask']"}),
'last_generated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_modified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'next_update_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'default': "'U.S. Census Bureau'", 'max_length': '300', 'blank': 'True'}),
'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
},
u'profiles.indicatordomain': {
'Meta': {'object_name': 'IndicatorDomain'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.indicatorpart': {
'Meta': {'object_name': 'IndicatorPart'},
'data': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataFile']", 'null': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']"})
},
u'profiles.indicatortask': {
'Meta': {'object_name': 'IndicatorTask'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']", 'null': 'True', 'blank': 'True'}),
'task_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'profiles.legendoption': {
'Meta': {'object_name': 'LegendOption'},
'bin_options': ('django.db.models.fields.TextField', [], {'default': "''"}),
'bin_type': ('django.db.models.fields.CharField', [], {'default': "'jenks'", 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.precalculatedvalue': {
'Meta': {'object_name': 'PrecalculatedValue'},
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'geo_record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'table': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'profiles.taskstatus': {
'Meta': {'object_name': 'TaskStatus'},
'error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
't_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'task': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'profiles.time': {
'Meta': {'ordering': "['name']", 'object_name': 'Time'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
},
u'profiles.value': {
'Meta': {'object_name': 'Value'},
'datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataPoint']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
}
}
complete_apps = ['profiles']
|
ProvidencePlan/Profiles
|
communityprofiles/profiles/oldmigrations/0005_auto__chg_field_denominatorpart_data__chg_field_indicatorpart_data.py
|
Python
|
mit
| 28,575
|
[
"MOE"
] |
3df81f605bd62bedb4ce63e398d174df64f830072ea5c7dc1213e0eef1e537d1
|
"""
There are ways to measure the quality of a separated source without
requiring ground truth. These functions operate on the output of
clustering-based separation algorithms and work by analyzing
the clusterability of the feature space used to generate the
separated sources.
"""
from sklearn.metrics import silhouette_samples
import numpy as np
from .cluster import KMeans, GaussianMixture
from scipy.special import logsumexp
from .train import loss
import torch
def softmax(x, axis=None):
return np.exp(x - logsumexp(x, axis=axis, keepdims=True))
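# Numerically stable softmax: subtracting logsumexp before exponentiating
# avoids overflow; e.g. softmax(np.array([1., 2., 3.])) ~ [0.090, 0.245, 0.665].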
def jensen_shannon_divergence(gmm_p, gmm_q, n_samples=10**5):
"""
    Compute the Jensen-Shannon (JS) divergence between two Gaussian Mixture Models
    via sampling. JS divergence is a symmetrized, smoothed variant of the
    Kullback-Leibler divergence. It has no closed form in general for GMMs, so we
    estimate it by sampling.
Args:
gmm_p (GaussianMixture): A GaussianMixture class fit to some data.
gmm_q (GaussianMixture): Another GaussianMixture class fit to some data.
n_samples (int): Number of samples to use to estimate JS divergence.
Returns:
JS divergence between gmm_p and gmm_q
"""
X = gmm_p.sample(n_samples)[0]
log_p_X = gmm_p.score_samples(X)
log_q_X = gmm_q.score_samples(X)
log_mix_X = np.logaddexp(log_p_X, log_q_X)
Y = gmm_q.sample(n_samples)[0]
log_p_Y = gmm_p.score_samples(Y)
log_q_Y = gmm_q.score_samples(Y)
log_mix_Y = np.logaddexp(log_p_Y, log_q_Y)
return (log_p_X.mean() - (log_mix_X.mean() - np.log(2))
+ log_q_Y.mean() - (log_mix_Y.mean() - np.log(2))) / 2
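# Sanity check (illustrative): two GMMs fit to the same data give a Monte Carlo
# estimate near 0, while well-separated models approach the JS upper bound of
# log(2) nats.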
def _get_loud_bins_mask(threshold, audio_signal=None, representation=None):
if representation is None:
representation = np.abs(audio_signal.stft())
threshold = np.percentile(representation, threshold)
mask = representation > threshold
return mask, representation
def jensen_shannon_confidence(audio_signal, features, num_sources, threshold=95,
n_samples=10**5, **kwargs):
"""
Calculates the clusterability of a space by comparing a K-cluster GMM
with a 1-cluster GMM on the same features. This function fits two
GMMs to all of the points that are above the specified threshold (defaults
    to 95, i.e. the 95th percentile of all the data). This saves computation
    time and focuses the confidence measure on the louder, more perceptually
    important points.
References:
Seetharaman, Prem, Gordon Wichern, Jonathan Le Roux, and Bryan Pardo.
“Bootstrapping Single-Channel Source Separation via Unsupervised Spatial
Clustering on Stereo Mixtures”. 44th International Conference on Acoustics,
Speech, and Signal Processing, Brighton, UK, May, 2019
Seetharaman, Prem. Bootstrapping the Learning Process for Computer Audition.
Diss. Northwestern University, 2019.
Args:
audio_signal (AudioSignal): AudioSignal object which will be used to compute
the mask over which to compute the confidence measure. This can be None, if
and only if ``representation`` is passed as a keyword argument to this
function.
features (np.ndarray): Numpy array containing the features to be clustered.
Should have the same dimensions as the representation.
        num_sources (int): Number of sources to cluster the features into.
threshold (int, optional): Threshold by loudness. Points below the threshold are
excluded from being used in the confidence measure. Defaults to 95.
kwargs: Keyword arguments to `_get_loud_bins_mask`. Namely, representation can
go here as a keyword argument.
Returns:
float: Confidence given by Jensen-Shannon divergence.
"""
mask, _ = _get_loud_bins_mask(threshold, audio_signal, **kwargs)
embedding_size = features.shape[-1]
features = features[mask].reshape(-1, embedding_size)
one_component_gmm = GaussianMixture(1)
n_component_gmm = GaussianMixture(num_sources)
one_component_gmm.fit(features)
n_component_gmm.fit(features)
confidence = jensen_shannon_divergence(
one_component_gmm, n_component_gmm, n_samples=n_samples)
return confidence
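# Hedged usage sketch (hypothetical; not part of the original module). Because
# `representation` can be passed as a keyword argument, the confidence
# measures can be exercised without an AudioSignal:
def _demo_jensen_shannon_confidence():
    rng = np.random.RandomState(0)
    representation = rng.rand(100, 50)       # stand-in magnitude spectrogram
    features = rng.randn(100, 50, 20)        # stand-in 20-dim embeddings
    return jensen_shannon_confidence(
        None, features, num_sources=2, threshold=95,
        representation=representation)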
def posterior_confidence(audio_signal, features, num_sources, threshold=95,
**kwargs):
"""
Calculates the clusterability of an embedding space by looking at the
strength of the assignments of each point to a specific cluster. The
    more points that are "in between" clusters (e.g. no strong assignment),
the lower the clusterability.
References:
Seetharaman, Prem, Gordon Wichern, Jonathan Le Roux, and Bryan Pardo.
“Bootstrapping Single-Channel Source Separation via Unsupervised Spatial
Clustering on Stereo Mixtures”. 44th International Conference on Acoustics,
Speech, and Signal Processing, Brighton, UK, May, 2019
Seetharaman, Prem. Bootstrapping the Learning Process for Computer Audition.
Diss. Northwestern University, 2019.
Args:
audio_signal (AudioSignal): AudioSignal object which will be used to compute
the mask over which to compute the confidence measure. This can be None, if
and only if ``representation`` is passed as a keyword argument to this
function.
features (np.ndarray): Numpy array containing the features to be clustered.
Should have the same dimensions as the representation.
        num_sources (int): Number of sources to cluster the features into.
threshold (int, optional): Threshold by loudness. Points below the threshold are
excluded from being used in the confidence measure. Defaults to 95.
kwargs: Keyword arguments to `_get_loud_bins_mask`. Namely, representation can
go here as a keyword argument.
Returns:
float: Confidence given by posteriors.
"""
mask, _ = _get_loud_bins_mask(threshold, audio_signal, **kwargs)
embedding_size = features.shape[-1]
features = features[mask].reshape(-1, embedding_size)
kmeans = KMeans(num_sources)
distances = kmeans.fit_transform(features)
confidence = softmax(-distances, axis=-1)
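    # The maximum posterior lies in [1/num_sources, 1]; the affine rescaling
    # below maps that interval onto [0, 1], so uniform (uncertain) assignments
    # give confidence 0 and one-hot assignments give confidence 1.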
confidence = (
(num_sources * np.max(confidence, axis=-1) - 1) /
(num_sources - 1)
)
return confidence.mean()
def silhouette_confidence(audio_signal, features, num_sources, threshold=95,
max_points=1000, **kwargs):
"""
Uses the silhouette score to compute the clusterability of the feature space.
The Silhouette Coefficient is calculated using the
mean intra-cluster distance (a) and the mean nearest-cluster distance (b)
for each sample. The Silhouette Coefficient for a sample is (b - a) / max(a, b).
To clarify, b is the distance between a sample and the nearest cluster
    that the sample is not a part of. Note that the Silhouette Coefficient is
    only defined when the number of labels satisfies 2 <= n_labels <= n_samples - 1.
References:
Seetharaman, Prem. Bootstrapping the Learning Process for Computer Audition.
Diss. Northwestern University, 2019.
Peter J. Rousseeuw (1987). “Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis”. Computational and
Applied Mathematics 20: 53-65.
Args:
audio_signal (AudioSignal): AudioSignal object which will be used to compute
the mask over which to compute the confidence measure. This can be None, if
and only if ``representation`` is passed as a keyword argument to this
function.
features (np.ndarray): Numpy array containing the features to be clustered.
Should have the same dimensions as the representation.
        num_sources (int): Number of sources to cluster the features into.
threshold (int, optional): Threshold by loudness. Points below the threshold are
excluded from being used in the confidence measure. Defaults to 95.
kwargs: Keyword arguments to `_get_loud_bins_mask`. Namely, representation can
go here as a keyword argument.
max_points (int, optional): Maximum number of points to compute the Silhouette
score for. Silhouette score is a costly operation. Defaults to 1000.
Returns:
float: Confidence given by Silhouette score.
"""
mask, _ = _get_loud_bins_mask(threshold, audio_signal, **kwargs)
embedding_size = features.shape[-1]
features = features[mask].reshape(-1, embedding_size)
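    # silhouette_samples is O(n^2) in the number of points, so uniformly
    # subsample the masked features when there are more than max_points.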
if features.shape[0] > max_points:
idx = np.random.choice(
np.arange(features.shape[0]), max_points,
replace=False)
features = features[idx]
kmeans = KMeans(num_sources)
labels = kmeans.fit_predict(features)
confidence = silhouette_samples(features, labels)
return confidence.mean()
def loudness_confidence(audio_signal, features, num_sources, threshold=95,
**kwargs):
"""
    Computes the clusterability of the feature space by comparing the relative
    sizes of the clusters: the confidence is the fraction of points assigned
    to the smallest cluster.
References:
Seetharaman, Prem, Gordon Wichern, Jonathan Le Roux, and Bryan Pardo.
“Bootstrapping Single-Channel Source Separation via Unsupervised Spatial
Clustering on Stereo Mixtures”. 44th International Conference on Acoustics,
Speech, and Signal Processing, Brighton, UK, May, 2019
Seetharaman, Prem. Bootstrapping the Learning Process for Computer Audition.
Diss. Northwestern University, 2019.
Args:
audio_signal (AudioSignal): AudioSignal object which will be used to compute
the mask over which to compute the confidence measure. This can be None, if
and only if ``representation`` is passed as a keyword argument to this
function.
features (np.ndarray): Numpy array containing the features to be clustered.
Should have the same dimensions as the representation.
        num_sources (int): Number of sources to cluster the features into.
threshold (int, optional): Threshold by loudness. Points below the threshold are
excluded from being used in the confidence measure. Defaults to 95.
kwargs: Keyword arguments to `_get_loud_bins_mask`. Namely, representation can
go here as a keyword argument.
Returns:
float: Confidence given by size of smallest cluster.
"""
mask, _ = _get_loud_bins_mask(threshold, audio_signal, **kwargs)
embedding_size = features.shape[-1]
features = features[mask].reshape(-1, embedding_size)
kmeans = KMeans(num_sources)
labels = kmeans.fit_predict(features)
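    # Fraction of points assigned to each cluster; the confidence is the share
    # of the smallest cluster (balanced clusters -> high confidence, a
    # near-empty cluster -> low confidence).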
source_shares = np.array(
[(labels == i).sum() for i in range(num_sources)]
).astype(float)
source_shares *= (1 / source_shares.sum())
confidence = source_shares.min()
return confidence
def whitened_kmeans_confidence(audio_signal, features, num_sources, threshold=95,
**kwargs):
"""
Computes the clusterability in two steps:
1. Cluster the feature space using KMeans into assignments
2. Compute the Whitened K-Means loss between the features and the assignments.
Args:
audio_signal (AudioSignal): AudioSignal object which will be used to compute
the mask over which to compute the confidence measure. This can be None, if
and only if ``representation`` is passed as a keyword argument to this
function.
features (np.ndarray): Numpy array containing the features to be clustered.
Should have the same dimensions as the representation.
        num_sources (int): Number of sources to cluster the features into.
threshold (int, optional): Threshold by loudness. Points below the threshold are
excluded from being used in the confidence measure. Defaults to 95.
kwargs: Keyword arguments to `_get_loud_bins_mask`. Namely, representation can
go here as a keyword argument.
Returns:
float: Confidence given by whitened k-means loss.
"""
mask, representation = _get_loud_bins_mask(threshold, audio_signal, **kwargs)
embedding_size = features.shape[-1]
features = features[mask].reshape(-1, embedding_size)
weights = representation[mask].reshape(-1)
kmeans = KMeans(num_sources)
distances = kmeans.fit_transform(features)
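    # One-hot assignments: each point is assigned to its nearest centroid
    # (minimum distance to the cluster center).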
    assignments = (distances == distances.min(axis=-1, keepdims=True))
loss_func = loss.WhitenedKMeansLoss()
features = torch.from_numpy(features).unsqueeze(0).float()
assignments = torch.from_numpy(assignments).unsqueeze(0).float()
weights = torch.from_numpy(weights).unsqueeze(0).float()
loss_val = loss_func(features, assignments, weights).item()
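    # The loss is normalized by embedding_size + num_sources, used here as its
    # upper bound, so the resulting confidence lies in [0, 1].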
upper_bound = embedding_size + num_sources
confidence = 1 - (loss_val / upper_bound)
return confidence
def dpcl_classic_confidence(audio_signal, features, num_sources, threshold=95,
**kwargs):
"""
Computes the clusterability in two steps:
1. Cluster the feature space using KMeans into assignments
2. Compute the classic deep clustering loss between the features and the assignments.
Args:
audio_signal (AudioSignal): AudioSignal object which will be used to compute
the mask over which to compute the confidence measure. This can be None, if
and only if ``representation`` is passed as a keyword argument to this
function.
features (np.ndarray): Numpy array containing the features to be clustered.
Should have the same dimensions as the representation.
        num_sources (int): Number of sources to cluster the features into.
threshold (int, optional): Threshold by loudness. Points below the threshold are
excluded from being used in the confidence measure. Defaults to 95.
kwargs: Keyword arguments to `_get_loud_bins_mask`. Namely, representation can
go here as a keyword argument.
Returns:
float: Confidence given by deep clustering loss.
"""
mask, representation = _get_loud_bins_mask(threshold, audio_signal, **kwargs)
embedding_size = features.shape[-1]
features = features[mask].reshape(-1, embedding_size)
weights = representation[mask].reshape(-1)
kmeans = KMeans(num_sources)
distances = kmeans.fit_transform(features)
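    # One-hot assignments: each point is assigned to its nearest centroid
    # (minimum distance to the cluster center).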
    assignments = (distances == distances.min(axis=-1, keepdims=True))
loss_func = loss.DeepClusteringLoss()
features = torch.from_numpy(features).unsqueeze(0).float()
assignments = torch.from_numpy(assignments).unsqueeze(0).float()
weights = torch.from_numpy(weights).unsqueeze(0).float()
loss_val = loss_func(features, assignments, weights).item()
confidence = 1 - loss_val
return confidence
|
interactiveaudiolab/nussl
|
nussl/ml/confidence.py
|
Python
|
mit
| 14,980
|
[
"Gaussian"
] |
ceaf26e00d0b23cb7b0e7b0739b57b625dd9d315cbe45f83eafa09ef2832e6d8
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send VLAN commands to Lenovo Switches
# Overloading aspect of vlan creation in a range is pending
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_vlan
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Manage VLAN resources and attributes on devices running
Lenovo CNOS
description:
- This module allows you to work with VLAN related configurations. The
operators used are overloaded to ensure control over switch VLAN
    configurations. The first level of VLAN configuration allows you to set up the
VLAN range, the VLAN tag persistence, a VLAN access map and access map
filter. After passing this level, there are five VLAN arguments that will
perform further configurations. They are vlanArg1, vlanArg2, vlanArg3,
vlanArg4, and vlanArg5. The value of vlanArg1 will determine the way
following arguments will be evaluated. This module uses SSH to manage
network device configuration. The results of the operation will be placed
in a directory named 'results' that must be created by the user in their
local directory to where the playbook is run. For more information about
    this module from Lenovo and customizing its usage for your use cases,
please visit
U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_vlan.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
vlanArg1:
description:
      - This is an overloaded vlan first argument. Usage of this argument can
        be found in the User Guide referenced above.
required: true
choices: [access-map, dot1q, filter, <1-3999> VLAN ID 1-3999 or range]
vlanArg2:
description:
      - This is an overloaded vlan second argument. Usage of this argument can
        be found in the User Guide referenced above.
    choices: [VLAN Access Map name, egress-only, name, flood, state, ip]
vlanArg3:
description:
      - This is an overloaded vlan third argument. Usage of this argument can
        be found in the User Guide referenced above.
choices: [action, match, statistics, enter VLAN id or range of vlan,
ascii name for the VLAN, ipv4 or ipv6, active or suspend,
fast-leave, last-member-query-interval, mrouter, querier,
querier-timeout, query-interval, query-max-response-time,
report-suppression, robustness-variable, startup-query-count,
startup-query-interval, static-group]
vlanArg4:
description:
      - This is an overloaded vlan fourth argument. Usage of this argument can
        be found in the User Guide referenced above.
    choices: [drop or forward or redirect, ip or mac, Interval in seconds,
ethernet, port-aggregation, Querier IP address,
Querier Timeout in seconds, Query Interval in seconds,
Query Max Response Time in seconds, Robustness Variable value,
Number of queries sent at startup, Query Interval at startup]
vlanArg5:
description:
      - This is an overloaded vlan fifth argument. Usage of this argument can
        be found in the User Guide referenced above.
choices: [access-list name, Slot/chassis number, Port Aggregation Number]
'''
EXAMPLES = '''
Tasks: The following are examples of using the module cnos_vlan. These are
written in the main.yml file of the tasks directory.
---
- name: Test Vlan - Create a vlan, name it
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: 13
vlanArg2: "name"
vlanArg3: "Anil"
- name: Test Vlan - Create a vlan, Flood configuration
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: 13
vlanArg2: "flood"
vlanArg3: "ipv4"
- name: Test Vlan - Create a vlan, State configuration
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: 13
vlanArg2: "state"
vlanArg3: "active"
- name: Test Vlan - VLAN Access map1
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: "access-map"
vlanArg2: "Anil"
vlanArg3: "statistics"
- name: Test Vlan - VLAN Access Map2
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: "access-map"
vlanArg2: "Anil"
vlanArg3: "action"
vlanArg4: "forward"
- name: Test Vlan - ip igmp snooping query interval
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: 13
vlanArg2: "ip"
vlanArg3: "query-interval"
vlanArg4: 1313
- name: Test Vlan - ip igmp snooping mrouter interface port-aggregation 23
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: 13
vlanArg2: "ip"
vlanArg3: "mrouter"
vlanArg4: "port-aggregation"
vlanArg5: 23
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "VLAN configuration is accomplished"
'''
import sys
import time
import socket
import array
import json
import re
try:
from ansible.module_utils.network.cnos import cnos
HAS_LIB = True
except ImportError:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def vlanAccessMapConfig(module, cmd):
retVal = ''
command = ''
vlanArg3 = module.params['vlanArg3']
vlanArg4 = module.params['vlanArg4']
vlanArg5 = module.params['vlanArg5']
deviceType = module.params['deviceType']
if(vlanArg3 == "action"):
command = command + vlanArg3 + ' '
value = cnos.checkSanityofVariable(
deviceType, "vlan_accessmap_action", vlanArg4)
if(value == "ok"):
command = command + vlanArg4
else:
retVal = "Error-135"
return retVal
elif(vlanArg3 == "match"):
command = command + vlanArg3 + ' '
if(vlanArg4 == "ip" or vlanArg4 == "mac"):
command = command + vlanArg4 + ' address '
value = cnos.checkSanityofVariable(
deviceType, "vlan_access_map_name", vlanArg5)
if(value == "ok"):
command = command + vlanArg5
else:
retVal = "Error-136"
return retVal
else:
retVal = "Error-137"
return retVal
elif(vlanArg3 == "statistics"):
command = vlanArg3 + " per-entry"
else:
retVal = "Error-138"
return retVal
inner_cmd = [{'command': command, 'prompt': None, 'answer': None}]
cmd.extend(inner_cmd)
retVal = retVal + str(cnos.run_cnos_commands(module, cmd))
# debugOutput(command)
return retVal
# EOM
def checkVlanNameNotAssigned(module, prompt, answer):
retVal = "ok"
vlanId = module.params['vlanArg1']
vlanName = module.params['vlanArg3']
command = "show vlan id " + vlanId
cmd = [{'command': command, 'prompt': None, 'answer': None}]
retVal = str(cnos.run_cnos_commands(module, cmd))
    if(retVal.find('Error') != -1):
        command = "display vlan id " + vlanId
        cmd = [{'command': command, 'prompt': None, 'answer': None}]
        retVal = str(cnos.run_cnos_commands(module, cmd))
if(retVal.find(vlanName) != -1):
return "Nok"
else:
return "ok"
# EOM
# Utility Method to create vlan
def createVlan(module, prompt, answer):
# vlan config command happens here. It creates if not present
vlanArg1 = module.params['vlanArg1']
vlanArg2 = module.params['vlanArg2']
vlanArg3 = module.params['vlanArg3']
vlanArg4 = module.params['vlanArg4']
vlanArg5 = module.params['vlanArg5']
deviceType = module.params['deviceType']
retVal = ''
command = 'vlan ' + vlanArg1
# debugOutput(command)
cmd = [{'command': command, 'prompt': None, 'answer': None}]
command = ""
if(vlanArg2 == "name"):
# debugOutput("name")
command = vlanArg2 + " "
value = cnos.checkSanityofVariable(deviceType, "vlan_name", vlanArg3)
if(value == "ok"):
value = checkVlanNameNotAssigned(module, prompt, answer)
if(value == "ok"):
command = command + vlanArg3
else:
retVal = retVal + 'VLAN Name is already assigned \n'
command = "\n"
else:
retVal = "Error-139"
return retVal
elif (vlanArg2 == "flood"):
# debugOutput("flood")
command = vlanArg2 + " "
value = cnos.checkSanityofVariable(deviceType, "vlan_flood", vlanArg3)
if(value == "ok"):
command = command + vlanArg3
else:
retVal = "Error-140"
return retVal
elif(vlanArg2 == "state"):
# debugOutput("state")
command = vlanArg2 + " "
value = cnos.checkSanityofVariable(deviceType, "vlan_state", vlanArg3)
if(value == "ok"):
command = command + vlanArg3
else:
retVal = "Error-141"
return retVal
elif(vlanArg2 == "ip"):
# debugOutput("ip")
command = vlanArg2 + " igmp snooping "
# debugOutput("vlanArg3")
if(vlanArg3 is None or vlanArg3 == ""):
# debugOutput("None or empty")
command = command.strip()
elif(vlanArg3 == "fast-leave"):
# debugOutput("fast-leave")
command = command + vlanArg3
elif (vlanArg3 == "last-member-query-interval"):
# debugOutput("last-member-query-interval")
command = command + vlanArg3 + " "
value = cnos.checkSanityofVariable(
deviceType, "vlan_last_member_query_interval", vlanArg4)
if(value == "ok"):
command = command + vlanArg4
else:
retVal = "Error-142"
return retVal
elif (vlanArg3 == "querier"):
# debugOutput("querier")
command = command + vlanArg3 + " "
value = cnos.checkSanityofVariable(deviceType,
"vlan_querier", vlanArg4)
if(value == "ok"):
command = command + vlanArg4
else:
retVal = "Error-143"
return retVal
elif (vlanArg3 == "querier-timeout"):
# debugOutput("querier-timeout")
command = command + vlanArg3 + " "
value = cnos.checkSanityofVariable(
deviceType, "vlan_querier_timeout", vlanArg4)
if(value == "ok"):
command = command + vlanArg4
else:
retVal = "Error-144"
return retVal
elif (vlanArg3 == "query-interval"):
# debugOutput("query-interval")
command = command + vlanArg3 + " "
value = cnos.checkSanityofVariable(
deviceType, "vlan_query_interval", vlanArg4)
if(value == "ok"):
command = command + vlanArg4
else:
retVal = "Error-145"
return retVal
elif (vlanArg3 == "query-max-response-time"):
# debugOutput("query-max-response-time")
command = command + vlanArg3 + " "
value = cnos.checkSanityofVariable(
deviceType, "vlan_query_max_response_time", vlanArg4)
if(value == "ok"):
command = command + vlanArg4
else:
retVal = "Error-146"
return retVal
elif (vlanArg3 == "report-suppression"):
# debugOutput("report-suppression")
command = command + vlanArg3
elif (vlanArg3 == "robustness-variable"):
# debugOutput("robustness-variable")
command = command + vlanArg3 + " "
value = cnos.checkSanityofVariable(
deviceType, "vlan_startup_query_count", vlanArg4)
if(value == "ok"):
command = command + vlanArg4
else:
retVal = "Error-148"
return retVal
elif (vlanArg3 == "startup-query-interval"):
# debugOutput("startup-query-interval")
command = command + vlanArg3 + " "
value = cnos.checkSanityofVariable(
deviceType, "vlan_startup_query_interval", vlanArg4)
if(value == "ok"):
command = command + vlanArg4
else:
retVal = "Error-149"
return retVal
elif (vlanArg3 == "static-group"):
retVal = "Error-102"
return retVal
elif (vlanArg3 == "version"):
# debugOutput("version")
command = command + vlanArg3 + " "
value = cnos.checkSanityofVariable(
deviceType, "vlan_snooping_version", vlanArg4)
if(value == "ok"):
command = command + vlanArg4
else:
retVal = "Error-150"
return retVal
elif (vlanArg3 == "mrouter"):
# debugOutput("mrouter")
command = command + vlanArg3 + " interface "
if(vlanArg4 == "ethernet"):
command = command + vlanArg4 + " "
value = cnos.checkSanityofVariable(
deviceType, "vlan_ethernet_interface", vlanArg5)
if(value == "ok"):
command = command + vlanArg5
else:
retVal = "Error-151"
return retVal
elif(vlanArg4 == "port-aggregation"):
command = command + vlanArg4 + " "
value = cnos.checkSanityofVariable(
deviceType, "vlan_portagg_number", vlanArg5)
if(value == "ok"):
command = command + vlanArg5
else:
retVal = "Error-152"
return retVal
else:
retVal = "Error-153"
return retVal
else:
command = command + vlanArg3
else:
retVal = "Error-154"
return retVal
inner_cmd = [{'command': command, 'prompt': None, 'answer': None}]
cmd.extend(inner_cmd)
retVal = retVal + str(cnos.run_cnos_commands(module, cmd))
# debugOutput(command)
return retVal
# EOM
def vlanConfig(module, prompt, answer):
retVal = ''
vlanArg1 = module.params['vlanArg1']
vlanArg2 = module.params['vlanArg2']
vlanArg3 = module.params['vlanArg3']
vlanArg4 = module.params['vlanArg4']
vlanArg5 = module.params['vlanArg5']
deviceType = module.params['deviceType']
# vlan config command happens here.
command = 'vlan '
if(vlanArg1 == "access-map"):
# debugOutput("access-map ")
command = command + vlanArg1 + ' '
value = cnos.checkSanityofVariable(
deviceType, "vlan_access_map_name", vlanArg2)
if(value == "ok"):
command = command + vlanArg2
# debugOutput(command)
cmd = [{'command': command, 'prompt': None, 'answer': None}]
retVal = retVal + vlanAccessMapConfig(module, cmd)
return retVal
else:
retVal = "Error-130"
return retVal
elif(vlanArg1 == "dot1q"):
# debugOutput("dot1q")
command = command + vlanArg1 + " tag native "
if(vlanArg2 is not None):
value = cnos.checkSanityofVariable(
deviceType, "vlan_dot1q_tag", vlanArg2)
if(value == "ok"):
command = command + vlanArg2
else:
retVal = "Error-131"
return retVal
elif(vlanArg1 == "filter"):
# debugOutput( "filter")
command = command + vlanArg1 + " "
if(vlanArg2 is not None):
value = cnos.checkSanityofVariable(
deviceType, "vlan_filter_name", vlanArg2)
if(value == "ok"):
command = command + vlanArg2 + " vlan-list "
value = cnos.checkSanityofVariable(deviceType, "vlan_id",
vlanArg3)
if(value == "ok"):
command = command + vlanArg3
else:
value = cnos.checkSanityofVariable(
deviceType, "vlan_id_range", vlanArg3)
if(value == "ok"):
command = command + vlanArg3
else:
retVal = "Error-133"
return retVal
else:
retVal = "Error-132"
return retVal
else:
value = cnos.checkSanityofVariable(deviceType, "vlan_id", vlanArg1)
if(value == "ok"):
retVal = createVlan(module, '(config-vlan)#', None)
return retVal
else:
value = cnos.checkSanityofVariable(
deviceType, "vlan_id_range", vlanArg1)
if(value == "ok"):
retVal = createVlan(module, '(config-vlan)#', None)
return retVal
retVal = "Error-133"
return retVal
# debugOutput(command)
cmd = [{'command': command, 'prompt': None, 'answer': None}]
retVal = retVal + str(cnos.run_cnos_commands(module, cmd))
return retVal
# EOM
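# Hedged illustration (not part of the original module): vlanArg1 selects the
# configuration mode above. For example, an invocation with vlanArg1=13,
# vlanArg2='name', vlanArg3='Anil' (as in the EXAMPLES section) results in the
# CLI sequence:
#   vlan 13
#   name Anil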
def main():
#
# Define parameters for vlan creation entry
#
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),
vlanArg1=dict(required=True),
vlanArg2=dict(required=False),
vlanArg3=dict(required=False),
vlanArg4=dict(required=False),
vlanArg5=dict(required=False),),
supports_check_mode=False)
outputfile = module.params['outputfile']
output = ""
# Send the CLi command
output = output + str(vlanConfig(module, "(config)#", None))
    # Save the operation details to the output file
    with open(outputfile, "a") as output_file:
        output_file.write(output)
# need to add logic to check when changes occur or not
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True,
msg="VLAN configuration is accomplished")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
|
caphrim007/ansible
|
lib/ansible/modules/network/cnos/cnos_vlan.py
|
Python
|
gpl-3.0
| 22,092
|
[
"VisIt"
] |
2ac98161acf88de3448f908ae12505d6ec624bdbd64b67c4153a3e12efe9539e
|
"""Implements API endpoints under ``/api/org``"""
from typing import Any, Dict
from flask import Blueprint, jsonify
from werkzeug.exceptions import abort
from bson import ObjectId
from shrunk.client import ShrunkClient
from shrunk.util.ldap import is_valid_netid
from shrunk.util.decorators import require_login, request_schema
__all__ = ['bp']
bp = Blueprint('org', __name__, url_prefix='/api/v1/org')
LIST_ORGS_SCHEMA = {
'type': 'object',
'additionalProperties': False,
'required': ['which'],
'properties': {
'which': {
'type': 'string',
'enum': ['user', 'all'],
},
},
}
@bp.route('/list', methods=['POST'])
@request_schema(LIST_ORGS_SCHEMA)
@require_login
def get_orgs(netid: str, client: ShrunkClient, req: Any) -> Any:
"""``POST /api/org/list``
Lists organizations. Request format:
.. code-block:: json
{ "which": "'user' | 'all'" }
where the ``"which"`` property specifies whether to return information about all organizations
or only organizations of which the requesting user is a member. Only administrators may use the ``"all"``
option. Response format:
.. code-block:: json
{ "orgs": [ {
"id": "string",
"name": "string",
"is_member": "boolean",
"is_admin": "boolean",
"timeCreated": "date-time",
"members": [
{ "netid": "string", "timeCreated": "date-time", "is_admin": "boolean" }
]
} ]
}
Where the top-level ``"is_member"`` and ``"is_admin"`` properties specify respectively whether the requesting
user is a member and/or an administrator of the organization.
:param netid:
:param client:
:param req:
"""
if req['which'] == 'all' and not client.roles.has('admin', netid):
abort(403)
orgs = client.orgs.get_orgs(netid, req['which'] == 'user')
return jsonify({'orgs': orgs})
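# Hedged usage sketch (hypothetical, not part of this module): exercising the
# endpoint with Flask's test client, assuming `app` is the configured Shrunk
# application and the test session is already authenticated.
def _demo_list_orgs(app):
    with app.test_client() as http:
        resp = http.post('/api/v1/org/list', json={'which': 'user'})
        return resp.get_json()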
CREATE_ORG_SCHEMA = {
'type': 'object',
'additionalProperties': False,
'required': ['name'],
'properties': {
'name': {
'type': 'string',
'pattern': '^[a-zA-Z0-9_.,-]*$',
'minLength': 1,
},
},
}
@bp.route('', methods=['POST'])
@request_schema(CREATE_ORG_SCHEMA)
@require_login
def post_org(netid: str, client: ShrunkClient, req: Any) -> Any:
"""``POST /api/org``
Create a new organization. The requesting user is automatically an administrator of the
newly-created organization. Returns the ID of the created organization. Request format:
.. code-block:: json
{ "name": "string" }
Response format:
.. code-block:: json
{ "id": "string" }
:param netid:
:param client:
:param req:
"""
if not client.roles.has_some(['facstaff', 'admin'], netid):
abort(403)
org_id = client.orgs.create(req['name'])
if org_id is None:
abort(409)
client.orgs.create_member(org_id, netid, is_admin=True)
return jsonify({'id': org_id})
@bp.route('/<ObjectId:org_id>', methods=['DELETE'])
@require_login
def delete_org(netid: str, client: ShrunkClient, org_id: ObjectId) -> Any:
"""``DELETE /api/org/<org_id>``
Delete an organization. Returns 204 on success.
:param netid:
:param client:
:param org_id:
"""
if not client.orgs.is_admin(org_id, netid) and not client.roles.has('admin', netid):
abort(403)
client.orgs.delete(org_id)
return '', 204
@bp.route('/<ObjectId:org_id>', methods=['GET'])
@require_login
def get_org(netid: str, client: ShrunkClient, org_id: ObjectId) -> Any:
"""``GET /api/org/<org_id>``
Get information about an organization. For response format, see :py:func:`get_orgs`.
:param netid:
:param client:
:param org_id:
"""
if not client.orgs.is_member(org_id, netid) and not client.roles.has('admin', netid):
abort(403)
org = client.orgs.get_org(org_id)
if org is None:
abort(404)
org['id'] = org['_id']
del org['_id']
org['is_member'] = any(member['netid'] == netid for member in org['members'])
org['is_admin'] = any(member['netid'] == netid and member['is_admin'] for member in org['members'])
return jsonify(org)
VALIDATE_NAME_SCHEMA = {
'type': 'object',
'additionalProperties': False,
'required': ['name'],
'properties': {
'name': {'type': 'string'},
},
}
@bp.route('/validate_name', methods=['POST'])
@request_schema(VALIDATE_NAME_SCHEMA)
@require_login
def validate_org_name(_netid: str, client: ShrunkClient, req: Any) -> Any:
"""``POST /api/org/validate_name``
Validate an organization name. This endpoint is used for form validation in the frontend. Request format:
.. code-block:: json
{ "name": "string" }
Response format:
.. code-block:: json
{ "valid": "boolean", "reason?": "string" }
:param netid:
:param client:
:param req:
"""
valid = client.orgs.validate_name(req['name'])
response: Dict[str, Any] = {'valid': valid}
if not valid:
response['reason'] = 'That name is already taken.'
return jsonify(response)
VALIDATE_NETID_SCHEMA = {
'type': 'object',
'additionalProperties': False,
'required': ['netid'],
'properties': {
'netid': {'type': 'string'},
},
}
@bp.route('/validate_netid', methods=['POST'])
@request_schema(VALIDATE_NETID_SCHEMA)
@require_login
def validate_netid(_netid: str, _client: ShrunkClient, req: Any) -> Any:
"""``POST /api/org/validate_netid``
Check that a NetID is valid. This endpoint is used for form validation in the frontend. Request format:
.. code-block:: json
{ "netid": "string" }
Response format:
.. code-block:: json
{ "valid": "boolean", "reason?": "string" }
:param netid:
:param client:
:param req:
"""
valid = is_valid_netid(req['netid'])
response: Dict[str, Any] = {'valid': valid}
if not valid:
response['reason'] = 'That NetID is not valid.'
return jsonify(response)
@bp.route('/<ObjectId:org_id>/stats/visits', methods=['GET'])
@require_login
def get_org_visit_stats(netid: str, client: ShrunkClient, org_id: ObjectId) -> Any:
"""``GET /api/org/<org_id>/stats/visits``
Get per-user visit statistics for an org. Response format:
.. code-block:: json
{ "visits": [ {
"netid": "string",
"total_visits": "number",
"unique_visits": "number"
} ]
}
:param netid:
:param client:
:param org_id:
"""
if not client.orgs.is_admin(org_id, netid) and not client.roles.has('admin', netid):
abort(403)
visits = client.orgs.get_visit_stats(org_id)
return jsonify({'visits': visits})
@bp.route('/<ObjectId:org_id>/stats/geoip', methods=['GET'])
@require_login
def get_org_geoip_stats(netid: str, client: ShrunkClient, org_id: ObjectId) -> Any:
"""``GET /api/org/<org_id>/stats/geoip``
Get GeoIP statistics about all links belonging to members of the org. For response format,
see :py:func:`~shrunk.api.link.get_link_geoip_stats`.
:param netid:
:param client:
:param org_id:
"""
if not client.orgs.is_admin(org_id, netid) and not client.roles.has('admin', netid):
abort(403)
geoip = client.orgs.get_geoip_stats(org_id)
return jsonify({'geoip': geoip})
@bp.route('/<ObjectId:org_id>/member/<member_netid>', methods=['PUT'])
@require_login
def put_org_member(netid: str, client: ShrunkClient, org_id: ObjectId, member_netid: str) -> Any:
"""``PUT /api/org/<org_id>/member/<netid>``
Add a user to an org. Performs no action if the user is already a member of the org. Returns 204
on success.
:param netid:
:param client:
:param org_id:
:param member_netid:
"""
if not client.orgs.is_admin(org_id, netid) and not client.roles.has('admin', netid):
abort(403)
client.orgs.create_member(org_id, member_netid)
return '', 204
@bp.route('/<ObjectId:org_id>/member/<member_netid>', methods=['DELETE'])
@require_login
def delete_org_member(netid: str, client: ShrunkClient, org_id: ObjectId, member_netid: str) -> Any:
"""``DELETE /api/org/<org_id>/member/<netid>``
Remove a member from an org. Returns 204 on success.
:param netid:
:param client:
:param org_id:
:param member_netid:
"""
if not client.orgs.is_admin(org_id, netid) and not client.roles.has('admin', netid):
        if netid != member_netid:
abort(403)
client.orgs.delete_member(org_id, member_netid)
return '', 204
MODIFY_ORG_MEMBER_SCHEMA = {
'type': 'object',
'additionalProperties': False,
'properties': {
'is_admin': {'type': 'boolean'},
},
}
@bp.route('/<ObjectId:org_id>/member/<member_netid>', methods=['PATCH'])
@request_schema(MODIFY_ORG_MEMBER_SCHEMA)
@require_login
def patch_org_member(netid: str, client: ShrunkClient, req: Any, org_id: ObjectId, member_netid: str) -> Any:
"""``PATCH /api/org/<org_id>/member/<netid>``
Modify a member of an org. Returns 204 on success. Request response:
.. code-block:: json
{ "is_admin?": "boolean" }
Properties present in the request will be updated. Properties missing from the request will not be modified.
:param netid:
:param client:
:param req:
:param org_id:
:param member_netid:
"""
if not client.orgs.is_admin(org_id, netid) and not client.roles.has('admin', netid):
abort(403)
if 'is_admin' in req:
client.orgs.set_member_admin(org_id, member_netid, req['is_admin'])
return '', 204
|
oss/shrunk
|
backend/shrunk/api/org.py
|
Python
|
mit
| 9,690
|
[
"VisIt"
] |
975353fc882b70afa5917a98b15e4534c8b95c6604b622c1f11dd46bff4fd27d
|
from dateutil import parser as datetime_parser
from colander import (
Invalid,
Mapping,
SchemaNode,
null,
)
from deform.compat import (
string_types,
text_,
)
from deform.widget import (
Widget,
)
from deform.widget import _StrippedString
import logging
import json
LOGGER = logging.getLogger("PHOENIX")
class ResourceWidget(Widget):
"""
Renders an WPS ComplexType input widget with an upload button.
It is based on deform.widget.TextInputWidget.
"""
template = 'resource'
readonly_template = 'readonly/textinput'
strip = True
mask = None
mask_placeholder = "_"
mime_types = ['application/x-netcdf']
upload = False
storage_url = None
size_limit = 2 * 1024 * 1024 # 2 MB in bytes
requirements = (('jquery.maskedinput', None),)
def serialize(self, field, cstruct, **kw):
if cstruct in (null, None):
cstruct = ''
readonly = kw.get('readonly', self.readonly)
        template = self.readonly_template if readonly else self.template
values = self.get_template_values(field, cstruct, kw)
return field.renderer(template, **values)
def deserialize(self, field, pstruct):
if pstruct is null:
return null
elif not isinstance(pstruct, string_types):
raise Invalid(field.schema, "Pstruct is not a string")
if self.strip:
pstruct = pstruct.strip()
if not pstruct:
return null
LOGGER.debug("pstruct: %s", pstruct)
return pstruct
class BBoxWidget(Widget):
"""
Renders a BoundingBox Widget.
**Attributes/Arguments**
template
The template name used to render the input widget. Default:
``bbox``.
readonly_template
The template name used to render the widget in read-only mode.
Default: ``readonly/bbox``.
"""
template = 'bbox'
readonly_template = 'readonly/bbox'
_pstruct_schema = SchemaNode(
Mapping(),
SchemaNode(_StrippedString(), name='minx'),
SchemaNode(_StrippedString(), name='miny'),
SchemaNode(_StrippedString(), name='maxx'),
SchemaNode(_StrippedString(), name='maxy'))
def serialize(self, field, cstruct, **kw):
if cstruct is null:
minx = '-180'
miny = '-90'
maxx = '180'
maxy = '90'
else:
minx, miny, maxx, maxy = cstruct.split(',', 3)
kw.setdefault('minx', minx)
kw.setdefault('miny', miny)
kw.setdefault('maxx', maxx)
kw.setdefault('maxy', maxy)
# readonly = kw.get('readonly', self.readonly)
# TODO: add readonly template
readonly = False
        template = self.readonly_template if readonly else self.template
values = self.get_template_values(field, cstruct, kw)
return field.renderer(template, **values)
def deserialize(self, field, pstruct):
if pstruct is null:
return null
else:
try:
validated = self._pstruct_schema.deserialize(pstruct)
except Invalid as exc:
raise Invalid(field.schema, text_("Invalid pstruct: %s" % exc))
minx = validated['minx']
miny = validated['miny']
maxx = validated['maxx']
maxy = validated['maxy']
            if not minx and not miny and not maxx and not maxy:
return null
result = ','.join([minx, miny, maxx, maxy])
if not minx or not miny or not maxx or not maxy:
raise Invalid(field.schema, 'Incomplete bbox', result)
return result
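# Hedged note (not in the original module): the widget round-trips bounding
# boxes as a single 'minx,miny,maxx,maxy' string, e.g. '-180,-90,180,90',
# i.e. lower-left corner followed by upper-right corner, as is usual for
# WPS bounding boxes.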
class TagsWidget(Widget):
template = 'tags'
# readonly_template = 'readonly/tags'
size = None
strip = True
mask = None
mask_placeholder = "_"
style = None
requirements = (('jquery.maskedinput', None), )
def serialize(self, field, cstruct, **kw):
if cstruct in (null, None):
cstruct = ''
values = self.get_template_values(field, cstruct, kw)
return field.renderer(self.template, **values)
def deserialize(self, field, pstruct):
LOGGER.debug('result pstruct=%s', pstruct)
if pstruct is null:
return null
if self.strip:
pstruct = pstruct.strip()
if not pstruct:
return null
return pstruct
|
bird-house/pyramid-phoenix
|
phoenix/geoform/widget.py
|
Python
|
apache-2.0
| 4,428
|
[
"NetCDF"
] |
dd237870f6dfd40fe4ce4e8231f2c13ee7d552f7bea30ca82e5cf62636d527ac
|
# (c) 2012-2018, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
import re
def camelcase_to_underscore(s):
'''
Convert CamelCase names to lowercase_with_underscore.
'''
s = re.sub(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', '_\\1', s)
return s.lower().strip('_')
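# Illustrative examples (not part of the original module); the expected
# outputs follow from the regex above:
#   camelcase_to_underscore('CamelCase')         # -> 'camel_case'
#   camelcase_to_underscore('HTTPResponseCode')  # -> 'http_response_code'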
|
chouseknecht/galaxy
|
galaxy/main/utils/__init__.py
|
Python
|
apache-2.0
| 899
|
[
"Galaxy"
] |
c3a37e46e83236f11504134e1f9bd26a5e7f3c2bd52a54ea425c4ae8652cf29f
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'DataDisplayTemplate'
db.delete_table('profiles_datadisplaytemplate')
# Removing M2M table for field records on 'DataDisplayTemplate'
db.delete_table('profiles_datadisplaytemplate_records')
# Removing M2M table for field levels on 'DataDisplayTemplate'
db.delete_table('profiles_datadisplaytemplate_levels')
# Removing M2M table for field domains on 'DataDisplayTemplate'
db.delete_table('profiles_datadisplaytemplate_domains')
# Removing M2M table for field indicators on 'DataDisplayTemplate'
db.delete_table('profiles_datadisplaytemplate_indicators')
# Deleting model 'DataDisplay'
db.delete_table('profiles_datadisplay')
# Changing field 'Indicator.data_type'
db.alter_column('profiles_indicator', 'data_type', self.gf('django.db.models.fields.CharField')(max_length=30))
def backwards(self, orm):
# Adding model 'DataDisplayTemplate'
db.create_table('profiles_datadisplaytemplate', (
('subtitle', self.gf('django.db.models.fields.CharField')(max_length=300, blank=True)),
('display_type', self.gf('django.db.models.fields.CharField')(default='STANDARD', max_length=11)),
('title', self.gf('django.db.models.fields.CharField')(max_length=300)),
('subsubtitle', self.gf('django.db.models.fields.CharField')(max_length=300, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('source', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('profiles', ['DataDisplayTemplate'])
# Adding M2M table for field records on 'DataDisplayTemplate'
db.create_table('profiles_datadisplaytemplate_records', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('datadisplaytemplate', models.ForeignKey(orm['profiles.datadisplaytemplate'], null=False)),
('georecord', models.ForeignKey(orm['profiles.georecord'], null=False))
))
db.create_unique('profiles_datadisplaytemplate_records', ['datadisplaytemplate_id', 'georecord_id'])
# Adding M2M table for field levels on 'DataDisplayTemplate'
db.create_table('profiles_datadisplaytemplate_levels', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('datadisplaytemplate', models.ForeignKey(orm['profiles.datadisplaytemplate'], null=False)),
('geolevel', models.ForeignKey(orm['profiles.geolevel'], null=False))
))
db.create_unique('profiles_datadisplaytemplate_levels', ['datadisplaytemplate_id', 'geolevel_id'])
# Adding M2M table for field domains on 'DataDisplayTemplate'
db.create_table('profiles_datadisplaytemplate_domains', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('datadisplaytemplate', models.ForeignKey(orm['profiles.datadisplaytemplate'], null=False)),
('datadomain', models.ForeignKey(orm['profiles.datadomain'], null=False))
))
db.create_unique('profiles_datadisplaytemplate_domains', ['datadisplaytemplate_id', 'datadomain_id'])
# Adding M2M table for field indicators on 'DataDisplayTemplate'
db.create_table('profiles_datadisplaytemplate_indicators', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('datadisplaytemplate', models.ForeignKey(orm['profiles.datadisplaytemplate'], null=False)),
('indicator', models.ForeignKey(orm['profiles.indicator'], null=False))
))
db.create_unique('profiles_datadisplaytemplate_indicators', ['datadisplaytemplate_id', 'indicator_id'])
# Adding model 'DataDisplay'
db.create_table('profiles_datadisplay', (
('subtitle', self.gf('django.db.models.fields.CharField')(max_length=300, blank=True)),
('image', self.gf('sorl.thumbnail.fields.ImageField')(max_length=100)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=100, unique=True, db_index=True)),
('indicator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.Indicator'], null=True, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=300)),
('subsubtitle', self.gf('django.db.models.fields.CharField')(max_length=300, blank=True)),
('html', self.gf('django.db.models.fields.TextField')(blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('record', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.GeoRecord'], null=True, blank=True)),
('template', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.DataDisplayTemplate'])),
))
db.send_create_signal('profiles', ['DataDisplay'])
# Changing field 'Indicator.data_type'
db.alter_column('profiles_indicator', 'data_type', self.gf('django.db.models.fields.CharField')(max_length=10))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profiles.datadomain': {
'Meta': {'ordering': "['weight']", 'object_name': 'DataDomain'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.Indicator']", 'through': "orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'profiles.datapoint': {
'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'DataPoint'},
'change_from_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_from'", 'null': 'True', 'to': "orm['profiles.Time']"}),
'change_to_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_to'", 'null': 'True', 'to': "orm['profiles.Time']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Time']", 'null': 'True'})
},
'profiles.datasource': {
'Meta': {'object_name': 'DataSource'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'profiles.denominator': {
'Meta': {'object_name': 'Denominator'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'multiplier': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sort': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'profiles.denominatorpart': {
'Meta': {'object_name': 'DenominatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataSource']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Denominator']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.IndicatorPart']"})
},
'profiles.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataSource']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
},
'profiles.georecord': {
'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': "orm['profiles.GeoRecord']"}),
'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']"}),
'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': "orm['profiles.GeoRecord']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'})
},
'profiles.indicator': {
'Meta': {'object_name': 'Indicator'},
'data_domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataDomain']", 'through': "orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
},
'profiles.indicatordomain': {
'Meta': {'object_name': 'IndicatorDomain'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataDomain']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"})
},
'profiles.indicatorpart': {
'Meta': {'object_name': 'IndicatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataSource']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Time']"})
},
'profiles.time': {
'Meta': {'object_name': 'Time'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
},
'profiles.value': {
'Meta': {'object_name': 'Value'},
'datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataPoint']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Denominator']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
}
}
complete_apps = ['profiles']
|
ProvidencePlan/Profiles
|
communityprofiles/profiles/oldmigrations/0036_auto__del_datadisplaytemplate__del_datadisplay__chg_field_indicator_da.py
|
Python
|
mit
| 18,644
|
[
"MOE"
] |
2d1a140164baf3546f311f7f59122d54f1be21f19882c23e1489488331fad00d
|
# coding: utf-8
"""0MQ Socket pure Python methods."""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import random
import codecs
import zmq
from .backend import Socket as SocketBase
from .poll import Poller
from . import constants
from .attrsettr import AttributeSetter
from zmq.error import ZMQError, ZMQBindError
from zmq.utils import jsonapi
from zmq.utils.strtypes import bytes, unicode, basestring
from .constants import (
SNDMORE, ENOTSUP, POLLIN,
int64_sockopt_names,
int_sockopt_names,
bytes_sockopt_names,
)
try:
    import cPickle
    pickle = cPickle
except ImportError:
    cPickle = None
    import pickle
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class Socket(SocketBase, AttributeSetter):
#-------------------------------------------------------------------------
# Hooks for sockopt completion
#-------------------------------------------------------------------------
def __dir__(self):
keys = dir(self.__class__)
for collection in (
bytes_sockopt_names,
int_sockopt_names,
int64_sockopt_names,
):
keys.extend(collection)
return keys
#-------------------------------------------------------------------------
# Getting/Setting options
#-------------------------------------------------------------------------
setsockopt = SocketBase.set
getsockopt = SocketBase.get
def set_string(self, option, optval, encoding='utf-8'):
"""set socket options with a unicode object
This is simply a wrapper for setsockopt to protect from encoding ambiguity.
See the 0MQ documentation for details on specific options.
Parameters
----------
option : int
The name of the option to set. Can be any of: SUBSCRIBE,
UNSUBSCRIBE, IDENTITY
optval : unicode string (unicode on py2, str on py3)
The value of the option to set.
encoding : str
The encoding to be used, default is utf8
"""
if not isinstance(optval, unicode):
raise TypeError("unicode strings only")
return self.set(option, optval.encode(encoding))
setsockopt_unicode = setsockopt_string = set_string
def get_string(self, option, encoding='utf-8'):
"""get the value of a socket option
See the 0MQ documentation for details on specific options.
Parameters
----------
option : int
The option to retrieve. Currently, IDENTITY is the only
gettable option that can return a string.
Returns
-------
optval : unicode string (unicode on py2, str on py3)
The value of the option as a unicode string.
"""
if option not in constants.bytes_sockopts:
raise TypeError("option %i will not return a string to be decoded"%option)
return self.getsockopt(option).decode(encoding)
getsockopt_unicode = getsockopt_string = get_string
def bind_to_random_port(self, addr, min_port=49152, max_port=65536, max_tries=100):
"""bind this socket to a random port in a range
Parameters
----------
addr : str
The address string without the port to pass to ``Socket.bind()``.
min_port : int, optional
The minimum port in the range of ports to try (inclusive).
max_port : int, optional
The maximum port in the range of ports to try (exclusive).
max_tries : int, optional
The maximum number of bind attempts to make.
Returns
-------
port : int
The port the socket was bound to.
Raises
------
ZMQBindError
if `max_tries` reached before successful bind
"""
for i in range(max_tries):
try:
port = random.randrange(min_port, max_port)
self.bind('%s:%s' % (addr, port))
except ZMQError as exception:
if not exception.errno == zmq.EADDRINUSE:
raise
else:
return port
raise ZMQBindError("Could not bind socket to random port.")
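    # A minimal usage sketch (illustration only; assumes a zmq.Context named
    # `ctx` already exists):
    #
    #     sock = ctx.socket(zmq.REP)
    #     port = sock.bind_to_random_port('tcp://127.0.0.1')
    #
    # The returned `port` can then be advertised to clients out of band.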
def get_hwm(self):
"""get the High Water Mark
On libzmq ≥ 3.x, this gets SNDHWM if available, otherwise RCVHWM
"""
major = zmq.zmq_version_info()[0]
if major >= 3:
# return sndhwm, fallback on rcvhwm
try:
return self.getsockopt(zmq.SNDHWM)
            except zmq.ZMQError:
pass
return self.getsockopt(zmq.RCVHWM)
else:
return self.getsockopt(zmq.HWM)
def set_hwm(self, value):
"""set the High Water Mark
On libzmq ≥ 3.x, this sets *both* SNDHWM and RCVHWM
"""
major = zmq.zmq_version_info()[0]
if major >= 3:
raised = None
try:
self.sndhwm = value
except Exception as e:
raised = e
try:
self.rcvhwm = value
            except Exception as e:
                raised = e
if raised:
raise raised
else:
return self.setsockopt(zmq.HWM, value)
hwm = property(get_hwm, set_hwm)
#-------------------------------------------------------------------------
# Sending and receiving messages
#-------------------------------------------------------------------------
def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
"""send a sequence of buffers as a multipart message
Parameters
----------
msg_parts : iterable
A sequence of objects to send as a multipart message. Each element
can be any sendable object (Frame, bytes, buffer-providers)
flags : int, optional
SNDMORE is handled automatically for frames before the last.
copy : bool, optional
Should the frame(s) be sent in a copying or non-copying manner.
track : bool, optional
Should the frame(s) be tracked for notification that ZMQ has
finished with it (ignored if copy=True).
Returns
-------
None : if copy or not track
MessageTracker : if track and not copy
a MessageTracker object, whose `pending` property will
be True until the last send is completed.
"""
for msg in msg_parts[:-1]:
self.send(msg, SNDMORE|flags, copy=copy, track=track)
# Send the last part without the extra SNDMORE flag.
return self.send(msg_parts[-1], flags, copy=copy, track=track)
def recv_multipart(self, flags=0, copy=True, track=False):
"""receive a multipart message as a list of bytes or Frame objects
Parameters
----------
flags : int, optional
Any supported flag: NOBLOCK. If NOBLOCK is set, this method
will raise a ZMQError with EAGAIN if a message is not ready.
If NOBLOCK is not set, then this method will block until a
message arrives.
copy : bool, optional
Should the message frame(s) be received in a copying or non-copying manner?
If False a Frame object is returned for each part, if True a copy of
the bytes is made for each frame.
track : bool, optional
Should the message frame(s) be tracked for notification that ZMQ has
finished with it? (ignored if copy=True)
Returns
-------
msg_parts : list
A list of frames in the multipart message; either Frames or bytes,
depending on `copy`.
"""
parts = [self.recv(flags, copy=copy, track=track)]
# have first part already, only loop while more to receive
while self.getsockopt(zmq.RCVMORE):
part = self.recv(flags, copy=copy, track=track)
parts.append(part)
return parts
def send_string(self, u, flags=0, copy=False, encoding='utf-8'):
"""send a Python unicode string as a message with an encoding
0MQ communicates with raw bytes, so you must encode/decode
text (unicode on py2, str on py3) around 0MQ.
Parameters
----------
u : Python unicode string (unicode on py2, str on py3)
The unicode string to send.
flags : int, optional
Any valid send flag.
encoding : str [default: 'utf-8']
The encoding to be used
"""
if not isinstance(u, basestring):
raise TypeError("unicode/str objects only")
return self.send(u.encode(encoding), flags=flags, copy=copy)
send_unicode = send_string
def recv_string(self, flags=0, encoding='utf-8'):
"""receive a unicode string, as sent by send_string
Parameters
----------
flags : int
Any valid recv flag.
encoding : str [default: 'utf-8']
The encoding to be used
Returns
-------
s : unicode string (unicode on py2, str on py3)
The Python unicode string that arrives as encoded bytes.
"""
msg = self.recv(flags=flags, copy=False)
return codecs.decode(msg.bytes, encoding)
recv_unicode = recv_string
def send_pyobj(self, obj, flags=0, protocol=-1):
"""send a Python object as a message using pickle to serialize
Parameters
----------
obj : Python object
The Python object to send.
flags : int
Any valid send flag.
protocol : int
The pickle protocol number to use. Default of -1 will select
the highest supported number. Use 0 for multiple platform
support.
"""
msg = pickle.dumps(obj, protocol)
return self.send(msg, flags)
def recv_pyobj(self, flags=0):
"""receive a Python object as a message using pickle to serialize
Parameters
----------
flags : int
Any valid recv flag.
Returns
-------
obj : Python object
The Python object that arrives as a message.
"""
s = self.recv(flags)
return pickle.loads(s)
def send_json(self, obj, flags=0):
"""send a Python object as a message using json to serialize
Parameters
----------
obj : Python object
The Python object to send.
flags : int
Any valid send flag.
"""
if jsonapi.jsonmod is None:
raise ImportError('jsonlib{1,2}, json or simplejson library is required.')
else:
msg = jsonapi.dumps(obj)
return self.send(msg, flags)
def recv_json(self, flags=0):
"""receive a Python object as a message using json to serialize
Parameters
----------
flags : int
Any valid recv flag.
Returns
-------
obj : Python object
The Python object that arrives as a message.
"""
if jsonapi.jsonmod is None:
raise ImportError('jsonlib{1,2}, json or simplejson library is required.')
else:
msg = self.recv(flags)
return jsonapi.loads(msg)
_poller_class = Poller
def poll(self, timeout=None, flags=POLLIN):
"""poll the socket for events
The default is to poll forever for incoming
events. Timeout is in milliseconds, if specified.
Parameters
----------
timeout : int [default: None]
The timeout (in milliseconds) to wait for an event. If unspecified
            (or specified None), will wait forever for an event.
flags : bitfield (int) [default: POLLIN]
The event flags to poll for (any combination of POLLIN|POLLOUT).
The default is to check for incoming events (POLLIN).
Returns
-------
events : bitfield (int)
The events that are ready and waiting. Will be 0 if no events were ready
by the time timeout was reached.
"""
if self.closed:
raise ZMQError(ENOTSUP)
p = self._poller_class()
p.register(self, flags)
evts = dict(p.poll(timeout))
# return 0 if no events, otherwise return event bitfield
return evts.get(self, 0)
__all__ = ['Socket']
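# A minimal end-to-end sketch of the sugar methods above (not part of the
# original module; assumes a standard pyzmq installation). Two PAIR sockets
# connected over inproc:// exchange a unicode string and a JSON object:
#
#     import zmq
#     ctx = zmq.Context.instance()
#     a, b = ctx.socket(zmq.PAIR), ctx.socket(zmq.PAIR)
#     a.bind('inproc://demo')
#     b.connect('inproc://demo')
#     a.send_string(u'hello')            # encoded to utf-8 bytes on the wire
#     assert b.recv_string() == u'hello'
#     a.send_json({'n': 1})              # serialized with the json module
#     assert b.recv_json() == {'n': 1}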
|
IsCoolEntertainment/debpkg_python-pyzmq
|
zmq/sugar/socket.py
|
Python
|
lgpl-3.0
| 13,277
|
[
"Brian"
] |
fc698f9157d0cd247ead697301237c74a1a23468f88145a460545b17cc45902d
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from scipy.constants import Boltzmann
import random
from numpy import exp
from numpy import abs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Number of classes in the MNIST dataset, one for each digit 0-9
NUM_CLASSES = 10
# Image size (pixels per side) in the MNIST dataset
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
KERNEL_SIZE = 5
NUM_FEATURE_MAPS = 6
STDDEV_INDEX = 0.1
NUM_FEATURE_MAPS_2 = 12
NUM_FEATURE = 92
BATCH_SIZE = 50
FLAGS = None
EPOCH_NUMBER = 1
TRAINING_SIZE = 60000
# Parameters for the simulated annealing (SA) algorithm
NEIGHBOR_NUMBER = 10
REDUCE_FACTOR = 0.9
TEMPERATURE_INIT = 100
BOLTZMANN_CONSTANT = Boltzmann
# Global variables holding the network parameters
w_conv1 = None
b_conv1 = None
w_conv2 = None
b_conv2 = None
w_fc1 = None
b_fc1 = None
w_fc2 = None
b_fc2 = None
# Declare a weight variable for a given kernel size and number of feature maps
def weight_variable(weight_shape):
weight_init = tf.truncated_normal(weight_shape, stddev=STDDEV_INDEX)
return tf.Variable(weight_init)
# Declare a bias variable of the corresponding shape
def bias_variable(bias_shape):
bias_init = tf.constant(0.1, shape=bias_shape)
return tf.Variable(bias_init)
# Declare a 2-D convolution layer with the full set of options
def conv2d(x, weight):
return tf.nn.conv2d(x, weight, strides=[1, 1, 1, 1], padding='SAME')
# Subsample the feature maps by 2x2 (max_pool_2x2)
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
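# Shape arithmetic for the network below (worked example): a 28x28 input keeps
# its size through each SAME-padded convolution, and each 2x2 max-pool halves
# it: 28x28 -> conv1 -> 28x28x6 -> pool1 -> 14x14x6 -> conv2 -> 14x14x12
# -> pool2 -> 7x7x12, which is why fc1 flattens to 7 * 7 * NUM_FEATURE_MAPS_2.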
# Build and initialize the neural network
def deep_network(x):
global w_conv1, b_conv1, w_conv2, b_conv2, w_fc1, b_fc1, w_fc2, b_fc2
    # Reshape the input for use inside the network
with tf.name_scope('reshape'):
x_image = tf.reshape(x, [-1, IMAGE_SIZE,
IMAGE_SIZE, 1])
    # First layer - maps one image to 6 feature maps
with tf.name_scope('conv1'):
# Weight
w_conv1 = weight_variable([KERNEL_SIZE, KERNEL_SIZE,
1, NUM_FEATURE_MAPS])
# Bias
b_conv1 = bias_variable([NUM_FEATURE_MAPS])
        # ReLU activation function
h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)
# Layer pool/subsampling
with tf.name_scope('pool1'):
h_pool1 = max_pool_2x2(h_conv1)
    # Second convolution layer -- maps 6 feature maps to 12
with tf.name_scope('conv2'):
# Weight
w_conv2 = weight_variable([KERNEL_SIZE, KERNEL_SIZE,
NUM_FEATURE_MAPS, NUM_FEATURE_MAPS_2])
# Bias
b_conv2 = bias_variable([NUM_FEATURE_MAPS_2])
        # ReLU activation function
h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
    # Second pooling/subsampling layer
with tf.name_scope('pool2'):
h_pool2 = max_pool_2x2(h_conv2)
    # First fully connected layer, after two rounds of pooling
    # The 28x28 image has become 7x7x12 feature maps - connect them to 92 features
with tf.name_scope('fc1'):
# Weight fc_1
w_fc1 = weight_variable([7 * 7 * NUM_FEATURE_MAPS_2, NUM_FEATURE])
        b_fc1 = bias_variable([NUM_FEATURE])
        h_pool2_flatten = tf.reshape(h_pool2, [-1, 7 * 7 * NUM_FEATURE_MAPS_2])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flatten, w_fc1) + b_fc1)
    # Use dropout to control the model's complexity
with tf.name_scope('dropout'):
dropper = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, dropper)
    # Map the 92 features to the 10 classes, one per digit
with tf.name_scope('fc2'):
        w_fc2 = weight_variable([NUM_FEATURE, NUM_CLASSES])
        b_fc2 = bias_variable([NUM_CLASSES])
y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2
return y_conv, dropper
# Generate a random neighbor of a parameter tensor
def neighbor(x):
delta = tf.random_normal(shape=x.get_shape(), mean=0.0, stddev=0.001*tf.reduce_mean(x))
x = x + delta
return x
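# A minimal sketch of the textbook Metropolis acceptance rule that the SA loop
# in main() is based on (an illustration under that assumption, not the
# author's exact logic):
#
#     def metropolis_accept(df, temperature):
#         # a worse candidate (df = f_old - f_new > 0) is accepted with
#         # probability exp(-df / T); better candidates are always kept
#         return df <= 0 or random.uniform(0, 1) < exp(-df / temperature)
#
# Note that dividing df by the physical Boltzmann constant (~1.38e-23), as the
# loop below does, makes the exponent so large in magnitude that exp(...)
# underflows to 0.0 for any df > 0.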
def main(_):
    # Load the data
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    # Create the model
x = tf.placeholder(tf.float32, [None, IMAGE_PIXELS])
    # Placeholder for the ground-truth labels (used by the loss)
y_ = tf.placeholder(tf.float32, [None, NUM_CLASSES])
    # Build the deep network graph
y_conv, keep_prob = deep_network(x)
with tf.name_scope('loss'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
logits=y_conv)
cross_entropy = tf.reduce_mean(cross_entropy)
    # Optimize with an algorithm built into TensorFlow
    with tf.name_scope('adam_optimizer'):
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
correct_prediction = tf.cast(correct_prediction, tf.float32)
        # Value used as the objective function
accuracy = tf.reduce_mean(correct_prediction)
    # Save the graph to a temporary location
graph_location = tempfile.mkdtemp()
print('Saving graph to: %s' % graph_location)
train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph())
with tf.Session() as sess:
global w_conv1, b_conv1, w_conv2, b_conv2, w_fc1, b_fc1, w_fc2, b_fc2
sess.run(tf.global_variables_initializer())
while 1:
for epoch in range(EPOCH_NUMBER):
for i in range(1200):
batch = mnist.train.next_batch(BATCH_SIZE)
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
test_accuracy = accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0
})
            # Run the simulated annealing (SA) step
            # Back up the parameter values so they can be restored if no change is accepted
            print("Parameters before SA")
print(sess.run(b_conv1))
print(test_accuracy)
back_up = w_conv1, b_conv1, w_conv2, b_conv2, w_fc1, b_fc1, w_fc2, b_fc2
            # Initialize the incumbent optimum
f = test_accuracy
x0 = back_up
temperature = TEMPERATURE_INIT
for n in range(NEIGHBOR_NUMBER):
# w_conv1, b_conv1, w_conv2, b_conv2 = neighbor(w_conv1), neighbor(b_conv1), \
# neighbor(w_conv2), neighbor(b_conv2)
# w_fc1, b_fc1, w_fc2, b_fc2 = neighbor(w_fc1), neighbor(b_fc1), neighbor(w_fc2), neighbor(b_fc2)
sess.run(w_conv1.assign(neighbor(w_conv1))), sess.run(b_conv1.assign(neighbor(b_conv1)))
sess.run(w_conv2.assign(neighbor(w_conv2))), sess.run(b_conv2.assign(neighbor(b_conv2)))
sess.run(w_fc1.assign(neighbor(w_fc1))), sess.run(b_fc1.assign(neighbor(b_fc1)))
sess.run(w_fc2.assign(neighbor(w_fc2))), sess.run(b_fc2.assign(neighbor(b_fc2)))
w_fc1.eval(), b_fc1.eval(), w_fc2.eval(), b_fc2.eval()
                # Assign the parameters to their neighbor values
                # Objective value under consideration
                print("Candidate parameters:")
print(sess.run(b_conv1))
f_delta = accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0
})
print(f_delta)
if f_delta > f:
f_new = f_delta
else:
df = f - f_delta
r = random.uniform(0, 1)
                    # Boltzmann-distribution acceptance condition
if r > exp(-df/Boltzmann/temperature):
f_new = f_delta
else:
f_new = f
                # Restore the original parameter values
w_conv1, b_conv1, w_conv2, b_conv2, w_fc1, b_fc1, w_fc2, b_fc2 = x0
f = f_new
temperature = REDUCE_FACTOR * temperature
termination_criterion = abs(test_accuracy/f - 1)
            # SA termination condition (termination_criterion is already >= 0)
            if termination_criterion < 0.02:
print(" SA Test accuracy : " + str(accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0
})))
break
print("Done!")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='/tmp/tensorflow/mnist/input_data',
help='Directory for storing input data')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
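# Usage sketch (assumes TensorFlow 1.x and network access to download MNIST):
#
#     python sacnn.py --data_dir=/tmp/tensorflow/mnist/input_data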
|
HPCC-Cloud-Computing/press
|
prediction/Simulated-Annealing/sacnn.py
|
Python
|
mit
| 8,943
|
[
"NEURON"
] |
ed890a34d7a20d08e678dfdcb09284cd9e6b458f62bba59c40ec73b74f0955c4
|
from __future__ import print_function, division
from sympy.core import S, C, sympify
from sympy.core.function import Function, ArgumentIndexError
from sympy.core.logic import fuzzy_and
from sympy.ntheory import sieve
from math import sqrt as _sqrt
from sympy.core.compatibility import reduce, as_int, xrange
from sympy.core.cache import cacheit
class CombinatorialFunction(Function):
"""Base class for combinatorial functions. """
def _eval_simplify(self, ratio, measure):
from sympy.simplify.simplify import combsimp
expr = combsimp(self)
if measure(expr) <= ratio*measure(self):
return expr
return self
###############################################################################
######################## FACTORIAL and MULTI-FACTORIAL ########################
###############################################################################
class factorial(CombinatorialFunction):
"""Implementation of factorial function over nonnegative integers.
By convention (consistent with the gamma function and the binomial
coefficients), factorial of a negative integer is complex infinity.
The factorial is very important in combinatorics where it gives
the number of ways in which `n` objects can be permuted. It also
arises in calculus, probability, number theory, etc.
    The factorial is strictly related to the gamma function: in fact,
    n! = gamma(n+1) for nonnegative integers. Rewrites of this kind are
    very useful for combinatorial simplification.
    The factorial is computed using two algorithms. For small arguments a
    naive product is evaluated, while for larger inputs the Prime-Swing
    algorithm is used. It is the fastest known algorithm and computes n!
    via the prime factorization of a special class of numbers, called
    here the 'swing numbers'.
Examples
========
>>> from sympy import Symbol, factorial, S
>>> n = Symbol('n', integer=True)
>>> factorial(0)
1
>>> factorial(7)
5040
>>> factorial(-2)
zoo
>>> factorial(n)
factorial(n)
>>> factorial(2*n)
factorial(2*n)
>>> factorial(S(1)/2)
factorial(1/2)
See Also
========
factorial2, RisingFactorial, FallingFactorial
"""
def fdiff(self, argindex=1):
if argindex == 1:
return C.gamma(self.args[0] + 1)*C.polygamma(0, self.args[0] + 1)
else:
raise ArgumentIndexError(self, argindex)
_small_swing = [
1, 1, 1, 3, 3, 15, 5, 35, 35, 315, 63, 693, 231, 3003, 429, 6435, 6435, 109395,
12155, 230945, 46189, 969969, 88179, 2028117, 676039, 16900975, 1300075,
35102025, 5014575, 145422675, 9694845, 300540195, 300540195
]
@classmethod
def _swing(cls, n):
if n < 33:
return cls._small_swing[n]
else:
N, primes = int(_sqrt(n)), []
for prime in sieve.primerange(3, N + 1):
p, q = 1, n
while True:
q //= prime
if q > 0:
if q & 1 == 1:
p *= prime
else:
break
if p > 1:
primes.append(p)
for prime in sieve.primerange(N + 1, n//3 + 1):
if (n // prime) & 1 == 1:
primes.append(prime)
L_product = R_product = 1
for prime in sieve.primerange(n//2 + 1, n + 1):
L_product *= prime
for prime in primes:
R_product *= prime
return L_product*R_product
@classmethod
def _recursive(cls, n):
if n < 2:
return 1
else:
return (cls._recursive(n//2)**2)*cls._swing(n)
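    # Worked instance of the recursion above: _recursive(n) is the odd part of
    # n!, computed as _recursive(n//2)**2 * _swing(n); eval() then restores the
    # powers of two via 2**(n - popcount(n)). For n = 6: _recursive(3) = 3,
    # _swing(6) = 5, so _recursive(6) = 3**2 * 5 = 45 and
    # 6! = 45 * 2**(6 - 2) = 720.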
@classmethod
def eval(cls, n):
n = sympify(n)
if n.is_Number:
if n is S.Zero:
return S.One
elif n is S.Infinity:
return S.Infinity
elif n.is_Integer:
if n.is_negative:
return S.ComplexInfinity
else:
n, result = n.p, 1
if n < 20:
for i in range(2, n + 1):
result *= i
else:
N, bits = n, 0
while N != 0:
if N & 1 == 1:
bits += 1
N = N >> 1
result = cls._recursive(n)*2**(n - bits)
return C.Integer(result)
def _eval_rewrite_as_gamma(self, n):
return C.gamma(n + 1)
def _eval_is_integer(self):
if self.args[0].is_integer:
return True
def _eval_is_positive(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
class MultiFactorial(CombinatorialFunction):
pass
class subfactorial(CombinatorialFunction):
"""The subfactorial counts the derangements of n items and is
defined for non-negative integers as::
,
| 1 for n = 0
!n = { 0 for n = 1
| (n - 1)*(!(n - 1) + !(n - 2)) for n > 1
`
It can also be written as int(round(n!/exp(1))) but the recursive
definition with caching is implemented for this function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Subfactorial
Examples
========
>>> from sympy import subfactorial
>>> from sympy.abc import n
>>> subfactorial(n + 1)
subfactorial(n + 1)
>>> subfactorial(5)
44
See Also
========
factorial, sympy.utilities.iterables.generate_derangements
"""
@classmethod
@cacheit
    def _eval(cls, n):
        if not n:
            return 1
        elif n == 1:
            return 0
        return (n - 1)*(cls._eval(n - 1) + cls._eval(n - 2))
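    # Worked instance of the recurrence above: !2 = 1*(!1 + !0) = 1,
    # !3 = 2*(!2 + !1) = 2, !4 = 3*(!3 + !2) = 9, !5 = 4*(!4 + !3) = 44,
    # matching the doctest value for subfactorial(5).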
@classmethod
def eval(cls, arg):
try:
arg = as_int(arg)
if arg < 0:
raise ValueError
return C.Integer(cls._eval(arg))
except ValueError:
if sympify(arg).is_Number:
raise ValueError("argument must be a nonnegative integer")
def _eval_is_integer(self):
return fuzzy_and((self.args[0].is_integer,
self.args[0].is_nonnegative))
class factorial2(CombinatorialFunction):
"""The double factorial n!!, not to be confused with (n!)!
The double factorial is defined for integers >= -1 as::
,
| n*(n - 2)*(n - 4)* ... * 1 for n odd
n!! = { n*(n - 2)*(n - 4)* ... * 2 for n even
| 1 for n = 0, -1
`
Examples
========
>>> from sympy import factorial2, var
>>> var('n')
n
>>> factorial2(n + 1)
factorial2(n + 1)
>>> factorial2(5)
15
>>> factorial2(-1)
1
See Also
========
factorial, RisingFactorial, FallingFactorial
"""
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg == S.Zero or arg == S.NegativeOne:
return S.One
return factorial2(arg - 2)*arg
def _eval_is_integer(self):
return fuzzy_and((self.args[0].is_integer,
(self.args[0] + 1).is_nonnegative))
def _eval_is_positive(self):
return fuzzy_and((self.args[0].is_integer,
(self.args[0] + 1).is_nonnegative))
###############################################################################
######################## RISING and FALLING FACTORIALS ########################
###############################################################################
class RisingFactorial(CombinatorialFunction):
"""Rising factorial (also called Pochhammer symbol) is a double valued
function arising in concrete mathematics, hypergeometric functions
and series expansions. It is defined by:
rf(x, k) = x * (x+1) * ... * (x + k-1)
    where 'x' can be an arbitrary expression and 'k' is an integer. For
    more information see "Concrete Mathematics" by Graham et al., p. 66,
    or visit the http://mathworld.wolfram.com/RisingFactorial.html page.
Examples
========
>>> from sympy import rf
>>> from sympy.abc import x
>>> rf(x, 0)
1
>>> rf(1, 5)
120
>>> rf(x, 5) == x*(1 + x)*(2 + x)*(3 + x)*(4 + x)
True
See Also
========
factorial, factorial2, FallingFactorial
"""
@classmethod
def eval(cls, x, k):
x = sympify(x)
k = sympify(k)
if x is S.NaN:
return S.NaN
elif x is S.One:
return factorial(k)
elif k.is_Integer:
if k is S.NaN:
return S.NaN
elif k is S.Zero:
return S.One
else:
if k.is_positive:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
if k.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
else:
return reduce(lambda r, i: r*(x + i), xrange(0, int(k)), 1)
else:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
return S.Infinity
else:
return 1/reduce(lambda r, i: r*(x - i), xrange(1, abs(int(k)) + 1), 1)
def _eval_rewrite_as_gamma(self, x, k):
return C.gamma(x + k) / C.gamma(x)
def _eval_is_integer(self):
return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer,
self.args[1].is_nonnegative))
class FallingFactorial(CombinatorialFunction):
"""Falling factorial (related to rising factorial) is a double valued
function arising in concrete mathematics, hypergeometric functions
and series expansions. It is defined by
ff(x, k) = x * (x-1) * ... * (x - k+1)
    where 'x' can be an arbitrary expression and 'k' is an integer. For
    more information see "Concrete Mathematics" by Graham et al., p. 66,
    or visit the http://mathworld.wolfram.com/FallingFactorial.html page.
>>> from sympy import ff
>>> from sympy.abc import x
>>> ff(x, 0)
1
>>> ff(5, 5)
120
>>> ff(x, 5) == x*(x-1)*(x-2)*(x-3)*(x-4)
True
See Also
========
factorial, factorial2, RisingFactorial
"""
@classmethod
def eval(cls, x, k):
x = sympify(x)
k = sympify(k)
if x is S.NaN:
return S.NaN
elif k.is_Integer:
if k is S.NaN:
return S.NaN
elif k is S.Zero:
return S.One
else:
if k.is_positive:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
if k.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
else:
return reduce(lambda r, i: r*(x - i), xrange(0, int(k)), 1)
else:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
return S.Infinity
else:
return 1/reduce(lambda r, i: r*(x + i), xrange(1, abs(int(k)) + 1), 1)
def _eval_rewrite_as_gamma(self, x, k):
return (-1)**k * C.gamma(-x + k) / C.gamma(-x)
def _eval_is_integer(self):
return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer,
self.args[1].is_nonnegative))
rf = RisingFactorial
ff = FallingFactorial
###############################################################################
########################### BINOMIAL COEFFICIENTS #############################
###############################################################################
class binomial(CombinatorialFunction):
"""Implementation of the binomial coefficient. It can be defined
in two ways depending on its desired interpretation:
C(n,k) = n!/(k!(n-k)!) or C(n, k) = ff(n, k)/k!
First, in a strict combinatorial sense it defines the
number of ways we can choose 'k' elements from a set of
'n' elements. In this case both arguments are nonnegative
integers and binomial is computed using an efficient
algorithm based on prime factorization.
    The other definition is a generalization for arbitrary 'n', although
    'k' must still be nonnegative. This case is very useful when
    evaluating summations.
    For the sake of convenience, this function returns zero for negative
    'k' regardless of the value of the other argument.
To expand the binomial when n is a symbol, use either
expand_func() or expand(func=True). The former will keep the
polynomial in factored form while the latter will expand the
polynomial itself. See examples for details.
Examples
========
>>> from sympy import Symbol, Rational, binomial, expand_func
>>> n = Symbol('n', integer=True)
>>> binomial(15, 8)
6435
>>> binomial(n, -1)
0
>>> [ binomial(0, i) for i in range(1)]
[1]
>>> [ binomial(1, i) for i in range(2)]
[1, 1]
>>> [ binomial(2, i) for i in range(3)]
[1, 2, 1]
>>> [ binomial(3, i) for i in range(4)]
[1, 3, 3, 1]
>>> [ binomial(4, i) for i in range(5)]
[1, 4, 6, 4, 1]
>>> binomial(Rational(5,4), 3)
-5/128
>>> binomial(n, 3)
binomial(n, 3)
>>> binomial(n, 3).expand(func=True)
n**3/6 - n**2/2 + n/3
>>> expand_func(binomial(n, 3))
n*(n - 2)*(n - 1)/6
"""
def fdiff(self, argindex=1):
if argindex == 1:
# http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/01/
n, k = self.args
return binomial(n, k)*(C.polygamma(0, n + 1) - C.polygamma(0, n - k + 1))
elif argindex == 2:
# http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/02/
n, k = self.args
return binomial(n, k)*(C.polygamma(0, n - k + 1) - C.polygamma(0, k + 1))
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, n, k):
n, k = map(sympify, (n, k))
if k.is_Number:
if k.is_Integer:
if k < 0:
return S.Zero
elif k == 0 or n == k:
return S.One
elif n.is_Integer and n >= 0:
n, k = int(n), int(k)
if k > n:
return S.Zero
elif k > n // 2:
k = n - k
M, result = int(_sqrt(n)), 1
for prime in sieve.primerange(2, n + 1):
if prime > n - k:
result *= prime
elif prime > n // 2:
continue
elif prime > M:
if n % prime < k % prime:
result *= prime
else:
N, K = n, k
exp = a = 0
while N > 0:
a = int((N % prime) < (K % prime + a))
N, K = N // prime, K // prime
exp = a + exp
if exp > 0:
result *= prime**exp
return C.Integer(result)
elif n.is_Number:
result = n - k + 1
for i in xrange(2, k + 1):
result *= n - k + i
result /= i
return result
elif k.is_negative:
return S.Zero
elif (n - k).simplify().is_negative:
return S.Zero
else:
d = n - k
if d.is_Integer:
return cls.eval(n, d)
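    # The prime-exponent loop above is an application of Kummer's theorem: the
    # exponent of a prime p in C(n, k) equals the number of carries when adding
    # k and n - k in base p. Worked example for C(10, 4) = 210 = 2*3*5*7:
    # adding 4 (100) and 6 (110) in base 2 produces exactly one carry, so 2
    # divides C(10, 4) exactly once.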
def _eval_expand_func(self, **hints):
"""
        Function to expand binomial(n, k) when k is a positive integer.
Also,
n is self.args[0] and k is self.args[1] while using binomial(n, k)
"""
n = self.args[0]
if n.is_Number:
return binomial(*self.args)
k = self.args[1]
if k.is_Add and n in k.args:
k = n - k
if k.is_Integer:
if k == S.Zero:
return S.One
elif k < 0:
return S.Zero
else:
n = self.args[0]
result = n - k + 1
for i in xrange(2, k + 1):
result *= n - k + i
result /= i
return result
else:
return binomial(*self.args)
def _eval_rewrite_as_factorial(self, n, k):
return C.factorial(n)/(C.factorial(k)*C.factorial(n - k))
def _eval_rewrite_as_gamma(self, n, k):
return C.gamma(n + 1)/(C.gamma(k + 1)*C.gamma(n - k + 1))
def _eval_is_integer(self):
return self.args[0].is_integer and self.args[1].is_integer
|
Cuuuurzel/KiPyCalc
|
sympy/functions/combinatorial/factorials.py
|
Python
|
mit
| 18,067
|
[
"VisIt"
] |
0ac7e8f9a20e27b1278ed4344f355cfab01c9387e7ae17b1645555f8d7cb43f8
|
import os.path
import time
from remsci.lib.utility import path
from libpipe.cmds.base import BaseCmd
import logging
log = logging.getLogger(__name__)
class HisatCmd(BaseCmd):
'''HISAT command setup
Command usage:
hisat [options]* -x <bt2-idx> {-1 <m1> -2 <m2> | -U <r>} -S <sam>
'''
NAME = 'hisat'
INVOKE_STR = 'hisat'
ARGUMENTS = [
('-x', 'FILE', 'Hisat reference genome index (base name)'),
('-1', 'FILE[,FILE]', 'comma separated list of paired-end 1 files'),
('-2', 'FILE[,FILE]', 'comma separated list of paired-end 2 files'),
('-U', 'FILE[,FILE]', 'comma separated list of unpaired reads'),
('-S', 'FILE', 'Output sam file (defaults to read prefix)'),
('-p', 'INT', 'number of processors'),
('-I', 'INT', 'minimum fragment length. Default = 0.'),
        ('-X', 'INT', 'maximum fragment length. Default = 500.'),
('--un-conc', 'PATH', 'Path to write unaligned, paired-end reads to.'),
('--phred33', None, 'Illumina 1.9+ encoding'),
('--phred64', None, 'Illumina 1.8 and earlier encoding'),
('--fr', None, 'Upstream downstream mate orientations'),
('-q', None, 'Reads are FASTQ files'),
('-f', None, 'Reads are FASTA files'),
]
DEFAULTS = {
'-p': 3, # "$(wc -l < $PBS_NODEFILE)",
'-I': 0,
'-X': 500,
}
REQ_KWARGS = ['-x', ('-1', '-2'), ['-1', '-U']]
REQ_ARGS = 0
REQ_TYPE = [
        [('-1', '-2'), ('.fastq', '.fq', '.fa'), False],
        [('-U', ), ('.fastq', '.fq', '.fa'), False],
[('-S', ), ('sam', )],
]
#
# Custom Exceptions
#
class GenomeIndexError(FileNotFoundError):
ERRMSG = {
'missing': 'Expected index files for {} not found',
}
def __init__(self, msg, *args, genome='', **kwargs):
try:
msg = self.ERRMSG[msg].format(genome)
except KeyError:
pass
super().__init__(msg, *args, **kwargs)
#
# Magic methods
#
def __init__(
self, *args,
encoding='--phred33', orientation='--fr', format='-q',
**kwargs):
try:
super().__init__(*args, **kwargs)
except ValueError:
raise # requirements failure; pass it on up
# update flags
self.flags.extend([
encoding, orientation
])
# set the timestamp if not done already
if not self.timestamp:
self.timestamp = time.strftime("%y%m%d-%H%M%S")
#
# "Public" methods
#
def output(self):
out_list = [self.kwargs['-S'], ]
try:
out_list.append(self.kwargs['--un'])
except KeyError:
out_list.append(self.kwargs['--un-conc'])
return out_list
#
# "Private" methods
#
def _prepcmd(self):
'''Prep for hisat cmd
> parse log file name and set for redirect
> ensure unaligned reads are output
'''
# parse the genome name
genome_name = os.path.basename(self.kwargs['-x'])
# ensure we have an output file
try:
out_dir = os.path.dirname(self.kwargs['-S'])
run_name = self._trubase(self.kwargs['-S'])
except KeyError:
try:
# unpaired sequence file
out_dir = os.path.dirname(self.kwargs['-U'])
run_name = self._trubase(self.kwargs['-U'])
except KeyError:
# paired-end sequence file
out_dir = os.path.dirname(self.kwargs['-1'])
                run_name = os.path.commonprefix(
                    [self.kwargs['-1'], self.kwargs['-2']])
# ensure common prefix includes some of the base name
if run_name == out_dir:
run_name = self._trubase(self.kwargs['-1'])
else:
run_name = os.path.basename(run_name)
finally:
# generated output name should contain genome name, too
if genome_name not in run_name:
run_name = '_'.join([run_name, genome_name])
# ensure we have '-S' set
self.kwargs['-S'] = os.path.join(out_dir, run_name + '.sam')
# set log file name
self.id = '_'.join(
[run_name, genome_name, self.timestamp, self.name])
log_path = os.path.join(out_dir, self.id + '.log')
# setup stdout redirect
self.redirect = '2>&1 | tee -a {}'.format(log_path)
# ensure unaligned reads are written to a file
unal_key = '--un' if '-U' in self.kwargs else '--un-conc'
unal = os.path.splitext(self.kwargs['-S'])[0] + '.unal.fastq'
self.kwargs.update({unal_key: unal})
def _additional_requirements(
self, expected_file_count=10, extension='.bt2'):
'''Additional command requirements
Index check:
expect 10 files: *.[1-6].bt2, *.rev.[1,2,5,6].bt2
'''
# ensure the index exists
genome_dir, genome_base = os.path.split(self.kwargs['-x'])
index_pattern = r'{}\..*{}'.format(genome_base, extension)
index_files = path.walk_file(genome_dir, pattern=index_pattern)
if len(index_files) != expected_file_count:
raise self.GenomeIndexError('missing', genome=genome_base)
class Hisat2Cmd(HisatCmd):
'''Hisat 2 Aligner
Current version uses the same parameters as hisat
'''
NAME = 'hisat2'
INVOKE_STR = 'hisat2'
def _additional_requirements(
self, expected_file_count=8, extension='.ht2'):
'''Additional command requirements
Index check:
            expect 8 files: *.[1-8].ht2
'''
super()._additional_requirements(
expected_file_count=expected_file_count,
extension=extension
)
class Bowtie2Cmd(HisatCmd):
'''Bowtie 2 Aligner
Current version uses the same parameters as hisat
'''
NAME = 'bowtie2'
INVOKE_STR = 'bowtie2'
def _additional_requirements(
self, expected_file_count=6, extension='.bt2'):
'''Additional command requirements
Index check:
            expect 6 files: *.[1-4].bt2, *.rev.[1-2].bt2
'''
super()._additional_requirements(
expected_file_count=expected_file_count,
extension=extension
)
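# A minimal usage sketch (hypothetical paths; the exact keyword handling is
# defined by BaseCmd, so treat the constructor call as an assumption):
#
#     cmd = Hisat2Cmd(**{'-x': 'genomes/hg38',
#                        '-1': 'r1.fastq', '-2': 'r2.fastq'})
#     cmd._prepcmd()       # derives '-S', the log redirect, and '--un-conc'
#     print(cmd.output())  # e.g. ['.../r1_hg38.sam', '.../r1_hg38.unal.fastq']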
|
muppetjones/rempipe
|
libpipe/cmds/align.py
|
Python
|
gpl-3.0
| 6,515
|
[
"Bowtie"
] |
3d15c62f58fef34d37e0d6e35b7119aa7c9e2cc2a0002488a0f2634f2f2c7878
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# avoid the singularity at the Z axis using 0.0001 radian offset
plane = vtk.vtkPlaneSource()
plane.SetOrigin(1.0, 3.14159265359 - 0.0001, 0.0)
plane.SetPoint1(1.0, 3.14159265359 - 0.0001, 6.28318530719)
plane.SetPoint2(1.0, 0.0001, 0.0)
plane.SetXResolution(19)
plane.SetYResolution(9)
transform = vtk.vtkSphericalTransform()
tpoly = vtk.vtkTransformPolyDataFilter()
tpoly.SetInputConnection(plane.GetOutputPort())
tpoly.SetTransform(transform)
# also cover the inverse transformation by going back and forth
tpoly2 = vtk.vtkTransformPolyDataFilter()
tpoly2.SetInputConnection(tpoly.GetOutputPort())
tpoly2.SetTransform(transform.GetInverse())
tpoly3 = vtk.vtkTransformPolyDataFilter()
tpoly3.SetInputConnection(tpoly2.GetOutputPort())
tpoly3.SetTransform(transform)
mapper = vtk.vtkDataSetMapper()
mapper.SetInputConnection(tpoly3.GetOutputPort())
earth = vtk.vtkPNMReader()
earth.SetFileName(VTK_DATA_ROOT + "/Data/earth.ppm")
texture = vtk.vtkTexture()
texture.SetInputConnection(earth.GetOutputPort())
texture.InterpolateOn()
world = vtk.vtkActor()
world.SetMapper(mapper)
world.SetTexture(texture)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(world)
ren1.SetBackground(0.1, 0.2, 0.4)
renWin.SetSize(300, 300)
ren1.GetActiveCamera().SetPosition(8, -10, 6)
ren1.GetActiveCamera().SetFocalPoint(0, 0, 0)
ren1.GetActiveCamera().SetViewAngle(15)
ren1.GetActiveCamera().SetViewUp(0.0, 0.0, 1.0)
# render the image
#
cam1 = ren1.GetActiveCamera()
cam1.Zoom(1.4)
ren1.ResetCameraClippingRange()
iren.Initialize()
#iren.Start()
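# The plane above is parameterized in spherical coordinates as
# (rho, phi, theta) = (1, (0, pi), (0, 2*pi)); vtkSphericalTransform maps those
# points onto the unit sphere, so the textured plane renders as a globe.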
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Common/Transforms/Testing/Python/spherical.py
|
Python
|
bsd-3-clause
| 1,983
|
[
"VTK"
] |
5b717beea36cc74a2828f87a43b83b2394a5123c9c69cd5bac92bd4bf9ed13ba
|
#! /usr/bin/python
"""Copyright 2011 Phidgets Inc.
This work is licensed under the Creative Commons Attribution 2.5 Canada License.
To view a copy of this license, visit http://creativecommons.org/licenses/by/2.5/ca/
"""
__author__="Adam Stelmack"
__version__="2.1.8"
__date__ ="14-Jan-2011 2:29:14 PM"
#Basic imports
import sys
from time import sleep
#Phidget specific imports
from Phidgets.PhidgetException import PhidgetException
from Phidgets.Devices.Bridge import Bridge, BridgeGain
from Phidgets.Phidget import PhidgetLogLevel
#Create a bridge object
try:
bridge = Bridge()
except RuntimeError as e:
print("Runtime Exception: %s" % e.details)
print("Exiting....")
exit(1)
#Information Display Function
def displayDeviceInfo():
print("|------------|----------------------------------|--------------|------------|")
print("|- Attached -|- Type -|- Serial No. -|- Version -|")
print("|------------|----------------------------------|--------------|------------|")
print("|- %8s -|- %30s -|- %10d -|- %8d -|" % (bridge.isAttached(), bridge.getDeviceName(), bridge.getSerialNum(), bridge.getDeviceVersion()))
print("|------------|----------------------------------|--------------|------------|")
print("Number of bridge inputs: %i" % (bridge.getInputCount()))
print("Data Rate Max: %d" % (bridge.getDataRateMax()))
print("Data Rate Min: %d" % (bridge.getDataRateMin()))
print("Input Value Max: %d" % (bridge.getBridgeMax(0)))
print("Input Value Min: %d" % (bridge.getBridgeMin(0)))
#Event Handler Callback Functions
def BridgeAttached(e):
attached = e.device
print("Bridge %i Attached!" % (attached.getSerialNum()))
def BridgeDetached(e):
detached = e.device
print("Bridge %i Detached!" % (detached.getSerialNum()))
def BridgeError(e):
try:
source = e.device
print("Bridge %i: Phidget Error %i: %s" % (source.getSerialNum(), e.eCode, e.description))
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
def BridgeData(e):
source = e.device
print("Bridge %i: Input %i: %f" % (source.getSerialNum(), e.index, e.value))
#Main Program Code
try:
#logging example, uncomment to generate a log file
#bridge.enableLogging(PhidgetLogLevel.PHIDGET_LOG_VERBOSE, "phidgetlog.log")
bridge.setOnAttachHandler(BridgeAttached)
bridge.setOnDetachHandler(BridgeDetached)
bridge.setOnErrorhandler(BridgeError)
bridge.setOnBridgeDataHandler(BridgeData)
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Opening phidget object....")
try:
bridge.openPhidget()
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Waiting for attach....")
try:
bridge.waitForAttach(10000)
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
try:
bridge.closePhidget()
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Exiting....")
exit(1)
else:
displayDeviceInfo()
try:
print("Set data rate to 8ms ...")
bridge.setDataRate(16)
sleep(2)
print("Set Gain to 8...")
bridge.setGain(0, BridgeGain.PHIDGET_BRIDGE_GAIN_8)
sleep(2)
print("Enable the Bridge input for reading data...")
bridge.setEnabled(0, True)
sleep(2)
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
try:
bridge.closePhidget()
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Exiting....")
exit(1)
print("Press Enter to quit....")
chr = sys.stdin.read(1)
print("Closing...")
try:
print("Disable the Bridge input for reading data...")
bridge.setEnabled(0, False)
sleep(2)
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
try:
bridge.closePhidget()
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Exiting....")
exit(1)
try:
bridge.closePhidget()
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Done.")
exit(0)
|
danielsuo/mobot
|
src/move/Python/Bridge-simple.py
|
Python
|
mit
| 4,555
|
[
"VisIt"
] |
990a4a5bd3f957a265f68f8dfeddb68cc9565394ccc18d889f3fba059736e814
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
'''
This software has been developed by:
GI Genética, Fisiología e Historia Forestal
Dpto. Sistemas y Recursos Naturales
ETSI Montes, Forestal y del Medio Natural
Universidad Politécnica de Madrid
http://gfhforestal.com/
https://github.com/ggfhf/
Licence: GNU General Public Licence Version 3.
'''
#-------------------------------------------------------------------------------
'''
This source contains general functions and classes used in NGScloud
software package used in both console mode and gui mode.
'''
#-------------------------------------------------------------------------------
import configparser
import datetime
import os
import re
import subprocess
import sys
import tkinter
import xconfiguration
#-------------------------------------------------------------------------------
def get_project_code():
'''
    Get the project code.
'''
return 'ngscloud'
#-------------------------------------------------------------------------------
def get_project_name():
'''
Get the project name.
'''
return 'NGScloud'
#-------------------------------------------------------------------------------
def get_project_version():
'''
    Get the project version.
'''
return '0.94'
#-------------------------------------------------------------------------------
def get_project_manual_file():
'''
    Get the project manual file path.
'''
return './NGScloud-manual.pdf'
#-------------------------------------------------------------------------------
def get_project_image_file():
'''
    Get the project image file path.
'''
return './image_NGScloud.png'
#-------------------------------------------------------------------------------
def get_starcluster():
'''
Get the script to run StarCluster corresponding to the Operating System.
'''
# assign the StarCluster script
if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
starcluster = './starcluster.sh'
elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
        starcluster = r'.\starcluster.bat'
# return the StarCluster script
return starcluster
#-------------------------------------------------------------------------------
def get_editor():
'''
Get the editor depending on the Operating System.
'''
# assign the editor
if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
editor = 'nano'
elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
editor = 'notepad'
# return the editor
return editor
#-------------------------------------------------------------------------------
def get_volume_creator_name():
'''
Get the template name of the volume creator.
'''
# set the template name of the volume creator
volume_creator_name = '{0}-volume-creator'.format(xconfiguration.environment)
# return the template name of the volume creator
return volume_creator_name
#-------------------------------------------------------------------------------
def get_all_applications_selected_code():
'''
Get the code that means all applications.
'''
return 'all_applications_selected'
#-------------------------------------------------------------------------------
def get_bedtools_code():
'''
Get the BEDTools code used to identify its processes.
'''
return 'bedtools'
#-------------------------------------------------------------------------------
def get_bedtools_name():
'''
Get the BEDTools name used to title.
'''
return 'BEDtools'
#-------------------------------------------------------------------------------
def get_bedtools_bioconda_code():
'''
Get the BEDTools code used to identify the Bioconda package.
'''
return 'bedtools'
#-------------------------------------------------------------------------------
def get_bioconda_code():
'''
Get the Bioconda code used to identify its processes.
'''
return 'bioconda'
#-------------------------------------------------------------------------------
def get_bioconda_name():
'''
Get the Bioconda name used to title.
'''
return 'Bioconda'
#-------------------------------------------------------------------------------
def get_blastplus_code():
'''
Get the BLAST+ code used to identify its processes.
'''
return 'blast'
#-------------------------------------------------------------------------------
def get_blastplus_name():
'''
Get the BLAST+ name used to title.
'''
return 'BLAST+'
#-------------------------------------------------------------------------------
def get_blastplus_bioconda_code():
'''
Get the BLAST+ code used to identify the Bioconda package.
'''
return 'blast'
#-------------------------------------------------------------------------------
def get_bowtie2_code():
'''
Get the Bowtie2 code used to identify its processes.
'''
return 'bowtie2'
#-------------------------------------------------------------------------------
def get_bowtie2_name():
'''
Get the Bowtie2 name used to title.
'''
return 'Bowtie2'
#-------------------------------------------------------------------------------
def get_bowtie2_bioconda_code():
'''
Get the Bowtie2 code used to identify the Bioconda package.
'''
return 'bowtie2'
#-------------------------------------------------------------------------------
def get_busco_code():
'''
Get the BUSCO code used to identify its processes.
'''
return 'busco'
#-------------------------------------------------------------------------------
def get_busco_name():
'''
Get the BUSCO name used to title.
'''
return 'BUSCO'
#-------------------------------------------------------------------------------
def get_busco_bioconda_code():
'''
Get the BUSCO code used to identify the Bioconda package.
'''
return 'busco'
#-------------------------------------------------------------------------------
def get_cd_hit_code():
'''
Get the CD-HIT code used to identify its processes.
'''
return 'cdhit'
#-------------------------------------------------------------------------------
def get_cd_hit_name():
'''
Get the CD-HIT name used to title.
'''
return 'CD-HIT'
#-------------------------------------------------------------------------------
def get_cd_hit_bioconda_code():
'''
Get the CD-HIT code used to identify the Bioconda package.
'''
return 'cd-hit'
#-------------------------------------------------------------------------------
def get_cd_hit_est_code():
'''
Get the CD-HIT-EST code used to identify its processes.
'''
return 'cdhitest'
#-------------------------------------------------------------------------------
def get_cd_hit_est_name():
'''
Get the CD-HIT-EST name used to title.
'''
return 'CD-HIT-EST'
#-------------------------------------------------------------------------------
def get_conda_code():
'''
Get the Conda code used to identify its processes.
'''
return 'conda'
#-------------------------------------------------------------------------------
def get_conda_name():
'''
Get the Conda name used to title.
'''
return 'Conda'
#-------------------------------------------------------------------------------
def get_detonate_code():
'''
Get the DETONATE code used to identify its processes.
'''
return 'detonate'
#-------------------------------------------------------------------------------
def get_detonate_name():
'''
Get the DETONATE name used to title.
'''
return 'DETONATE'
#-------------------------------------------------------------------------------
def get_detonate_bioconda_code():
'''
Get the DETONATE code used to identify the Bioconda package.
'''
return 'detonate'
#-------------------------------------------------------------------------------
def get_emboss_code():
'''
Get the EMBOSS code used to identify its processes.
'''
return 'emboss'
#-------------------------------------------------------------------------------
def get_emboss_name():
'''
Get the EMBOSS name used to title.
'''
return 'EMBOSS'
#-------------------------------------------------------------------------------
def get_emboss_bioconda_code():
'''
    Get the EMBOSS code used to identify the Bioconda package.
'''
return 'emboss'
#-------------------------------------------------------------------------------
def get_fastqc_code():
'''
Get the FastQC code used to identify its processes.
'''
return 'fastqc'
#-------------------------------------------------------------------------------
def get_fastqc_name():
'''
Get the FastQC name used to title.
'''
return 'FastQC'
#-------------------------------------------------------------------------------
def get_fastqc_bioconda_code():
'''
Get the FastQC code used to identify the Bioconda package.
'''
return 'fastqc'
#-------------------------------------------------------------------------------
def get_gmap_gsnap_code():
'''
Get the GMAP-GSNAP code used to identify its processes.
'''
return 'gmap_gsnap'
#-------------------------------------------------------------------------------
def get_gmap_gsnap_name():
'''
Get the GMAP-GSNAP name used to title.
'''
return 'GMAP-GSNAP'
#-------------------------------------------------------------------------------
def get_gmap_gsnap_bioconda_code():
'''
Get the GMAP-GSNAP code used to identify the Bioconda package.
'''
return 'gmap'
#-------------------------------------------------------------------------------
def get_gmap_code():
'''
Get the GMAP code used to identify its processes.
'''
return 'gmap'
#-------------------------------------------------------------------------------
def get_gmap_name():
'''
Get the GMAP name used to title.
'''
return 'GMAP'
#-------------------------------------------------------------------------------
def get_gzip_code():
'''
Get the gzip code used to identify its processes.
'''
return 'gzip'
#-------------------------------------------------------------------------------
def get_gzip_name():
'''
Get the gzip name used to title.
'''
return 'gzip'
#-------------------------------------------------------------------------------
def get_insilico_read_normalization_code():
'''
Get the insilico_read_normalization (Trinity package) code used to identify its
processes.
'''
return 'insreadnor'
#-------------------------------------------------------------------------------
def get_insilico_read_normalization_name():
'''
Get the insilico_read_normalization (Trinity package) name used to title.
'''
return 'insilico_read_normalization'
#-------------------------------------------------------------------------------
def get_miniconda3_code():
'''
Get the Miniconda3 code used to identify its processes.
'''
return 'miniconda3'
#-------------------------------------------------------------------------------
def get_miniconda3_name():
'''
Get the Miniconda3 name used to title.
'''
return 'Miniconda3'
#-------------------------------------------------------------------------------
def get_ngshelper_code():
'''
Get the NGShelper code used to identify its processes.
'''
return 'ngshelper'
#-------------------------------------------------------------------------------
def get_ngshelper_name():
'''
Get the NGShelper name used to title.
'''
return 'NGShelper'
#-------------------------------------------------------------------------------
def get_quast_code():
'''
    Get the QUAST code used to identify its processes.
'''
return 'quast'
#-------------------------------------------------------------------------------
def get_quast_name():
'''
Get the QUAST name used to title.
'''
return 'QUAST'
#-------------------------------------------------------------------------------
def get_quast_bioconda_code():
'''
Get the QUAST code used to identify the Bioconda package.
'''
return 'quast'
#-------------------------------------------------------------------------------
def get_r_code():
'''
Get the R code used to identify its processes.
'''
return 'r'
#-------------------------------------------------------------------------------
def get_r_name():
'''
Get the R name used to title.
'''
return 'R'
#-------------------------------------------------------------------------------
def get_ref_eval_code():
'''
Get the REF-EVAL (DETONATE package) code used to identify its processes.
'''
return 'refeval'
#-------------------------------------------------------------------------------
def get_ref_eval_name():
'''
Get the REF-EVAL (DETONATE package) name used to title.
'''
return 'REF-EVAL'
#-------------------------------------------------------------------------------
def get_rnaquast_code():
'''
Get the rnaQUAST code used to identify its processes.
'''
return 'rnaquast'
#-------------------------------------------------------------------------------
def get_rnaquast_name():
'''
Get the rnaQUAST name used to title.
'''
return 'rnaQUAST'
#-------------------------------------------------------------------------------
def get_rsem_code():
'''
Get the RSEM code used to identify its processes.
'''
return 'rsem'
#-------------------------------------------------------------------------------
def get_rsem_name():
'''
Get the RSEM name used to title.
'''
return 'RSEM'
#-------------------------------------------------------------------------------
def get_rsem_bioconda_code():
'''
Get the RSEM code used to identify the Bioconda package.
'''
return 'rsem'
#-------------------------------------------------------------------------------
def get_rsem_eval_code():
'''
Get the RSEM-EVAL (DETONATE package) code used to identify its processes.
'''
return 'rsemeval'
#-------------------------------------------------------------------------------
def get_rsem_eval_name():
'''
Get the RSEM-EVAL (DETONATE package) name used to title.
'''
return 'RSEM-EVAL'
#-------------------------------------------------------------------------------
def get_samtools_code():
'''
    Get the SAMtools code used to identify its processes.
'''
return 'samtools'
#-------------------------------------------------------------------------------
def get_samtools_name():
'''
    Get the SAMtools name used to title.
'''
return 'SAMtools'
#-------------------------------------------------------------------------------
def get_samtools_bioconda_code():
'''
    Get the SAMtools code used to identify the Bioconda package.
'''
return 'samtools'
#-------------------------------------------------------------------------------
def get_soapdenovotrans_code():
'''
Get the SOAPdenovo-Trans code used to identify its processes.
'''
return 'sdnt'
#-------------------------------------------------------------------------------
def get_soapdenovotrans_name():
'''
    Get the SOAPdenovo-Trans name used in titles.
'''
return 'SOAPdenovo-Trans'
#-------------------------------------------------------------------------------
def get_soapdenovotrans_bioconda_code():
'''
Get the SOAPdenovo-Trans code used to identify the Bioconda package.
'''
return 'soapdenovo-trans'
#-------------------------------------------------------------------------------
def get_star_code():
'''
Get the STAR code used to identify its processes.
'''
return 'star'
#-------------------------------------------------------------------------------
def get_star_name():
'''
    Get the STAR name used in titles.
'''
return 'STAR'
#-------------------------------------------------------------------------------
def get_star_bioconda_code():
'''
Get the STAR code used to identify the Bioconda package.
'''
return 'star'
#-------------------------------------------------------------------------------
def get_transabyss_code():
'''
Get the Trans-ABySS code used to identify its processes.
'''
return 'transabyss'
#-------------------------------------------------------------------------------
def get_transabyss_name():
'''
    Get the Trans-ABySS name used in titles.
'''
return 'Trans-ABySS'
#-------------------------------------------------------------------------------
def get_transabyss_bioconda_code():
'''
    Get the Trans-ABySS code used to identify the Bioconda package.
'''
return 'transabyss'
#-------------------------------------------------------------------------------
def get_transcript_filter_code():
'''
Get the transcripts-filter (NGShelper package) code used to identify its
processes.
'''
return 'transfil'
#-------------------------------------------------------------------------------
def get_transcript_filter_name():
'''
    Get the transcripts-filter (NGShelper package) name used in titles.
'''
return 'transcript-filter'
#-------------------------------------------------------------------------------
def get_transcriptome_blastx_code():
'''
Get the transcriptome-blastx (NGShelper package) code used to identify its
processes.
'''
return 'transbastx'
#-------------------------------------------------------------------------------
def get_transcriptome_blastx_name():
'''
    Get the transcriptome-blastx (NGShelper package) name used in titles.
'''
return 'transcriptome-blastx'
#-------------------------------------------------------------------------------
def get_transrate_code():
'''
Get the Transrate code used to identify its processes.
'''
return 'transrate'
#-------------------------------------------------------------------------------
def get_transrate_name():
'''
    Get the Transrate name used in titles.
'''
return 'Transrate'
#-------------------------------------------------------------------------------
def get_trimmomatic_code():
'''
Get the Trimmomatic code used to identify its processes.
'''
return 'trimmo'
#-------------------------------------------------------------------------------
def get_trimmomatic_name():
'''
    Get the Trimmomatic name used in titles.
'''
return 'Trimmomatic'
#-------------------------------------------------------------------------------
def get_trimmomatic_bioconda_code():
'''
    Get the Trimmomatic code used to identify the Bioconda package.
'''
return 'trimmomatic'
#-------------------------------------------------------------------------------
def get_trinity_code():
'''
Get the Trinity code used to identify its processes.
'''
return 'trinity'
#-------------------------------------------------------------------------------
def get_trinity_name():
'''
    Get the Trinity name used in titles.
'''
return 'Trinity'
#-------------------------------------------------------------------------------
def get_trinity_bioconda_code():
'''
    Get the Trinity code used to identify the Bioconda package.
'''
return 'trinity'
#-------------------------------------------------------------------------------
def get_config_dir():
'''
Get the configuration directory in the local computer.
'''
return './config'
#-------------------------------------------------------------------------------
def get_keypairs_dir():
'''
Get the key pairs directory in the local computer.
'''
return './keypairs'
#-------------------------------------------------------------------------------
def get_temp_dir():
'''
    Get the temporary directory in the local computer.
'''
return './temp'
#-------------------------------------------------------------------------------
def get_log_dir():
'''
    Get the log directory in the local computer.
'''
return './logs'
#-------------------------------------------------------------------------------
def get_log_file(function_name=None):
'''
    Get the log file name in the local computer.
'''
# set the log file name
now = datetime.datetime.now()
date = datetime.datetime.strftime(now, '%y%m%d')
time = datetime.datetime.strftime(now, '%H%M%S')
if function_name is not None:
log_file_name = '{0}/{1}-{2}-{3}-{4}.txt'.format(get_log_dir(), xconfiguration.environment, function_name, date, time)
else:
log_file_name = '{0}/{1}-x-{2}-{3}.txt'.format(get_log_dir(), xconfiguration.environment, date, time)
# return the log file name
return log_file_name
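#-------------------------------------------------------------------------------
# Illustrative note, not part of the original module: assuming the environment
# value is 'aws', get_log_file('run_trinity_process') returns a name like
#     ./logs/aws-run_trinity_process-200131-235959.txt
# where the last two components are the current date (%y%m%d) and time (%H%M%S).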
#-------------------------------------------------------------------------------
def list_log_files_command(local_process_id):
'''
Get the command to list log files in the local computer depending on the Operating System.
'''
# get log dir
log_dir = get_log_dir()
# assign the command
if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
if local_process_id == 'all':
command = 'ls {0}/{1}-*.txt'.format(log_dir, xconfiguration.environment)
else:
command = 'ls {0}/{1}-{2}-*.txt'.format(log_dir, xconfiguration.environment, local_process_id)
elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
log_dir = log_dir.replace('/','\\')
if local_process_id == 'all':
            command = 'dir /B {0}\\{1}-*.txt'.format(log_dir, xconfiguration.environment)
        else:
            command = 'dir /B {0}\\{1}-{2}-*.txt'.format(log_dir, xconfiguration.environment, local_process_id)
# return the command
return command
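#-------------------------------------------------------------------------------
# Illustrative note, not part of the original module: with the same assumed
# environment 'aws', list_log_files_command('run_trinity_process') builds
#     ls ./logs/aws-run_trinity_process-*.txt        (Linux / Mac OS X)
#     dir /B .\logs\aws-run_trinity_process-*.txt    (Windows / Cygwin)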
#-------------------------------------------------------------------------------
def get_local_process_dict():
'''
Get the local process dictionary.
'''
# build the local process dictionary
local_process_dict = {}
local_process_dict['add_node']= {'text': 'Add node in a cluster'}
local_process_dict['create_cluster']= {'text': 'Create cluster'}
local_process_dict['create_volume']= {'text': 'Create volume'}
local_process_dict['delink_volume_from_template']= {'text': 'Delink volume in a cluster template'}
local_process_dict['download_result_dataset']= {'text': 'Download result dataset from a cluster'}
local_process_dict['kill_batch_job']= {'text': 'Kill batch job'}
local_process_dict['link_volume_to_template']= {'text': 'Link volume in a cluster template'}
local_process_dict['list_clusters']= {'text': 'List clusters'}
local_process_dict['mount_volume']= {'text': 'Mount volume in a node'}
local_process_dict['remove_node']= {'text': 'Remove node in a cluster'}
local_process_dict['remove_volume']= {'text': 'Remove volume'}
local_process_dict['replicate_volume']= {'text': 'Replicate volume to another zone'}
local_process_dict['resize_volume']= {'text': 'Resize volume'}
local_process_dict['restart_cluster']= {'text': 'Restart cluster'}
local_process_dict['review_volume_links']= {'text': 'Review volumes linked to cluster templates'}
local_process_dict['run_busco_process']= {'text': 'Run {0} process'.format(get_busco_name())}
local_process_dict['run_cd_hit_est_process']= {'text': 'Run {0} process'.format(get_cd_hit_est_name())}
local_process_dict['run_fastqc_process']= {'text': 'Run {0} process'.format(get_fastqc_name())}
local_process_dict['run_gmap_process']= {'text': 'Run {0} process'.format(get_gmap_name())}
local_process_dict['run_gzip_process']= {'text': 'Run compression/decompression process'}
local_process_dict['run_insilico_read_normalization_process']= {'text': 'Run {0} process'.format(get_insilico_read_normalization_name())}
local_process_dict['run_quast_process']= {'text': 'Run {0} process'.format(get_quast_name())}
local_process_dict['run_ref_eval_process']= {'text': 'Run {0} process'.format(get_ref_eval_name())}
local_process_dict['run_rnaquast_process']= {'text': 'Run {0} process'.format(get_rnaquast_name())}
local_process_dict['run_rsem_eval_process']= {'text': 'Run {0} process'.format(get_rsem_eval_name())}
local_process_dict['run_soapdenovotrans_process']= {'text': 'Run {0} process'.format(get_soapdenovotrans_name())}
local_process_dict['run_star_process']= {'text': 'Run {0} process'.format(get_star_name())}
local_process_dict['run_transabyss_process']= {'text': 'Run {0} process'.format(get_transabyss_name())}
local_process_dict['run_transcript_filter_process']= {'text': 'Run {0} process'.format(get_transcript_filter_name())}
local_process_dict['run_transcriptome_blastx_process']= {'text': 'Run {0} process'.format(get_transcriptome_blastx_name())}
local_process_dict['run_transrate_process']= {'text': 'Run {0} process'.format(get_transrate_name())}
local_process_dict['run_trimmomatic_process']= {'text': 'Run {0} process'.format(get_trimmomatic_name())}
local_process_dict['run_trinity_process']= {'text': 'Run {0} process'.format(get_trinity_name())}
local_process_dict['setup_bioconda_package_list']= {'text': 'Set up Bioconda package list'}
local_process_dict['setup_conda_package_list']= {'text': 'Set up Conda package list'}
local_process_dict['setup_miniconda3']= {'text': 'Set up {0}'.format(get_miniconda3_name())}
local_process_dict['setup_ngshelper']= {'text': 'Set up {0}'.format(get_ngshelper_name())}
local_process_dict['setup_r']= {'text': 'Set up {0}'.format(get_r_name())}
local_process_dict['setup_rnaquast']= {'text': 'Set up {0}'.format(get_rnaquast_name())}
local_process_dict['setup_transrate']= {'text': 'Set up {0}'.format(get_transrate_name())}
local_process_dict['show_cluster_composing']= {'text': 'Show cluster composing'}
local_process_dict['show_status_batch_jobs']= {'text': 'Show status of batch jobs'}
local_process_dict['stop_cluster']= {'text': 'Stop cluster'}
local_process_dict['terminate_cluster']= {'text': 'Terminate cluster'}
local_process_dict['terminate_volume_creator']= {'text': 'Terminate volume creator'}
local_process_dict['unmount_volume']= {'text': 'Unmount volume in a node'}
local_process_dict['upload_database_dataset']= {'text': 'Upload database dataset to a cluster'}
local_process_dict['upload_read_dataset']= {'text': 'Upload read dataset to a cluster'}
local_process_dict['upload_reference_dataset']= {'text': 'Upload reference dataset to a cluster'}
# return the local process dictionary
return local_process_dict
#-------------------------------------------------------------------------------
def get_local_process_id(local_process_text):
'''
Get the local process identification from the local process text.
'''
# initialize the control variable
local_process_id_found = None
# get the dictionary of the local processes
local_process_dict = get_local_process_dict()
# search the local process identification
for local_process_id in local_process_dict.keys():
if local_process_dict[local_process_id]['text'] == local_process_text:
local_process_id_found = local_process_id
break
# return the local process identification
return local_process_id_found
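#-------------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: shows the round trip
# between a local process identification and its menu text, using the reverse
# lookup implemented by get_local_process_id() above.
def _example_local_process_lookup():
    '''
    Print a sample lookup (hypothetical documentation helper, safe to remove).
    '''
    text = get_local_process_dict()['run_trinity_process']['text']
    # prints: Run Trinity process -> run_trinity_process
    print('{0} -> {1}'.format(text, get_local_process_id(text)))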
#-------------------------------------------------------------------------------
def get_cluster_app_dir():
'''
    Get the application directory in the cluster.
'''
return '/apps'
#-------------------------------------------------------------------------------
def get_cluster_reference_dir():
'''
Get the reference directory in the cluster.
'''
return '/references'
#-------------------------------------------------------------------------------
def get_cluster_reference_dataset_dir(reference_dataset_id):
'''
Get the directory of a reference dataset in the cluster.
'''
# set the reference directory in the cluster
cluster_reference_dataset_dir = '{0}/{1}'.format(get_cluster_reference_dir(), reference_dataset_id)
# return the reference directory in the cluster
return cluster_reference_dataset_dir
#-------------------------------------------------------------------------------
def get_cluster_reference_file(reference_dataset_id, file_name):
'''
Get the reference file path of a reference dataset in the cluster.
'''
# set the path of the reference file
cluster_reference_file = '{0}/{1}'.format(get_cluster_reference_dataset_dir(reference_dataset_id), os.path.basename(file_name))
# return the path of the reference file
return cluster_reference_file
#-------------------------------------------------------------------------------
def get_cluster_database_dir():
'''
Get the database directory in the cluster.
'''
return '/databases'
#-------------------------------------------------------------------------------
def get_cluster_database_dataset_dir(database_dataset_id):
'''
Get the directory of a database dataset in the cluster.
'''
# set the database directory in the cluster
cluster_database_dataset_dir = '{0}/{1}'.format(get_cluster_database_dir(), database_dataset_id)
# return the database directory in the cluster
return cluster_database_dataset_dir
#-------------------------------------------------------------------------------
def get_cluster_database_file(database_dataset_id, file_name):
'''
Get the database file path of a database dataset in the cluster.
'''
# set the path of the database file
cluster_database_file = '{0}/{1}'.format(get_cluster_database_dataset_dir(database_dataset_id), os.path.basename(file_name))
# return the path of the database file
return cluster_database_file
#-------------------------------------------------------------------------------
def get_cluster_read_dir():
'''
Get the read directory in the cluster.
'''
return '/reads'
#-------------------------------------------------------------------------------
def get_uploaded_read_dataset_name():
'''
    Get the name of the raw read dataset in the cluster.
'''
return 'uploaded-reads'
#-------------------------------------------------------------------------------
def get_cluster_experiment_read_dataset_dir(experiment_id, read_dataset_id):
'''
    Get the directory of an experiment read dataset in the cluster.
'''
# set the experiment read directory in the cluster
cluster_experiment_read_dataset_dir = '{0}/{1}/{2}'.format(get_cluster_read_dir(), experiment_id, read_dataset_id)
# return the experiment read directory in the cluster
return cluster_experiment_read_dataset_dir
#-------------------------------------------------------------------------------
def get_cluster_read_file(experiment_id, read_dataset_id, file_name):
'''
Get the read file path of an experiment read dataset in the cluster.
'''
# set the path of the read file
cluster_read_file = '{0}/{1}'.format(get_cluster_experiment_read_dataset_dir(experiment_id, read_dataset_id), os.path.basename(file_name))
# return the path of the read file
return cluster_read_file
#-------------------------------------------------------------------------------
def get_cluster_result_dir():
'''
Get the result directory in the cluster.
'''
return '/results'
#-------------------------------------------------------------------------------
def get_cluster_experiment_result_dir(experiment_id):
'''
Get the directory of run result datasets in the cluster.
'''
# set the run result directory in the cluster
cluster_experiment_results_dir = '{0}/{1}'.format(get_cluster_result_dir(), experiment_id)
# return the run result directory in the cluster
return cluster_experiment_results_dir
#-------------------------------------------------------------------------------
def get_cluster_experiment_result_dataset_dir(experiment_id, result_dataset_id):
'''
Get the directory of an experiment result dataset in the cluster.
'''
# set the experiment result dataset directory in the cluster
cluster_experiment_result_dataset_dir = '{0}/{1}/{2}'.format(get_cluster_result_dir(), experiment_id, result_dataset_id)
# return the experiment result dataset directory in the cluster
return cluster_experiment_result_dataset_dir
#-------------------------------------------------------------------------------
def get_cluster_current_run_dir(experiment_id, process):
'''
Get the run directory of a bioinfo process in the cluster.
'''
    # set the run identification
now = datetime.datetime.now()
date = datetime.datetime.strftime(now, '%y%m%d')
time = datetime.datetime.strftime(now, '%H%M%S')
run_id = '{0}-{1}-{2}'.format(process, date, time)
# set the run directory in the cluster
cluster_current_run_dir = get_cluster_experiment_result_dir(experiment_id) + '/' + run_id
# return the run directory in the cluster
return cluster_current_run_dir
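#-------------------------------------------------------------------------------
# Illustrative note, not part of the original module: for experiment 'exp1' and
# process code 'trinity' (both assumed values), get_cluster_current_run_dir()
# returns a time-stamped run directory such as
#     /results/exp1/trinity-200131-235959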
#-------------------------------------------------------------------------------
def get_mounting_point_list():
'''
Get the available mounting point list.
'''
return [get_cluster_app_dir(), get_cluster_database_dir(), get_cluster_read_dir(), get_cluster_reference_dir(), get_cluster_result_dir()]
#-------------------------------------------------------------------------------
def get_cluster_log_file():
'''
Get the log file name of an experiment run in the cluster.
'''
return 'log.txt'
#-------------------------------------------------------------------------------
def change_extension(path, new_extension):
'''Change the file extension.'''
# get the path with the new extension
i = path.rfind('.')
if i >= 0:
new_path = path[:i + 1] + new_extension
else:
new_path = path + new_extension
# return the path with new extension
return new_path
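#-------------------------------------------------------------------------------
# Illustrative note, not part of the original module:
#     change_extension('reads.fastq', 'fasta') -> 'reads.fasta'
#     change_extension('README', 'txt')        -> 'READMEtxt'
# The second case shows that, when the path has no dot, the new extension is
# appended without a separator.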
#-------------------------------------------------------------------------------
def existing_dir(dir):
'''
Verify if a directory exists.
'''
# normalize the directory path depending on the operating system
dir = os.path.normpath(dir)
# get the current directory and its parent directory
current_dir = os.getcwd()
parent_dir = os.path.dirname(current_dir)
    # if the operating system is Linux or Mac OS X:
if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
if dir.startswith('/'):
pass
elif dir == ('.'):
dir = current_dir
elif dir.startswith('./'):
dir = '{0}/{1}'.format(current_dir, os.path.basename(dir[2:]))
elif dir.startswith('../'):
dir = '{0}/{1}'.format(parent_dir, os.path.basename(dir[3:]))
else:
dir = '{0}/{1}'.format(current_dir, os.path.basename(dir))
    # if the operating system is Windows or Windows/Cygwin
    elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
        if dir[1:3] == (':\\'):
            pass
        elif dir == ('.'):
            dir = current_dir
        elif dir.startswith('.\\'):
            dir = '{0}\\{1}'.format(current_dir, os.path.basename(dir[2:]))
        elif dir.startswith('..\\'):
            dir = '{0}\\{1}'.format(parent_dir, os.path.basename(dir[3:]))
        else:
            dir = '{0}\\{1}'.format(current_dir, os.path.basename(dir))
# return the verification of valid directory
return os.path.isdir(dir)
#-------------------------------------------------------------------------------
def is_valid_path(path, operating_system=sys.platform):
'''
Verify if a path is a valid path.
'''
# initialize control variable
valid = False
# verify if the path is valid
if operating_system.startswith('linux') or operating_system.startswith('darwin'):
# -- valid = re.match('^(/.+)(/.+)*/?$', path)
valid = True
elif operating_system.startswith('win32') or operating_system.startswith('cygwin'):
valid = True
# return control variable
return valid
#-------------------------------------------------------------------------------
def is_absolute_path(path, operating_system=sys.platform):
'''
    Verify if a path is an absolute path.
'''
# initialize control variable
valid = False
# verify if the path is absolute
if operating_system.startswith('linux') or operating_system.startswith('darwin'):
if path != '':
# -- valid = is_path_valid(path) and path[0] == '/'
valid = True
elif operating_system.startswith('win32') or operating_system.startswith('cygwin'):
valid = True
# return control variable
return valid
#-------------------------------------------------------------------------------
def is_relative_path(path, operating_system=sys.platform):
'''
Verify if a path is a relative path.
'''
# initialize control variable
valid = False
    # verify if the path is relative
if operating_system.startswith('linux') or operating_system.startswith('darwin'):
valid = True
elif operating_system.startswith('win32') or operating_system.startswith('cygwin'):
valid = True
# return control variable
return valid
#-------------------------------------------------------------------------------
def is_device_file(path, device_pattern):
'''
Verify if a path is a valid device file, e.g. /dev/sdf.
'''
# initialize control variable
valid = False
# build the complete pattern
pattern = '^{0}$'.format(device_pattern)
# verify if path is a valid device file
valid = re.match(pattern, path)
# return control variable
return valid
#-------------------------------------------------------------------------------
def get_machine_device_file(aws_device_file):
'''
    Get the machine device file from the AWS device file,
    e.g. /dev/sdb1 -> /dev/xvdb1.
'''
# determine the machine device file
machine_device_file = aws_device_file[0:5] + 'xv' + aws_device_file[6:]
# return the machine device file
return machine_device_file
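#-------------------------------------------------------------------------------
# Illustrative note, not part of the original module: the slicing drops the 's'
# of the AWS device name and inserts 'xv' in its place, so
#     get_machine_device_file('/dev/sdb1') -> '/dev/xvdb1'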
#-------------------------------------------------------------------------------
def is_email_address_valid(email):
'''
Verify if an e-mail address is valid.
'''
# initialize control variable
valid = False
# build the complete pattern
    pattern = r'^[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,4})$'
# verify if the e-mail address is valid
valid = re.match(pattern, email)
# return control variable
return valid
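#-------------------------------------------------------------------------------
# Illustrative note, not part of the original module: re.match() returns a
# match object or None, so callers should treat the result as a boolean:
#     bool(is_email_address_valid('user@example.com')) -> True
#     bool(is_email_address_valid('user@localhost'))   -> False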
#-------------------------------------------------------------------------------
def get_option_dict(config_file):
'''
Get a dictionary with the options retrieved from a configuration file.
'''
# initialize the options dictionary
option_dict = {}
# create class to parse the configuration files
config = configparser.ConfigParser()
# read the configuration file
config.read(config_file)
# build the dictionary
for section in config.sections():
# get the keys dictionary
keys_dict = option_dict.get(section, {})
# for each key in the section
for key in config[section]:
# get the value of the key
value = config.get(section, key, fallback='')
            # add a new entry in the keys dictionary
keys_dict[key] = get_option_value(value)
# update the section with its keys dictionary
option_dict[section] = keys_dict
# return the options dictionary
return option_dict
#-------------------------------------------------------------------------------
def get_option_value(option):
'''
    Remove comments and spaces from an option retrieved from a configuration file.
'''
# Remove comments
position = option.find('#')
if position == -1:
value = option
else:
value = option[:position]
    # Remove leading and trailing spaces
value = value.strip()
# return the value without comments and spaces
return value
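#-------------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: writes a minimal
# configuration file (the file name and its contents are assumptions for the
# example) and shows that get_option_value() strips the inline comment.
def _example_get_option_dict():
    '''
    Parse a sample configuration file (hypothetical documentation helper).
    '''
    if not os.path.exists(get_temp_dir()):
        os.makedirs(get_temp_dir())
    sample_config_file = '{0}/example.cfg'.format(get_temp_dir())
    with open(sample_config_file, mode='w') as config_file_id:
        config_file_id.write('[cluster]\n')
        config_file_id.write('instance_type = m3.large  # inline comment\n')
    # prints: m3.large
    print(get_option_dict(sample_config_file)['cluster']['instance_type'])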
#-------------------------------------------------------------------------------
def split_literal_to_integer_list(literal):
'''
    Split a comma-separated string literal into a list of integer values.
    '''
    # initialize the string values list and the integer values list
strings_list = []
integers_list = []
# split the string literal in a string values list
strings_list = split_literal_to_string_list(literal)
# convert each value from string to integer
for i in range(len(strings_list)):
try:
integers_list.append(int(strings_list[i]))
except:
integers_list = []
break
# return the integer values list
return integers_list
#-------------------------------------------------------------------------------
def split_literal_to_float_list(literal):
'''
    Split a comma-separated string literal into a list of float values.
'''
# initialize the string values list and the float values list
strings_list = []
float_list = []
# split the string literal in a string values list
strings_list = split_literal_to_string_list(literal)
# convert each value from string to float
for i in range(len(strings_list)):
try:
float_list.append(float(strings_list[i]))
except:
float_list = []
break
# return the float values list
return float_list
#-------------------------------------------------------------------------------
def split_literal_to_string_list(literal):
'''
    Split a comma-separated string literal into a list of string values.
'''
# initialize the string values list
string_list = []
# split the string literal in a string values list
string_list = literal.split(',')
# remove the leading and trailing whitespaces in each value
for i in range(len(string_list)):
string_list[i] = string_list[i].strip()
# return the string values list
return string_list
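#-------------------------------------------------------------------------------
# Illustrative note, not part of the original module:
#     split_literal_to_string_list('a, b ,c')  -> ['a', 'b', 'c']
#     split_literal_to_integer_list('1, 2, 3') -> [1, 2, 3]
#     split_literal_to_integer_list('1, x')    -> []  (any bad value empties the list)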
#-------------------------------------------------------------------------------
def pair_files(file_name_list, specific_chars_1, specific_chars_2):
    '''
    Pair the file names of a list using two specific character substrings
    (e.g. the read 1 and read 2 markers of paired-end files) and return the
    two paired lists plus the list of unpaired file names.
    '''
# initialize the file lists
file_name_1_list = []
file_name_2_list = []
unpaired_file_name_list = []
# for each file name, append it to the corresponding list
for file_name in file_name_list:
if file_name.find(specific_chars_1) >= 0:
file_name_1_list.append(file_name)
elif file_name.find(specific_chars_2) >= 0:
file_name_2_list.append(file_name)
else:
unpaired_file_name_list.append(file_name)
file_name_1_list.sort()
file_name_2_list.sort()
# verify the file pairing
review_file_name_1_list = []
review_file_name_2_list = []
index_1 = 0
index_2 = 0
    while index_1 < len(file_name_1_list) or index_2 < len(file_name_2_list):
        if index_1 >= len(file_name_1_list):
            # the first list is exhausted: the remaining files of the second list are unpaired
            unpaired_file_name_list.append(file_name_2_list[index_2])
            index_2 += 1
        elif index_2 >= len(file_name_2_list):
            # the second list is exhausted: the remaining files of the first list are unpaired
            unpaired_file_name_list.append(file_name_1_list[index_1])
            index_1 += 1
        else:
            file_name_1 = file_name_1_list[index_1]
            short_file_name_1 = file_name_1.replace(specific_chars_1, '')
            file_name_2 = file_name_2_list[index_2]
            short_file_name_2 = file_name_2.replace(specific_chars_2, '')
            if short_file_name_1 == short_file_name_2:
                review_file_name_1_list.append(file_name_1)
                index_1 += 1
                review_file_name_2_list.append(file_name_2)
                index_2 += 1
            elif short_file_name_1 < short_file_name_2:
                unpaired_file_name_list.append(file_name_1)
                index_1 += 1
            else:
                unpaired_file_name_list.append(file_name_2)
                index_2 += 1
# return the file lists
return (review_file_name_1_list, review_file_name_2_list, unpaired_file_name_list)
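#-------------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: pairs paired-end read
# files by their 'R1'/'R2' markers (the file names are assumptions for the
# example); files without a partner end up in the unpaired list.
def _example_pair_files():
    '''
    Pair a sample file name list (hypothetical documentation helper).
    '''
    file_name_list = ['sample-R1.fastq', 'sample-R2.fastq', 'orphan-R1.fastq', 'zebra-R2.fastq', 'notes.txt']
    (file_name_1_list, file_name_2_list, unpaired_file_name_list) = pair_files(file_name_list, 'R1', 'R2')
    # prints: ['sample-R1.fastq'] ['sample-R2.fastq'] ['notes.txt', 'orphan-R1.fastq', 'zebra-R2.fastq']
    print(file_name_1_list, file_name_2_list, unpaired_file_name_list)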
#-------------------------------------------------------------------------------
def run_command(command, log):
'''
Run a Bash shell command and redirect stdout and stderr to log.
'''
# run the command
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
for line in iter(process.stdout.readline, b''):
        # replace non-ASCII characters by one blank space
line = re.sub(b'[^\x00-\x7F]+', b' ', line)
        # normalize carriage-return and new-line characters depending on the log type
if not isinstance(log, DevStdOut):
line = re.sub(b'\r\n', b'\r', line)
line = re.sub(b'\r', b'\r\n', line)
elif sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
pass
elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
line = re.sub(b'\r\n', b'\r', line)
line = re.sub(b'\r', b'\r\n', line)
# create a string from the bytes literal
line = line.decode('utf-8')
# write the line in log
log.write('{0}'.format(line))
rc = process.wait()
# return the return code of the command run
return rc
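#-------------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: runs a trivial shell
# command whose output goes both to sys.stdout and to a local log file through
# the DevStdOut class defined below ('echo hello' is an assumed command).
def _example_run_command():
    '''
    Run a sample command with logging (hypothetical documentation helper).
    '''
    log = DevStdOut('example')
    rc = run_command('echo hello', log)
    # prints the return code of the command, normally 0
    print('return code: {0}'.format(rc))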
#-------------------------------------------------------------------------------
def get_separator():
'''
Get the separation line between process steps.
'''
return '**************************************************'
#-------------------------------------------------------------------------------
class DevStdOut(object):
'''
    This class is used when it is necessary to write both to sys.stdout and to a log file.
'''
#---------------
def __init__(self, calling_function=None, print_stdout=True):
'''
        Execute actions corresponding to the creation of a "DevStdOut" instance.
'''
# save initial parameters in instance variables
self.calling_function = calling_function
self.print_stdout = print_stdout
# get the local log file
self.log_file = get_log_file(self.calling_function)
# open the local log file
try:
if not os.path.exists(os.path.dirname(self.log_file)):
os.makedirs(os.path.dirname(self.log_file))
self.log_file_id = open(self.log_file, mode='w', encoding='iso-8859-1')
except:
print('*** ERROR: The file {0} can not be created'.format(self.log_file))
#---------------
def write(self, message):
'''
        Write the message to sys.stdout and to the log file.
'''
# write in sys.stdout
if self.print_stdout:
sys.stdout.write(message)
# write in the log file
self.log_file_id.write(message)
self.log_file_id.flush()
os.fsync(self.log_file_id.fileno())
#---------------
def get_log_file(self):
'''
Get the current log file name
'''
return self.log_file
#---------------
def __del__(self):
'''
        Execute actions corresponding to the object removal.
'''
# close the local log file
self.log_file_id.close()
#---------------
#-------------------------------------------------------------------------------
class DevNull(object):
'''
    This class is used when no output should be written.
'''
#---------------
def write(self, *_):
'''
Do not write anything.
'''
pass
#---------------
#-------------------------------------------------------------------------------
class ProgramException(Exception):
'''
This class controls various exceptions that can occur in the execution of the application.
'''
#---------------
def __init__(self, code_exception, param1='', param2='', param3=''):
'''
        Execute actions corresponding to the creation of an instance to manage a passed exception.
'''
if code_exception == 'C001':
            print('*** ERROR {0}: The application does not work if the config files are not OK.'.format(code_exception), file=sys.stderr)
sys.exit(1)
elif code_exception == 'C002':
            print('*** ERROR {0}: The application does not work if the environment file is not OK.'.format(code_exception), file=sys.stderr)
sys.exit(1)
elif code_exception == 'EXIT':
sys.exit(0)
elif code_exception == 'P001':
print('*** ERROR {0}: This program has parameters with invalid values.'.format(code_exception), file=sys.stderr)
sys.exit(1)
elif code_exception == 'S001':
            print('*** ERROR {0}: There are libraries that are not installed.'.format(code_exception), file=sys.stderr)
sys.exit(1)
elif code_exception == 'S002':
print('*** ERROR {0}: There is infrastructure software not installed.'.format(code_exception), file=sys.stderr)
sys.exit(1)
else:
print('*** ERROR {0}: This exception is not managed.'.format(code_exception), file=sys.stderr)
sys.exit(1)
#---------------
#-------------------------------------------------------------------------------
class BreakAllLoops(Exception):
'''
This class is used to break out of nested loops
'''
pass
#-------------------------------------------------------------------------------
if __name__ == '__main__':
    print('This source contains general functions and classes used by the {0} software package in both console mode and GUI mode.'.format(get_project_name()))
sys.exit(0)
#-------------------------------------------------------------------------------
|
GGFHF/NGScloud
|
Package/xlib.py
|
Python
|
gpl-3.0
| 50,618
|
[
"BLAST",
"Bioconda"
] |
87ca2a464697049f94c87d71157834b8639a0437be9b88bd40e50715eee850bc
|
from modal_noise_script import (save_new_object, set_new_data,
save_baseline_object, save_fft_plot,
save_modal_noise_data)
import numpy as np
import os
import csv
from copy import deepcopy
NEW_DATA = True
NEW_OBJECTS = False
NEW_BASELINE = False
FOLDER = "C:/Libraries/Box Sync/ExoLab/Fiber_Characterization/Image Analysis/data/modal_noise/amp_freq_600um/"
CAMERAS = ['ff']
KERNEL = 101
FIBER_METHOD = 'edge'
CASE = 1
# METHODS = ['tophat', 'gaussian', 'polynomial', 'contrast', 'filter', 'gradient', 'fft']
# METHODS = ['tophat', 'gaussian', 'polynomial', 'contrast', 'filter', 'gradient']
METHODS = ['filter', 'fft']
if CASE == 1:
TITLE = 'Amplitude vs Frequency'
TESTS = ['unagitated_10s',
'agitated_5volts_40mm_10s',
'agitated_5volts_160mm_10s_test1',
'agitated_30volts_40mm_10s',
'agitated_30volts_160mm_10s_test1']
LABELS = ['unagitated',
'0.1Hz 40mm agitation',
'0.1Hz 160mm agitation',
'1.0Hz 40mm agitation',
'1.0Hz 160mm agitation']
if CASE == 2:
TITLE = 'Normalization Test'
TESTS = ['unagitated_1s',
'unagitated_8s',
'unagitated_10s',
'agitated_5volts_160mm_8s',
'agitated_5volts_160mm_80s',
'agitated_30volts_160mm_1s',
'agitated_30volts_160mm_10s_test2']
LABELS = ['unagitated 1s-exp',
'unagitated 8s-exp',
'unagitated 10s-exp',
'0.1Hz agitation 8s-exp',
'0.1Hz agitation 80s-exp',
'1.0Hz agitation 1s-exp',
'1.0Hz agitation 10s-exp']
if CASE == 3:
TITLE = 'Test 1 vs Test 2'
TESTS = ['unagitated_10s',
'agitated_5volts_160mm_10s_test1',
'agitated_5volts_160mm_10s_test2',
'agitated_30volts_160mm_10s_test1',
'agitated_30volts_160mm_10s_test2']
LABELS = ['unagitated',
'0.1Hz agitation test 1',
'0.1Hz agitation test 2',
'1.0Hz agitation test 1',
'1.0Hz agitation test 2']
if CASE == 4:
TITLE = 'All'
TESTS = ['unagitated_1s',
'unagitated_8s',
'unagitated_10s',
'agitated_5volts_40mm_10s',
'agitated_5volts_160mm_8s',
'agitated_5volts_160mm_10s_test1',
'agitated_5volts_160mm_10s_test2',
'agitated_5volts_160mm_80s',
'agitated_30volts_40mm_10s',
'agitated_30volts_160mm_1s',
'agitated_30volts_160mm_10s_test1',
'agitated_30volts_160mm_10s_test2']
LABELS = ['unagitated 1s',
'unagitated 8s',
'unagitated 10s',
'0.1Hz 40mm 10s',
'0.1Hz 160mm 8s',
'0.1Hz 160mm 10s test1',
'0.1Hz 160mm 10s test2',
'0.1Hz 160mm 80s',
'1.0Hz 40mm 10s',
'1.0Hz 160mm 1s',
'1.0Hz 160mm 10s test1',
'1.0Hz 160mm 10s test2']
if __name__ == '__main__':
print TITLE
print
for cam in CAMERAS:
methods = deepcopy(METHODS)
if cam == 'nf' and 'gaussian' in METHODS:
methods.remove('gaussian')
elif cam == 'ff' and 'tophat' in METHODS:
methods.remove('tophat')
kernel = KERNEL
if cam == 'ff':
kernel = None
base_i = None
for i, test in enumerate(TESTS):
if 'baseline' in test:
base_i = i
continue
print cam, test
new_object = NEW_OBJECTS or cam + '_obj.pkl' not in os.listdir(FOLDER + test + '/')
if new_object:
dark_folder = 'dark/'
ambient_folder = 'ambient_1s/'
if '8s' in test:
ambient_folder = 'ambient_8s/'
if '10s' in test:
ambient_folder = 'ambient_10s/'
if '80s' in test:
ambient_folder = 'ambient_80s/'
save_new_object(FOLDER, test, cam, ambient_folder)
if NEW_DATA or new_object:
set_new_data(FOLDER + test, cam, methods,
fiber_method=FIBER_METHOD,
                             kernel_size=kernel)
if base_i is not None:
new_baseline = NEW_BASELINE or cam + '_obj.pkl' not in os.listdir(FOLDER + TESTS[base_i] + '/')
if new_baseline:
save_baseline_object(FOLDER + TESTS[base_i], cam,
TESTS[base_i-1],
fiber_method=FIBER_METHOD,
                                         kernel=kernel)
if NEW_DATA or new_baseline:
set_new_data(FOLDER + TESTS[base_i], cam, methods,
fiber_method=FIBER_METHOD,
                                 kernel_size=kernel)
if 'fft' in methods:
methods.remove('fft')
save_fft_plot(FOLDER, test, cam, LABELS, TITLE)
save_modal_noise_data(FOLDER, TESTS, cam, LABELS, methods, TITLE)
|
rpetersburg/FiberProperties
|
scripts/modal_noise_600um.py
|
Python
|
mit
| 5,353
|
[
"Gaussian"
] |
f035823e755db7448802aaaba7fde41ecb0a16b6d6c013534abd821aa7c48cb8
|
import unittest
from os import path
import pysam
from cigar import Cigar
from mock import Mock
from pyfasta import Fasta
from clrsvsim.simulator import (
make_split_read,
modify_read,
modify_read_for_insertion,
invert_read,
unpack_cigar,
get_max_clip_len,
get_inverse_sequence,
overlap
)
TEST_DATA_DIR = path.join(path.dirname(path.realpath(__file__)), 'test_data')
class SplitReadTest(unittest.TestCase):
def test_make_split_read(self):
read = Mock()
read.seq = 'A' * 20
read.qual = '*' * len(read.seq)
read.rlen = len(read.seq)
read.qname = read.query_name = 'name'
read.reference_start = 100
read.cigarstring = '20M'
alternate_seq = 'C' * 10 + 'T' * 10
split_read = make_split_read(read, 5, True, sequence=alternate_seq)
self.assertEqual(split_read.seq, 'T' * 5 + 'A' * 15)
self.assertEqual(split_read.cigarstring, '5S15M')
split_read = make_split_read(read, 5, False, sequence=alternate_seq)
self.assertEqual(split_read.seq, 'A' * 5 + 'C' * 10 + 'T' * 5)
self.assertEqual(split_read.cigarstring, '5M15S')
split_read = make_split_read(read, 5, False, hard_clip_threshold=0.1, sequence=alternate_seq)
self.assertEqual(split_read.seq, 'A' * 5 + 'C' * 10 + 'T' * 5)
self.assertEqual(split_read.cigarstring, '5M15H')
split_read = make_split_read(read, 5, False, hard_clip_threshold=0.9, sequence=alternate_seq)
self.assertEqual(split_read.seq, 'A' * 5 + 'C' * 10 + 'T' * 5)
self.assertEqual(split_read.cigarstring, '5M15S')
def test_make_split_read_bam_file(self):
sorted_bam = path.join(TEST_DATA_DIR, 'sorted.bam')
with pysam.Samfile(sorted_bam, 'rb') as samfile:
for read in samfile:
if not read.cigarstring:
continue
for breakpoint in (10, 50, 100):
if breakpoint >= read.rlen:
continue
for is_left_split in (True, False):
split_read = make_split_read(read, breakpoint, is_left_split)
cigar_items = list(Cigar(split_read.cigarstring).items())
clipped_item = cigar_items[0] if is_left_split else cigar_items[-1]
min_clip_len = breakpoint if is_left_split else read.rlen - breakpoint # Can be longer if adjacent to another clip.
self.assertGreaterEqual(clipped_item[0], min_clip_len)
self.assertIn(clipped_item[1], ('S', 'H')) # Will be soft-clipped unless already hard-clipped.
def test_modify_read(self):
read = Mock()
read.seq = 'AAAAA'
read.qname = 'test'
# SNPs
modified, changes = modify_read(read, 1, 0, 0)
self.assertEqual(changes, len(modified.seq))
self.assertEqual(len(modified.seq), len(read.seq))
self.assertTrue(all([read.seq[i] != modified.seq[i] for i in range(len(read.seq))]))
# Insertions
modified, changes = modify_read(read, 0, 1, 0)
self.assertEqual(changes, len(read.seq))
self.assertEqual(len(modified.seq), len(read.seq) * 2)
# Deletions
modified, changes = modify_read(read, 0, 0, 1)
self.assertEqual(changes, len(read.seq))
self.assertEqual(len(modified.seq), 0)
def test_modify_read_for_insertion(self):
read = Mock()
read.seq = 'AAAAAA'
read.qual = '*' * len(read.seq)
read.qname = 'test'
read.rlen = len(read.seq)
read.reference_start = 100
read.cigarstring = '{}M'.format(read.rlen)
ins_position = 103
ins_seq = 'CCCCCCCC'
modified, changes = modify_read_for_insertion(read, ins_position, ins_seq, 0, 0)
self.assertEqual(changes, 0)
# Read can either be modified to be on the left or right of the insertion
self.assertIn(modified.seq, ('AAACCC', 'CCCAAA'))
self.assertIn(modified.cigarstring, ('3M3S', '3S3M'))
# Test padding
modified, _ = modify_read_for_insertion(read, ins_position, ins_seq, 0, 0, padding=2)
self.assertIn(modified.seq, ('AAAACC', 'CCAAAA'))
self.assertIn(modified.cigarstring, ('4M2S', '2S4M'))
# Insertion positions beyond the read boundaries should not modify it
for position in (0, 1000):
modified, _ = modify_read_for_insertion(read, position, ins_seq, 0, 0, )
self.assertEqual(read, modified)
# Test limiting the maximum clip length
modified, _ = modify_read_for_insertion(read, ins_position, ins_seq, 0, 0, max_clip_len=3)
self.assertIn(modified.cigarstring, ('3M3S', '3S3M')) # clip len below max - allowed
modified, _ = modify_read_for_insertion(read, ins_position, ins_seq, 0, 0, max_clip_len=2)
self.assertEqual(read, modified) # clip len above max - insertion should not happen
def test_unpack_cigar(self):
for bad_cigar_string in (None, '', 'ok', '1', '1s2m', '1S2'):
self.assertRaises(ValueError, unpack_cigar, bad_cigar_string)
for cigar, unpacked in [
('1M', ['1M']),
('2M', ['1M', '1M']),
('2M1S', ['1M', '1M', '1S']),
('100S', ['1S'] * 100)
]:
self.assertEqual(unpack_cigar(cigar), unpacked)
def test_get_max_clip_len(self):
read = Mock()
read.cigarstring = None
self.assertRaises(ValueError, get_max_clip_len, read)
for cigar, max_len in [
('4M', 0),
('1S2M', 1),
('1S1M2S', 2),
('2S1M1S', 2),
('1M1S', 1)
]:
read.cigarstring = cigar
self.assertEqual(get_max_clip_len(read), max_len)
def test_invert_read(self):
read = Mock()
read.seq = '123456'
read.qual = '*' * len(read.seq)
read.qname = 'test'
read.rlen = len(read.seq)
read.reference_start = 100
read.reference_end = read.reference_start + read.rlen
read.cigarstring = '{}M'.format(read.rlen)
def assert_inversion(read, start, end, sequence, expected_seq, expected_cigar):
inv, _ = invert_read(read, start, end, sequence, 0, 0)
msg_prefix = 'invert({}, {}--{})'.format(read.seq, start, end)
self.assertEqual(inv.seq, expected_seq, '{}: {} != {}'.format(msg_prefix, inv.seq, expected_seq))
self.assertEqual(inv.cigarstring, expected_cigar, '{}: {} != {}'.format(msg_prefix, inv.cigarstring, expected_cigar))
# Inversions that are fully within the read
for start, end, expected_seq, expected_cigar in [
# fully contained, away from borders
(102, 104, '124356', '2M4S'),
(101, 104, '143256', '4S2M'),
# fully contained, touching borders
(100, 104, '432156', '4S2M'),
(102, 106, '126543', '2M4S'),
# spanning exactly the read
(100, 106, '654321', '6S'),
# edge cases
(102, 102, '123456', '6M'),
(102, 103, '123456', '6M'),
]:
assert_inversion(read, start, end, '', expected_seq, expected_cigar)
# The sequence that's inverted in the entire genome; only a subset will appear in each read.
sequence = '9876543210'
for start, expected_seq, expected_cigar in [
# inversion ends before the read
(80, '123456', '6M'),
(90, '123456', '6M'),
# inversion starts before the read, and extends into it
(91, '023456', '1S5M'),
(92, '103456', '2S4M'),
(93, '210456', '3S3M'),
(94, '321056', '4S2M'),
(95, '432106', '5S1M'),
# read is fully contained in the inversion
(96, '543210', '6S'),
(97, '654321', '6S'),
(98, '765432', '6S'),
(99, '876543', '6S'),
(100, '987654', '6S'),
# inversion starts mid-read
(101, '198765', '1M5S'),
(102, '129876', '2M4S'),
(103, '123987', '3M3S'),
(104, '123498', '4M2S'),
(105, '123459', '5M1S'),
# inversion starts past the read
(106, '123456', '6M'),
(110, '123456', '6M'),
]:
assert_inversion(read, start, start + len(sequence), sequence, expected_seq, expected_cigar)
def test_get_inverse_sequence(self):
# Reads represented in this file (start position = 100):
#
# ACGTACGTAC
# ACGTCCGTAC
# CGTCCGTACT
# CGTCCGAACT
# GTCCGAACTT
# TCCGAACTTC
# CCGAACTTAA
# CCGAACTTAA
# CCGAACTTAG
# CGAACTTAGC
#
bam = path.join(TEST_DATA_DIR, 'sv_sim.bam')
self.assertEqual(get_inverse_sequence(bam, '1', 100, 102), 'GT')
self.assertEqual(get_inverse_sequence(bam, '1', 108, 111), 'AGT')
# Inversion of an area with no reads, no ref genome provided
self.assertEqual(get_inverse_sequence(bam, '1', 98, 102), 'GTNN')
self.assertEqual(get_inverse_sequence(bam, '1', 0, 100), 'N' * 100)
# Inversion of an area with no reads, ref genome provided
ref_genome_fa = Fasta(path.join(TEST_DATA_DIR, 'sv_sim.fa'))
self.assertEqual(get_inverse_sequence(bam, '1', 0, 4, ref_genome_fa), 'AAAA')
self.assertEqual(get_inverse_sequence(bam, '1', 98, 102, ref_genome_fa), 'GTAA')
def test_overlap(self):
self.assertEqual(overlap((0, 0), (0, 0)), 0)
self.assertEqual(overlap((0, 1), (0, 1)), 1)
self.assertEqual(overlap((0, 1), (1, 1)), 0)
self.assertEqual(overlap((0, 1), (0, 2)), 1)
self.assertEqual(overlap((0, 1), (1, 2)), 0)
self.assertEqual(overlap((0, 2), (1, 2)), 1)
self.assertEqual(overlap((0, 2), (1, 3)), 1)
self.assertEqual(overlap((0, 2), (0, 3)), 2)
self.assertEqual(overlap((0, 2), (2, 4)), 0)
self.assertEqual(overlap((0, 3), (1, 2)), 1)
self.assertEqual(overlap((0, 4), (1, 3)), 2)
self.assertEqual(overlap((0, 4), (2, 4)), 2)
self.assertEqual(overlap((0, 4), (0, 2)), 2)
# TODO: add tests for:
# inversion directly from BAM
# inversion of an area that has no reads in the BAM
# max clip len
|
color/clrsvsim
|
clrsvsim/test_simulator.py
|
Python
|
apache-2.0
| 10,536
|
[
"pysam"
] |
c20d2d19a5c3717b108f1ee6c0146c8c688f3a8990242c4b31a62c8093f2ee06
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
n0 = 50
n1 = 50
display = True
worldSize = El.mpi.WorldSize()
worldRank = El.mpi.WorldRank()
# Stack two 2D finite-difference matrices on top of each other
# and make the last column dense
def StackedFD2D(N0,N1):
A = El.DistMatrix()
height = 2*N0*N1
width = N0*N1
A.Resize(height,width)
blocksize = height // worldSize
myStart = blocksize*worldRank
if worldRank == worldSize-1:
myHeight = height - myStart
else:
myHeight = blocksize
A.Reserve(6*myHeight)
  for sLoc in xrange(myHeight):
s = A.GlobalRow(sLoc)
if s < N0*N1:
x0 = s % N0
x1 = s / N0
A.QueueUpdate( sLoc, s, 11 )
if x0 > 0:
A.QueueUpdate( sLoc, s-1, -1 )
if x0+1 < N0:
A.QueueUpdate( sLoc, s+1, 2 )
if x1 > 0:
A.QueueUpdate( sLoc, s-N0, -30 )
if x1+1 < N1:
A.QueueUpdate( sLoc, s+N0, 4 )
else:
sRel = s-N0*N1
x0 = sRel % N0
x1 = sRel / N0
A.QueueUpdate( sLoc, sRel, -20 )
if x0 > 0:
A.QueueUpdate( sLoc, sRel-1, -17 )
if x0+1 < N0:
A.QueueUpdate( sLoc, sRel+1, -20 )
if x1 > 0:
A.QueueUpdate( sLoc, sRel-N0, -3 )
if x1+1 < N1:
A.QueueUpdate( sLoc, sRel+N0, 3 )
# The dense last column
A.QueueUpdate( sLoc, width-1, -10/height );
A.ProcessQueues()
return A
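# Illustrative note, not part of the original example: each local row queues at
# most six updates (the diagonal, up to four stencil neighbours and the dense
# last column), which is why A.Reserve(6*myHeight) above is sufficient.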
A = StackedFD2D(n0,n1)
b = El.DistMatrix()
El.Gaussian( b, 2*n0*n1, 1 )
if display:
El.Display( A, "A" )
El.Display( b, "b" )
ctrl = El.LPAffineCtrl_d()
ctrl.mehrotraCtrl.outerEquil = True
ctrl.mehrotraCtrl.innerEquil = True
ctrl.mehrotraCtrl.scaleTwoNorm = True
ctrl.mehrotraCtrl.progress = True
ctrl.mehrotraCtrl.qsdCtrl.relTol = 1e-10
ctrl.mehrotraCtrl.qsdCtrl.relTolRefine = 1e-11
ctrl.mehrotraCtrl.qsdCtrl.progress = True
startCP = El.mpi.Time()
x = El.CP( A, b, ctrl )
endCP = El.mpi.Time()
if worldRank == 0:
print "CP time:", endCP-startCP, "seconds"
if display:
El.Display( x, "x" )
bTwoNorm = El.Nrm2( b )
bInfNorm = El.MaxNorm( b )
r = El.DistMatrix()
El.Copy( b, r )
El.Gemv( El.NORMAL, -1., A, x, 1., r )
if display:
El.Display( r, "r" )
rTwoNorm = El.Nrm2( r )
rInfNorm = El.MaxNorm( r )
if worldRank == 0:
print "|| b ||_2 =", bTwoNorm
print "|| b ||_oo =", bInfNorm
print "|| A x - b ||_2 =", rTwoNorm
print "|| A x - b ||_oo =", rInfNorm
startLS = El.mpi.Time()
xLS = El.LeastSquares(A,b)
endLS = El.mpi.Time()
if worldRank == 0:
print "LS time:", endLS-startLS, "seconds"
if display:
El.Display( xLS, "x_{LS}" )
rLS = El.DistMatrix()
El.Copy( b, rLS )
El.Gemv( El.NORMAL, -1., A, xLS, 1., rLS )
if display:
El.Display( rLS, "A x_{LS} - b" )
rLSTwoNorm = El.Nrm2(rLS)
rLSInfNorm = El.MaxNorm(rLS)
if worldRank == 0:
print "|| A x_{LS} - b ||_2 =", rLSTwoNorm
print "|| A x_{LS} - b ||_oo =", rLSInfNorm
# Require the user to press a button before the figures are closed
El.Finalize()
if worldSize == 1:
raw_input('Press Enter to exit')
|
birm/Elemental
|
examples/interface/RemoteUpdate.py
|
Python
|
bsd-3-clause
| 3,212
|
[
"Gaussian"
] |
ab6f49f8d27aed2d95255b8f84e063be45264c426b93bbd937b7703e36fd9aba
|
from __future__ import print_function
import os, sys, inspect
import h5py
import numpy as np
import matplotlib
import random
import math
import multiprocessing
from PIL import Image
from Crypto.Random.random import randint
from functools import partial
# Load the configuration file
import config
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
if cmd_folder not in sys.path:
sys.path.append(cmd_folder)
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],config.caffe_path+"/python")))
if cmd_subfolder not in sys.path:
sys.path.append(cmd_subfolder)
sys.path.append(config.caffe_path+"/python")
# Ensure correct compilation of Caffe and Pycaffe
if config.library_compile:
cpus = multiprocessing.cpu_count()
cwd = os.getcwd()
os.chdir(config.caffe_path)
result = os.system("make all -j %s" % cpus)
if result != 0:
sys.exit(result)
result = os.system("make pycaffe -j %s" % cpus)
if result != 0:
sys.exit(result)
os.chdir(cwd)
# Import pycaffe
import caffe
from caffe import layers as L, params as P, to_proto
from caffe.proto import caffe_pb2
import netconf
# General variables
# Size of a float variable
fsize = 4
def compute_memory_weights(shape_arr):
memory = 0
for i in range(0,len(shape_arr)):
memory += shape_arr[i][1]
return memory
def compute_memory_buffers(shape_arr):
memory = 0
for i in range(0,len(shape_arr)):
memory = max(memory, shape_arr[i][0])
return memory
def compute_memory_blobs(shape_arr):
memory = 0
for i in range(0,len(shape_arr)):
mem = fsize * shape_arr[i][2]
for j in range(0,len(shape_arr[i][4])):
mem *= shape_arr[i][4][j]
memory += mem
return memory
def update_shape(shape_arr, update):
last_shape = shape_arr[-1]
new_shape = [update[0](last_shape[0]), update[1](last_shape[1]), update[2](last_shape[2]),
[update[3][min(i,len(update[3])-1)](last_shape[3][i]) for i in range(0,len(last_shape[3]))],
[update[4][min(i,len(update[4])-1)](last_shape[4][i]) for i in range(0,len(last_shape[4]))]]
shape_arr += [new_shape]
print ("TEST B: %s" % [update[4][min(i,len(update[4])-1)]([1,1,1][i]) for i in range(0,3)])
return shape_arr
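# Illustrative sketch, not part of the original script: each entry of a shape
# array is [conv buffer, weight memory, channels, [d] values, [w] values]; the
# update below keeps the channels and halves both spatial extents, as a 2x2
# pooling layer with stride 2 would.
def _example_update_shape():
    run_shape = [[0, 0, 1, [1, 1], [572, 572]]]
    update = [lambda x: 0, lambda x: 0, lambda x: x]
    update += [[lambda x: x]]
    update += [[lambda x: x // 2]]
    update_shape(run_shape, update)
    # prints: [0, 0, 1, [1, 1], [286, 286]]
    print(run_shape[-1])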
def data_layer(shape):
data, label = L.MemoryData(dim=shape, ntop=2)
return data, label
def conv_relu(run_shape, bottom, num_output, kernel_size=[3], stride=[1], pad=[0], kstride=[1], group=1, weight_std=0.01):
# The convolution buffer and weight memory
weight_mem = fsize * num_output * run_shape[-1][2]
conv_buff = fsize * run_shape[-1][2]
for i in range(0,len(run_shape[-1][4])):
conv_buff *= kernel_size[min(i,len(kernel_size)-1)]
conv_buff *= run_shape[-1][4][i]
weight_mem *= kernel_size[min(i,len(kernel_size)-1)]
# Shape update rules
update = [lambda x: conv_buff, lambda x: weight_mem, lambda x: num_output]
update += [[lambda x: x, lambda x: x, lambda x: x]]
update += [[lambda x, i=i: x - (kernel_size[min(i,len(kernel_size)-1)] - 1) * (run_shape[-1][3][i]) for i in range(0,len(run_shape[-1][4]))]]
update_shape(run_shape, update)
conv = L.Convolution(bottom, kernel_size=kernel_size, stride=stride, kstride=kstride,
num_output=num_output, pad=pad, group=group,
param=[dict(lr_mult=1),dict(lr_mult=2)],
weight_filler=dict(type='gaussian', std=weight_std),
bias_filler=dict(type='constant'))
return conv, L.ReLU(conv, in_place=True, negative_slope=0.005)
def convolution(run_shape, bottom, num_output, kernel_size=[3], stride=[1], pad=[0], kstride=[1], group=1, weight_std=0.01):
# The convolution buffer and weight memory
weight_mem = fsize * num_output * run_shape[-1][2]
conv_buff = fsize * run_shape[-1][2]
for i in range(0,len(run_shape[-1][4])):
conv_buff *= kernel_size[min(i,len(kernel_size)-1)]
conv_buff *= run_shape[-1][4][i]
weight_mem *= kernel_size[min(i,len(kernel_size)-1)]
# Shape update rules
update = [lambda x: conv_buff, lambda x: weight_mem, lambda x: num_output]
update += [[lambda x: x, lambda x: x, lambda x: x]]
update += [[lambda x, i=i: x - (kernel_size[min(i,len(kernel_size)-1)] - 1) * (run_shape[-1][3][i]) for i in range(0,len(run_shape[-1][4]))]]
update_shape(run_shape, update)
return L.Convolution(bottom, kernel_size=kernel_size, stride=stride, kstride=kstride,
num_output=num_output, pad=pad, group=group,
param=[dict(lr_mult=1),dict(lr_mult=2)],
weight_filler=dict(type='gaussian', std=weight_std),
bias_filler=dict(type='constant'))
def max_pool(run_shape, bottom, kernel_size=[2], stride=[2], pad=[0], kstride=[1]):
# Shape update rules
update = [lambda x: 0, lambda x: 0, lambda x: x]
update += [[lambda x, i=i: x * kstride[min(i,len(kstride)-1)] for i in range(0,len(run_shape[-1][4]))]]
# Strictly speaking this update rule is not complete, but should be sufficient for USK
if kstride[0] == 1 and kernel_size[0] == stride[0]:
update += [[lambda x, i=i: x / (kernel_size[min(i,len(kernel_size)-1)]) for i in range(0,len(run_shape[-1][4]))]]
else:
update += [[lambda x, i=i: x - (kernel_size[min(i,len(kernel_size)-1)] - 1) * (run_shape[-1][3][i]) for i in range(0,len(run_shape[-1][4]))]]
update_shape(run_shape, update)
return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=kernel_size, stride=stride, pad=pad, kstride=kstride)
def upconv(run_shape, bottom, num_output_dec, num_output_conv, weight_std=0.01, kernel_size=[2], stride=[2]):
# Shape update rules
update = [lambda x: 0, lambda x: 0, lambda x: num_output_dec]
update += [[lambda x: x, lambda x: x, lambda x: x]]
update += [[lambda x, i=i: kernel_size[min(i,len(kernel_size)-1)] * x for i in range(0,len(run_shape[-1][4]))]]
update_shape(run_shape, update)
deconv = L.Deconvolution(bottom, convolution_param=dict(num_output=num_output_dec, kernel_size=kernel_size, stride=stride, pad=[0], kstride=[1], group=num_output_dec,
weight_filler=dict(type='constant', value=1), bias_term=False),
param=dict(lr_mult=0, decay_mult=0))
# The convolution buffer and weight memory
weight_mem = fsize * num_output_conv * num_output_dec
conv_buff = fsize * run_shape[-1][2]
for i in range(0,len(run_shape[-1][4])):
conv_buff *= 2
conv_buff *= run_shape[-1][4][i]
# Shape update rules
update = [lambda x: conv_buff, lambda x: weight_mem, lambda x: num_output_conv]
update += [[lambda x: x, lambda x: x, lambda x: x]]
update += [[lambda x, i=i: x for i in range(0,len(run_shape[-1][4]))]]
update_shape(run_shape, update)
conv = L.Convolution(deconv, num_output=num_output_conv, kernel_size=[1], stride=[1], pad=[0], kstride=[1], group=1,
param=[dict(lr_mult=1),dict(lr_mult=2)],
weight_filler=dict(type='gaussian', std=weight_std),
bias_filler=dict(type='constant'))
return deconv, conv
def mergecrop(run_shape, bottom_a, bottom_b):
# Shape update rules
update = [lambda x: 0, lambda x: 0, lambda x: 2*x]
update += [[lambda x: x, lambda x: x, lambda x: x]]
update += [[lambda x, i=i: x for i in range(0,len(run_shape[-1][4]))]]
update_shape(run_shape, update)
return L.MergeCrop(bottom_a, bottom_b, forward=[1,1], backward=[1,1])
def implement_usknet(net, run_shape, fmaps_start, fmaps_end):
# Chained blob list to construct the network (forward direction)
blobs = []
# All networks start with data
blobs = blobs + [net.data]
fmaps = fmaps_start
if netconf.unet_depth > 0:
# U-Net downsampling; 2*Convolution+Pooling
for i in range(0, netconf.unet_depth):
conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [relu]
conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [relu] # This is the blob of interest for mergecrop (index 2 + 3 * i)
pool = max_pool(run_shape, blobs[-1], kernel_size=netconf.unet_downsampling_strategy[i], stride=netconf.unet_downsampling_strategy[i])
blobs = blobs + [pool]
fmaps = netconf.unet_fmap_inc_rule(fmaps)
# If there is no SK-Net component, fill with 2 convolutions
if (netconf.unet_depth > 0 and netconf.sknet_conv_depth == 0):
conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [relu]
conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [relu]
# Else use the SK-Net instead
else:
for i in range(0, netconf.sknet_conv_depth):
# TODO: Not implemented yet (fixme)
run_shape = run_shape
if netconf.unet_depth > 0:
# U-Net upsampling; Upconvolution+MergeCrop+2*Convolution
for i in range(0, netconf.unet_depth):
deconv, conv = upconv(run_shape, blobs[-1], fmaps, netconf.unet_fmap_dec_rule(fmaps), kernel_size=netconf.unet_downsampling_strategy[i], stride=netconf.unet_downsampling_strategy[i], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [conv]
fmaps = netconf.unet_fmap_dec_rule(fmaps)
# Here, layer (2 + 3 * i) with reversed i (high to low) is picked
mergec = mergecrop(run_shape, blobs[-1], blobs[-1 + 3 * (netconf.unet_depth - i)])
blobs = blobs + [mergec]
conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [relu]
conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [relu]
conv = convolution(run_shape, blobs[-1], fmaps_end, kernel_size=[1], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [conv]
# Return the last blob of the network (goes to error objective)
return blobs[-1]
def caffenet(netmode):
# Start Caffe proto net
net = caffe.NetSpec()
# Specify input data structures
if netmode == caffe_pb2.TEST:
if netconf.loss_function == 'malis':
fmaps_end = 11
if netconf.loss_function == 'euclid':
fmaps_end = 11
if netconf.loss_function == 'softmax':
fmaps_end = 2
net.data, net.datai = data_layer([1,1,572,572])
net.silence = L.Silence(net.datai, ntop=0)
# Shape specs:
# 00. Convolution buffer size
# 01. Weight memory size
# 02. Num. channels
# 03. [d] parameter running value
# 04. [w] parameter running value
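# For instance, starting from [0, 0, 1, [1,1], [572,572]], an unpadded 3x3
# convolution leaves [d] untouched and shrinks [w] to [570,570], and a 2x2
# max pooling halves [w].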
run_shape_in = [[0,0,1,[1,1],[572,572]]]
run_shape_out = run_shape_in
last_blob = implement_usknet(net, run_shape_out, 64, fmaps_end)
# Implement the prediction layer
if netconf.loss_function == 'malis':
net.prob = L.Sigmoid(last_blob, ntop=1)
if netconf.loss_function == 'euclid':
net.prob = L.Sigmoid(last_blob, ntop=1)
if netconf.loss_function == 'softmax':
net.prob = L.Softmax(last_blob, ntop=1)
for i in range(0,len(run_shape_out)):
print(run_shape_out[i])
print("Max. memory requirements: %s B" % (compute_memory_buffers(run_shape_out)+compute_memory_weights(run_shape_out)+compute_memory_blobs(run_shape_out)))
print("Weight memory: %s B" % compute_memory_weights(run_shape_out))
print("Max. conv buffer: %s B" % compute_memory_buffers(run_shape_out))
else:
if netconf.loss_function == 'malis':
net.data, net.datai = data_layer([1,1,572,572])
net.label, net.labeli = data_layer([1,1,388,388])
net.label_affinity, net.label_affinityi = data_layer([1,11,16,388,388])
net.affinity_edges, net.affinity_edgesi = data_layer([1,1,11,3])
net.silence = L.Silence(net.datai, net.labeli, net.label_affinityi, net.affinity_edgesi, ntop=0)
fmaps_end = 11
if netconf.loss_function == 'euclid':
net.data, net.datai = data_layer([1,1,572,572])
net.label, net.labeli = data_layer([1,3,388,388])
net.scale, net.scalei = data_layer([1,3,388,388])
net.silence = L.Silence(net.datai, net.labeli, net.scalei, ntop=0)
fmaps_end = 11
if netconf.loss_function == 'softmax':
net.data, net.datai = data_layer([1,1,572,572])
# Currently only supports binary classification
net.label, net.labeli = data_layer([1,1,388,388])
net.silence = L.Silence(net.datai, net.labeli, ntop=0)
fmaps_end = 2
run_shape_in = [[0,0,1,[1,1],[572,572]]]
run_shape_out = run_shape_in
# Start the actual network
last_blob = implement_usknet(net, run_shape_out, 64, fmaps_end)
for i in range(0,len(run_shape_out)):
print(run_shape_out[i])
print("Max. memory requirements: %s B" % (compute_memory_buffers(run_shape_out)+compute_memory_weights(run_shape_out)+2*compute_memory_blobs(run_shape_out)))
print("Weight memory: %s B" % compute_memory_weights(run_shape_out))
print("Max. conv buffer: %s B" % compute_memory_buffers(run_shape_out))
# Implement the loss
if netconf.loss_function == 'malis':
last_blob = L.Sigmoid(last_blob, in_place=True)
net.loss = L.MalisLoss(last_blob, net.label_affinity, net.label, net.affinity_edges, ntop=0)
if netconf.loss_function == 'euclid':
last_blob = L.Sigmoid(last_blob, in_place=True)
net.loss = L.EuclideanLoss(last_blob, net.label, net.scale, ntop=0)
if netconf.loss_function == 'softmax':
net.loss = L.SoftmaxWithLoss(last_blob, net.label, ntop=0)
# Return the protocol buffer of the generated network
return net.to_proto()
def make_net():
with open('net/net_train.prototxt', 'w') as f:
print(caffenet(caffe_pb2.TRAIN), file=f)
with open('net/net_test.prototxt', 'w') as f:
print(caffenet(caffe_pb2.TEST), file=f)
def make_solver():
with open('net/solver.prototxt', 'w') as f:
print('train_net: \"net/net_train.prototxt\"', file=f)
print('base_lr: 0.00001', file=f)
print('momentum: 0.99', file=f)
print('weight_decay: 0.000005', file=f)
print('lr_policy: \"inv\"', file=f)
print('gamma: 0.0001', file=f)
print('power: 0.75', file=f)
print('max_iter: 100000', file=f)
print('snapshot: 2000', file=f)
print('snapshot_prefix: \"net\"', file=f)
print('display: 50', file=f)
make_net()
make_solver()
|
srinituraga/caffe_neural_models
|
dataset_08/network_generator.py
|
Python
|
bsd-2-clause
| 16,036
|
[
"Gaussian"
] |
6adb8bf8a8216b48dbdc87f16b082253c518a9cd601e9209846c8887feaa85eb
|
import py
import os
import execnet
from xdist.slavemanage import HostRSync, NodeManager
pytest_plugins = "pytester",
def pytest_funcarg__hookrecorder(request):
_pytest = request.getfuncargvalue('_pytest')
config = request.getfuncargvalue('config')
return _pytest.gethookrecorder(config.hook)
def pytest_funcarg__config(request):
testdir = request.getfuncargvalue("testdir")
config = testdir.parseconfig()
return config
def pytest_funcarg__mysetup(request):
class mysetup:
def __init__(self, request):
temp = request.getfuncargvalue("tmpdir")
self.source = temp.mkdir("source")
self.dest = temp.mkdir("dest")
request.getfuncargvalue("_pytest")
return mysetup(request)
class TestNodeManagerPopen:
def test_popen_no_default_chdir(self, config):
gm = NodeManager(config, ["popen"])
assert gm.specs[0].chdir is None
def test_default_chdir(self, config):
l = ["ssh=noco", "socket=xyz"]
for spec in NodeManager(config, l).specs:
assert spec.chdir == "pyexecnetcache"
for spec in NodeManager(config, l, defaultchdir="abc").specs:
assert spec.chdir == "abc"
def test_popen_makegateway_events(self, config, hookrecorder, _pytest):
hm = NodeManager(config, ["popen"] * 2)
hm.makegateways()
call = hookrecorder.popcall("pytest_xdist_setupnodes")
assert len(call.specs) == 2
call = hookrecorder.popcall("pytest_xdist_newgateway")
assert call.gateway.spec == execnet.XSpec("popen")
assert call.gateway.id == "gw0"
call = hookrecorder.popcall("pytest_xdist_newgateway")
assert call.gateway.id == "gw1"
assert len(hm.group) == 2
hm.teardown_nodes()
assert not len(hm.group)
def test_popens_rsync(self, config, mysetup):
source = mysetup.source
hm = NodeManager(config, ["popen"] * 2)
hm.makegateways()
assert len(hm.group) == 2
for gw in hm.group:
class pseudoexec:
args = []
def __init__(self, *args):
self.args.extend(args)
def waitclose(self):
pass
gw.remote_exec = pseudoexec
l = []
hm.rsync(source, notify=lambda *args: l.append(args))
assert not l
hm.teardown_nodes()
assert not len(hm.group)
assert "sys.path.insert" in gw.remote_exec.args[0]
def test_rsync_popen_with_path(self, config, mysetup):
source, dest = mysetup.source, mysetup.dest
hm = NodeManager(config, ["popen//chdir=%s" %dest] * 1)
hm.makegateways()
source.ensure("dir1", "dir2", "hello")
l = []
hm.rsync(source, notify=lambda *args: l.append(args))
assert len(l) == 1
assert l[0] == ("rsyncrootready", hm.group['gw0'].spec, source)
hm.teardown_nodes()
dest = dest.join(source.basename)
assert dest.join("dir1").check()
assert dest.join("dir1", "dir2").check()
assert dest.join("dir1", "dir2", 'hello').check()
def test_rsync_same_popen_twice(self, config, mysetup, hookrecorder):
source, dest = mysetup.source, mysetup.dest
hm = NodeManager(config, ["popen//chdir=%s" %dest] * 2)
hm.makegateways()
source.ensure("dir1", "dir2", "hello")
hm.rsync(source)
call = hookrecorder.popcall("pytest_xdist_rsyncstart")
assert call.source == source
assert len(call.gateways) == 1
assert call.gateways[0] in hm.group
call = hookrecorder.popcall("pytest_xdist_rsyncfinish")
class TestHRSync:
def pytest_funcarg__mysetup(self, request):
class mysetup:
def __init__(self, request):
tmp = request.getfuncargvalue('tmpdir')
self.source = tmp.mkdir("source")
self.dest = tmp.mkdir("dest")
return mysetup(request)
def test_hrsync_filter(self, mysetup):
source, dest = mysetup.source, mysetup.dest
source.ensure("dir", "file.txt")
source.ensure(".svn", "entries")
source.ensure(".somedotfile", "moreentries")
source.ensure("somedir", "editfile~")
syncer = HostRSync(source)
l = list(source.visit(rec=syncer.filter,
fil=syncer.filter))
assert len(l) == 3
basenames = [x.basename for x in l]
assert 'dir' in basenames
assert 'file.txt' in basenames
assert 'somedir' in basenames
def test_hrsync_one_host(self, mysetup):
source, dest = mysetup.source, mysetup.dest
gw = execnet.makegateway("popen//chdir=%s" % dest)
finished = []
rsync = HostRSync(source)
rsync.add_target_host(gw, finished=lambda: finished.append(1))
source.join("hello.py").write("world")
rsync.send()
gw.exit()
assert dest.join(source.basename, "hello.py").check()
assert len(finished) == 1
class TestNodeManager:
@py.test.mark.xfail
def test_rsync_roots_no_roots(self, testdir, mysetup):
mysetup.source.ensure("dir1", "file1").write("hello")
config = testdir.parseconfig(mysetup.source)
nodemanager = NodeManager(config, ["popen//chdir=%s" % mysetup.dest])
#assert nodemanager.config.topdir == source == config.topdir
nodemanager.makegateways()
nodemanager.rsync_roots()
p, = nodemanager.gwmanager.multi_exec(
"import os ; channel.send(os.getcwd())").receive_each()
p = py.path.local(p)
py.builtin.print_("remote curdir", p)
assert p == mysetup.dest.join(config.topdir.basename)
assert p.join("dir1").check()
assert p.join("dir1", "file1").check()
def test_popen_rsync_subdir(self, testdir, mysetup):
source, dest = mysetup.source, mysetup.dest
dir1 = mysetup.source.mkdir("dir1")
dir2 = dir1.mkdir("dir2")
dir2.ensure("hello")
for rsyncroot in (dir1, source):
dest.remove()
nodemanager = NodeManager(testdir.parseconfig(
"--tx", "popen//chdir=%s" % dest,
"--rsyncdir", rsyncroot,
source,
))
nodemanager.makegateways()
nodemanager.rsync_roots()
if rsyncroot == source:
dest = dest.join("source")
assert dest.join("dir1").check()
assert dest.join("dir1", "dir2").check()
assert dest.join("dir1", "dir2", 'hello').check()
nodemanager.teardown_nodes()
def test_init_rsync_roots(self, testdir, mysetup):
source, dest = mysetup.source, mysetup.dest
dir2 = source.ensure("dir1", "dir2", dir=1)
source.ensure("dir1", "somefile", dir=1)
dir2.ensure("hello")
source.ensure("bogusdir", "file")
source.join("tox.ini").write(py.std.textwrap.dedent("""
[pytest]
rsyncdirs=dir1/dir2
"""))
config = testdir.parseconfig(source)
nodemanager = NodeManager(config, ["popen//chdir=%s" % dest])
nodemanager.makegateways()
nodemanager.rsync_roots()
assert dest.join("dir2").check()
assert not dest.join("dir1").check()
assert not dest.join("bogus").check()
def test_rsyncignore(self, testdir, mysetup):
source, dest = mysetup.source, mysetup.dest
dir2 = source.ensure("dir1", "dir2", dir=1)
dir5 = source.ensure("dir5", "dir6", "bogus")
dirf = source.ensure("dir5", "file")
dir2.ensure("hello")
source.join("tox.ini").write(py.std.textwrap.dedent("""
[pytest]
rsyncdirs = dir1 dir5
rsyncignore = dir1/dir2 dir5/dir6
"""))
config = testdir.parseconfig(source)
nodemanager = NodeManager(config, ["popen//chdir=%s" % dest])
nodemanager.makegateways()
nodemanager.rsync_roots()
assert dest.join("dir1").check()
assert not dest.join("dir1", "dir2").check()
assert dest.join("dir5","file").check()
assert not dest.join("dir6").check()
def test_optimise_popen(self, testdir, mysetup):
source, dest = mysetup.source, mysetup.dest
specs = ["popen"] * 3
source.join("conftest.py").write("rsyncdirs = ['a']")
source.ensure('a', dir=1)
config = testdir.parseconfig(source)
nodemanager = NodeManager(config, specs)
nodemanager.makegateways()
nodemanager.rsync_roots()
for gwspec in nodemanager.specs:
assert gwspec._samefilesystem()
assert not gwspec.chdir
def test_ssh_setup_nodes(self, specssh, testdir):
testdir.makepyfile(__init__="", test_x="""
def test_one():
pass
""")
reprec = testdir.inline_run("-d", "--rsyncdir=%s" % testdir.tmpdir,
"--tx", specssh, testdir.tmpdir)
rep, = reprec.getreports("pytest_runtest_logreport")
assert rep.passed
|
curzona/pytest-xdist
|
testing/test_slavemanage.py
|
Python
|
mit
| 9,121
|
[
"VisIt"
] |
b22edb8de3b3c110eb3073e7a3ff99e13f6c222c308d4559c3b2c9ed5e92d1d3
|
from __future__ import absolute_import
from builtins import object
import argparse
import textwrap
from .utils import load_json
class CommandLineParser(object):
def __init__(self):
self._instantiate_parser()
self._add_arguments()
def _instantiate_parser(self):
self._parser = argparse.ArgumentParser(description=textwrap.dedent(self._get_description()), epilog=textwrap.dedent(self._get_epilog()), formatter_class=argparse.RawDescriptionHelpFormatter)
def _get_description(self):
return '''\
Grace is a toolchain to work with rich JavaScript applications.
It provides several tools for developers to create applications
in a fast and clean manner.'''
def _get_epilog(self):
return '''\
Task Commands
-------------
The following tasks can be specified through the task command.
build Builds the project and places the output in ./build/ProjectName.
deploy First build and then deploy the project to the path
specified in the deployment_path option in your project.cfg file.
autodeploy Execute a deploy task upon any change in the src directory.
jsdoc Build the jsDoc of the project.
zip Build and then zip the output and put it into the path
specified by the zip_path option in your project.cfg file.
clean Clean the build output.
test Build all the tests.
test:deploy Build and then deploy the tests.
test:zip Build and then zip the tests.
upload Upload the project to the specified server.
Overwrite Commands
------------------
Most of the configuration options specified by either the project.cfg or the
global grace.cfg can be overwritten on the command line. They take the form:
option=new_value
The following options can be overwritten:
deployment_path
zip_path
doc_path
minify_js Accepts true or false
minify_css Accepts true or false
autolint Accepts true or false
urls:upload
credentials:username
credentials:password
Example:
python manage.py deploy --overwrite deployment_path=/tmp/deployment --overwrite minify_js=true
python manage.py build -o minify_css=true
Further Reading
---------------
For more information visit https://www.github.com/mdiener/grace
'''
def _add_arguments(self):
self._parser.add_argument('task', help='Executes the given task.')
self._parser.add_argument('--test-cases', help='Build only the specified test cases (separated by a semicolon).')
self._parser.add_argument('--overwrite', '-o', action='append', help='Overwrite the specified configuration option.')
self._parser.add_argument('--stack-trace', '-s', action='store_true', help='Provides a full stack trace instead of just an error message.')
def get_arguments(self):
args = self._parser.parse_args()
overwrites = {}
if args.overwrite is not None:
for overwrite in args.overwrite:
overwrite = overwrite.split('=')
if len(overwrite) != 1:
key = overwrite[0]
value = overwrite[1]
def parse_nested_key(holder, keychain, value):
if len(keychain) == 1:
holder[keychain[0]] = value
else:
if keychain[0] not in holder:
holder[keychain[0]] = {}
parse_nested_key(holder[keychain[0]], keychain[1:], value)
return holder
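# For example, '--overwrite credentials:username=bob' leads to
# parse_nested_key({}, ['username'], 'bob') == {'username': 'bob'},
# stored below as overwrites['credentials'] = {'username': 'bob'}.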
if len(key.split(':')) > 1:
keychain = key.split(':')
key = keychain[0]
value = parse_nested_key({}, keychain[1:], value)
try:
value = load_json(value)
except:
pass
overwrites[key] = value
return args.task, args.test_cases, overwrites, args.stack_trace
|
mdiener/grace
|
grace/cmdparse.py
|
Python
|
gpl-3.0
| 4,036
|
[
"VisIt"
] |
52fec7db18596334c591acb9ed21ab1be80126360d13b2f3ae2c46bf5b071d24
|
# coding=utf-8
"""**Utilities for storage module**
"""
import os
import re
import copy
import numpy
import math
from ast import literal_eval
from osgeo import ogr
from geometry import Polygon
from safe.common.numerics import ensure_numeric
from safe.common.utilities import verify
from safe.common.exceptions import BoundingBoxError, InaSAFEError
# Default attribute to assign to vector layers
DEFAULT_ATTRIBUTE = 'inapolygon'
# Spatial layer file extensions that are recognised in Risiko
# FIXME: Perhaps add '.gml', '.zip', ...
LAYER_TYPES = ['.shp', '.asc', '.tif', '.tiff', '.geotif', '.geotiff']
# Map between extensions and OGR drivers
DRIVER_MAP = {'.sqlite': 'SQLITE',
'.shp': 'ESRI Shapefile',
'.gml': 'GML',
'.tif': 'GTiff',
'.asc': 'AAIGrid'}
# Map between Python types and OGR field types
# FIXME (Ole): I can't find a double precision type for OGR
TYPE_MAP = {type(None): ogr.OFTString, # What else should this be?
type(''): ogr.OFTString,
type(True): ogr.OFTInteger,
type(0): ogr.OFTInteger,
type(0.0): ogr.OFTReal,
type(numpy.array([0.0])[0]): ogr.OFTReal, # numpy.float64
type(numpy.array([[0.0]])[0]): ogr.OFTReal} # numpy.ndarray
# Map between verbose types and OGR geometry types
INVERSE_GEOMETRY_TYPE_MAP = {'point': ogr.wkbPoint,
'line': ogr.wkbLineString,
'polygon': ogr.wkbPolygon}
# Miscellaneous auxiliary functions
def _keywords_to_string(keywords, sublayer=None):
"""Create a string from a keywords dict.
Args:
* keywords: A required dictionary containing the keywords to stringify.
* sublayer: str optional group marker for a sub layer.
Returns:
str: a String containing the rendered keywords list
Raises:
Any exceptions are propagated.
.. note: Only simple keyword dicts should be passed here, not multilayer
dicts.
For example you pass a dict like this::
{'datatype': 'osm',
'category': 'exposure',
'title': 'buildings_osm_4326',
'subcategory': 'building',
'purpose': 'dki'}
and the following string would be returned:
datatype: osm
category: exposure
title: buildings_osm_4326
subcategory: building
purpose: dki
If sublayer is provided e.g. _keywords_to_string(keywords, sublayer='foo'),
the following:
[foo]
datatype: osm
category: exposure
title: buildings_osm_4326
subcategory: building
purpose: dki
"""
# Write
result = ''
if sublayer is not None:
result = '[%s]\n' % sublayer
for k, v in keywords.items():
# Create key
msg = ('Key in keywords dictionary must be a string. '
'I got %s with type %s' % (k, str(type(k))[1:-1]))
verify(isinstance(k, basestring), msg)
key = k
msg = ('Key in keywords dictionary must not contain the ":" '
'character. I got "%s"' % key)
verify(':' not in key, msg)
# Create value
msg = ('Value in keywords dictionary must be convertible to a string. '
'For key %s, I got %s with type %s'
% (k, v, str(type(v))[1:-1]))
try:
val = str(v)
except:
raise Exception(msg)
# Store
result += '%s: %s\n' % (key, val)
return result
def write_keywords(keywords, filename, sublayer=None):
"""Write keywords dictonary to file
:param keywords: Dictionary of keyword, value pairs
:type keywords: dict
:param filename: Name of keywords file. Extension expected to be .keywords
:type filename: str
:param sublayer: Optional sublayer applicable only to multilayer formats
such as sqlite or netcdf which can potentially hold more than
one layer. The string should map to the layer group as per the
example below. **If the keywords file contains sublayer
definitions but no sublayer was defined, keywords file content
will be removed and replaced with only the keywords provided
here.**
:type sublayer: str
A keyword file with sublayers may look like this:
[osm_buildings]
datatype: osm
category: exposure
subcategory: building
purpose: dki
title: buildings_osm_4326
[osm_flood]
datatype: flood
category: hazard
subcategory: building
title: flood_osm_4326
Keys must be strings not containing the ":" character
Values can be anything that can be converted to a string (using
Python's str function)
Surrounding whitespace is removed from values, but keys are unmodified
The reason being that keys must always be valid for the dictionary they
came from. For values we have decided to be flexible and treat entries like
'unit:m' the same as 'unit: m', or indeed 'unit: m '.
Otherwise, unintentional whitespace in values would lead to surprising
errors in the application.
"""
# Input checks
basename, ext = os.path.splitext(filename)
msg = ('Unknown extension for file %s. '
'Expected %s.keywords' % (filename, basename))
verify(ext == '.keywords', msg)
# First read any keywords out of the file so that we can retain
# keywords for other sublayers
existing_keywords = read_keywords(filename, all_blocks=True)
first_value = None
if len(existing_keywords) > 0:
first_value = existing_keywords[existing_keywords.keys()[0]]
multilayer_flag = type(first_value) == dict
handle = file(filename, 'wt')
if multilayer_flag:
if sublayer is not None and sublayer != '':
#replace existing keywords / add new for this layer
existing_keywords[sublayer] = keywords
for key, value in existing_keywords.iteritems():
handle.write(_keywords_to_string(value, sublayer=key))
handle.write('\n')
else:
# It is currently a multilayer but we will replace it with
# a single keyword block since the user passed no sublayer
handle.write(_keywords_to_string(keywords))
else:
#currently a simple layer so replace it with our content
handle.write(_keywords_to_string(keywords, sublayer=sublayer))
handle.close()
def read_keywords(filename, sublayer=None, all_blocks=False):
"""Read keywords dictionary from file
:param filename: Name of keywords file. Extension expected to be .keywords
The format of one line is expected to be either
string: string or string
:type filename: str
:param sublayer: Optional sublayer applicable only to multilayer formats
such as sqlite or netcdf which can potentially hold more than
one layer. The string should map to the layer group as per the
example below. If the keywords file contains sublayer definitions
but no sublayer was defined, the first layer group will be
returned.
:type sublayer: str
:param all_blocks: Optional, defaults to False. If True will return
a dict of dicts, where the top level dict entries each represent
a sublayer, and the values of that dict will be dicts of keyword
entries.
:type all_blocks: bool
:returns: keywords: Dictionary of keyword, value pairs
A keyword layer with sublayers may look like this:
[osm_buildings]
datatype: osm
category: exposure
subcategory: building
purpose: dki
title: buildings_osm_4326
[osm_flood]
datatype: flood
category: hazard
subcategory: building
title: flood_osm_4326
Whereas a simple keywords file would look like this
datatype: flood
category: hazard
subcategory: building
title: flood_osm_4326
If filename does not exist, an empty dictionary is returned
Blank lines are ignored
Surrounding whitespace is removed from values, but keys are unmodified
If there are no ':', then the keyword is treated as a key with no value
"""
# Input checks
basename, ext = os.path.splitext(filename)
msg = ('Unknown extension for file %s. '
'Expected %s.keywords' % (filename, basename))
verify(ext == '.keywords', msg)
if not os.path.isfile(filename):
return {}
# Read all entries
blocks = {}
keywords = {}
fid = open(filename, 'r')
current_block = None
first_keywords = None
for line in fid.readlines():
# Remove trailing (but not preceding!) whitespace
# FIXME: Can be removed altogether
text = line.rstrip()
# Ignore blank lines
if text == '':
continue
# Check if it is an ini style group header
block_flag = re.search(r'^\[.*]$', text, re.M | re.I)
if block_flag:
# Write the old block if it exists - must have a current
# block to prevent orphans
if len(keywords) > 0 and current_block is not None:
blocks[current_block] = keywords
if first_keywords is None and len(keywords) > 0:
first_keywords = keywords
# Now set up for a new block
current_block = text[1:-1]
# Reset the keywords each time we encounter a new block
# until we know we are on the desired one
keywords = {}
continue
if ':' not in text:
key = text.strip()
val = None
else:
# Get splitting point
idx = text.find(':')
# Take key as everything up to the first ':'
key = text[:idx]
# Take value as everything after the first ':'
textval = text[idx + 1:].strip()
try:
# Take care of python structures like
# booleans, None, lists, dicts etc
val = literal_eval(textval)
except (ValueError, SyntaxError):
val = textval
# Add entry to dictionary
keywords[key] = val
fid.close()
# Write out any unfinalised block data
if len(keywords) > 0 and current_block is not None:
blocks[current_block] = keywords
if first_keywords is None:
first_keywords = keywords
# Ok we have generated a structure that looks like this:
# blocks = {'foo' : { 'a': 'b', 'c': 'd'},
#           'bar' : { 'd': 'e', 'f': 'g'}}
# where foo and bar are sublayers and their dicts are the sublayer keywords
if all_blocks:
return blocks
if sublayer is not None:
if sublayer in blocks:
return blocks[sublayer]
else:
return first_keywords
# noinspection PyExceptionInherit
def check_geotransform(geotransform):
"""Check that geotransform is valid
:param geotransform: GDAL geotransform (6-tuple).
(top left x, w-e pixel resolution, rotation,
top left y, rotation, n-s pixel resolution).
See e.g. http://www.gdal.org/gdal_tutorial.html
:type geotransform: tuple
.. note::
This assumes that the spatial reference uses geographic coordinates,
so will not work for projected coordinate systems.
"""
msg = ('Supplied geotransform must be a tuple with '
'6 numbers. I got %s' % str(geotransform))
verify(len(geotransform) == 6, msg)
for x in geotransform:
try:
float(x)
except TypeError:
raise InaSAFEError(msg)
# Check longitude
msg = ('Element in 0 (first) geotransform must be a valid '
'longitude. I got %s' % geotransform[0])
verify(-180 <= geotransform[0] <= 180, msg)
# Check latitude
msg = ('Element 3 (fourth) in geotransform must be a valid '
'latitude. I got %s' % geotransform[3])
verify(-90 <= geotransform[3] <= 90, msg)
# Check cell size
msg = ('Element 1 (second) in geotransform must be a positive '
'number. I got %s' % geotransform[1])
verify(geotransform[1] > 0, msg)
msg = ('Element 5 (sixth) in geotransform must be a negative '
'number. I got %s' % geotransform[5])
verify(geotransform[5] < 0, msg)
def geotransform_to_bbox(geotransform, columns, rows):
"""Convert geotransform to bounding box
:param geotransform: GDAL geotransform (6-tuple).
(top left x, w-e pixel resolution, rotation,
top left y, rotation, n-s pixel resolution).
See e.g. http://www.gdal.org/gdal_tutorial.html
:type geotransform: tuple
:param columns: Number of columns in grid
:type columns: int
:param rows: Number of rows in grid
:type rows: int
:returns: bbox: Bounding box as a list of geographic coordinates
[west, south, east, north]
.. note::
Rows and columns are needed to determine eastern and northern bounds.
FIXME: Not sure if the pixel vs gridline registration issue is observed
correctly here. Need to check against gdal > v1.7
"""
x_origin = geotransform[0] # top left x
y_origin = geotransform[3] # top left y
x_res = geotransform[1] # w-e pixel resolution
y_res = geotransform[5] # n-s pixel resolution
x_pix = columns
y_pix = rows
min_x = x_origin
max_x = x_origin + (x_pix * x_res)
min_y = y_origin + (y_pix * y_res)
max_y = y_origin
return [min_x, min_y, max_x, max_y]
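# Illustrative example: geotransform (105.0, 0.1, 0, -5.0, 0, -0.1) with
# 400 columns and 300 rows yields [105.0, -35.0, 145.0, -5.0], i.e. W, S, E, N.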
def geotransform_to_resolution(geotransform, isotropic=False):
"""Convert geotransform to resolution
:param geotransform: GDAL geotransform (6-tuple).
(top left x, w-e pixel resolution, rotation,
top left y, rotation, n-s pixel resolution).
See e.g. http://www.gdal.org/gdal_tutorial.html
:type geotransform: tuple
:param isotropic: If True, return the average (dx + dy) / 2
:type isotropic: bool
:returns: resolution: grid spacing (res_x, res_y) in (positive) decimal
degrees ordered as longitude first, then latitude.
or (res_x + res_y) / 2 (if isotropic is True)
"""
res_x = geotransform[1] # w-e pixel resolution
res_y = -geotransform[5] # n-s pixel resolution (always negative)
if isotropic:
return (res_x + res_y) / 2
else:
return res_x, res_y
def raster_geometry_to_geotransform(longitudes, latitudes):
"""Convert vectors of longitudes and latitudes to geotransform
Note:
This is the inverse operation of Raster.get_geometry().
:param longitudes: Vectors of geographic coordinates
:type longitudes:
:param latitudes: Vectors of geographic coordinates
:type latitudes:
:returns: geotransform: 6-tuple (top left x, w-e pixel resolution,
rotation, top left y, rotation, n-s pixel resolution)
"""
nx = len(longitudes)
ny = len(latitudes)
msg = ('You must specify more than 1 longitude to make geotransform: '
'I got %s' % str(longitudes))
verify(nx > 1, msg)
msg = ('You must specify more than 1 latitude to make geotransform: '
'I got %s' % str(latitudes))
verify(ny > 1, msg)
dx = float(longitudes[1] - longitudes[0]) # Longitudinal resolution
dy = float(latitudes[0] - latitudes[1]) # Latitudinal resolution (neg)
# Define pixel centers along each directions
# This is to achieve pixel registration rather
# than gridline registration
dx2 = dx / 2
dy2 = dy / 2
geotransform = (longitudes[0] - dx2, # Longitude of upper left corner
dx, # w-e pixel resolution
0, # rotation
latitudes[-1] - dy2, # Latitude of upper left corner
0, # rotation
dy) # n-s pixel resolution
return geotransform
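# Illustrative example: pixel-centre longitudes [100.05, 100.15, 100.25] and
# ascending latitudes [-5.25, -5.15, -5.05] give the geotransform
# (100.0, 0.1, 0, -5.0, 0, -0.1), whose origin is the outer grid corner.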
# noinspection PyExceptionInherit
def bbox_intersection(*args):
"""Compute intersection between two or more bounding boxes
:param args: two or more bounding boxes.
Each is assumed to be a list or a tuple with
four coordinates (W, S, E, N)
:returns: The minimal common bounding box
"""
msg = 'Function bbox_intersection must take at least 2 arguments.'
verify(len(args) > 1, msg)
result = [-180, -90, 180, 90]
for a in args:
if a is None:
continue
msg = ('Bounding box expected to be a list of the '
'form [W, S, E, N]. '
'Instead I got "%s"' % str(a))
try:
box = list(a)
except:
raise Exception(msg)
if not len(box) == 4:
raise BoundingBoxError(msg)
msg = ('Western boundary must be less than or equal to eastern. '
'I got %s' % box)
if not box[0] <= box[2]:
raise BoundingBoxError(msg)
msg = ('Southern boundary must be less than or equal to northern. '
'I got %s' % box)
if not box[1] <= box[3]:
raise BoundingBoxError(msg)
# Compute intersection
# West and South
for i in [0, 1]:
result[i] = max(result[i], box[i])
# East and North
for i in [2, 3]:
result[i] = min(result[i], box[i])
# Check validity and return
if result[0] <= result[2] and result[1] <= result[3]:
return result
else:
return None
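# Illustrative examples:
# bbox_intersection([100, -10, 120, 5], [105, -15, 125, 0]) == [105, -10, 120, 0]
# bbox_intersection([0, 0, 1, 1], [2, 2, 3, 3]) is None (disjoint boxes)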
def minimal_bounding_box(bbox, min_res, eps=1.0e-6):
"""Grow bounding box to exceed specified resolution if needed
:param bbox: Bounding box with format [W, S, E, N]
:type bbox: list
:param min_res: Minimal acceptable resolution to exceed
:type min_res: float
:param eps: Optional tolerance that will be applied to 'buffer' result
:type eps: float
:returns: Adjusted bounding box guaranteed to exceed specified resolution
"""
# FIXME (Ole): Probably obsolete now
bbox = copy.copy(list(bbox))
delta_x = bbox[2] - bbox[0]
delta_y = bbox[3] - bbox[1]
if delta_x < min_res:
dx = (min_res - delta_x) / 2 + eps
bbox[0] -= dx
bbox[2] += dx
if delta_y < min_res:
dy = (min_res - delta_y) / 2 + eps
bbox[1] -= dy
bbox[3] += dy
return bbox
def buffered_bounding_box(bbox, resolution):
"""Grow bounding box with one unit of resolution in each direction
Note:
This will ensure there are enough pixels to robustly provide
interpolated values without having to painstakingly deal with
all corner cases such as 1 x 1, 1 x 2 and 2 x 1 arrays.
The border will also make sure that points that would otherwise fall
outside the domain (as defined by a tight bounding box) get assigned
values.
:param bbox: Bounding box with format [W, S, E, N]
:type bbox: list
:param resolution: (resx, resy) - Raster resolution in each direction.
res - Raster resolution in either direction
If resolution is None bbox is returned unchanged.
:type resolution: tuple
:returns: Adjusted bounding box
Note:
Case in point: Interpolation point O would fall outside this domain
even though there are enough grid points to support it
::
--------------
| |
| * * | * *
| O|
| |
| * * | * *
--------------
"""
bbox = copy.copy(list(bbox))
if resolution is None:
return bbox
try:
resx, resy = resolution
except TypeError:
resx = resy = resolution
bbox[0] -= resx
bbox[1] -= resy
bbox[2] += resx
bbox[3] += resy
return bbox
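# Illustrative example: buffered_bounding_box([105, -10, 120, 5], (0.1, 0.2))
# returns [104.9, -10.2, 120.1, 5.2]; a scalar resolution buffers all sides
# equally.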
def get_geometry_type(geometry, geometry_type):
"""Determine geometry type based on data
:param geometry: A list of either point coordinates [lon, lat] or polygons
which are assumed to be numpy arrays of coordinates
:type geometry: list
:param geometry_type: Optional type - 'point', 'line', 'polygon' or None
:type geometry_type: str, None
:returns: geometry_type: Either ogr.wkbPoint, ogr.wkbLineString or
ogr.wkbPolygon
Note:
If geometry type cannot be determined an Exception is raised.
There is no consistency check across all entries of the
geometry list, only the first element is used in this determination.
"""
# FIXME (Ole): Perhaps use OGR's own symbols
msg = ('Argument geometry_type must be either "point", "line", '
'"polygon" or None')
verify(geometry_type is None or
geometry_type in [1, 2, 3] or
geometry_type.lower() in ['point', 'line', 'polygon'], msg)
if geometry_type is not None:
if isinstance(geometry_type, basestring):
return INVERSE_GEOMETRY_TYPE_MAP[geometry_type.lower()]
else:
return geometry_type
# FIXME (Ole): Should add some additional checks to see if choice
# makes sense
msg = 'Argument geometry must be a sequence. I got %s ' % type(geometry)
verify(is_sequence(geometry), msg)
if len(geometry) == 0:
# Default to point if there is no data
return ogr.wkbPoint
msg = ('The first element in geometry must be a sequence of length >= 2. '
'I got %s ' % str(geometry[0]))
verify(is_sequence(geometry[0]), msg)
verify(len(geometry[0]) >= 2, msg)
if len(geometry[0]) == 2:
try:
float(geometry[0][0])
float(geometry[0][1])
except (ValueError, TypeError, IndexError):
pass
else:
# This geometry appears to be point data
geometry_type = ogr.wkbPoint
elif len(geometry[0]) > 2:
try:
x = numpy.array(geometry[0])
except ValueError:
pass
else:
# This geometry appears to be polygon data
if x.shape[0] > 2 and x.shape[1] == 2:
geometry_type = ogr.wkbPolygon
if geometry_type is None:
msg = 'Could not determine geometry type'
raise Exception(msg)
return geometry_type
def is_sequence(x):
"""Determine if x behaves like a true sequence but not a string
:param x: Sequence like object
:type x: object
:returns: Test result
:rtype: bool
Note:
This will for example return True for lists, tuples and numpy arrays
but False for strings and dictionaries.
"""
if isinstance(x, basestring):
return False
try:
list(x)
except TypeError:
return False
else:
return True
def array_to_line(A, geometry_type=ogr.wkbLinearRing):
"""Convert coordinates to linear_ring
:param A: Nx2 Array of coordinates representing either a polygon or a line.
A can be either a numpy array or a list of coordinates.
:type A: numpy.ndarray, list
:param geometry_type: A valid OGR geometry type.
Default type ogr.wkbLinearRing
:type geometry_type: ogr.wkbLinearRing or ogr.wkbLineString
Returns:
* ring: OGR line geometry
Note:
Based on http://www.packtpub.com/article/working-geospatial-data-python
"""
try:
A = ensure_numeric(A, numpy.float)
except Exception, e:
msg = ('Array (%s) could not be converted to numeric array. '
'I got type %s. Error message: %s'
% (A, str(type(A)), e))
raise Exception(msg)
msg = 'Array must be a 2d array of vertices. I got %s' % (str(A.shape))
verify(len(A.shape) == 2, msg)
msg = 'Array must have two columns. I got %s' % (str(A.shape[1]))
verify(A.shape[1] == 2, msg)
N = A.shape[0] # Number of vertices
line = ogr.Geometry(geometry_type)
for i in range(N):
line.AddPoint(A[i, 0], A[i, 1])
return line
def rings_equal(x, y, rtol=1.0e-6, atol=1.0e-8):
"""Compares to linear rings as numpy arrays
:param x: A 2d array of the first ring
:type x: numpy.ndarray
:param y: A 2d array of the second ring
:type y: numpy.ndarray
:param rtol: The relative tolerance parameter
:type rtol: float
:param atol: The absolute tolerance parameter
:type atol: float
Returns:
* True if x == y or x' == y (up to the specified tolerance)
where x' is x reversed in the first dimension. This corresponds to
linear rings being seen as equal irrespective of whether they are
organised in clock wise or counter clock wise order
"""
x = ensure_numeric(x, numpy.float)
y = ensure_numeric(y, numpy.float)
msg = 'Arrays must be 2d arrays of vertices. I got %s and %s' % (x, y)
verify(len(x.shape) == 2 and len(y.shape) == 2, msg)
msg = 'Arrays must have two columns. I got %s and %s' % (x, y)
verify(x.shape[1] == 2 and y.shape[1] == 2, msg)
if (numpy.allclose(x, y, rtol=rtol, atol=atol) or
numpy.allclose(x, y[::-1], rtol=rtol, atol=atol)):
return True
else:
return False
# FIXME (Ole): We can retire this messy function now
# Positive: Delete it :-)
def array_to_wkt(A, geom_type='POLYGON'):
"""Convert coordinates to wkt format
:param A: Nx2 Array of coordinates representing either a polygon or a line.
A can be either a numpy array or a list of coordinates.
:type A: numpy.array
:param geom_type: Determines output keyword 'POLYGON' or 'LINESTRING'
:type geom_type: str
:returns: wkt: geometry in the format known to ogr: Examples
Note:
POLYGON((1020 1030,1020 1045,1050 1045,1050 1030,1020 1030))
LINESTRING(1000 1000, 1100 1050)
"""
try:
A = ensure_numeric(A, numpy.float)
except Exception, e:
msg = ('Array (%s) could not be converted to numeric array. '
'I got type %s. Error message: %s'
% (A, str(type(A)), e))
raise Exception(msg)
msg = 'Array must be a 2d array of vertices. I got %s' % (str(A.shape))
verify(len(A.shape) == 2, msg)
msg = 'Array must have two columns. I got %s' % (str(A.shape[1]))
verify(A.shape[1] == 2, msg)
if geom_type == 'LINESTRING':
# One bracket
n = 1
elif geom_type == 'POLYGON':
# Two brackets (tsk tsk)
n = 2
else:
msg = 'Unknown geom_type: %s' % geom_type
raise Exception(msg)
wkt_string = geom_type + '(' * n
N = len(A)
for i in range(N):
# Works for both lists and arrays
wkt_string += '%f %f, ' % tuple(A[i])
return wkt_string[:-2] + ')' * n
# Map of ogr numerical geometry types to their textual representation
# FIXME (Ole): Some of them don't exist, even though they show up
# when doing dir(ogr) - Why?:
geometry_type_map = {ogr.wkbPoint: 'Point',
ogr.wkbPoint25D: 'Point25D',
ogr.wkbPolygon: 'Polygon',
ogr.wkbPolygon25D: 'Polygon25D',
#ogr.wkbLinePoint: 'LinePoint', # ??
ogr.wkbGeometryCollection: 'GeometryCollection',
ogr.wkbGeometryCollection25D: 'GeometryCollection25D',
ogr.wkbLineString: 'LineString',
ogr.wkbLineString25D: 'LineString25D',
ogr.wkbLinearRing: 'LinearRing',
ogr.wkbMultiLineString: 'MultiLineString',
ogr.wkbMultiLineString25D: 'MultiLineString25D',
ogr.wkbMultiPoint: 'MultiPoint',
ogr.wkbMultiPoint25D: 'MultiPoint25D',
ogr.wkbMultiPolygon: 'MultiPolygon',
ogr.wkbMultiPolygon25D: 'MultiPolygon25D',
ogr.wkbNDR: 'NDR',
ogr.wkbNone: 'None',
ogr.wkbUnknown: 'Unknown'}
def geometry_type_to_string(g_type):
"""Provides string representation of numeric geometry types
:param g_type: geometry type:
:type g_type: ogr.wkb*, None
FIXME (Ole): I can't find anything like this in OGR. Why?
"""
if g_type in geometry_type_map:
return geometry_type_map[g_type]
elif g_type is None:
return 'No geometry type assigned'
else:
return 'Unknown geometry type: %s' % str(g_type)
# FIXME: Move to common numerics area along with polygon.py
def calculate_polygon_area(polygon, signed=False):
"""Calculate the signed area of non-self-intersecting polygon
:param polygon: Numeric array of points (longitude, latitude). It is
assumed to be closed, i.e. first and last points are identical
:type polygon: numpy.ndarray
:param signed: Optional flag deciding whether returned area retains its
sign:
If points are ordered counter clockwise, the signed area
will be positive.
If points are ordered clockwise, it will be negative
Default is False which means that the area is always
positive.
:type signed: bool
:returns: area: Area of polygon (subject to the value of argument signed)
:rtype: float
Note:
Sources
http://paulbourke.net/geometry/polyarea/
http://en.wikipedia.org/wiki/Centroid
"""
# Make sure it is numeric
P = numpy.array(polygon)
msg = ('Polygon is assumed to consist of coordinate pairs. '
'I got second dimension %i instead of 2' % P.shape[1])
verify(P.shape[1] == 2, msg)
x = P[:, 0]
y = P[:, 1]
# Calculate 0.5 sum_{i=0}^{N-1} (x_i y_{i+1} - x_{i+1} y_i)
a = x[:-1] * y[1:]
b = y[:-1] * x[1:]
A = numpy.sum(a - b) / 2.
if signed:
return A
else:
return abs(A)
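# Illustrative example: the closed unit square
# [[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]] (counter clockwise) gives a
# signed area of +1.0; traversed clockwise the signed area is -1.0.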
def calculate_polygon_centroid(polygon):
"""Calculate the centroid of non-self-intersecting polygon
:param polygon: Numeric array of points (longitude, latitude). It is
assumed to be closed, i.e. first and last points are identical
:type polygon: numpy.ndarray
:returns: calculated centroid
:rtype: numpy.ndarray
.. note::
Sources
http://paulbourke.net/geometry/polyarea/
http://en.wikipedia.org/wiki/Centroid
"""
# Make sure it is numeric
P = numpy.array(polygon)
# Normalise to ensure numerical accuracy.
# This requirement in backed by tests in test_io.py and without it
# centroids at building footprint level may get shifted outside the
# polygon!
P_origin = numpy.amin(P, axis=0)
P = P - P_origin
# Get area. This calculation could be incorporated to save time
# if necessary as the two formulas are very similar.
A = calculate_polygon_area(polygon, signed=True)
x = P[:, 0]
y = P[:, 1]
# Calculate
# Cx = sum_{i=0}^{N-1} (x_i + x_{i+1})(x_i y_{i+1} - x_{i+1} y_i)/(6A)
# Cy = sum_{i=0}^{N-1} (y_i + y_{i+1})(x_i y_{i+1} - x_{i+1} y_i)/(6A)
a = x[:-1] * y[1:]
b = y[:-1] * x[1:]
cx = x[:-1] + x[1:]
cy = y[:-1] + y[1:]
Cx = numpy.sum(cx * (a - b)) / (6. * A)
Cy = numpy.sum(cy * (a - b)) / (6. * A)
# Translate back to real location
C = numpy.array([Cx, Cy]) + P_origin
return C
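# Illustrative example: for the closed unit square used above the centroid
# evaluates to [0.5, 0.5], irrespective of vertex ordering.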
def points_between_points(point1, point2, delta):
"""Creates an array of points between two points given a delta
:param point1: The first point
:type point1: numpy.ndarray
:param point2: The second point
:type point2: numpy.ndarray
:param delta: The increment between inserted points
:type delta: float
:returns: Array of points.
:rtype: numpy.ndarray
Note:
u = (x1-x0, y1-y0)/L, where
L=sqrt( (x1-x0)^2 + (y1-y0)^2).
If r is the resolution, then the
points will be given by
(x0, y0) + u * n * r for n = 1, 2, ....
while len(n*u*r) < L
"""
x0, y0 = point1
x1, y1 = point2
L = math.sqrt(math.pow((x1 - x0), 2) + math.pow((y1 - y0), 2))
pieces = int(L / delta)
uu = numpy.array([x1 - x0, y1 - y0]) / L
points = [point1]
for nn in range(pieces):
point = point1 + uu * (nn + 1) * delta
points.append(point)
return numpy.array(points)
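# Illustrative example: points_between_points((0.0, 0.0), (0.0, 1.0), 0.25)
# yields the five points (0, 0), (0, 0.25), (0, 0.5), (0, 0.75) and (0, 1.0).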
def points_along_line(line, delta):
"""Calculate a list of points along a line with a given delta
:param line: Numeric array of points (longitude, latitude).
:type line: numpy.ndarray
:param delta: Decimal number to be used as step
:type delta: float
:returns: Numeric array of points (longitude, latitude).
:rtype: numpy.ndarray
Note:
Sources
http://paulbourke.net/geometry/polyarea/
http://en.wikipedia.org/wiki/Centroid
"""
# Make sure it is numeric
P = numpy.array(line)
points = []
for i in range(len(P) - 1):
pts = points_between_points(P[i], P[i + 1], delta)
# If the first point of this list is the same
# as the last one recorded, do not use it
if len(points) > 0:
if numpy.allclose(points[-1], pts[0]):
pts = pts[1:]
points.extend(pts)
C = numpy.array(points)
return C
def combine_polygon_and_point_layers(layers):
"""Combine polygon and point layers
:param layers: List of vector layers of type polygon or point
:type layers: list
:returns: One point layer with all input point layers and centroids from
all input polygon layers.
:rtype: numpy.ndarray
:raises: InaSAFEError (in case attribute names are not the same.)
"""
# This is to implement issue #276
print layers
def get_ring_data(ring):
"""Extract coordinates from OGR ring object
:param ring: OGR ring object
:type ring:
:returns: Nx2 numpy array of vertex coordinates (lon, lat)
:rtype: numpy.array
"""
N = ring.GetPointCount()
# noinspection PyTypeChecker
A = numpy.zeros((N, 2), dtype='d')
# FIXME (Ole): Is there any way to get the entire data vectors?
for j in range(N):
A[j, :] = ring.GetX(j), ring.GetY(j)
# Return ring as an Nx2 numpy array
return A
def get_polygon_data(G):
"""Extract polygon data from OGR geometry
:param G: OGR polygon geometry
:return: List of InaSAFE polygon instances
"""
# Get outer ring, then inner rings
# http://osgeo-org.1560.n6.nabble.com/
# gdal-dev-Polygon-topology-td3745761.html
number_of_rings = G.GetGeometryCount()
# Get outer ring
outer_ring = get_ring_data(G.GetGeometryRef(0))
# Get inner rings if any
inner_rings = []
if number_of_rings > 1:
for i in range(1, number_of_rings):
inner_ring = get_ring_data(G.GetGeometryRef(i))
inner_rings.append(inner_ring)
# Return Polygon instance
return Polygon(outer_ring=outer_ring,
inner_rings=inner_rings)
|
danylaksono/inasafe
|
safe/storage/utilities.py
|
Python
|
gpl-3.0
| 35,117
|
[
"NetCDF"
] |
fb16cde37570cdaf9ca9f4b92b47fdb18c99b69b00a2a457337e9c96811a084f
|
#!/usr/bin/env python
# module implementing Felix's artefact removal method, based on the earlier R version
import sys, os, os.path
import numpy as np
import scipy as sp
import numpy.random as rng
import scipy.interpolate as spi
import scipy.stats as sst
import scipy.signal as sig
# for the moment, we keep these locally rather than requiring separate installation
import wavepy as wv
import lowess
# our signal generator module
import siggen
# calculate a running function of some data -- SD by default
# currently uses a shuffled-extension policy for boundaries, may add other options later
def running ( x, margin=10, func=np.std ):
before = x[range(margin)]
after = x[range(len(x) - margin, len(x))]
rng.shuffle(before)
rng.shuffle(after)
padded = np.concatenate((before,x,after))
result = np.zeros(len(x))
for ii in range(len(x)):
result[ii] = func(padded[range(ii, ii + 2 * margin + 1)])
return result
# segment a data array (assumed non-negative) based on a simple threshold
# main use case is that x is the output of running(), above, but this is not required
# TODO: allow elimination of small intervals
def segment ( x, threshold ):
thrx = x > threshold
ends = np.where(np.concatenate((np.diff(thrx), np.array([1]))))[0]
starts = np.concatenate((np.array([0]), 1 + ends[range(len(ends) - 1)]))
bad = thrx[starts]
return { 'start':starts, 'end':ends, 'bad':bad }
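# Illustrative example: segment(np.array([0, 0, 1, 1, 0]), 0.5) gives
# start = [0, 2, 4], end = [1, 3, 4], bad = [False, True, False], i.e.
# samples 2-3 form a single above-threshold ("bad") segment.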
# fit a cubic smoothing spline to a (bad) data segment and subtract it
# smoothness is essentially the mean SSE of the resulting fit
# (ie, we scale by len(x) for the call to UnivariateSpline)
# this relates in some unhelpful way to the p parameter in Matlab's csaps equivalent
# (as used in the original implementation) -- so a sensible default will need to be empirically determined...
def fit_spline ( x, t=None, smoothness=None ):
if smoothness is None:
smoothness = 0.02 * np.std(x)
if t is None:
t = np.linspace(0,1,len(x))
model = spi.UnivariateSpline(t, x, s=smoothness * len(x))
base = model(t)
return { 'model':model, 'baseline':base, 'signal': x - base, 't':t }
# fit a lowess (1st order) local smoothing spline, with broadly the same
# consequences as above, but no model is returned (lowess currently doesn't create one)
# span is the smoothing span, in (0, 1], controlling the smoothness
# iter is the number of robustness iterations -- higher may be less biased, but slower
def fit_lowess ( x, t=None, span=0.3, iter=4 ):
if t is None:
t = np.linspace(0,1,len(x))
base = lowess.lowess(t, x, f=span, iter=iter)
return { 'baseline':base, 'signal': x - base, 't':t }
# calculate offset for a segment wrt to the previous one by a mean value determined
# from some portion of each according to the ad hoc rules in Table 1 of the paper
def find_offset ( x1, x2, hz=20, alpha=None, beta=None ):
if alpha is None:
alpha = np.round(hz/3)
if beta is None:
beta = np.round(hz * 2)
l1 = len(x1)
if l1 < alpha:
a = np.mean(x1)
elif l1 < beta:
a = np.mean(x1[(-alpha):])
else:
theta1 = int(np.ceil(l1/10))
a = np.mean(x1[(-theta1):])
l2 = len(x2)
if l2 < alpha:
b = np.mean(x2)
elif l2 < beta:
b = np.mean(x2[:alpha])
else:
theta2 = int(np.ceil(l2/10))
b = np.mean(x2[:theta2])
return a-b
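# Reading of the rules: alpha is roughly a third of a second of samples and
# beta two seconds; segments longer than beta are offset using the mean of
# the last tenth of x1 and the first tenth of x2.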
# combined artefact removal algorithm
def mara ( x, margin, thresh, hz=20, smoothness=None, func=np.std, alpha=None, beta=None, intermediates=True ):
criterion = running(x, margin, func)
segs = segment(criterion, thresh)
nn = len(segs['start'])
pieces = [None] * nn
fits = [None] * nn
for ii in range(len(segs['start'])):
if segs['bad'][ii]:
fits[ii] = fit_spline(x[segs['start'][ii]:(segs['end'][ii]+1)], smoothness=smoothness)
pieces[ii] = fits[ii]['signal']
else:
pieces[ii] = x[segs['start'][ii]:(segs['end'][ii]+1)]
offsets = [0] * nn
for ii in range(1, nn):
offsets[ii] = find_offset ( pieces[ii-1], pieces[ii], hz, alpha, beta )
pieces[ii] = pieces[ii] + offsets[ii]
final = np.concatenate(pieces)
if intermediates:
return { 'criterion' : criterion,
'segments' : segs,
'pieces' : pieces,
'fits' : fits,
'shifts' : offsets,
'final' : final }
else:
return final
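# Minimal usage sketch (mirroring the defaults used by test() below):
# cleaned = mara(x, margin=15, thresh=0.5, hz=20, intermediates=False)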
# simulate a NIRI signal as a mixture of sinusoidal and noise components
# defaults are as described in the paper
# f is frequency in Hz, mu is component amplitude, gamma is gaussian noise sd
# phi is an optional phase shift, lo & hi specify rescaling
def niri ( n=5000,
hz=20,
f=[1, 0.25, 0.1, 0.04],
mu=[0.6, 0.2, 0.9, 1],
gamma=[0.01, 0.01, 0.01, 0.05],
phi=[0, 0, 0, 0],
lo=-1,
hi=1 ):
tt = 2 * np.pi * np.array(range(n), dtype=np.float32)/hz
result = np.zeros(n)
for ii in range(len(f)):
result += mu[ii] * np.sin(f[ii] * tt) + gamma[ii] * rng.randn(n)
return siggen.rescale(result, lo, hi)
# simulate a baseline shift sequence similar to that termed MA1 in the paper
def ma1 ( n=5000, jumps=6, mu=0, dv=3 ):
result = np.zeros(n)
for ii in range(jumps):
idx = int(np.floor(rng.rand(1)[0] * n))
off = rng.randn(1)[0] * dv + mu
if rng.rand(1)[0] < 0.5:
result[:idx] += off
else:
result[idx:] += off
return result
# simulate a spike sequence similar to that termed MA2 in the paper
def ma2 ( n=5000, spikes=6, mu=0, dv=5 ):
result = np.zeros(n)
for ii in range(spikes):
idx = int(np.floor(rng.rand(1)[0] * n))
result[idx] = rng.randn(1)[0] * dv + mu
return result
# stats used in the paper for comparing recovered sequence to known original
def stats ( actual, recovered ):
rms = np.sqrt(np.mean((actual-recovered)**2))
prd = 100 * np.sqrt(np.sum((actual-recovered)**2)/len(actual))
r, p = sst.pearsonr(actual, recovered)
return { 'rms': rms, 'prd': prd, 'r': r, 'p': p }
# test with simulated data
def test ( margin=15, thresh=0.5, hz=20, smth=None, n=5000, jumps=6, spikes=6,
j_mu=0, j_dv=3, s_mu=0, s_dv=5, first_base='zero', data=None, intermediates=False ):
if data is None:
signal = niri ( n, hz )
off = ma1( n, jumps, j_mu, j_dv ) + ma2( n, spikes, s_mu, s_dv )
if first_base == 'zero':
off = off - off[0]
elif first_base == 'centre':
off = off - np.mean(off)
combo = signal + off
else:
signal = data['signal']
off = data['off']
combo = data['combo']
clean = mara( combo, margin, thresh, hz, smth, intermediates=intermediates )
if intermediates:
st = stats( signal, clean['final'] )
else:
st = stats( signal, clean )
return { 'signal': signal, 'off': off, 'combo': combo, 'clean': clean, 'stats': st }
# Felix's slightly dubious dispersion measure -- product of std dev and MAD (why?)
def std_mad ( x ):
return np.std(x) * mad(x)
# median absolute deviation
# why this isn't defined in SciPy be default I have no idea
# here we define only for a 1d array
# default scale factor taken from R -- this may not match the Matlab original
def mad ( x, scale=1.4826 ):
return scale * np.median(np.abs(x - np.median(x)))
# Felix's multiscale SD discontinuity detection
def msddd ( x, alpha=1e-5, kmin=1, kmax=52, step=10 ):
wins = range(kmin, kmax, step)
vsg = np.zeros((len(wins), len(x)))
for ii in range(len(wins)):
vsg[ii,] = running(x, margin=wins[ii], func=std_mad)
return discontinuities(vsg, alpha)
# Matt's wavelet-based discontinuity detection
def mswdd ( x, alpha=1e-5, nlevels=6, boundary=100, prop=0.1 ):
# pad to the next power of two in size
N = len(x)
maxlevs = int(np.ceil(np.log2(N)))
newlen = 2 ** (1 + maxlevs)
padlen = newlen - N
boundary = int(np.min((boundary, np.floor(prop * N))))
padbefore = rng.choice(x[0:boundary], int(np.ceil(padlen/2.0)))
padafter = rng.choice(x[(N-boundary+1):N], int(np.floor(padlen/2.0)))
padded = np.concatenate((padbefore, x, padafter))
# get wavelet transform
J = int(np.min((nlevels + 1, maxlevs + 1)))
vsg = np.reshape(wv.dwt.swt(padded, J, 'db1')[0], (J, newlen))
# shift rows to align the scale levels
shift = newlen/2
for ii in range(1, vsg.shape[0]):
idx = range(newlen - shift, newlen)
idx.extend(range(newlen - shift))
vsg[ii,] = vsg[ii, idx]
shift = shift/2
# drop 1st (DC) row and padding
vsg = vsg[1:,len(padbefore):(len(padbefore)+N)]
return discontinuities(vsg, alpha)
# shared outlier-based discontinuity detection
def discontinuities ( vsg, alpha=1e-5 ):
nr, nc = vsg.shape
vout = np.zeros((nr, nc))
for ii in range(nr):
vout[ii, find_outliers(vsg[ii,], alpha)] = 1
idx2 = np.sum(vout, 0)
idx1 = np.flatnonzero(idx2)
asr = 100 * float(len(idx1))/nc
return { 'vsg':vsg, 'vout':vout, 'idx1':idx1, 'idx2':idx2, 'asr':asr }
# return index of outliers in a data set, as determined by a Thompson tau test
def find_outliers ( x, alpha=1e-5 ):
result = []
X = x.copy()
mr = np.median(X)
q23 = np.percentile(X, [25, 75])
sr = (q23[1] - q23[0]) / 1.349
val = np.max(np.abs(X - mr))
while len(X) > 2 and val > sr * tau(len(X), alpha):
# indices of outlier value in original array
result.extend(np.flatnonzero(np.abs(x - mr) == val))
# remove outliers from working array
X = X[np.flatnonzero(np.abs(X - mr) < val)]
# rinse and repeat
mr = np.median(X)
q23 = np.percentile(X, [25, 75])
sr = (q23[1] - q23[0]) / 1.349
return result
# test value for the Thompson outlier test
# N is the data count, alpha the significance level
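# note: ppf(alpha/2, N-2) is negative, so t * (1 - N) equals |t| * (N - 1),
# the usual Thompson tau numerator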
def tau ( N, alpha=1e-5 ):
t = sst.t.ppf(alpha/2, N-2)
return t * (1 - N) / (np.sqrt(N) * np.sqrt(N - 2 + t * t))
# command-line invocation -- currently runs test with all defaults
# dumping results to stdout as tab-delim text, stats to stderr
if __name__ == '__main__':
tt = test()
print >> sys.stderr, 'Recovered signal statistics'
print >> sys.stderr, 'RMSE: %g' % tt['stats']['rms']
print >> sys.stderr, 'PRD: %g%%' % tt['stats']['prd']
print >> sys.stderr, 'r: %g' % tt['stats']['r']
print 'signal\toffset\tcombo\tclean'
for ii in range(len(tt['signal'])):
print '%g\t%g\t%g\t%g' % ( tt['signal'][ii], tt['off'][ii], tt['combo'][ii], tt['clean'][ii] )
|
bcmd/BCMD
|
batch/felix.py
|
Python
|
gpl-2.0
| 10,844
|
[
"Gaussian"
] |
e11a655c5c6add1e511fcc6170f4565f950aef6ffe0d5bd9c8eff2117b9aa47c
|
import getpass
import sys
from splinter import Browser
with Browser() as browser:
# Visit URL
url = 'https://www.sasktel.com/iam/SasktelLogin.jsp'
browser.visit(url)
username = browser.find_by_xpath('//input[contains(@name, "username")]')[0]
username.fill(sys.argv[1])
password = browser.find_by_xpath('//input[contains(@name, "password")]')[0]
password.fill(getpass.getpass())
browser.find_by_xpath('//input[contains(@name, "submitaccount")]').first.click()
figures = browser.find_by_xpath('//span[contains(text(), "$")]')
for figure in figures:
print figure.value
|
ezralalonde/water-bill-summary
|
sasktel.py
|
Python
|
bsd-3-clause
| 619
|
[
"VisIt"
] |
dfbb6efb151064c2d4ccd340af66a7099babb3911f3351f4b1634ae72adab889
|
"""
Physical units and dimensions.
The base class is Unit, where all here defined units (~200) inherit from.
The find_unit function can help you find units for a given quantity:
>>> import sympy.physics.units as u
>>> u.find_unit('coul')
['coulomb', 'coulombs']
>>> u.find_unit(u.charge)
['C', 'charge', 'coulomb', 'coulombs']
>>> u.coulomb
A*s
Units are always given in terms of base units that have a name and
an abbreviation:
>>> u.A.name
'ampere'
>>> u.ampere.abbrev
'A'
The generic name for a unit (like 'length', 'mass', etc...)
can help you find units:
>>> u.find_unit('magnet')
['magnetic_flux', 'magnetic_constant', 'magnetic_flux_density']
>>> u.find_unit(u.magnetic_flux)
['Wb', 'wb', 'weber', 'webers', 'magnetic_flux']
If, for a given session, you wish to add a unit you may do so:
>>> u.find_unit('gal')
[]
>>> u.gal = 4*u.quart
>>> u.gal/u.inch**3
231
To see a given quantity in terms of some other unit, divide by the desired
unit:
>>> mph = u.miles/u.hours
>>> (u.m/u.s/mph).n(2)
2.2
The units are defined in terms of base units, so when you divide similar
units you will obtain a pure number. This means, for example, that if you
divide a real-world mass (like grams) by the atomic mass unit (amu) you
will obtain Avogadro's number. To obtain the answer in moles you
should divide by the unit ``avogadro``:
>>> u.grams/u.amu
602214179000000000000000
>>> _/u.avogadro
mol
For chemical calculations the unit ``mmu`` (molar mass unit) has been
defined so this conversion is handled automatically. For example, the
number of moles in 1 kg of water might be calculated as:
>>> (u.kg/(18*u.mmu)).n(3)
55.5*mol
If you need the number of atoms in a mol as a pure number you can use
``avogadro_number`` but if you need it as a dimensional quantity you should use
``avogadro_constant``. (``avogadro`` is a shorthand for the dimensional
quantity.)
>>> u.avogadro_number
602214179000000000000000
>>> u.avogadro_constant
602214179000000000000000/mol
"""
from __future__ import print_function, division
from sympy import Rational, pi
from sympy.core import AtomicExpr
class Unit(AtomicExpr):
"""
Base class for base unit of physical units.
>>> from sympy.physics.units import Unit
>>> Unit("meter", "m")
m
Other units are derived from base units:
>>> import sympy.physics.units as u
>>> cm = u.m/100
>>> 100*u.cm
m
"""
is_positive = True # make sqrt(m**2) --> m
is_commutative = True
is_number = False
__slots__ = ["name", "abbrev"]
def __new__(cls, name, abbrev, **assumptions):
obj = AtomicExpr.__new__(cls, **assumptions)
assert isinstance(name, str), repr(type(name))
assert isinstance(abbrev, str), repr(type(abbrev))
obj.name = name
obj.abbrev = abbrev
return obj
def __getnewargs__(self):
return (self.name, self.abbrev)
def __eq__(self, other):
return isinstance(other, Unit) and self.name == other.name
def __hash__(self):
return super(Unit, self).__hash__()
def _hashable_content(self):
return (self.name, self.abbrev)
@property
def free_symbols(self):
return set()
# Dimensionless
percent = percents = Rational(1, 100)
permille = permilles = Rational(1, 1000)
ten = Rational(10)
yotta = ten**24
zetta = ten**21
exa = ten**18
peta = ten**15
tera = ten**12
giga = ten**9
mega = ten**6
kilo = ten**3
deca = ten**1
deci = ten**-1
centi = ten**-2
milli = ten**-3
micro = ten**-6
nano = ten**-9
pico = ten**-12
femto = ten**-15
atto = ten**-18
zepto = ten**-21
yocto = ten**-24
rad = radian = radians = 1
deg = degree = degrees = pi/180
sr = steradian = steradians = 1
# Base units
length = m = meter = meters = Unit('meter', 'm')
mass = kg = kilogram = kilograms = Unit('kilogram', 'kg')
time = s = second = seconds = Unit('second', 's')
current = A = ampere = amperes = Unit('ampere', 'A')
temperature = K = kelvin = kelvins = Unit('kelvin', 'K')
amount = mol = mole = moles = Unit('mole', 'mol')
luminosity = cd = candela = candelas = Unit('candela', 'cd')
# Derived units
volume = meter**3
frequency = Hz = hz = hertz = 1/s
force = N = newton = newtons = m*kg/s**2
energy = J = joule = joules = N*m
power = W = watt = watts = J/s
pressure = Pa = pa = pascal = pascals = N/m**2
charge = C = coulomb = coulombs = s*A
voltage = v = V = volt = volts = W/A
resistance = ohm = ohms = V/A
conductance = S = siemens = mho = mhos = A/V
capacitance = F = farad = farads = C/V
magnetic_flux = Wb = wb = weber = webers = J/A
magnetic_flux_density = T = tesla = teslas = V*s/m**2
inductance = H = henry = henrys = V*s/A
speed = m/s
acceleration = m/s**2
density = kg/m**3
# Common length units
km = kilometer = kilometers = kilo*m
dm = decimeter = decimeters = deci*m
cm = centimeter = centimeters = centi*m
mm = millimeter = millimeters = milli*m
um = micrometer = micrometers = micron = microns = micro*m
nm = nanometer = nanometers = nano*m
pm = picometer = picometers = pico*m
ft = foot = feet = Rational('0.3048')*m
inch = inches = Rational('25.4')*mm
yd = yard = yards = 3*ft
mi = mile = miles = 5280*ft
# Common volume and area units
l = liter = liters = m**3 / 1000
dl = deciliter = deciliters = deci*l
cl = centiliter = centiliters = centi*l
ml = milliliter = milliliters = milli*l
# Common time units
ms = millisecond = milliseconds = milli*s
us = microsecond = microseconds = micro*s
ns = nanosecond = nanoseconds = nano*s
ps = picosecond = picoseconds = pico*s
minute = minutes = 60*s
h = hour = hours = 60*minute
day = days = 24*hour
sidereal_year = sidereal_years = Rational('31558149.540')*s
tropical_year = tropical_years = Rational('365.24219')*day
common_year = common_years = Rational('365')*day
julian_year = julian_years = Rational('365.25')*day
year = years = tropical_year
# Common mass units
g = gram = grams = kilogram / kilo
mg = milligram = milligrams = milli * g
ug = microgram = micrograms = micro * g
#----------------------------------------------------------------------------
# Physical constants
#
c = speed_of_light = 299792458 * m/s
G = gravitational_constant = Rational('6.67428') * ten**-11 * m**3 / kg / s**2
u0 = magnetic_constant = 4*pi * ten**-7 * N/A**2
e0 = electric_constant = 1/(u0 * c**2)
Z0 = vacuum_impedance = u0 * c
planck = Rational('6.62606896') * ten**-34 * J*s
hbar = planck / (2*pi)
avogadro_number = Rational('6.02214179') * 10**23
avogadro = avogadro_constant = avogadro_number / mol
boltzmann = Rational('1.3806505') * ten**-23 * J / K
gee = gees = Rational('9.80665') * m/s**2
atmosphere = atmospheres = atm = 101325 * pascal
kPa = kilo*Pa
bar = bars = 100*kPa
pound = pounds = 0.45359237 * kg * gee # exact
psi = pound / inch ** 2
dHg0 = 13.5951 # approx value at 0 C
mmHg = dHg0 * 9.80665 * Pa
amu = amus = gram / avogadro / mol
mmu = mmus = gram / mol
quart = quarts = Rational(231, 4) * inch**3
eV = 1.602176487e-19 * J
# Other convenient units and magnitudes
ly = lightyear = lightyears = c*julian_year
au = astronomical_unit = astronomical_units = 149597870691*m
def find_unit(quantity):
"""
Return a list of matching units names.
if quantity is a string -- units containing the string `quantity`
if quantity is a unit -- units having matching base units
Examples
========
>>> from sympy.physics import units as u
>>> u.find_unit('charge')
['charge']
>>> u.find_unit(u.charge)
['C', 'charge', 'coulomb', 'coulombs']
>>> u.find_unit('volt')
['volt', 'volts', 'voltage']
>>> u.find_unit(u.inch**3)[:5]
['l', 'cl', 'dl', 'ml', 'liter']
"""
import sympy.physics.units as u
rv = []
if isinstance(quantity, str):
rv = [i for i in dir(u) if quantity in i]
else:
units = quantity.as_coeff_Mul()[1]
for i in dir(u):
try:
if units == eval('u.' + i).as_coeff_Mul()[1]:
rv.append(str(i))
except:
pass
return sorted(rv, key=len)
# Delete this so it doesn't pollute the namespace
del Rational, pi
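# A minimal usage sketch, assuming this module is importable as
# sympy.physics.units (every name used below is defined above):
#
#     >>> import sympy.physics.units as u
#     >>> u.hour/u.minute                  # similar units divide to a pure number
#     60
#     >>> mph = u.mile/u.hour
#     >>> (mph/(u.m/u.s)).n(3)             # one mph expressed in metres per second
#     0.447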
|
wdv4758h/ZipPy
|
edu.uci.python.benchmark/src/benchmarks/sympy/sympy/physics/units.py
|
Python
|
bsd-3-clause
| 8,212
|
[
"Avogadro"
] |
a64aaedddf8cf06d62899a930b78170550c81da7306a8e154e6e4c27b35de0ac
|
"""
@name: Modules/Core/__init__.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2014-2020 by D. Brian Kimmel
@note: Created on Mar 1, 2014
@license: MIT License
Core is the main portion of every PyHouse node.
It is always present.
"""
__updated__ = '2020-02-13'
__version_info__ = (20, 2, 10)
__version__ = '.'.join(map(str, __version_info__))
from typing import Union
class ConfigInformation:
""" A collection of Yaml data used for Configuration
==> PyHouse._Config.xxx
"""
def __init__(self):
self.YamlFileName = None
# self.YamlTree = {} # ConfigFileInformation()
class ConfigFileInformation:
""" ==? pyhouse_obj._Config {}
Used to record where each confile is located so it can be updated.
"""
def __init__(self) -> None:
        self.Name: Union[str, None] = None  # LowerCase filename without .yaml
self.Path: Union[str, None] = None # Full path to file
class AccessInformation:
"""
"""
def __init__(self):
"""
"""
self.Name = None # Username
self.Password = None
self.ApiKey = None
self.AccessKey = None
class HostInformation:
""" Used for all host related information
This is usually not completely filled in.
Twisted kinda likes hostnames instead of IP addresses.
"""
def __init__(self):
self.Name = None
self.Port = None
self.IPv4 = None
self.IPv6 = None
class RoomLocationInformation:
"""
"""
def __init__(self):
self.Name = None
# ## END DBK
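# A minimal usage sketch (hypothetical values; the classes above are plain
# data holders, so configuration amounts to direct attribute assignment):
#
#     host = HostInformation()
#     host.Name = 'pyhouse-node-1'
#     host.Port = 8580
#     host.IPv4 = '192.168.1.10'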
|
DBrianKimmel/PyHouse
|
Project/src/Modules/Core/Config/__init__.py
|
Python
|
mit
| 1,588
|
[
"Brian"
] |
590689b30acad114bc423c38fe7d302d418479dfadf352871a4946e5fbadb4ac
|
'''
Determine optimum aperture and use Source Extractor to get photometry
'''
import sys
import os
from subprocess import call, Popen, PIPE
import glob
import math
import numpy as np
import Sources
import Quadtree
import createSexConfig
import createSexParam
import findBestAperture
import calcZeropoint
import makeRegionFile
import phot_utils
import geom_utils
verbose=True
def associate(list1, tree2, tree3):
dist = 0.001
matches = []
for entry in list1:
        match2 = tree2.match(entry.ra, entry.dec)
        if match2 is not None and geom_utils.equnorm(entry.ra, entry.dec, match2.ra, match2.dec) <= dist:
            match3 = tree3.match(entry.ra, entry.dec)
            if match3 is not None and geom_utils.equnorm(entry.ra, entry.dec, match3.ra, match3.dec) <= dist:
# Match2 is r-magnitudes
entry.match2 = match2.mag_aper
# Match3 is i-magnitudes
entry.match3 = match3.mag_aper
matches.append(entry)
return matches
def get_photometry(system, in_images):
subs= []
imgs = []
with(open(in_images, "r")) as f:
for line in f:
cols = line.split()
subs.append(cols[0])
imgs.append(cols[1])
filter_file = "default.conv"
param_file = createSexParam.createSexParam(system, False)
path = '/Users/alexawork/Documents/GlobularClusters/Data/NGC4621'
for galsub, img in zip(subs, imgs):
image = phot_utils.load_fits(img, verbose=False)
path = os.getcwd()
fname = system + '_' + img[-6]
seeing = [1, 1]
satur = image[0].header['SATURATE']
#ap = findBestAperture.findBestAperture(path, img, satur, seeing[0])
ap = 5
# Extract sources with initial rough estimate of seeing
config = createSexConfig.createSexConfig(fname, filter_file,
param_file, satur, seeing[0], "nill", ap, False)
call(['sex', '-c', config, galsub, img])
seeing = phot_utils.calc_seeing(fname + '.cat', verbose=verbose)
"If the aperture is less than the seeing round it up to next interger"
if ap < seeing[1]:
ap = math.ceil(seeing[1])
# Re-extract with refined seeing
config = createSexConfig.createSexConfig(fname, filter_file,
param_file, satur, seeing[0], "nill", ap, False)
call(['sex', '-c', config, galsub, img])
# Re-name the check images created
checks = (glob.glob('*.fits'))
if not os.path.isdir('CheckImages'):
os.mkdir('CheckImages')
for check in checks:
os.rename(check, fname + '_' + check)
call(['mv', fname + '_' + check, 'CheckImages'])
def correct_mags(galaxy, catalog, band):
print "band: ", band
zp = calcZeropoint.calcZP(galaxy, catalog, band)
if verbose:
print "Zeropoint for " + band + "-band", zp
with open(catalog, 'r') as f:
tmp = filter(lambda line: phot_utils.no_head(line), f)
sources = map(lambda line: Sources.SCAMSource(line), tmp)
for source in sources:
source.mag_aper = round(source.mag_aper + zp, 3)
source.mag_auto = round(source.mag_auto + zp, 3)
source.mag_best = round(source.mag_best + zp, 3)
new_catalog = 'zpcorrected_' + catalog
with open(new_catalog, 'w') as output:
output.write(''.join(map(lambda source: '%5s' % source.name + '%15s' % source.flux_iso +
'%15s' % source.fluxerr_iso + '%15s' % source.flux_aper +
'%15s' % source.fluxerr_aper + '%15s' % source.ximg + '%15s' % source.yimg +
'%15s' % source.ra + '%15s' % source.dec + '%15s' % source.mag_auto +
'%15s' % source.mag_auto_err + '%15s' % source.mag_best +
'%15s' % source.mag_best_err + '%15s' % source.mag_aper +
'%15s' % source.mag_aper_err + '%15s' % source.a_world +
'%15s' % source.a_world_err + '%15s' % source.b_world +
'%15s' % source.b_world_err + '%15s' % source.theta_err +
'%15s' % source.theta + '%15s' % source.isoarea + '%15s' % source.mu +
'%15s' % source.flux_radius + '%15s' % source.flags + '%15s' % source.fwhm +
'%15s' % source.elogation + '%15s' % source.vignet + '\n', sources)))
return new_catalog
def make_trees(catalog):
with open(catalog, 'r') as f:
tmp = filter(lambda line: phot_utils.no_head(line), f)
tmp2 = map(lambda line: Sources.SCAMSource(line), tmp)
ra = map(lambda line: line.ra, tmp2)
dec = map(lambda line: line.dec, tmp2)
sources = Quadtree.ScamEquatorialQuadtree(min(ra), min(dec),
max(ra), max(dec))
map(lambda line: sources.insert(line), tmp2)
#if verbose:
# makeRegionFile.makeRegionFile('NGC4621_i.cat', 'NGC4621_i.reg', 10, 'blue')
return sources
def main():
# get_photometry(sys.argv[1], sys.argv[2])
# catalogs = (glob.glob('NGC4621*.cat'))
# for catalog in catalogs:
# if verbose:
# print "Working on catalog: ", catalog
# corrected_catalog = correct_mags(sys.argv[1], catalog, catalog[-5])
catalogs = (glob.glob('zpcorrected*.cat'))
trees = {}
for catalog in catalogs:
trees[catalog[-5]] = make_trees(catalog)
m59_ucd3_i = trees['i'].match(190.54601, 11.64478)
m59_ucd3_g = trees['g'].match(190.54601, 11.64478)
m59_ucd3_r = trees['r'].match(190.54601, 11.64478)
print '\n'
print "M59-UCD3's Location in catalog: ", m59_ucd3_i.name
print 'MAG_AUTO: '
print "I Mag and G Mag: ", m59_ucd3_i.mag_auto, m59_ucd3_g.mag_auto
print 'M59-UCD3 g-i: ', m59_ucd3_g.mag_auto - m59_ucd3_i.mag_auto
print 'MAG_APER: '
print "I Mag and G Mag: ", m59_ucd3_i.mag_aper, m59_ucd3_g.mag_aper
print 'M59-UCD3 g-i: ', m59_ucd3_g.mag_aper - m59_ucd3_i.mag_aper
print 'M59-UCD3 FWHM: ', m59_ucd3_g.fwhm*0.2
print 'M59_UCD3 Half-Light Radius: ', m59_ucd3_g.flux_radius
print "Coordinates from i-band catalog - "
print phot_utils.convertRA(m59_ucd3_i.ra), phot_utils.convertDEC(m59_ucd3_i.dec)
print "Coordinates from g-band catalog - "
print phot_utils.convertRA(m59_ucd3_g.ra), phot_utils.convertDEC(m59_ucd3_g.dec)
print '\n'
print '\n'
m59cO_i = trees['i'].match(190.48056, 11.66771)
m59cO_g = trees['g'].match(190.48056, 11.66771)
m59cO_r = trees['r'].match(190.48056, 11.66771)
print "M59cO's Location in catalog: ", m59cO_i.name
print "MAG_AUTO: "
print "I Mag and G Mag: ", m59cO_i.mag_auto, m59cO_g.mag_auto
print 'M59cO g-i: ', m59cO_g.mag_auto - m59cO_i.mag_auto
print "MAG_APER: "
print "I Mag and G Mag: ", m59cO_i.mag_aper, m59cO_g.mag_aper
print 'M59cO g-i: ', m59cO_g.mag_aper - m59cO_i.mag_aper
print 'M59cO Half-Light Radius: ', m59cO_g.flux_radius
print "Coordinates from i-band catalog - "
print phot_utils.convertRA(m59cO_i.ra), phot_utils.convertDEC(m59cO_i.dec)
print "Coordinates from g-band catalog - "
print phot_utils.convertRA(m59cO_g.ra), phot_utils.convertDEC(m59cO_g.dec)
print '\n'
print '\n'
m59_gcx_i = trees['i'].match(190.50245, 11.65993)
m59_gcx_g = trees['g'].match(190.50245, 11.65993)
m59_gcx_r = trees['r'].match(190.50245, 11.65993)
print "M59_gcx's Location in catalog: ", m59cO_i.name
print "MAG_AUTO: "
print "I Mag and G Mag: ", m59cO_i.mag_auto, m59cO_g.mag_auto
print 'M59_gcx g-i: ', m59cO_g.mag_auto - m59cO_i.mag_auto
print "MAG_APER: "
print "I Mag and G Mag: ", m59cO_i.mag_aper, m59cO_g.mag_aper
print 'M59_gcx g-i: ', m59cO_g.mag_aper - m59cO_i.mag_aper
print 'M59_gcx Half-Light Radius: ', m59cO_g.flux_radius
print "Coordinates from i-band catalog - "
print phot_utils.convertRA(m59cO_i.ra), phot_utils.convertDEC(m59cO_i.dec)
print "Coordinates from g-band catalog - "
print phot_utils.convertRA(m59cO_g.ra), phot_utils.convertDEC(m59cO_g.dec)
print '\n'
print '\n'
m59_gcy_i = trees['i'].match(190.51231, 11.63986)
m59_gcy_g = trees['g'].match(190.51231, 11.63986)
m59_gcy_r = trees['r'].match(190.51231, 11.63986)
print "M59_gcy's Location in catalog: ", m59cO_i.name
print "MAG_AUTO: "
print "I Mag and G Mag: ", m59cO_i.mag_auto, m59cO_g.mag_auto
print 'M59_gcy g-i: ', m59cO_g.mag_auto - m59cO_i.mag_auto
print "MAG_APER: "
print "I Mag and G Mag: ", m59cO_i.mag_aper, m59cO_g.mag_aper
print 'M59_gcy g-i: ', m59cO_g.mag_aper - m59cO_i.mag_aper
print 'M59_gcy Half-Light Radius: ', m59cO_g.flux_radius
print "Coordinates from i-band catalog - "
print phot_utils.convertRA(m59cO_i.ra), phot_utils.convertDEC(m59cO_i.dec)
print "Coordinates from g-band catalog - "
print phot_utils.convertRA(m59cO_g.ra), phot_utils.convertDEC(m59cO_g.dec)
print '\n'
print '\n'
ngc_4621_aimss1_i = trees['i'].match(190.47050, 11.63001)
ngc_4621_aimss1_g = trees['g'].match(190.47050, 11.63001)
ngc_4621_aimss1_r = trees['r'].match(190.47050, 11.63001)
print "ngc_4621_aimss's Location in catalog: ", m59cO_i.name
print "MAG_AUTO: "
print "I Mag and G Mag: ", m59cO_i.mag_auto, m59cO_g.mag_auto
print 'ngc_4621_aimss g-i: ', m59cO_g.mag_auto - m59cO_i.mag_auto
print "MAG_APER: "
print "I Mag and G Mag: ", m59cO_i.mag_aper, m59cO_g.mag_aper
print 'ngc_4621_aimss g-i: ', m59cO_g.mag_aper - m59cO_i.mag_aper
print 'ngc_4621_aimss Half-Light Radius: ', m59cO_g.flux_radius
print "Coordinates from i-band catalog - "
print phot_utils.convertRA(m59cO_i.ra), phot_utils.convertDEC(m59cO_i.dec)
print "Coordinates from g-band catalog - "
print phot_utils.convertRA(m59cO_g.ra), phot_utils.convertDEC(m59cO_g.dec)
print '\n'
print '\n'
# with open('NGC4621_g.cat', 'r') as catalog:
# tmp = filter(lambda line: phot_utils.no_head(line), catalog)
# g_sources = map(lambda source: Sources.SCAMSource(source), tmp)
#
# r_sources = make_trees('NGC4621_r.cat')
# i_sources = make_trees('NGC4621_i.cat')
#
# matches = associate(g_sources, r_sources, i_sources)
#
# with open('matched_gri.cat', 'w') as out:
# out.write()
if __name__ == '__main__':
sys.exit(main())
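# A minimal usage sketch (hypothetical catalog name; assumes the Quadtree,
# Sources and phot_utils modules from this repository are importable):
#
#     i_tree = make_trees('zpcorrected_NGC4621_i.cat')
#     source = i_tree.match(190.54601, 11.64478)  # nearest source to this RA/Dec
#     if source is not None:
#         print(source.name, source.mag_aper)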
|
SAGES-UCSC/Photometry
|
Examples/UCD_Photometry.py
|
Python
|
mit
| 10,373
|
[
"Galaxy"
] |
346e295a0e8e7463276522bd711208d66cbeda313d8d8eb43c85748e00a1f900
|
import director.applogic as app
from director import lcmUtils
from director import transformUtils
from director import visualization as vis
from director import filterUtils
from director import drcargs
from director.shallowCopy import shallowCopy
from director.timercallback import TimerCallback
from director import vtkNumpy
from director import objectmodel as om
import director.vtkAll as vtk
from director.debugVis import DebugData
import PythonQt
from PythonQt import QtCore, QtGui
import bot_core as lcmbotcore
import numpy as np
from director.simpletimer import SimpleTimer
from director import ioUtils
import sys
import drc as lcmdrc
import multisense as lcmmultisense
from director.consoleapp import ConsoleApp
class KinectItem(om.ObjectModelItem):
def __init__(self, model):
om.ObjectModelItem.__init__(self, 'Kinect', om.Icons.Eye)
self.model = model
self.scalarBarWidget = None
self.addProperty('Color By', 1,
attributes=om.PropertyAttributes(enumNames=['Solid Color', 'rgb_colors']))
self.addProperty('Updates Enabled', True)
self.addProperty('Framerate', model.targetFps,
attributes=om.PropertyAttributes(decimals=0, minimum=1.0, maximum=30.0, singleStep=1, hidden=False))
self.addProperty('Visible', model.visible)
#self.addProperty('Point Size', model.pointSize,
# attributes=om.PropertyAttributes(decimals=0, minimum=1, maximum=20, singleStep=1, hidden=False))
#self.addProperty('Alpha', model.alpha,
# attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=1.0, singleStep=0.1, hidden=False))
#self.addProperty('Color', QtGui.QColor(255,255,255))
def _onPropertyChanged(self, propertySet, propertyName):
om.ObjectModelItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName == 'Updates Enabled':
if self.getProperty('Updates Enabled'):
self.model.start()
else:
self.model.stop()
#elif propertyName == 'Alpha':
# self.model.setAlpha(self.getProperty(propertyName))
elif propertyName == 'Visible':
self.model.setVisible(self.getProperty(propertyName))
#elif propertyName == 'Point Size':
# self.model.setPointSize(self.getProperty(propertyName))
elif propertyName == 'Framerate':
self.model.setFPS(self.getProperty('Framerate'))
elif propertyName == 'Color By':
self._updateColorBy()
self.model.polyDataObj._renderAllViews()
def _updateColorBy(self):
arrayMap = {
0 : 'Solid Color',
1 : 'rgb_colors'
}
colorBy = self.getProperty('Color By')
arrayName = arrayMap.get(colorBy)
self.model.polyDataObj.setProperty('Color By', arrayName)
class KinectSource(TimerCallback):
def __init__(self, view, _KinectQueue):
self.view = view
self.KinectQueue = _KinectQueue
self.visible = True
self.p = vtk.vtkPolyData()
        utime = self.KinectQueue.getPointCloudFromKinect(self.p)
self.polyDataObj = vis.PolyDataItem('kinect source', shallowCopy(self.p), view)
self.polyDataObj.actor.SetPickable(1)
self.polyDataObj.initialized = False
om.addToObjectModel(self.polyDataObj)
self.queue = PythonQt.dd.ddBotImageQueue(lcmUtils.getGlobalLCMThread())
self.queue.init(lcmUtils.getGlobalLCMThread(), drcargs.args().config_file)
self.targetFps = 30
self.timerCallback = TimerCallback(targetFps=self.targetFps)
self.timerCallback.callback = self._updateSource
#self.timerCallback.start()
def start(self):
self.timerCallback.start()
def stop(self):
self.timerCallback.stop()
def setFPS(self, framerate):
self.targetFps = framerate
self.timerCallback.stop()
self.timerCallback.targetFps = framerate
self.timerCallback.start()
def setVisible(self, visible):
self.polyDataObj.setProperty('Visible', visible)
def _updateSource(self):
p = vtk.vtkPolyData()
utime = self.KinectQueue.getPointCloudFromKinect(p)
if not p.GetNumberOfPoints():
return
cameraToLocalFused = vtk.vtkTransform()
self.queue.getTransform('KINECT_RGB', 'local', utime, cameraToLocalFused)
p = filterUtils.transformPolyData(p,cameraToLocalFused)
self.polyDataObj.setPolyData(p)
if not self.polyDataObj.initialized:
self.polyDataObj.setProperty('Color By', 'rgb_colors')
self.polyDataObj.initialized = True
def init(view):
global KinectQueue, _kinectItem, _kinectSource
KinectQueue = PythonQt.dd.ddKinectLCM(lcmUtils.getGlobalLCMThread())
KinectQueue.init(lcmUtils.getGlobalLCMThread(), drcargs.args().config_file)
_kinectSource = KinectSource(view, KinectQueue)
_kinectSource.start()
sensorsFolder = om.getOrCreateContainer('sensors')
_kinectItem = KinectItem(_kinectSource)
om.addToObjectModel(_kinectItem, sensorsFolder)
# Hasn't been used - currently deactivated
#def renderLastKinectPointCloud():
# # view = view or app.getCurrentRenderView()
# # if view is None:
# # return
# p = vtk.vtkPolyData()
# print("will grab the last point cloud in python \n")
# KinectQueue.getPointCloudFromKinect(p)
# print("grabbed the last point cloud in python, will #render now \n")
# obj = vis.showPolyData (p, 'kinect cloud')
# print("director rendered last point cloud \n")
def startButton():
view = app.getCurrentRenderView()
init(view)
_kinectSource.start()
app.addToolbarMacro('start live kinect', startButton)
|
RobotLocomotion/director
|
src/python/director/kinectlcm.py
|
Python
|
bsd-3-clause
| 5,859
|
[
"VTK"
] |
60b71b5c917d834d5d8db71a7272566a67ee43f69c9f04acffa0a496d728086d
|
"""This file contains code for use with "Think Bayes",
by Allen B. Downey, available from greenteapress.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import math
import numpy
import random
import sys
import correlation
import thinkplot
import matplotlib.pyplot as pyplot
import thinkbayes
INTERVAL = 245/365.0
FORMATS = ['pdf', 'eps']
MINSIZE = 0.2
MAXSIZE = 20
BUCKET_FACTOR = 10
def log2(x, denom=math.log(2)):
"""Computes log base 2."""
return math.log(x) / denom
def SimpleModel():
"""Runs calculations based on a simple model."""
# time between discharge and diagnosis, in days
interval = 3291.0
# doubling time in linear measure is doubling time in volume * 3
dt = 811.0 * 3
# number of doublings since discharge
doublings = interval / dt
# how big was the tumor at time of discharge (diameter in cm)
d1 = 15.5
d0 = d1 / 2.0 ** doublings
print 'interval (days)', interval
print 'interval (years)', interval / 365
print 'dt', dt
print 'doublings', doublings
print 'd1', d1
print 'd0', d0
# assume an initial linear measure of 0.1 cm
d0 = 0.1
d1 = 15.5
# how many doublings would it take to get from d0 to d1
doublings = log2(d1 / d0)
# what linear doubling time does that imply?
dt = interval / doublings
print 'doublings', doublings
print 'dt', dt
# compute the volumetric doubling time and RDT
vdt = dt / 3
rdt = 365 / vdt
print 'vdt', vdt
print 'rdt', rdt
cdf = MakeCdf()
p = cdf.Prob(rdt)
print 'Prob{RDT > 2.4}', 1-p
def MakeCdf():
"""Uses the data from Zhang et al. to construct a CDF."""
n = 53.0
freqs = [0, 2, 31, 42, 48, 51, 52, 53]
ps = [freq/n for freq in freqs]
xs = numpy.arange(-1.5, 6.5, 1.0)
cdf = thinkbayes.Cdf(xs, ps)
return cdf
def PlotCdf(cdf):
"""Plots the actual and fitted distributions.
cdf: CDF object
"""
xs, ps = cdf.xs, cdf.ps
cps = [1-p for p in ps]
# CCDF on logy scale: shows exponential behavior
thinkplot.Clf()
thinkplot.Plot(xs, cps, 'bo-')
thinkplot.Save(root='kidney1',
formats=FORMATS,
xlabel='RDT',
ylabel='CCDF (log scale)',
yscale='log')
# CDF, model and data
thinkplot.Clf()
thinkplot.PrePlot(num=2)
mxs, mys = ModelCdf()
thinkplot.Plot(mxs, mys, label='model', linestyle='dashed')
thinkplot.Plot(xs, ps, 'gs', label='data')
thinkplot.Save(root='kidney2',
formats=FORMATS,
xlabel='RDT (volume doublings per year)',
ylabel='CDF',
title='Distribution of RDT',
axis=[-2, 7, 0, 1],
loc=4)
def QQPlot(cdf, fit):
"""Makes a QQPlot of the values from actual and fitted distributions.
cdf: actual Cdf of RDT
fit: model
"""
xs = [-1.5, 5.5]
thinkplot.Clf()
thinkplot.Plot(xs, xs, 'b-')
xs, ps = cdf.xs, cdf.ps
fs = [fit.Value(p) for p in ps]
thinkplot.Plot(xs, fs, 'gs')
thinkplot.Save(root = 'kidney3',
formats=FORMATS,
xlabel='Actual',
ylabel='Model')
def FitCdf(cdf):
"""Fits a line to the log CCDF and returns the slope.
cdf: Cdf of RDT
"""
xs, ps = cdf.xs, cdf.ps
cps = [1-p for p in ps]
xs = xs[1:-1]
lcps = [math.log(p) for p in cps[1:-1]]
_inter, slope = correlation.LeastSquares(xs, lcps)
return -slope
def CorrelatedGenerator(cdf, rho):
"""Generates a sequence of values from cdf with correlation.
Generates a correlated standard Gaussian series, then transforms to
values from cdf
cdf: distribution to choose from
rho: target coefficient of correlation
"""
def Transform(x):
"""Maps from a Gaussian variate to a variate with the given CDF."""
p = thinkbayes.GaussianCdf(x)
y = cdf.Value(p)
return y
# for the first value, choose from a Gaussian and transform it
x = random.gauss(0, 1)
yield Transform(x)
# for subsequent values, choose from the conditional distribution
# based on the previous value
sigma = math.sqrt(1 - rho**2)
while True:
x = random.gauss(x * rho, sigma)
yield Transform(x)
def UncorrelatedGenerator(cdf, _rho=None):
"""Generates a sequence of values from cdf with no correlation.
Ignores rho, which is accepted as a parameter to provide the
same interface as CorrelatedGenerator
cdf: distribution to choose from
rho: ignored
"""
while True:
x = cdf.Random()
yield x
def RdtGenerator(cdf, rho):
"""Returns an iterator with n values from cdf and the given correlation.
cdf: Cdf object
rho: coefficient of correlation
"""
if rho == 0.0:
return UncorrelatedGenerator(cdf)
else:
return CorrelatedGenerator(cdf, rho)
def GenerateRdt(pc, lam1, lam2):
"""Generate an RDT from a mixture of exponential distributions.
With prob pc, generate a negative value with param lam2;
otherwise generate a positive value with param lam1.
"""
if random.random() < pc:
return -random.expovariate(lam2)
else:
return random.expovariate(lam1)
def GenerateSample(n, pc, lam1, lam2):
"""Generates a sample of RDTs.
n: sample size
    pc: probability of negative growth
lam1: exponential parameter of positive growth
lam2: exponential parameter of negative growth
Returns: list of random variates
"""
xs = [GenerateRdt(pc, lam1, lam2) for _ in xrange(n)]
return xs
def GenerateCdf(n=1000, pc=0.35, lam1=0.79, lam2=5.0):
"""Generates a sample of RDTs and returns its CDF.
n: sample size
    pc: probability of negative growth
lam1: exponential parameter of positive growth
lam2: exponential parameter of negative growth
Returns: Cdf of generated sample
"""
xs = GenerateSample(n, pc, lam1, lam2)
cdf = thinkbayes.MakeCdfFromList(xs)
return cdf
def ModelCdf(pc=0.35, lam1=0.79, lam2=5.0):
"""
    pc: probability of negative growth
lam1: exponential parameter of positive growth
lam2: exponential parameter of negative growth
Returns: list of xs, list of ys
"""
cdf = thinkbayes.EvalExponentialCdf
x1 = numpy.arange(-2, 0, 0.1)
y1 = [pc * (1 - cdf(-x, lam2)) for x in x1]
x2 = numpy.arange(0, 7, 0.1)
y2 = [pc + (1-pc) * cdf(x, lam1) for x in x2]
return list(x1) + list(x2), y1+y2
def BucketToCm(y, factor=BUCKET_FACTOR):
"""Computes the linear dimension for a given bucket.
t: bucket number
    factor: multiplicative factor from one bucket to the next
Returns: linear dimension in cm
"""
return math.exp(y / factor)
def CmToBucket(x, factor=BUCKET_FACTOR):
"""Computes the bucket for a given linear dimension.
x: linear dimension in cm
    factor: multiplicative factor from one bucket to the next
Returns: float bucket number
"""
return round(factor * math.log(x))
def Diameter(volume, factor=3/math.pi/4, exp=1/3.0):
"""Converts a volume to a diameter.
d = 2r = 2 * (3/4/pi V)^1/3
"""
return 2 * (factor * volume) ** exp
def Volume(diameter, factor=4*math.pi/3):
"""Converts a diameter to a volume.
V = 4/3 pi (d/2)^3
"""
return factor * (diameter/2.0)**3
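# Quick consistency check for the two conversions above: a diameter converted
# to a spherical volume and back should round-trip (to floating-point error).
#
#     >>> round(Diameter(Volume(10.0)), 6)
#     10.0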
class Cache(object):
"""Records each observation point for each tumor."""
def __init__(self):
"""Initializes the cache.
joint: map from (age, bucket) to frequency
sequences: map from bucket to a list of sequences
initial_rdt: sequence of (V0, rdt) pairs
"""
self.joint = thinkbayes.Joint()
self.sequences = {}
self.initial_rdt = []
def GetBuckets(self):
"""Returns an iterator for the keys in the cache."""
return self.sequences.iterkeys()
def GetSequence(self, bucket):
"""Looks up a bucket in the cache."""
return self.sequences[bucket]
def ConditionalCdf(self, bucket, name=''):
"""Forms the cdf of ages for a given bucket.
bucket: int bucket number
name: string
"""
pmf = self.joint.Conditional(0, 1, bucket, name=name)
cdf = pmf.MakeCdf()
return cdf
def ProbOlder(self, cm, age):
"""Computes the probability of exceeding age, given size.
cm: size in cm
age: age in years
"""
bucket = CmToBucket(cm)
cdf = self.ConditionalCdf(bucket)
p = cdf.Prob(age)
return 1-p
def GetDistAgeSize(self, size_thresh=MAXSIZE):
"""Gets the joint distribution of age and size.
Map from (age, log size in cm) to log freq
Returns: new Pmf object
"""
joint = thinkbayes.Joint()
for val, freq in self.joint.Items():
age, bucket = val
cm = BucketToCm(bucket)
if cm > size_thresh:
continue
log_cm = math.log10(cm)
joint.Set((age, log_cm), math.log(freq) * 10)
return joint
def Add(self, age, seq, rdt):
"""Adds this observation point to the cache.
age: age of the tumor in years
seq: sequence of volumes
rdt: RDT during this interval
"""
final = seq[-1]
cm = Diameter(final)
bucket = CmToBucket(cm)
self.joint.Incr((age, bucket))
self.sequences.setdefault(bucket, []).append(seq)
initial = seq[-2]
self.initial_rdt.append((initial, rdt))
def Print(self):
"""Prints the size (cm) for each bucket, and the number of sequences."""
for bucket in sorted(self.GetBuckets()):
ss = self.GetSequence(bucket)
diameter = BucketToCm(bucket)
print diameter, len(ss)
def Correlation(self):
"""Computes the correlation between log volumes and rdts."""
vs, rdts = zip(*self.initial_rdt)
lvs = [math.log(v) for v in vs]
return correlation.Corr(lvs, rdts)
class Calculator(object):
"""Encapsulates the state of the computation."""
def __init__(self):
"""Initializes the cache."""
self.cache = Cache()
def MakeSequences(self, n, rho, cdf):
"""Returns a list of sequences of volumes.
n: number of sequences to make
rho: serial correlation
cdf: Cdf of rdts
Returns: list of n sequences of volumes
"""
sequences = []
for i in range(n):
rdt_seq = RdtGenerator(cdf, rho)
seq = self.MakeSequence(rdt_seq)
sequences.append(seq)
if i % 100 == 0:
print i
return sequences
def MakeSequence(self, rdt_seq, v0=0.01, interval=INTERVAL,
vmax=Volume(MAXSIZE)):
"""Simulate the growth of a tumor.
rdt_seq: sequence of rdts
v0: initial volume in mL (cm^3)
interval: timestep in years
vmax: volume to stop at
Returns: sequence of volumes
"""
seq = v0,
age = 0
for rdt in rdt_seq:
age += interval
final, seq = self.ExtendSequence(age, seq, rdt, interval)
if final > vmax:
break
return seq
def ExtendSequence(self, age, seq, rdt, interval):
"""Generates a new random value and adds it to the end of seq.
Side-effect: adds sub-sequences to the cache.
age: age of tumor at the end of this interval
seq: sequence of values so far
rdt: reciprocal doubling time in doublings per year
interval: timestep in years
Returns: final volume, extended sequence
"""
initial = seq[-1]
doublings = rdt * interval
final = initial * 2**doublings
new_seq = seq + (final,)
self.cache.Add(age, new_seq, rdt)
return final, new_seq
def PlotBucket(self, bucket, color='blue'):
"""Plots the set of sequences for the given bucket.
bucket: int bucket number
color: string
"""
sequences = self.cache.GetSequence(bucket)
for seq in sequences:
n = len(seq)
age = n * INTERVAL
ts = numpy.linspace(-age, 0, n)
PlotSequence(ts, seq, color)
def PlotBuckets(self):
"""Plots the set of sequences that ended in a given bucket."""
# 2.01, 4.95 cm, 9.97 cm
buckets = [7.0, 16.0, 23.0]
buckets = [23.0]
colors = ['blue', 'green', 'red', 'cyan']
thinkplot.Clf()
for bucket, color in zip(buckets, colors):
self.PlotBucket(bucket, color)
thinkplot.Save(root='kidney5',
formats=FORMATS,
title='History of simulated tumors',
axis=[-40, 1, MINSIZE, 12],
xlabel='years',
ylabel='diameter (cm, log scale)',
yscale='log')
def PlotJointDist(self):
"""Makes a pcolor plot of the age-size joint distribution."""
thinkplot.Clf()
joint = self.cache.GetDistAgeSize()
thinkplot.Contour(joint, contour=False, pcolor=True)
thinkplot.Save(root='kidney8',
formats=FORMATS,
axis=[0, 41, -0.7, 1.31],
yticks=MakeLogTicks([0.2, 0.5, 1, 2, 5, 10, 20]),
xlabel='ages',
ylabel='diameter (cm, log scale)')
def PlotConditionalCdfs(self):
"""Plots the cdf of ages for each bucket."""
buckets = [7.0, 16.0, 23.0, 27.0]
# 2.01, 4.95 cm, 9.97 cm, 14.879 cm
names = ['2 cm', '5 cm', '10 cm', '15 cm']
cdfs = []
for bucket, name in zip(buckets, names):
cdf = self.cache.ConditionalCdf(bucket, name)
cdfs.append(cdf)
thinkplot.Clf()
thinkplot.PrePlot(num=len(cdfs))
thinkplot.Cdfs(cdfs)
thinkplot.Save(root='kidney6',
title='Distribution of age for several diameters',
formats=FORMATS,
xlabel='tumor age (years)',
ylabel='CDF',
loc=4)
def PlotCredibleIntervals(self, xscale='linear'):
"""Plots the confidence interval for each bucket."""
xs = []
ts = []
percentiles = [95, 75, 50, 25, 5]
min_size = 0.3
# loop through the buckets, accumulate
# xs: sequence of sizes in cm
# ts: sequence of percentile tuples
for _, bucket in enumerate(sorted(self.cache.GetBuckets())):
cm = BucketToCm(bucket)
if cm < min_size or cm > 20.0:
continue
xs.append(cm)
cdf = self.cache.ConditionalCdf(bucket)
ps = [cdf.Percentile(p) for p in percentiles]
ts.append(ps)
# dump the results into a table
fp = open('kidney_table.tex', 'w')
PrintTable(fp, xs, ts)
fp.close()
# make the figure
linewidths = [1, 2, 3, 2, 1]
alphas = [0.3, 0.5, 1, 0.5, 0.3]
labels = ['95th', '75th', '50th', '25th', '5th']
# transpose the ts so we have sequences for each percentile rank
thinkplot.Clf()
yys = zip(*ts)
for ys, linewidth, alpha, label in zip(yys, linewidths, alphas, labels):
options = dict(color='blue', linewidth=linewidth,
alpha=alpha, label=label, markersize=2)
# plot the data points
thinkplot.Plot(xs, ys, 'bo', **options)
# plot the fit lines
fxs = [min_size, 20.0]
fys = FitLine(xs, ys, fxs)
thinkplot.Plot(fxs, fys, **options)
# put a label at the end of each line
x, y = fxs[-1], fys[-1]
pyplot.text(x*1.05, y, label, color='blue',
horizontalalignment='left',
verticalalignment='center')
# make the figure
thinkplot.Save(root='kidney7',
formats=FORMATS,
title='Credible interval for age vs diameter',
xlabel='diameter (cm, log scale)',
ylabel='tumor age (years)',
xscale=xscale,
xticks=MakeTicks([0.5, 1, 2, 5, 10, 20]),
axis=[0.25, 35, 0, 45],
legend=False,
)
def PlotSequences(sequences):
"""Plots linear measurement vs time.
sequences: list of sequences of volumes
"""
thinkplot.Clf()
options = dict(color='gray', linewidth=1, linestyle='dashed')
thinkplot.Plot([0, 40], [10, 10], **options)
for seq in sequences:
n = len(seq)
age = n * INTERVAL
ts = numpy.linspace(0, age, n)
PlotSequence(ts, seq)
thinkplot.Save(root='kidney4',
formats=FORMATS,
axis=[0, 40, MINSIZE, 20],
title='Simulations of tumor growth',
xlabel='tumor age (years)',
yticks=MakeTicks([0.2, 0.5, 1, 2, 5, 10, 20]),
ylabel='diameter (cm, log scale)',
yscale='log')
def PlotSequence(ts, seq, color='blue'):
"""Plots a time series of linear measurements.
ts: sequence of times in years
    seq: sequence of volumes
color: color string
"""
options = dict(color=color, linewidth=1, alpha=0.2)
xs = [Diameter(v) for v in seq]
thinkplot.Plot(ts, xs, **options)
def PrintCI(fp, cm, ps):
"""Writes a line in the LaTeX table.
fp: file pointer
cm: diameter in cm
ts: tuples of percentiles
"""
fp.write('%0.1f' % round(cm, 1))
for p in reversed(ps):
fp.write(' & %0.1f ' % round(p, 1))
fp.write(r'\\' '\n')
def PrintTable(fp, xs, ts):
"""Writes the data in a LaTeX table.
fp: file pointer
xs: diameters in cm
ts: sequence of tuples of percentiles
"""
fp.write(r'\begin{tabular}{|r||r|r|r|r|r|}' '\n')
fp.write(r'\hline' '\n')
fp.write(r'Diameter & \multicolumn{5}{c|}{Percentiles of age} \\' '\n')
fp.write(r'(cm) & 5th & 25th & 50th & 75th & 95th \\' '\n')
fp.write(r'\hline' '\n')
for i, (cm, ps) in enumerate(zip(xs, ts)):
#print cm, ps
if i % 3 == 0:
PrintCI(fp, cm, ps)
fp.write(r'\hline' '\n')
fp.write(r'\end{tabular}' '\n')
def FitLine(xs, ys, fxs):
"""Fits a line to the xs and ys, and returns fitted values for fxs.
Applies a log transform to the xs.
xs: diameter in cm
ys: age in years
fxs: diameter in cm
"""
lxs = [math.log(x) for x in xs]
inter, slope = correlation.LeastSquares(lxs, ys)
# res = correlation.Residuals(lxs, ys, inter, slope)
# r2 = correlation.CoefDetermination(ys, res)
lfxs = [math.log(x) for x in fxs]
fys = [inter + slope * x for x in lfxs]
return fys
def MakeTicks(xs):
"""Makes a pair of sequences for use as pyplot ticks.
xs: sequence of floats
Returns (xs, labels), where labels is a sequence of strings.
"""
labels = [str(x) for x in xs]
return xs, labels
def MakeLogTicks(xs):
"""Makes a pair of sequences for use as pyplot ticks.
xs: sequence of floats
Returns (xs, labels), where labels is a sequence of strings.
"""
lxs = [math.log10(x) for x in xs]
labels = [str(x) for x in xs]
return lxs, labels
def TestCorrelation(cdf):
"""Tests the correlated generator.
Makes sure that the sequence has the right distribution and correlation.
"""
n = 10000
rho = 0.4
rdt_seq = CorrelatedGenerator(cdf, rho)
xs = [rdt_seq.next() for _ in range(n)]
rho2 = correlation.SerialCorr(xs)
print rho, rho2
cdf2 = thinkbayes.MakeCdfFromList(xs)
thinkplot.Cdfs([cdf, cdf2])
thinkplot.Show()
def main(script):
for size in [1, 5, 10]:
bucket = CmToBucket(size)
print 'Size, bucket', size, bucket
SimpleModel()
random.seed(17)
cdf = MakeCdf()
lam1 = FitCdf(cdf)
fit = GenerateCdf(lam1=lam1)
# TestCorrelation(fit)
PlotCdf(cdf)
# QQPlot(cdf, fit)
calc = Calculator()
rho = 0.0
sequences = calc.MakeSequences(100, rho, fit)
PlotSequences(sequences)
calc.PlotBuckets()
_ = calc.MakeSequences(1900, rho, fit)
print 'V0-RDT correlation', calc.cache.Correlation()
print '15.5 Probability age > 8 year', calc.cache.ProbOlder(15.5, 8)
print '6.0 Probability age > 8 year', calc.cache.ProbOlder(6.0, 8)
calc.PlotConditionalCdfs()
calc.PlotCredibleIntervals(xscale='log')
calc.PlotJointDist()
if __name__ == '__main__':
main(*sys.argv)
|
jtrussell/think-bayes-workspace
|
src/vendor/AllenDowney/kidney.py
|
Python
|
mit
| 21,021
|
[
"Gaussian"
] |
ccf7c66a7e7ca759f8d0bb1a7abbc6d0a76fda2d6dabf5d67d0e23809233645e
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Calculate BOLD confounds
^^^^^^^^^^^^^^^^^^^^^^^^
.. autofunction:: init_bold_confs_wf
.. autofunction:: init_ica_aroma_wf
"""
from os import getenv
from nipype.algorithms import confounds as nac
from nipype.interfaces import utility as niu, fsl
from nipype.pipeline import engine as pe
from templateflow.api import get as get_template
from ...config import DEFAULT_MEMORY_MIN_GB
from ...interfaces import (
GatherConfounds, ICAConfounds, FMRISummary, DerivativesDataSink
)
def init_bold_confs_wf(
mem_gb,
metadata,
regressors_all_comps,
regressors_dvars_th,
regressors_fd_th,
freesurfer=False,
name="bold_confs_wf",
):
"""
Build a workflow to generate and write out confounding signals.
This workflow calculates confounds for a BOLD series, and aggregates them
into a :abbr:`TSV (tab-separated value)` file, for use as nuisance
regressors in a :abbr:`GLM (general linear model)`.
The following confounds are calculated, with column headings in parentheses:
#. Region-wise average signal (``csf``, ``white_matter``, ``global_signal``)
#. DVARS - original and standardized variants (``dvars``, ``std_dvars``)
#. Framewise displacement, based on head-motion parameters
(``framewise_displacement``)
#. Temporal CompCor (``t_comp_cor_XX``)
#. Anatomical CompCor (``a_comp_cor_XX``)
#. Cosine basis set for high-pass filtering w/ 0.008 Hz cut-off
(``cosine_XX``)
#. Non-steady-state volumes (``non_steady_state_XX``)
#. Estimated head-motion parameters, in mm and rad
(``trans_x``, ``trans_y``, ``trans_z``, ``rot_x``, ``rot_y``, ``rot_z``)
Prior to estimating aCompCor and tCompCor, non-steady-state volumes are
censored and high-pass filtered using a :abbr:`DCT (discrete cosine
transform)` basis.
The cosine basis, as well as one regressor per censored volume, are included
for convenience.
Workflow Graph
.. workflow::
:graph2use: orig
:simple_form: yes
from fmriprep.workflows.bold.confounds import init_bold_confs_wf
wf = init_bold_confs_wf(
mem_gb=1,
metadata={},
regressors_all_comps=False,
regressors_dvars_th=1.5,
regressors_fd_th=0.5,
)
Parameters
----------
mem_gb : :obj:`float`
Size of BOLD file in GB - please note that this size
should be calculated after resamplings that may extend
the FoV
metadata : :obj:`dict`
BIDS metadata for BOLD file
name : :obj:`str`
Name of workflow (default: ``bold_confs_wf``)
regressors_all_comps : :obj:`bool`
Indicates whether CompCor decompositions should return all
components instead of the minimal number of components necessary
to explain 50 percent of the variance in the decomposition mask.
regressors_dvars_th : :obj:`float`
Criterion for flagging DVARS outliers
regressors_fd_th : :obj:`float`
Criterion for flagging framewise displacement outliers
Inputs
------
bold
BOLD image, after the prescribed corrections (STC, HMC and SDC)
when available.
bold_mask
BOLD series mask
movpar_file
SPM-formatted motion parameters file
rmsd_file
Framewise displacement as measured by ``fsl_motion_outliers``.
skip_vols
number of non steady state volumes
t1w_mask
Mask of the skull-stripped template image
t1w_tpms
List of tissue probability maps in T1w space
t1_bold_xform
Affine matrix that maps the T1w space into alignment with
the native BOLD space
Outputs
-------
confounds_file
TSV of all aggregated confounds
rois_report
Reportlet visualizing white-matter/CSF mask used for aCompCor,
the ROI for tCompCor and the BOLD brain mask.
confounds_metadata
Confounds metadata dictionary.
"""
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.interfaces.confounds import ExpandModel, SpikeRegressors
from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms
from niworkflows.interfaces.images import SignalExtraction
from niworkflows.interfaces.masks import ROIsPlot
from niworkflows.interfaces.nibabel import ApplyMask, Binarize
from niworkflows.interfaces.patches import (
RobustACompCor as ACompCor,
RobustTCompCor as TCompCor,
)
from niworkflows.interfaces.plotting import (
CompCorVariancePlot, ConfoundsCorrelationPlot
)
from niworkflows.interfaces.utils import (
AddTSVHeader, TSV2JSON, DictMerge
)
from ...interfaces.confounds import aCompCorMasks
gm_desc = (
"dilating a GM mask extracted from the FreeSurfer's *aseg* segmentation" if freesurfer
else "thresholding the corresponding partial volume map at 0.05"
)
workflow = Workflow(name=name)
workflow.__desc__ = f"""\
Several confounding time-series were calculated based on the
*preprocessed BOLD*: framewise displacement (FD), DVARS and
three region-wise global signals.
FD was computed using two formulations following Power (absolute sum of
relative motions, @power_fd_dvars) and Jenkinson (relative root mean square
displacement between affines, @mcflirt).
FD and DVARS are calculated for each functional run, both using their
implementations in *Nipype* [following the definitions by @power_fd_dvars].
The three global signals are extracted within the CSF, the WM, and
the whole-brain masks.
Additionally, a set of physiological regressors were extracted to
allow for component-based noise correction [*CompCor*, @compcor].
Principal components are estimated after high-pass filtering the
*preprocessed BOLD* time-series (using a discrete cosine filter with
128s cut-off) for the two *CompCor* variants: temporal (tCompCor)
and anatomical (aCompCor).
tCompCor components are then calculated from the top 2% variable
voxels within the brain mask.
For aCompCor, three probabilistic masks (CSF, WM and combined CSF+WM)
are generated in anatomical space.
The implementation differs from that of Behzadi et al. in that instead
of eroding the masks by 2 pixels in BOLD space, a mask of voxels that
likely contain a volume fraction of GM is subtracted from the aCompCor masks.
This mask is obtained by {gm_desc}, and it ensures components are not extracted
from voxels containing a minimal fraction of GM.
Finally, these masks are resampled into BOLD space and binarized by
thresholding at 0.99 (as in the original implementation).
Components are also calculated separately within the WM and CSF masks.
For each CompCor decomposition, the *k* components with the largest singular
values are retained, such that the retained components' time series are
sufficient to explain 50 percent of variance across the nuisance mask (CSF,
WM, combined, or temporal). The remaining components are dropped from
consideration.
The head-motion estimates calculated in the correction step were also
placed within the corresponding confounds file.
The confound time series derived from head motion estimates and global
signals were expanded with the inclusion of temporal derivatives and
quadratic terms for each [@confounds_satterthwaite_2013].
Frames that exceeded a threshold of {regressors_fd_th} mm FD or
{regressors_dvars_th} standardised DVARS were annotated as motion outliers.
"""
inputnode = pe.Node(niu.IdentityInterface(
fields=['bold', 'bold_mask', 'movpar_file', 'rmsd_file',
'skip_vols', 't1w_mask', 't1w_tpms', 't1_bold_xform']),
name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(
fields=['confounds_file', 'confounds_metadata', 'acompcor_masks', 'tcompcor_mask']),
name='outputnode')
# DVARS
dvars = pe.Node(nac.ComputeDVARS(save_nstd=True, save_std=True, remove_zerovariance=True),
name="dvars", mem_gb=mem_gb)
# Frame displacement
fdisp = pe.Node(nac.FramewiseDisplacement(parameter_source="SPM"),
name="fdisp", mem_gb=mem_gb)
# Generate aCompCor probseg maps
acc_masks = pe.Node(aCompCorMasks(is_aseg=freesurfer), name="acc_masks")
# Resample probseg maps in BOLD space via T1w-to-BOLD transform
acc_msk_tfm = pe.MapNode(ApplyTransforms(
interpolation='Gaussian', float=False), iterfield=["input_image"],
name='acc_msk_tfm', mem_gb=0.1)
acc_msk_brain = pe.MapNode(ApplyMask(), name="acc_msk_brain",
iterfield=["in_file"])
acc_msk_bin = pe.MapNode(Binarize(thresh_low=0.99), name='acc_msk_bin',
iterfield=["in_file"])
acompcor = pe.Node(
ACompCor(components_file='acompcor.tsv', header_prefix='a_comp_cor_', pre_filter='cosine',
save_pre_filter=True, save_metadata=True, mask_names=['CSF', 'WM', 'combined'],
merge_method='none', failure_mode='NaN'),
name="acompcor", mem_gb=mem_gb)
tcompcor = pe.Node(
TCompCor(components_file='tcompcor.tsv', header_prefix='t_comp_cor_', pre_filter='cosine',
save_pre_filter=True, save_metadata=True, percentile_threshold=.02,
failure_mode='NaN'),
name="tcompcor", mem_gb=mem_gb)
# Set number of components
if regressors_all_comps:
acompcor.inputs.num_components = 'all'
tcompcor.inputs.num_components = 'all'
else:
acompcor.inputs.variance_threshold = 0.5
tcompcor.inputs.variance_threshold = 0.5
# Set TR if present
if 'RepetitionTime' in metadata:
tcompcor.inputs.repetition_time = metadata['RepetitionTime']
acompcor.inputs.repetition_time = metadata['RepetitionTime']
# Global and segment regressors
signals_class_labels = [
"global_signal", "csf", "white_matter", "csf_wm", "tcompcor",
]
merge_rois = pe.Node(niu.Merge(3, ravel_inputs=True), name='merge_rois',
run_without_submitting=True)
signals = pe.Node(SignalExtraction(class_labels=signals_class_labels),
name="signals", mem_gb=mem_gb)
# Arrange confounds
add_dvars_header = pe.Node(
AddTSVHeader(columns=["dvars"]),
name="add_dvars_header", mem_gb=0.01, run_without_submitting=True)
add_std_dvars_header = pe.Node(
AddTSVHeader(columns=["std_dvars"]),
name="add_std_dvars_header", mem_gb=0.01, run_without_submitting=True)
add_motion_headers = pe.Node(
AddTSVHeader(columns=["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z"]),
name="add_motion_headers", mem_gb=0.01, run_without_submitting=True)
add_rmsd_header = pe.Node(
AddTSVHeader(columns=["rmsd"]),
name="add_rmsd_header", mem_gb=0.01, run_without_submitting=True)
concat = pe.Node(GatherConfounds(), name="concat", mem_gb=0.01, run_without_submitting=True)
# CompCor metadata
tcc_metadata_fmt = pe.Node(
TSV2JSON(index_column='component', drop_columns=['mask'], output=None,
additional_metadata={'Method': 'tCompCor'}, enforce_case=True),
name='tcc_metadata_fmt')
acc_metadata_fmt = pe.Node(
TSV2JSON(index_column='component', output=None,
additional_metadata={'Method': 'aCompCor'}, enforce_case=True),
name='acc_metadata_fmt')
mrg_conf_metadata = pe.Node(niu.Merge(3), name='merge_confound_metadata',
run_without_submitting=True)
mrg_conf_metadata.inputs.in3 = {label: {'Method': 'Mean'}
for label in signals_class_labels}
mrg_conf_metadata2 = pe.Node(DictMerge(), name='merge_confound_metadata2',
run_without_submitting=True)
# Expand model to include derivatives and quadratics
model_expand = pe.Node(ExpandModel(
model_formula='(dd1(rps + wm + csf + gsr))^^2 + others'),
name='model_expansion')
# Add spike regressors
spike_regress = pe.Node(SpikeRegressors(
fd_thresh=regressors_fd_th,
dvars_thresh=regressors_dvars_th),
name='spike_regressors')
# Generate reportlet (ROIs)
mrg_compcor = pe.Node(niu.Merge(2, ravel_inputs=True),
name='mrg_compcor', run_without_submitting=True)
rois_plot = pe.Node(ROIsPlot(colors=['b', 'magenta'], generate_report=True),
name='rois_plot', mem_gb=mem_gb)
ds_report_bold_rois = pe.Node(
DerivativesDataSink(desc='rois', datatype="figures", dismiss_entities=("echo",)),
name='ds_report_bold_rois', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
# Generate reportlet (CompCor)
mrg_cc_metadata = pe.Node(niu.Merge(2), name='merge_compcor_metadata',
run_without_submitting=True)
compcor_plot = pe.Node(
CompCorVariancePlot(variance_thresholds=(0.5, 0.7, 0.9),
metadata_sources=['tCompCor', 'aCompCor']),
name='compcor_plot')
ds_report_compcor = pe.Node(
DerivativesDataSink(desc='compcorvar', datatype="figures", dismiss_entities=("echo",)),
name='ds_report_compcor', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
# Generate reportlet (Confound correlation)
conf_corr_plot = pe.Node(
ConfoundsCorrelationPlot(reference_column='global_signal', max_dim=20),
name='conf_corr_plot')
ds_report_conf_corr = pe.Node(
DerivativesDataSink(desc='confoundcorr', datatype="figures", dismiss_entities=("echo",)),
name='ds_report_conf_corr', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
def _last(inlist):
return inlist[-1]
def _select_cols(table):
import pandas as pd
return [
col for col in pd.read_table(table, nrows=2).columns
if not col.startswith(("a_comp_cor_", "t_comp_cor_", "std_dvars"))
]
workflow.connect([
# connect inputnode to each non-anatomical confound node
(inputnode, dvars, [('bold', 'in_file'),
('bold_mask', 'in_mask')]),
(inputnode, fdisp, [('movpar_file', 'in_file')]),
# aCompCor
(inputnode, acompcor, [("bold", "realigned_file"),
("skip_vols", "ignore_initial_volumes")]),
(inputnode, acc_masks, [("t1w_tpms", "in_vfs"),
(("bold", _get_zooms), "bold_zooms")]),
(inputnode, acc_msk_tfm, [("t1_bold_xform", "transforms"),
("bold_mask", "reference_image")]),
(inputnode, acc_msk_brain, [("bold_mask", "in_mask")]),
(acc_masks, acc_msk_tfm, [("out_masks", "input_image")]),
(acc_msk_tfm, acc_msk_brain, [("output_image", "in_file")]),
(acc_msk_brain, acc_msk_bin, [("out_file", "in_file")]),
(acc_msk_bin, acompcor, [("out_file", "mask_files")]),
# tCompCor
(inputnode, tcompcor, [("bold", "realigned_file"),
("skip_vols", "ignore_initial_volumes"),
("bold_mask", "mask_files")]),
# Global signals extraction (constrained by anatomy)
(inputnode, signals, [('bold', 'in_file')]),
(inputnode, merge_rois, [('bold_mask', 'in1')]),
(acc_msk_bin, merge_rois, [('out_file', 'in2')]),
(tcompcor, merge_rois, [('high_variance_masks', 'in3')]),
(merge_rois, signals, [('out', 'label_files')]),
# Collate computed confounds together
(inputnode, add_motion_headers, [('movpar_file', 'in_file')]),
(inputnode, add_rmsd_header, [('rmsd_file', 'in_file')]),
(dvars, add_dvars_header, [('out_nstd', 'in_file')]),
(dvars, add_std_dvars_header, [('out_std', 'in_file')]),
(signals, concat, [('out_file', 'signals')]),
(fdisp, concat, [('out_file', 'fd')]),
(tcompcor, concat, [('components_file', 'tcompcor'),
('pre_filter_file', 'cos_basis')]),
(acompcor, concat, [('components_file', 'acompcor')]),
(add_motion_headers, concat, [('out_file', 'motion')]),
(add_rmsd_header, concat, [('out_file', 'rmsd')]),
(add_dvars_header, concat, [('out_file', 'dvars')]),
(add_std_dvars_header, concat, [('out_file', 'std_dvars')]),
# Confounds metadata
(tcompcor, tcc_metadata_fmt, [('metadata_file', 'in_file')]),
(acompcor, acc_metadata_fmt, [('metadata_file', 'in_file')]),
(tcc_metadata_fmt, mrg_conf_metadata, [('output', 'in1')]),
(acc_metadata_fmt, mrg_conf_metadata, [('output', 'in2')]),
(mrg_conf_metadata, mrg_conf_metadata2, [('out', 'in_dicts')]),
# Expand the model with derivatives, quadratics, and spikes
(concat, model_expand, [('confounds_file', 'confounds_file')]),
(model_expand, spike_regress, [('confounds_file', 'confounds_file')]),
# Set outputs
(spike_regress, outputnode, [('confounds_file', 'confounds_file')]),
(mrg_conf_metadata2, outputnode, [('out_dict', 'confounds_metadata')]),
(tcompcor, outputnode, [("high_variance_masks", "tcompcor_mask")]),
(acc_msk_bin, outputnode, [("out_file", "acompcor_masks")]),
(inputnode, rois_plot, [('bold', 'in_file'),
('bold_mask', 'in_mask')]),
(tcompcor, mrg_compcor, [('high_variance_masks', 'in1')]),
(acc_msk_bin, mrg_compcor, [(('out_file', _last), 'in2')]),
(mrg_compcor, rois_plot, [('out', 'in_rois')]),
(rois_plot, ds_report_bold_rois, [('out_report', 'in_file')]),
(tcompcor, mrg_cc_metadata, [('metadata_file', 'in1')]),
(acompcor, mrg_cc_metadata, [('metadata_file', 'in2')]),
(mrg_cc_metadata, compcor_plot, [('out', 'metadata_files')]),
(compcor_plot, ds_report_compcor, [('out_file', 'in_file')]),
(concat, conf_corr_plot, [('confounds_file', 'confounds_file'),
(('confounds_file', _select_cols), 'columns')]),
(conf_corr_plot, ds_report_conf_corr, [('out_file', 'in_file')]),
])
return workflow
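# A minimal construction sketch, mirroring the docstring example above
# (the parameter values are illustrative only):
#
#     wf = init_bold_confs_wf(
#         mem_gb=1,
#         metadata={'RepetitionTime': 2.0},
#         regressors_all_comps=False,
#         regressors_dvars_th=1.5,
#         regressors_fd_th=0.5,
#     )
#     # The caller is then responsible for wiring wf's inputnode/outputnode.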
def init_carpetplot_wf(mem_gb, metadata, cifti_output, name="bold_carpet_wf"):
"""
Build a workflow to generate *carpet* plots.
Resamples the MNI parcellation (ad-hoc parcellation derived from the
Harvard-Oxford template and others).
Parameters
----------
mem_gb : :obj:`float`
Size of BOLD file in GB - please note that this size
should be calculated after resamplings that may extend
the FoV
metadata : :obj:`dict`
BIDS metadata for BOLD file
name : :obj:`str`
Name of workflow (default: ``bold_carpet_wf``)
Inputs
------
bold
BOLD image, after the prescribed corrections (STC, HMC and SDC)
when available.
bold_mask
BOLD series mask
confounds_file
TSV of all aggregated confounds
t1_bold_xform
Affine matrix that maps the T1w space into alignment with
the native BOLD space
std2anat_xfm
ANTs-compatible affine-and-warp transform file
cifti_bold
BOLD image in CIFTI format, to be used in place of volumetric BOLD
Outputs
-------
out_carpetplot
Path of the generated SVG file
"""
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms
inputnode = pe.Node(niu.IdentityInterface(
fields=['bold', 'bold_mask', 'confounds_file',
't1_bold_xform', 'std2anat_xfm', 'cifti_bold']),
name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(
fields=['out_carpetplot']), name='outputnode')
# List transforms
mrg_xfms = pe.Node(niu.Merge(2), name='mrg_xfms')
# Warp segmentation into EPI space
resample_parc = pe.Node(ApplyTransforms(
dimension=3,
input_image=str(get_template(
'MNI152NLin2009cAsym', resolution=1, desc='carpet',
suffix='dseg', extension=['.nii', '.nii.gz'])),
interpolation='MultiLabel'),
name='resample_parc')
# Carpetplot and confounds plot
conf_plot = pe.Node(FMRISummary(
tr=metadata['RepetitionTime'],
confounds_list=[
('global_signal', None, 'GS'),
('csf', None, 'GSCSF'),
('white_matter', None, 'GSWM'),
('std_dvars', None, 'DVARS'),
('framewise_displacement', 'mm', 'FD')]),
name='conf_plot', mem_gb=mem_gb)
ds_report_bold_conf = pe.Node(
DerivativesDataSink(desc='carpetplot', datatype="figures", extension="svg",
dismiss_entities=("echo",)),
name='ds_report_bold_conf', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow = Workflow(name=name)
# no need for segmentations if using CIFTI
if cifti_output:
workflow.connect(inputnode, 'cifti_bold', conf_plot, 'in_func')
else:
workflow.connect([
(inputnode, mrg_xfms, [('t1_bold_xform', 'in1'),
('std2anat_xfm', 'in2')]),
(inputnode, resample_parc, [('bold_mask', 'reference_image')]),
(mrg_xfms, resample_parc, [('out', 'transforms')]),
# Carpetplot
(inputnode, conf_plot, [
('bold', 'in_func'),
('bold_mask', 'in_mask')]),
(resample_parc, conf_plot, [('output_image', 'in_segm')])
])
workflow.connect([
(inputnode, conf_plot, [('confounds_file', 'confounds_file')]),
(conf_plot, ds_report_bold_conf, [('out_file', 'in_file')]),
(conf_plot, outputnode, [('out_file', 'out_carpetplot')]),
])
return workflow
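# A minimal instantiation sketch (values are illustrative, not fmriprep
# defaults): build the carpet-plot workflow for a ~2 GB BOLD series with
# TR = 2.0 s, taking the volumetric (non-CIFTI) branch.
def _init_carpetplot_wf_example():
    return init_carpetplot_wf(
        mem_gb=2.0,
        metadata={'RepetitionTime': 2.0},
        cifti_output=False,
    )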
def init_ica_aroma_wf(
mem_gb,
metadata,
omp_nthreads,
aroma_melodic_dim=-200,
err_on_aroma_warn=False,
name='ica_aroma_wf',
susan_fwhm=6.0,
):
"""
Build a workflow that runs `ICA-AROMA`_.
This workflow wraps `ICA-AROMA`_ to identify and remove motion-related
independent components from a BOLD time series.
The following steps are performed:
#. Remove non-steady state volumes from the bold series.
#. Smooth data using FSL `susan`, with a kernel width FWHM=6.0mm.
#. Run FSL `melodic` outside of ICA-AROMA to generate the report
#. Run ICA-AROMA
#. Aggregate identified motion components (aggressive) to TSV
#. Return ``classified_motion_ICs`` and ``melodic_mix`` for the user to
complete non-aggressive denoising in T1w space
#. Calculate ICA-AROMA-identified noise components
(columns named ``AROMAAggrCompXX``)
Additionally, non-aggressive denoising is performed on the BOLD series
resampled into MNI space.
There is a current discussion on whether other confounds should be extracted
before or after denoising `here
<http://nbviewer.jupyter.org/github/poldracklab/fmriprep-notebooks/blob/922e436429b879271fa13e76767a6e73443e74d9/issue-817_aroma_confounds.ipynb>`__.
.. _ICA-AROMA: https://github.com/maartenmennes/ICA-AROMA
Workflow Graph
.. workflow::
:graph2use: orig
:simple_form: yes
from fmriprep.workflows.bold.confounds import init_ica_aroma_wf
wf = init_ica_aroma_wf(
mem_gb=3,
metadata={'RepetitionTime': 1.0},
omp_nthreads=1)
Parameters
----------
metadata : :obj:`dict`
BIDS metadata for BOLD file
mem_gb : :obj:`float`
Size of BOLD file in GB
omp_nthreads : :obj:`int`
Maximum number of threads an individual process may use
name : :obj:`str`
Name of workflow (default: ``ica_aroma_wf``)
susan_fwhm : :obj:`float`
Kernel width (FWHM in mm) for the smoothing step with
FSL ``susan`` (default: 6.0mm)
err_on_aroma_warn : :obj:`bool`
If ``True``, raise an error instead of issuing a warning when ICA-AROMA
fails (default: ``False``)
aroma_melodic_dim : :obj:`int`
Set the dimensionality of the MELODIC ICA decomposition.
Negative numbers set a maximum on automatic dimensionality estimation.
Positive numbers set an exact number of components to extract.
(default: -200, i.e., estimate <=200 components)
Inputs
------
itk_bold_to_t1
Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
anat2std_xfm
ANTs-compatible affine-and-warp transform file
name_source
BOLD series NIfTI file
Used to recover original information lost during processing
skip_vols
Number of non-steady-state volumes
bold_split
Individual 3D BOLD volumes, not motion corrected
bold_mask
BOLD series mask in template space
hmc_xforms
List of affine transforms aligning each volume to ``ref_image`` in ITK format
movpar_file
SPM-formatted motion parameters file
Outputs
-------
aroma_confounds
TSV of confounds identified as noise by ICA-AROMA
aroma_noise_ics
CSV of noise components identified by ICA-AROMA
melodic_mix
FSL MELODIC mixing matrix
nonaggr_denoised_file
BOLD series with non-aggressive ICA-AROMA denoising applied
"""
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.interfaces.segmentation import ICA_AROMARPT
from niworkflows.interfaces.utility import KeySelect
from niworkflows.interfaces.utils import TSV2JSON
workflow = Workflow(name=name)
workflow.__postdesc__ = """\
Automatic removal of motion artifacts using independent component analysis
[ICA-AROMA, @aroma] was performed on the *preprocessed BOLD on MNI space*
time-series after removal of non-steady state volumes and spatial smoothing
with an isotropic, Gaussian kernel of 6mm FWHM (full-width half-maximum).
Corresponding "non-aggresively" denoised runs were produced after such
smoothing.
Additionally, the "aggressive" noise-regressors were collected and placed
in the corresponding confounds file.
"""
inputnode = pe.Node(niu.IdentityInterface(
fields=[
'bold_std',
'bold_mask_std',
'movpar_file',
'name_source',
'skip_vols',
'spatial_reference',
]), name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(
fields=['aroma_confounds', 'aroma_noise_ics', 'melodic_mix',
'nonaggr_denoised_file', 'aroma_metadata']), name='outputnode')
# extract out to BOLD base
select_std = pe.Node(KeySelect(fields=['bold_mask_std', 'bold_std']),
name='select_std', run_without_submitting=True)
select_std.inputs.key = 'MNI152NLin6Asym_res-2'
rm_non_steady_state = pe.Node(niu.Function(function=_remove_volumes,
output_names=['bold_cut']),
name='rm_nonsteady')
calc_median_val = pe.Node(fsl.ImageStats(op_string='-k %s -p 50'), name='calc_median_val')
calc_bold_mean = pe.Node(fsl.MeanImage(), name='calc_bold_mean')
def _getusans_func(image, thresh):
return [tuple([image, thresh])]
getusans = pe.Node(niu.Function(function=_getusans_func, output_names=['usans']),
name='getusans', mem_gb=0.01)
smooth = pe.Node(fsl.SUSAN(fwhm=susan_fwhm), name='smooth')
# melodic node
melodic = pe.Node(fsl.MELODIC(
no_bet=True, tr_sec=float(metadata['RepetitionTime']), mm_thresh=0.5, out_stats=True,
dim=aroma_melodic_dim), name="melodic")
# ica_aroma node
ica_aroma = pe.Node(ICA_AROMARPT(
denoise_type='nonaggr', generate_report=True, TR=metadata['RepetitionTime'],
args='-np'), name='ica_aroma')
add_non_steady_state = pe.Node(niu.Function(function=_add_volumes,
output_names=['bold_add']),
name='add_nonsteady')
# extract the confound ICs from the results
ica_aroma_confound_extraction = pe.Node(ICAConfounds(err_on_aroma_warn=err_on_aroma_warn),
name='ica_aroma_confound_extraction')
ica_aroma_metadata_fmt = pe.Node(
TSV2JSON(index_column='IC', output=None, enforce_case=True,
additional_metadata={'Method': {
'Name': 'ICA-AROMA',
'Version': getenv('AROMA_VERSION', 'n/a')}}),
name='ica_aroma_metadata_fmt')
ds_report_ica_aroma = pe.Node(
DerivativesDataSink(desc='aroma', datatype="figures", dismiss_entities=("echo",)),
name='ds_report_ica_aroma', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
def _getbtthresh(medianval):
return 0.75 * medianval
# connect the nodes
workflow.connect([
(inputnode, select_std, [('spatial_reference', 'keys'),
('bold_std', 'bold_std'),
('bold_mask_std', 'bold_mask_std')]),
(inputnode, ica_aroma, [('movpar_file', 'motion_parameters')]),
(inputnode, rm_non_steady_state, [
('skip_vols', 'skip_vols')]),
(select_std, rm_non_steady_state, [
('bold_std', 'bold_file')]),
(select_std, calc_median_val, [
('bold_mask_std', 'mask_file')]),
(rm_non_steady_state, calc_median_val, [
('bold_cut', 'in_file')]),
(rm_non_steady_state, calc_bold_mean, [
('bold_cut', 'in_file')]),
(calc_bold_mean, getusans, [('out_file', 'image')]),
(calc_median_val, getusans, [('out_stat', 'thresh')]),
# Connect input nodes to complete smoothing
(rm_non_steady_state, smooth, [
('bold_cut', 'in_file')]),
(getusans, smooth, [('usans', 'usans')]),
(calc_median_val, smooth, [(('out_stat', _getbtthresh), 'brightness_threshold')]),
# connect smooth to melodic
(smooth, melodic, [('smoothed_file', 'in_files')]),
(select_std, melodic, [
('bold_mask_std', 'mask')]),
# connect nodes to ICA-AROMA
(smooth, ica_aroma, [('smoothed_file', 'in_file')]),
(select_std, ica_aroma, [
('bold_mask_std', 'report_mask'),
('bold_mask_std', 'mask')]),
(melodic, ica_aroma, [('out_dir', 'melodic_dir')]),
# generate tsvs from ICA-AROMA
(ica_aroma, ica_aroma_confound_extraction, [('out_dir', 'in_directory')]),
(inputnode, ica_aroma_confound_extraction, [
('skip_vols', 'skip_vols')]),
(ica_aroma_confound_extraction, ica_aroma_metadata_fmt, [
('aroma_metadata', 'in_file')]),
# output for processing and reporting
(ica_aroma_confound_extraction, outputnode, [('aroma_confounds', 'aroma_confounds'),
('aroma_noise_ics', 'aroma_noise_ics'),
('melodic_mix', 'melodic_mix')]),
(ica_aroma_metadata_fmt, outputnode, [('output', 'aroma_metadata')]),
(ica_aroma, add_non_steady_state, [
('nonaggr_denoised_file', 'bold_cut_file')]),
(select_std, add_non_steady_state, [
('bold_std', 'bold_file')]),
(inputnode, add_non_steady_state, [
('skip_vols', 'skip_vols')]),
(add_non_steady_state, outputnode, [('bold_add', 'nonaggr_denoised_file')]),
(ica_aroma, ds_report_ica_aroma, [('out_report', 'in_file')]),
])
return workflow
def _remove_volumes(bold_file, skip_vols):
"""Remove skip_vols from bold_file."""
import nibabel as nb
from nipype.utils.filemanip import fname_presuffix
if skip_vols == 0:
return bold_file
out = fname_presuffix(bold_file, suffix='_cut')
bold_img = nb.load(bold_file)
bold_img.__class__(bold_img.dataobj[..., skip_vols:],
bold_img.affine, bold_img.header).to_filename(out)
return out
def _add_volumes(bold_file, bold_cut_file, skip_vols):
"""Prepend skip_vols from bold_file onto bold_cut_file."""
import nibabel as nb
import numpy as np
from nipype.utils.filemanip import fname_presuffix
if skip_vols == 0:
return bold_cut_file
bold_img = nb.load(bold_file)
bold_cut_img = nb.load(bold_cut_file)
bold_data = np.concatenate((bold_img.dataobj[..., :skip_vols],
bold_cut_img.dataobj), axis=3)
out = fname_presuffix(bold_cut_file, suffix='_addnonsteady')
bold_img.__class__(bold_data, bold_img.affine, bold_img.header).to_filename(out)
return out
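# A minimal round-trip sketch (not part of fmriprep) for the two helpers above:
# the volumes dropped by ``_remove_volumes`` are restored verbatim by
# ``_add_volumes``. The file name ``demo_bold.nii.gz`` is hypothetical.
def _roundtrip_example():
    import nibabel as nb
    import numpy as np
    data = np.random.rand(4, 4, 4, 10).astype('float32')
    nb.Nifti1Image(data, np.eye(4)).to_filename('demo_bold.nii.gz')
    cut = _remove_volumes('demo_bold.nii.gz', skip_vols=3)  # 7 volumes remain
    restored = _add_volumes('demo_bold.nii.gz', cut, skip_vols=3)
    assert nb.load(cut).shape[-1] == 7
    assert np.allclose(nb.load(restored).get_fdata(), data)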
def _get_zooms(in_file):
import nibabel as nb
return tuple(nb.load(in_file).header.get_zooms()[:3])
|
poldracklab/fmriprep
|
fmriprep/workflows/bold/confounds.py
|
Python
|
bsd-3-clause
| 33,266
|
[
"Gaussian"
] |
bf7fc098b94c5735f1c987fdf6c8d79259e4ada25ea9f74c8fcdac0dfc2e2db1
|
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
"""Functions to send files by email using Outlook.
After http://win32com.goermezer.de/content/view/227/192/
"""
import logging
logger = logging.getLogger('camelot.view.export.outlook')
def open_html_in_outlook(html):
try:
import pythoncom
import win32com.client
pythoncom.CoInitialize()
outlook_app = win32com.client.Dispatch("Outlook.Application")
except Exception as e:
# We're probably not running on Windows, so Outlook is unavailable
logger.warn('unable to launch Outlook', exc_info=e)
return
msg = outlook_app.CreateItem(0)
#msg.BodyFormat=2
msg.HTMLBody=html
#msg.Subject=o_subject
msg.Display(True)
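# Usage sketch: hand a small HTML fragment to Outlook for interactive review.
# On platforms without Outlook the function logs a warning and returns None.
if __name__ == '__main__':
    open_html_in_outlook('<html><body><h1>Export</h1></body></html>')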
|
jeroendierckx/Camelot
|
camelot/view/export/outlook.py
|
Python
|
gpl-2.0
| 1,770
|
[
"VisIt"
] |
e87acb7eac3fe2b1a5402a38ef82730425aae5d0658363f0f744db92581d71e1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import distutils.spawn
import itertools
import logging
import shutil  # used below to clear old QM files between test runs
import numpy as np
import os
from rmgpy import getPath
from rmgpy.qm.main import QMCalculator
from rmgpy.molecule import Molecule
from rmgpy.qm.gaussian import Gaussian, GaussianMolPM3, GaussianMolPM6
executablePath = Gaussian.executablePath
NO_GAUSSIAN = not os.path.exists(executablePath)
mol1 = Molecule().fromSMILES('C1=CC=C2C=CC=CC2=C1')
class TestGaussianMolPM3(unittest.TestCase):
"""
Contains unit tests for the Geometry class.
"""
@unittest.skipIf(NO_GAUSSIAN, "Gaussian not found. Try resetting your environment variables if you want to use it.")
def setUp(self):
"""
A function run before each unit test in this class.
"""
RMGpy_path = os.path.normpath(os.path.join(getPath(),'..'))
qm = QMCalculator(software = 'gaussian',
method = 'pm3',
fileStore = os.path.join(RMGpy_path, 'testing', 'qm', 'QMfiles'),
scratchDirectory = os.path.join(RMGpy_path, 'testing', 'qm', 'QMscratch'),
)
if not os.path.exists(qm.settings.fileStore):
os.makedirs(qm.settings.fileStore)
self.qmmol1 = GaussianMolPM3(mol1, qm.settings)
def testGenerateThermoData(self):
"""
Test that generateThermoData() works correctly on gaussian PM3.
"""
# First ensure any old data are removed, or else they'll be reused!
for directory in (self.qmmol1.settings.fileStore, self.qmmol1.settings.scratchDirectory):
shutil.rmtree(directory, ignore_errors=True)
self.qmmol1.generateThermoData()
result = self.qmmol1.qmData
self.assertTrue(self.qmmol1.thermo.comment.startswith('QM GaussianMolPM3 calculation'))
self.assertEqual(result.numberOfAtoms, 18)
self.assertIsInstance(result.atomicNumbers, np.ndarray)
if result.molecularMass.units=='amu':
self.assertAlmostEqual(result.molecularMass.value, 128.0626, 3)
def testLoadThermoData(self):
"""
Test that generateThermoData() can load thermo from the previous gaussian PM3 run.
Check that it loaded, and the values are the same as above.
"""
self.qmmol1.generateThermoData()
result = self.qmmol1.qmData
self.assertTrue(self.qmmol1.thermo.comment.startswith('QM GaussianMolPM3 calculation'))
self.assertEqual(result.numberOfAtoms, 18)
self.assertIsInstance(result.atomicNumbers, np.ndarray)
if result.molecularMass.units=='amu':
self.assertAlmostEqual(result.molecularMass.value, 128.0626, 3)
class TestGaussianMolPM6(unittest.TestCase):
"""
Contains unit tests for the Geometry class.
"""
@unittest.skipIf(NO_GAUSSIAN, "Gaussian not found. Try resetting your environment variables if you want to use it.")
def setUp(self):
"""
A function run before each unit test in this class.
"""
RMGpy_path = os.path.normpath(os.path.join(getPath(),'..'))
qm = QMCalculator(software = 'gaussian',
method = 'pm6',
fileStore = os.path.join(RMGpy_path, 'testing', 'qm', 'QMfiles'),
scratchDirectory = os.path.join(RMGpy_path, 'testing', 'qm', 'QMscratch'),
)
if not os.path.exists(qm.settings.fileStore):
os.makedirs(qm.settings.fileStore)
self.qmmol1 = GaussianMolPM6(mol1, qm.settings)
@unittest.skipIf('g03' in executablePath, "This test was shown not to work on g03.")
def testGenerateThermoData(self):
"""
Test that generateThermoData() works correctly for gaussian PM6.
"""
# First ensure any old data are removed, or else they'll be reused!
for directory in (self.qmmol1.settings.fileStore, self.qmmol1.settings.scratchDirectory):
shutil.rmtree(directory, ignore_errors=True)
self.qmmol1.generateThermoData()
result = self.qmmol1.qmData
self.assertTrue(self.qmmol1.thermo.comment.startswith('QM GaussianMolPM6 calculation'))
self.assertEqual(result.numberOfAtoms, 18)
self.assertIsInstance(result.atomicNumbers, np.ndarray)
if result.molecularMass.units=='amu':
self.assertAlmostEqual(result.molecularMass.value, 128.0626, 3)
@unittest.skipIf('g03' in executablePath, "This test was shown not to work on g03.")
def testLoadThermoData(self):
"""
Test that generateThermoData() can load thermo from the previous gaussian PM6 run.
Check that it loaded, and the values are the same as above.
"""
self.qmmol1.generateThermoData()
result = self.qmmol1.qmData
self.assertTrue(self.qmmol1.thermo.comment.startswith('QM GaussianMolPM6 calculation'))
self.assertEqual(result.numberOfAtoms, 18)
self.assertIsInstance(result.atomicNumbers, np.ndarray)
if result.molecularMass.units=='amu':
self.assertAlmostEqual(result.molecularMass.value, 128.0626, 3)
################################################################################
if __name__ == '__main__':
unittest.main( testRunner = unittest.TextTestRunner(verbosity=2) )
|
pierrelb/RMG-Py
|
rmgpy/qm/gaussianTest.py
|
Python
|
mit
| 5,432
|
[
"Gaussian"
] |
25be911f95f6ea91432414d89ba9599b08d334b111f101fbdb5e8c825b94b18f
|
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
from distutils.util import strtobool
from functools import partial
from swh.web.api.apidoc import api_doc, format_docstring
from swh.web.api.apiurls import api_route
from swh.web.api.utils import (
enrich_origin,
enrich_origin_search_result,
enrich_origin_visit,
)
from swh.web.api.views.utils import api_lookup
from swh.web.common import archive
from swh.web.common.exc import BadInputExc
from swh.web.common.origin_visits import get_origin_visits
from swh.web.common.utils import reverse
DOC_RETURN_ORIGIN = """
:>json string origin_visits_url: link to
:http:get:`/api/1/origin/(origin_url)/visits/` in order to get information
about the visits for that origin
:>json string url: the origin canonical url
"""
DOC_RETURN_ORIGIN_ARRAY = DOC_RETURN_ORIGIN.replace(":>json", ":>jsonarr")
DOC_RETURN_ORIGIN_VISIT = """
:>json string date: ISO representation of the visit date (in UTC)
:>json str origin: the origin canonical url
:>json string origin_url: link to get information about the origin
:>jsonarr string snapshot: the snapshot identifier of the visit
(may be null if status is not **full**).
:>jsonarr string snapshot_url: link to
:http:get:`/api/1/snapshot/(snapshot_id)/` in order to get
information about the snapshot of the visit
(may be null if status is not **full**).
:>json string status: status of the visit (either **full**,
**partial** or **ongoing**)
:>json number visit: the unique identifier of the visit
"""
DOC_RETURN_ORIGIN_VISIT_ARRAY = DOC_RETURN_ORIGIN_VISIT.replace(":>json", ":>jsonarr")
DOC_RETURN_ORIGIN_VISIT_ARRAY += """
:>jsonarr number id: the unique identifier of the origin
:>jsonarr string origin_visit_url: link to
:http:get:`/api/1/origin/(origin_url)/visit/(visit_id)/`
in order to get information about the visit
"""
@api_route(r"/origins/", "api-1-origins")
@api_doc("/origins/", noargs=True)
@format_docstring(return_origin_array=DOC_RETURN_ORIGIN_ARRAY)
def api_origins(request):
"""
.. http:get:: /api/1/origins/
Get list of archived software origins.
.. warning::
This endpoint used to provide an ``origin_from`` query parameter,
and guarantee an order on results. This is no longer true,
and only the Link header should be used for paginating through
results.
:query int origin_count: The maximum number of origins to return
(defaults to 100, cannot exceed 10000)
{return_origin_array}
{common_headers}
{resheader_link}
:statuscode 200: no error
**Example:**
.. parsed-literal::
:swh_web_api:`origins?origin_count=500`
"""
old_param_origin_from = request.query_params.get("origin_from")
if old_param_origin_from:
raise BadInputExc("Please use the Link header to browse through result")
page_token = request.query_params.get("page_token", None)
limit = min(int(request.query_params.get("origin_count", "100")), 10000)
page_result = archive.lookup_origins(page_token, limit)
origins = [enrich_origin(o, request=request) for o in page_result.results]
next_page_token = page_result.next_page_token
response = {"results": origins, "headers": {}}
if next_page_token is not None:
response["headers"]["link-next"] = reverse(
"api-1-origins",
query_params={"page_token": next_page_token, "origin_count": limit},
request=request,
)
return response
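# Client-side sketch (not part of swh-web): iterate over all archived origins
# by following the ``Link: rel="next"`` header that ``api_origins`` emits, as
# the docstring above prescribes. The base URL targets the public Software
# Heritage API and is an assumption of this example.
def _iter_origins_example(
    url="https://archive.softwareheritage.org/api/1/origins/?origin_count=100",
):
    import requests
    while url:
        resp = requests.get(url)
        resp.raise_for_status()
        for origin in resp.json():
            yield origin["url"]
        # ``resp.links`` is the parsed Link header; no 'next' entry ends the loop
        url = resp.links.get("next", {}).get("url")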
@api_route(r"/origin/(?P<origin_url>.+)/get/", "api-1-origin")
@api_doc("/origin/")
@format_docstring(return_origin=DOC_RETURN_ORIGIN)
def api_origin(request, origin_url):
"""
.. http:get:: /api/1/origin/(origin_url)/get/
Get information about a software origin.
:param string origin_url: the origin url
{return_origin}
{common_headers}
:statuscode 200: no error
:statuscode 404: requested origin can not be found in the archive
**Example:**
.. parsed-literal::
:swh_web_api:`origin/https://github.com/python/cpython/get/`
"""
ori_dict = {"url": origin_url}
error_msg = "Origin with url %s not found." % ori_dict["url"]
return api_lookup(
archive.lookup_origin,
ori_dict,
notfound_msg=error_msg,
enrich_fn=enrich_origin,
request=request,
)
@api_route(
r"/origin/search/(?P<url_pattern>.+)/",
"api-1-origin-search",
throttle_scope="swh_api_origin_search",
)
@api_doc("/origin/search/")
@format_docstring(return_origin_array=DOC_RETURN_ORIGIN_ARRAY)
def api_origin_search(request, url_pattern):
"""
.. http:get:: /api/1/origin/search/(url_pattern)/
Search for software origins whose urls contain a provided string
pattern or match a provided regular expression.
The search is performed in a case-insensitive way.
.. warning::
This endpoint used to provide an ``offset`` query parameter,
and guarantee an order on results. This is no longer true,
and only the Link header should be used for paginating through
results.
:param string url_pattern: a string pattern
:query int limit: the maximum number of found origins to return
(bounded to 1000)
:query boolean with_visit: if true, only return origins with at least
one visit by Software Heritage
{return_origin_array}
{common_headers}
{resheader_link}
:statuscode 200: no error
**Example:**
.. parsed-literal::
:swh_web_api:`origin/search/python/?limit=2`
"""
result = {}
limit = min(int(request.query_params.get("limit", "70")), 1000)
page_token = request.query_params.get("page_token")
with_visit = request.query_params.get("with_visit", "false")
visit_type = request.query_params.get("visit_type")
(results, page_token) = api_lookup(
archive.search_origin,
url_pattern,
limit,
bool(strtobool(with_visit)),
[visit_type] if visit_type else None,
page_token,
enrich_fn=enrich_origin_search_result,
request=request,
)
if page_token is not None:
query_params = {}
query_params["limit"] = limit
query_params["page_token"] = page_token
query_params["visit_type"] = visit_type
result["headers"] = {
"link-next": reverse(
"api-1-origin-search",
url_args={"url_pattern": url_pattern},
query_params=query_params,
request=request,
)
}
result.update({"results": results})
return result
@api_route(r"/origin/metadata-search/", "api-1-origin-metadata-search")
@api_doc("/origin/metadata-search/", noargs=True)
@format_docstring(return_origin_array=DOC_RETURN_ORIGIN_ARRAY)
def api_origin_metadata_search(request):
"""
.. http:get:: /api/1/origin/metadata-search/
Search for software origins whose metadata (expressed as a
JSON-LD/CodeMeta dictionary) match the provided criteria.
For now, only full-text search on this dictionary is supported.
:query str fulltext: a string that will be matched against origin
metadata; results are ranked and ordered starting with the best
ones.
:query int limit: the maximum number of found origins to return
(bounded to 100)
{return_origin_array}
{common_headers}
:statuscode 200: no error
**Example:**
.. parsed-literal::
:swh_web_api:`origin/metadata-search/?limit=2&fulltext=Jane%20Doe`
"""
fulltext = request.query_params.get("fulltext", None)
limit = min(int(request.query_params.get("limit", "70")), 100)
if not fulltext:
content = '"fulltext" must be provided and non-empty.'
raise BadInputExc(content)
results = api_lookup(
archive.search_origin_metadata, fulltext, limit, request=request
)
return {
"results": results,
}
@api_route(r"/origin/(?P<origin_url>.*)/visits/", "api-1-origin-visits")
@api_doc("/origin/visits/")
@format_docstring(return_origin_visit_array=DOC_RETURN_ORIGIN_VISIT_ARRAY)
def api_origin_visits(request, origin_url):
"""
.. http:get:: /api/1/origin/(origin_url)/visits/
Get information about all visits of a software origin.
Visits are returned sorted in descending order according
to their date.
:param str origin_url: a software origin URL
:query int per_page: specify the number of visits to list, for
pagination purposes
:query int last_visit: visit to start listing from, for pagination
purposes
{common_headers}
{resheader_link}
{return_origin_visit_array}
:statuscode 200: no error
:statuscode 404: requested origin can not be found in the archive
**Example:**
.. parsed-literal::
:swh_web_api:`origin/https://github.com/hylang/hy/visits/`
"""
result = {}
origin_query = {"url": origin_url}
notfound_msg = "No origin {} found".format(origin_url)
url_args_next = {"origin_url": origin_url}
per_page = int(request.query_params.get("per_page", "10"))
last_visit = request.query_params.get("last_visit")
if last_visit:
last_visit = int(last_visit)
def _lookup_origin_visits(origin_query, last_visit=last_visit, per_page=per_page):
all_visits = get_origin_visits(origin_query)
all_visits.reverse()
visits = []
if not last_visit:
visits = all_visits[:per_page]
else:
for i, v in enumerate(all_visits):
if v["visit"] == last_visit:
visits = all_visits[i + 1 : i + 1 + per_page]
break
for v in visits:
yield v
results = api_lookup(
_lookup_origin_visits,
origin_query,
notfound_msg=notfound_msg,
enrich_fn=partial(
enrich_origin_visit, with_origin_link=False, with_origin_visit_link=True
),
request=request,
)
if results:
nb_results = len(results)
if nb_results == per_page:
new_last_visit = results[-1]["visit"]
query_params = {}
query_params["last_visit"] = new_last_visit
if request.query_params.get("per_page"):
query_params["per_page"] = per_page
result["headers"] = {
"link-next": reverse(
"api-1-origin-visits",
url_args=url_args_next,
query_params=query_params,
request=request,
)
}
result.update({"results": results})
return result
@api_route(
r"/origin/(?P<origin_url>.*)/visit/latest/",
"api-1-origin-visit-latest",
throttle_scope="swh_api_origin_visit_latest",
)
@api_doc("/origin/visit/latest/")
@format_docstring(return_origin_visit=DOC_RETURN_ORIGIN_VISIT)
def api_origin_visit_latest(request, origin_url=None):
"""
.. http:get:: /api/1/origin/(origin_url)/visit/latest/
Get information about the latest visit of a software origin.
:param str origin_url: a software origin URL
:query boolean require_snapshot: if true, only return a visit
with a snapshot
{common_headers}
{return_origin_visit}
:statuscode 200: no error
:statuscode 404: requested origin or visit can not be found in the
archive
**Example:**
.. parsed-literal::
:swh_web_api:`origin/https://github.com/hylang/hy/visit/latest/`
"""
require_snapshot = request.query_params.get("require_snapshot", "false")
return api_lookup(
archive.lookup_origin_visit_latest,
origin_url,
bool(strtobool(require_snapshot)),
notfound_msg=("No visit for origin {} found".format(origin_url)),
enrich_fn=partial(
enrich_origin_visit, with_origin_link=True, with_origin_visit_link=False
),
request=request,
)
@api_route(
r"/origin/(?P<origin_url>.*)/visit/(?P<visit_id>[0-9]+)/", "api-1-origin-visit"
)
@api_doc("/origin/visit/")
@format_docstring(return_origin_visit=DOC_RETURN_ORIGIN_VISIT)
def api_origin_visit(request, visit_id, origin_url):
"""
.. http:get:: /api/1/origin/(origin_url)/visit/(visit_id)/
Get information about a specific visit of a software origin.
:param str origin_url: a software origin URL
:param int visit_id: a visit identifier
{common_headers}
{return_origin_visit}
:statuscode 200: no error
:statuscode 404: requested origin or visit can not be found in the
archive
**Example:**
.. parsed-literal::
:swh_web_api:`origin/https://github.com/hylang/hy/visit/1/`
"""
return api_lookup(
archive.lookup_origin_visit,
origin_url,
int(visit_id),
notfound_msg=("No visit {} for origin {} found".format(visit_id, origin_url)),
enrich_fn=partial(
enrich_origin_visit, with_origin_link=True, with_origin_visit_link=False
),
request=request,
)
@api_route(
r"/origin/(?P<origin_url>.+)" "/intrinsic-metadata", "api-origin-intrinsic-metadata"
)
@api_doc("/origin/intrinsic-metadata/")
@format_docstring()
def api_origin_intrinsic_metadata(request, origin_url):
"""
.. http:get:: /api/1/origin/(origin_url)/intrinsic-metadata
Get intrinsic metadata of a software origin (as a JSON-LD/CodeMeta dictionary).
:param string origin_url: the origin url
:>json string ???: intrinsic metadata field of the origin
{common_headers}
:statuscode 200: no error
:statuscode 404: requested origin can not be found in the archive
**Example:**
.. parsed-literal::
:swh_web_api:`origin/https://github.com/python/cpython/intrinsic-metadata`
"""
return api_lookup(
archive.lookup_origin_intrinsic_metadata,
origin_url,
notfound_msg=f"Origin with url {origin_url} not found",
enrich_fn=enrich_origin,
request=request,
)
|
SoftwareHeritage/swh-web-ui
|
swh/web/api/views/origin.py
|
Python
|
agpl-3.0
| 14,799
|
[
"VisIt"
] |
b85bb22f5b960e562d637da8ca827dc8ea3aaccce7b18a30edfbf86ab01479f5
|
import os
import xarray.tests.test_dataset as td
from pywps import Process
from pywps import ComplexOutput, FORMATS
from pywps.ext_autodoc import MetadataUrl
import logging
LOGGER = logging.getLogger("PYWPS")
class NcMLAgg(Process):
def __init__(self):
inputs = []
outputs = [
ComplexOutput('d1', 'NetCDF file 1',
as_reference=True,
supported_formats=[FORMATS.NETCDF]),
ComplexOutput('d2', 'NetCDF file 2',
as_reference=True,
supported_formats=[FORMATS.NETCDF]),
ComplexOutput('ncml', 'NcML aggregation',
as_reference=True,
supported_formats=[FORMATS.DODS]), # FORMATS.NCML To become available in PyWPS 4.2.5
]
super(NcMLAgg, self).__init__(
self._handler,
identifier='ncml',
title="Test NcML THREDDS capability",
abstract="Return links to an NcML file aggregating netCDF files with moving time units.",
version="1",
metadata=[
MetadataUrl('User Guide',
'http://emu.readthedocs.io/en/latest/',
anonymous=True),
],
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True)
def _handler(self, request, response):
# Create test datasets
d1, d2, _ = td.create_append_test_data()
# Save datasets to disk
d1fn = os.path.join(self.workdir, "d1.nc")
d2fn = os.path.join(self.workdir, "d2.nc")
d1.to_netcdf(d1fn)
d2.to_netcdf(d2fn)
# Create NcML aggregation
ncml = """
<netcdf xmlns="http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2">
<aggregation dimName="time" type="joinExisting">
<scan location="." suffix=".nc" subdirs="false"/>
</aggregation>
</netcdf>
"""
# Write response
response.outputs["d1"].file = d1fn
response.outputs["d2"].file = d2fn
response.outputs['ncml'].data = ncml
return response
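# Consumer sketch: once the NcML document above is served by a THREDDS server,
# the ``joinExisting`` aggregation can be opened as a single dataset over
# OPeNDAP. The ``opendap_url`` argument is hypothetical (e.g. the server's
# ``dodsC`` endpoint for the NcML file).
def open_ncml_aggregation(opendap_url):
    import xarray as xr
    return xr.open_dataset(opendap_url)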
|
bird-house/emu
|
emu/processes/wps_ncml.py
|
Python
|
apache-2.0
| 2,245
|
[
"NetCDF"
] |
2162b4a71476f7d3fe660675bd22acd0b1c003c16de34c1694ed8237769dfb5a
|
# Standard library imports
import string, pkgutil
from xceptions import *
# Third party imports
#### netcdf --- currently support cdms2, python-netCDF4 and Scientific
l = pkgutil.iter_modules()
ll = map( lambda x: x[1], l )
supportedNetcdf = ['cdms2','netCDF4','Scientific','ncq3']
installedSupportedNetcdf = []
##ll = []
for x in supportedNetcdf:
if x in ll:
if len(installedSupportedNetcdf) == 0:
try:
cmd = 'import %s' % x
exec cmd
installedSupportedNetcdf.append( x )
except:
print 'Failed to import %s' % x
else:
installedSupportedNetcdf.append( x )
if len(installedSupportedNetcdf) > 0:
ncLib = installedSupportedNetcdf[0]
else:
print """No supported netcdf module found.
Supported modules are %s.
Attempting to run with experimental ncq3
Execution may fail, depending on options chosen.
""" % str(supportedNetcdf)
ncLib = 'ncq3'
if ncLib == 'Scientific':
from Scientific.IO import NetCDF as ncdf
## end of netcdf import.
## utility function to convert "type" to string and standardise terminology
def tstr( x ):
x1 = str(x)
return {'real':'float32', 'integer':'int32', 'float':'float32', 'double':'float64' }.get( x1, x1 )
class fileMetadata(object):
def __init__(self,dummy=False,attributeMappingsLog=None,forceLib=None):
self.dummy = dummy
self.atMapLog = attributeMappingsLog
self.forceLib = forceLib
self.ncLib = ncLib
if self.atMapLog == None:
self.atMapLog = open( 'cccc_atMapLog.txt', 'a' )
if self.forceLib == 'ncq3':
import ncq3
self.ncq3 = ncq3
self.ncLib = 'ncq3'
elif self.forceLib == 'cdms2':
import cdms2
self.cdms2 = cdms2
self.ncLib = 'cdms2'
elif self.forceLib == 'netCDF4':
import netCDF4
self.netCDF4 = netCDF4
self.ncLib = 'netCDF4 [%s]' % netCDF4.__version__
elif self.forceLib == 'Scientific':
import Scientific
from Scientific.IO import NetCDF as ncdf
self.ncdf = ncdf
self.ncLib = 'Scientific [%s]' % Scientific.__version__
else:
self.ncLib = ncLib
def close(self):
self.atMapLog.close()
def loadNc(self,fpath):
self.fpath = fpath
self.fn = string.split( fpath, '/' )[-1]
self.fparts = string.split( self.fn[:-3], '_' )
self.ga = {}
self.va = {}
self.da = {}
if self.dummy:
self.makeDummyFileImage()
return
elif self.ncLib == 'cdms2':
import cdms2
self.cdms2 = cdms2
self.loadNc__Cdms(fpath)
elif self.ncLib[:7] == 'netCDF4':
import netCDF4
self.netCDF4 = netCDF4
self.loadNc__Netcdf4(fpath)
elif self.ncLib[:10] == 'Scientific':
from Scientific.IO import NetCDF as ncdf
self.ncdf = ncdf
self.loadNc__Scientific(fpath)
else:
import ncq3
self.ncq3 = ncq3
self.loadNc__ncq(fpath)
##raise baseException( 'No supported netcdf module assigned' )
def loadNc__ncq(self,fpath):
self.nc0 = self.ncq3.open( fpath )
self.nc0.getDigest()
self.ncq3.close( self.nc0 )
self.nc = self.ncq3.Browse( self.nc0.digest )
for a in self.nc._gal:
self.ga[a.name] = a.value
for v in self.nc._vdict.keys():
thisv = self.nc._vdict[v][0]
if v not in self.nc._ddict.keys():
self.va[v] = {}
for a in self.nc._ll[thisv.id]:
self.va[v][a.name] = a.value
self.va[v]['_type'] = tstr( thisv.type )
if v in ['plev','plev_bnds','height']:
x = thisv.data
if type(x) != type([]):
x = [x]
self.va[v]['_data'] = x
else:
self.da[v] = {}
thisa = self.nc._ddict[v]
for a in self.nc._ll[thisv.id]:
self.da[v][a.name] = a.value
self.da[v]['_type'] = tstr( thisv.type )
self.da[v]['_data'] = thisv.data
def loadNc__Cdms(self,fpath):
self.nc = self.cdms2.open( fpath )
for k in self.nc.attributes.keys():
self.ga[k] = self.nc.attributes[k]
if len( self.ga[k] ) == 1:
self.ga[k] = self.ga[k][0]
## nasty fix to deal with fact that cdms2 does not read the 'id' global attribute
try:
thisid = self.nc.id
self.ga['id'] = thisid
except:
pass
for v in self.nc.variables.keys():
self.va[v] = {}
for k in self.nc.variables[v].attributes.keys():
x = self.nc.variables[v].attributes[k]
## returns a list for some scalar attributes.
if type(x) == type([]) and len(x) == 1:
x = x[0]
self.va[v][k] = x
self.va[v]['_type'] = tstr( self.nc.variables[v].dtype )
if v in ['plev','plev_bnds','height']:
x = self.nc.variables[v].getValue().tolist()
if type(x) != type([]):
x = [x]
self.va[v]['_data'] = x
### Note: returns a scalar if data has a scalar value.
## remove missing_value == None
if self.va[v].has_key( 'missing_value' ) and self.va[v]['missing_value'] == None:
self.va[v].pop( 'missing_value' )
for v in self.nc.axes.keys():
self.da[v] = {}
for k in self.nc.axes[v].attributes.keys():
self.da[v][k] = self.nc.axes[v].attributes[k]
self.da[v]['_type'] = tstr( self.nc.axes[v].getValue().dtype )
self.da[v]['_data'] = self.nc.axes[v].getValue().tolist()
self.nc.close()
###
### attributes in .__dict__ dictionary
### variables in .variables dictionary
### dimension lengths in .dimensions
### <variable>.getValue() returns an numpy.ndarray
### data type in <variable>.getValue().dtype
### for scalar variables, <variable>.getValue().tolist() returns a scalar.
###
def loadNc__Scientific(self,fpath):
self.nc = self.ncdf.NetCDFFile( fpath, 'r' )
for k in self.nc.__dict__.keys():
self.ga[k] = self.nc.__dict__[k]
if type(self.ga[k]) not in [type('x'),type(1),type(1.)] and len(self.ga[k]) == 1:
self.ga[k] = self.ga[k][0]
for v in self.nc.variables.keys():
if v not in self.nc.dimensions.keys():
self.va[v] = {}
for k in self.nc.variables[v].__dict__.keys():
self.va[v][k] = self.nc.variables[v].__dict__[k]
self.va[v]['_type'] = tstr( self.nc.variables[v].getValue().dtype )
if v in ['plev','plev_bnds','height']:
### Note: returns a scalar if data has a scalar value.
x = self.nc.variables[v].getValue().tolist()
if type(x) != type([]):
x = [x]
self.va[v]['_data'] = x
for v in self.nc.dimensions.keys():
self.da[v] = {}
if v in self.nc.variables.keys():
for k in self.nc.variables[v].__dict__.keys():
self.da[v][k] = self.nc.variables[v].__dict__[k]
self.da[v]['_type'] = tstr( self.nc.variables[v].getValue().dtype )
self.da[v]['_data'] = self.nc.variables[v].getValue().tolist()
else:
self.da[v]['_type'] = 'index (no data variable)'
self.nc.close()
def loadNc__Netcdf4(self,fpath):
self.nc = self.netCDF4.Dataset(fpath, 'r')
for k in self.nc.ncattrs():
self.ga[k] = self.nc.getncattr(k)
if type( self.ga[k] ) in [ type([]),type(()) ]:
if len( self.ga[k] ) == 1:
self.ga[k] = self.ga[k][0]
for v in self.nc.variables.keys():
if v not in self.nc.dimensions.keys():
self.va[v] = {}
for k in self.nc.variables[v].ncattrs():
self.va[v][k] = self.nc.variables[v].getncattr(k)
try:
self.va[v]['_type'] = tstr( self.nc.variables[v].dtype )
except:
self.va[v]['_type'] = tstr( self.nc.variables[v].datatype )
if v in ['plev','plev_bnds','height']:
self.va[v]['_data'] = self.nc.variables[v][:].tolist()
if type( self.va[v]['_data'] ) != type( [] ):
self.va[v]['_data'] = [self.va[v]['_data'],]
for v in self.nc.dimensions.keys():
self.da[v] = {}
if v in self.nc.variables.keys():
for k in self.nc.variables[v].ncattrs():
self.da[v][k] = self.nc.variables[v].getncattr(k)
try:
self.da[v]['_type'] = tstr( self.nc.variables[v].dtype )
except:
self.da[v]['_type'] = tstr( self.nc.variables[v].datatype )
self.da[v]['_data'] = self.nc.variables[v][:].tolist()
if type( self.da[v]['_data'] ) != type( [] ):
self.da[v]['_data'] = [self.da[v]['_data'],]
else:
self.da[v]['_type'] = 'index (no data variable)'
self.nc.close()
def makeDummyFileImage(self):
for k in range(10):
self.ga['ga%s' % k] = str(k)
for v in [self.fparts[0],]:
self.va[v] = {}
self.va[v]['standard_name'] = 's%s' % v
self.va[v]['long_name'] = v
self.va[v]['cell_methods'] = 'time: point'
self.va[v]['units'] = '1'
self.va[v]['_type'] = 'float32'
for v in ['lat','lon','time']:
self.da[v] = {}
self.da[v]['_type'] = 'float64'
self.da[v]['_data'] = range(5)
dlist = ['lat','lon','time']
svals = lambda p,q: map( lambda y,z: self.da[y].__setitem__(p, z), dlist, q )
svals( 'standard_name', ['latitude', 'longitude','time'] )
svals( 'long_name', ['latitude', 'longitude','time'] )
svals( 'units', ['degrees_north', 'degrees_east','days since 19590101'] )
def applyMap( self, mapList, globalAttributesInFn, log=None ):
for m in mapList:
if m[0] == 'am001':
if m[1][0][0] == "@var":
if m[1][0][1] in self.va.keys():
this = self.va[m[1][0][1]]
apThis = True
for c in m[1][1:]:
if c[0] not in this.keys():
apThis = False
elif c[1] != this[c[0]]:
apThis = False
if m[2][0] != '':
targ = m[2][0]
else:
targ = m[1][-1][0]
if apThis:
if log != None:
log.info( 'Setting %s to %s' % (targ,m[2][1]) )
##print 'Setting %s:%s to %s' % (m[1][0][1],targ,m[2][1])
thisval = self.va[m[1][0][1]].get( targ, None )
self.va[m[1][0][1]][targ] = m[2][1]
self.atMapLog.write( '@var:"%s","%s","%s","%s","%s"\n' % (self.fpath, m[1][0][1], targ, thisval, m[2][1] ) )
elif m[1][0][0] == "@ax":
##print 'checking dimension ',m[1][0][1], self.da.keys()
if m[1][0][1] in self.da.keys():
##print 'checking dimension [2]',m[1][0][1]
this = self.da[m[1][0][1]]
apThis = True
for c in m[1][1:]:
if c[0] not in this.keys():
apThis = False
elif c[1] != this[c[0]]:
apThis = False
if m[2][0] != '':
targ = m[2][0]
else:
targ = m[1][-1][0]
if apThis:
if log != None:
log.info( 'Setting %s to %s' % (targ,m[2][1]) )
##print 'Setting %s:%s to %s' % (m[1][0][1],targ,m[2][1])
thisval = self.da[m[1][0][1]].get( targ, None )
self.da[m[1][0][1]][targ] = m[2][1]
self.atMapLog.write( '@ax:"%s","%s","%s","%s","%s"\n' % (self.fpath, m[1][0][1], targ, thisval, m[2][1]) )
elif m[1][0][0] == "@":
this = self.ga
apThis = True
## apply change where attribute absent only
for c in m[1][1:]:
if c[0] not in this.keys():
if c[1] != '__absent__':
apThis = False
elif c[1] == '__absent__' or c[1] != this[c[0]]:
apThis = False
if m[2][0] != '':
targ = m[2][0]
else:
targ = m[1][-1][0]
if apThis:
if log != None:
log.info( 'Setting %s to %s' % (targ,m[2][1]) )
##print 'Setting %s to %s' % (targ,m[2][1])
thisval = self.ga.get( targ, None )
self.ga[targ] = m[2][1]
self.atMapLog.write( '@:"%s","%s","%s","%s","%s"\n' % (self.fpath, 'ga', targ, thisval, m[2][1]) )
##
if targ in globalAttributesInFn:
i = globalAttributesInFn.index(targ)
thisval = self.fparts[ i ]
self.fparts[ i ] = m[2][1]
self.fn = string.join( self.fparts, '_' ) + '.nc'
self.atMapLog.write( '@fn:"%s","%s","%s"\n' % (self.fpath, thisval, m[2][1]) )
else:
print 'Token %s not recognised' % m[1][0][0]
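## Worked example (hypothetical rule, not shipped with ceda_cc): an 'am001'
## mapList entry targeting global attributes ('@'). The condition list m[1][1:]
## requires 'institute_id' to be absent; because m[2][0] is empty, the target
## defaults to the last condition's attribute name, so applyMap would set
## ga['institute_id'] = 'MOHC' and patch the file name if that attribute is
## encoded in it.
exampleMapList = [ ( 'am001', [ ('@', ''), ('institute_id', '__absent__') ], ('', 'MOHC') ) ]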
|
martinjuckes/ceda_cc
|
ceda_cc/file_utils.py
|
Python
|
bsd-3-clause
| 12,507
|
[
"NetCDF"
] |
f48596912c63f5e233746f53094d1b43638c6b8208af3cc611e626466602d703
|
import numpy as np
"""
Read AMBER topology file
"""
class AMBERFILES_TOP:
def __init__(self, top=''):
self.topology_file = top
flag_atom_name = flag_charge = flag_mass = flag_atom_type = flag_num_excl = flag_nonbonded_parm_index = flag_residue_label = flag_residue_pointer = flag_bond_const = \
flag_bond_value = flag_angle_const = flag_angle_value = flag_dihedral_const = flag_dihedral_peri = \
flag_dihedral_phase = flag_LJ_A = flag_LJ_B = flag_bond_inc_h = flag_bond_wo_h = \
flag_angle_inc_h = flag_angle_wo_h = flag_dihedral_inc_h = flag_dihedral_wo_h = count = flag_excl_list = flag_hbond_A = flag_hbond_B = flag_atom_type_name = 0
charge_list = mass_list = bond_const_list = bond_value_list = angle_const_list = angle_value_list = dihedral_const_list = np.array([], dtype=float)
dihedral_phase_list = LJ_A_list = LJ_B_list = coor_list = excl_list = hbond_A_list = hbond_B_list = np.array([], dtype=float)
atom_type_list = num_excl_list = residue_pointer = nonbonded_parm_index_list = dihedral_peri_list = np.array([], dtype=int)
bond_inc_h_list = bond_wo_h_list = angle_inc_h_list = angle_wo_h_list = dihedral_inc_h_list = dihedral_wo_h_list = np.array([], dtype=int)
atom_name = residue_label = atom_type_name = np.array([], dtype = str)
box = angle = np.zeros(3)
for line in open(self.topology_file):
line = line.split()
if(len(line) >=1):
if(line [0] == '%FLAG' and line[1] == 'ATOM_NAME'):
flag_atom_name = 1
elif(line [0] == '%FLAG' and line[1] == 'CHARGE'):
flag_atom_name = 0
flag_charge = 1
elif(line [0] == '%FLAG' and line[1] == 'ATOMIC_NUMBER'):
flag_charge = 0
elif(line [0] == '%FLAG' and line[1] == 'MASS'):
flag_mass = 1
elif(line [0] == '%FLAG' and line[1] == 'ATOM_TYPE_INDEX'):
flag_mass = 0
flag_atom_type = 1
elif(line [0] == '%FLAG' and line[1] == 'NUMBER_EXCLUDED_ATOMS'):
flag_atom_type = 0
flag_num_excl = 1
elif(line [0] == '%FLAG' and line[1] == 'NONBONDED_PARM_INDEX'):
flag_num_excl = 0
flag_nonbonded_parm_index = 1
elif(line [0] == '%FLAG' and line[1] == 'RESIDUE_LABEL'):
flag_residue_label = 1
flag_nonbonded_parm_index = 0
elif(line [0] == '%FLAG' and line[1] == 'RESIDUE_POINTER'):
flag_residue_label = 0
flag_residue_pointer = 1
elif(line [0] == '%FLAG' and line[1] == 'BOND_FORCE_CONSTANT'):
flag_residue_pointer = 0
flag_bond_const = 1
elif(line [0] == '%FLAG' and line[1] == 'BOND_EQUIL_VALUE'):
flag_bond_const = 0
flag_bond_value = 1
elif(line [0] == '%FLAG' and line[1] == 'ANGLE_FORCE_CONSTANT'):
flag_angle_const = 1
flag_bond_value = 0
elif(line [0] == '%FLAG' and line[1] == 'ANGLE_EQUIL_VALUE'):
flag_angle_const = 0
flag_angle_value = 1
elif(line [0] == '%FLAG' and line[1] == 'DIHEDRAL_FORCE_CONSTANT'):
flag_dihedral_const = 1
flag_angle_value = 0
elif(line [0] == '%FLAG' and line[1] == 'DIHEDRAL_PERIODICITY'):
flag_dihedral_const = 0
flag_dihedral_peri = 1
elif(line [0] == '%FLAG' and line[1] == 'DIHEDRAL_PHASE'):
flag_dihedral_phase = 1
flag_dihedral_peri = 0
elif(line [0] == '%FLAG' and line[1] == 'SCEE_SCALE_FACTOR'):
flag_dihedral_phase = 0
elif(line [0] == '%FLAG' and line[1] == 'LENNARD_JONES_ACOEF'):
flag_LJ_A = 1
elif(line [0] == '%FLAG' and line[1] == 'LENNARD_JONES_BCOEF'):
flag_LJ_B = 1
flag_LJ_A = 0
elif(line [0] == '%FLAG' and line[1] == 'BONDS_INC_HYDROGEN'):
flag_LJ_B = 0
flag_bond_inc_h = 1
elif(line [0] == '%FLAG' and line[1] == 'BONDS_WITHOUT_HYDROGEN'):
flag_bond_inc_h = 0
flag_bond_wo_h = 1
elif(line [0] == '%FLAG' and line[1] == 'ANGLES_INC_HYDROGEN'):
flag_bond_wo_h = 0
flag_angle_inc_h = 1
elif(line [0] == '%FLAG' and line[1] == 'ANGLES_WITHOUT_HYDROGEN'):
flag_angle_inc_h = 0
flag_angle_wo_h = 1
elif(line [0] == '%FLAG' and line[1] == 'DIHEDRALS_INC_HYDROGEN'):
flag_angle_wo_h = 0
flag_dihedral_inc_h = 1
elif(line [0] == '%FLAG' and line[1] == 'DIHEDRALS_WITHOUT_HYDROGEN'):
flag_dihedral_inc_h = 0
flag_dihedral_wo_h = 1
elif(line [0] == '%FLAG' and line[1] == 'EXCLUDED_ATOMS_LIST'):
flag_dihedral_wo_h = 0
flag_excl_list = 1
elif(line [0] == '%FLAG' and line[1] == 'HBOND_ACOEF'):
flag_excl_list = 0
flag_hbond_A = 1
elif(line [0] == '%FLAG' and line[1] == 'HBOND_BCOEF'):
flag_hbond_A = 0
flag_hbond_B = 1
elif(line [0] == '%FLAG' and line[1] == 'HBCUT'):
flag_hbond_B = 0
elif(line [0] == '%FLAG' and line[1] == 'AMBER_ATOM_TYPE'):
flag_atom_type_name = 1
elif(line [0] == '%FLAG' and line[1] == 'TREE_CHAIN_CLASSIFICATION'):
flag_atom_type_name = 0
else:
pass
if(flag_atom_name == 1 and len(line[0]) >= 1 and not 'FORMAT' in line[0] and not 'FLAG' in line[0]):
for i in range(len(line)):
line[i] = str(line[i])
atom_name = np.append(atom_name, line)
elif(flag_charge == 1 and len(line[0]) >= 14 and not 'FORMAT' in line[0]):
for i in range(len(line)):
line[i] = float(line[i])
charge_list = np.append(charge_list, line)
elif(flag_mass == 1 and len(line[0]) >= 14 and not 'FORMAT' in line[0]):
for i in range(len(line)):
line[i] = float(line[i])
mass_list = np.append(mass_list, line)
elif(flag_atom_type == 1 and len(line[0]) >= 1 and not 'FORMAT' in line[0] and not 'FLAG' in line[0]):
for i in range(len(line)):
line[i] = int(line[i])
atom_type_list = np.append(atom_type_list, line)
elif(flag_num_excl == 1 and len(line[0]) >= 1 and not 'FORMAT' in line[0] and not 'FLAG' in line[0]):
for i in range(len(line)):
line[i] = int(line[i])
num_excl_list = np.append(num_excl_list, line)
elif(flag_nonbonded_parm_index == 1 and len(line[0]) >= 1 and not 'FORMAT' in line[0] and not 'FLAG' in line[0]):
for i in range(len(line)):
line[i] = int(line[i])
nonbonded_parm_index_list = np.append(nonbonded_parm_index_list, line)
elif(flag_residue_label == 1 and len(line[0]) >= 1 and not 'FORMAT' in line[0] and not 'FLAG' in line[0]):
for i in range(len(line)):
line[i] = str(line[i])
residue_label = np.append(residue_label, line)
elif(flag_residue_pointer == 1 and len(line[0]) >= 1 and not 'FORMAT' in line[0] and not 'FLAG' in line[0]):
for i in range(len(line)):
line[i] = int(line[i])
residue_pointer = np.append(residue_pointer, line)
elif(flag_bond_const == 1 and len(line[0]) >= 14 and not 'FORMAT' in line[0]):
for i in range(len(line)):
line[i] = float(line[i])
bond_const_list = np.append(bond_const_list, line)
elif(flag_bond_value == 1 and len(line[0]) >= 14 and not 'FORMAT' in line[0]):
for i in range(len(line)):
line[i] = float(line[i])
bond_value_list = np.append(bond_value_list, line)
elif(flag_angle_const == 1 and len(line[0]) >= 14 and not 'FORMAT' in line[0]):
for i in range(len(line)):
line[i] = float(line[i])
angle_const_list = np.append(angle_const_list, line)
elif(flag_angle_value == 1 and len(line[0]) >= 14 and not 'FORMAT' in line[0]):
for i in range(len(line)):
line[i] = float(line[i])
angle_value_list = np.append(angle_value_list, line)
elif(flag_dihedral_const == 1 and len(line[0]) >= 14 and not 'FORMAT' in line[0]):
for i in range(len(line)):
line[i] = float(line[i])
dihedral_const_list = np.append(dihedral_const_list, line)
elif(flag_dihedral_peri == 1 and len(line[0]) >= 14 and not 'FORMAT' in line[0]):
for i in range(len(line)):
line[i] = float(line[i])
dihedral_peri_list = np.append(dihedral_peri_list, line)
elif(flag_dihedral_phase == 1 and len(line[0]) >= 14 and not 'FORMAT' in line[0]):
for i in range(len(line)):
line[i] = float(line[i])
dihedral_phase_list = np.append(dihedral_phase_list, line)
elif(flag_LJ_A == 1 and len(line[0]) >= 14 and not 'FORMAT' in line[0]):
for i in range(len(line)):
line[i] = float(line[i])
LJ_A_list = np.append(LJ_A_list, line)
elif(flag_LJ_B == 1 and len(line[0]) >= 14 and not 'FORMAT' in line[0]):
for i in range(len(line)):
line[i] = float(line[i])
LJ_B_list = np.append(LJ_B_list, line)
elif(flag_bond_inc_h == 1 and len(line) >= 1 and not 'FORMAT' in line[0] and not 'FLAG' in line[0]):
for i in range(len(line)):
line[i] = int(line[i])
bond_inc_h_list = np.append(bond_inc_h_list, line)
elif(flag_bond_wo_h == 1 and len(line) >= 1 and not 'FORMAT' in line[0] and not 'FLAG' in line[0]):
for i in range(len(line)):
line[i] = int(line[i])
bond_wo_h_list = np.append(bond_wo_h_list, line)
elif(flag_angle_inc_h == 1 and len(line) >= 1 and not 'FORMAT' in line[0] and not 'FLAG' in line[0]):
for i in range(len(line)):
line[i] = int(line[i])
angle_inc_h_list = np.append(angle_inc_h_list, line)
elif(flag_angle_wo_h == 1 and len(line) >= 1 and not 'FORMAT' in line[0] and not 'FLAG' in line[0]):
for i in range(len(line)):
line[i] = int(line[i])
angle_wo_h_list = np.append(angle_wo_h_list, line)
elif(flag_dihedral_inc_h == 1 and len(line) >= 1 and not 'FORMAT' in line[0] and not 'FLAG' in line[0]):
for i in range(len(line)):
line[i] = int(line[i])
dihedral_inc_h_list = np.append(dihedral_inc_h_list, line)
elif(flag_dihedral_wo_h == 1 and len(line) >= 1 and not 'FORMAT' in line[0] and not 'FLAG' in line[0]):
for i in range(len(line)):
line[i] = int(line[i])
dihedral_wo_h_list = np.append(dihedral_wo_h_list, line)
elif(flag_excl_list == 1 and len(line) >=1 and len(line[0]) >= 1 and not 'FORMAT' in line[0] and not 'FLAG' in line[0]):
for i in range(len(line)):
line[i] = int(line[i])
excl_list = np.append(excl_list, line)
elif(flag_hbond_A == 1 and len(line) >=1):
if(flag_hbond_A == 1 and len(line[0]) >= 14 and not 'FORMAT' in line[0]):
for i in range(len(line)):
line[i] = float(line[i])
hbond_A_list = np.append(hbond_A_list, line)
elif(flag_hbond_B == 1 and len(line) >=1):
if(flag_hbond_B == 1 and len(line[0]) >= 14 and not 'FORMAT' in line[0]):
for i in range(len(line)):
line[i] = float(line[i])
hbond_B_list = np.append(hbond_B_list, line)
elif(flag_atom_type_name == 1 and len(line[0]) >= 1 and not 'FORMAT' in line[0] and not 'FLAG' in line[0]):
for i in range(len(line)):
line[i] = str(line[i])
atom_type_name = np.append(atom_type_name, line)
self._atom_name = atom_name
self._charge_list = charge_list
self._mass_list = mass_list
self._atom_type_list = atom_type_list
self._num_excl_list = num_excl_list
self._residue_label = residue_label
self._residue_pointer = residue_pointer
self._nonbonded_parm_index_list = nonbonded_parm_index_list
self._bond_const_list = bond_const_list
self._bond_value_list = bond_value_list
self._angle_const_list = angle_const_list
self._angle_value_list = angle_value_list
self._dihedral_const_list = dihedral_const_list
self._dihedral_peri_list = dihedral_peri_list
self._dihedral_phase_list = dihedral_phase_list
self._LJ_A_list = LJ_A_list
self._LJ_B_list = LJ_B_list
self._bond_inc_h_list = bond_inc_h_list
self._bond_wo_h_list = bond_wo_h_list
self._angle_inc_h_list = angle_inc_h_list
self._angle_wo_h_list = angle_wo_h_list
self._dihedral_inc_h_list = dihedral_inc_h_list
self._dihedral_wo_h_list = dihedral_wo_h_list
self._excl_list = excl_list
self._hbond_A_list = hbond_A_list
self._hbond_B_list = hbond_B_list
self._atom_type_name = atom_type_name
class resinfo(AMBERFILES_TOP):
def resid(self, resid):
self.resid = resid
return self.resid
def resname(self, resid):
self.resname = self._residue_label[resid-1]
return self.resname
def atoms(self, resid):
self.atoms = self._atom_name[self._residue_pointer[resid-1]-1:self._residue_pointer[resid]-1]
return self.atoms
def atoms_type(self, resid):
self.atoms_type = self._atom_type_name[self._residue_pointer[resid-1]-1:self._residue_pointer[resid]-1]
return self.atoms_type
def charges(self, resid):
self.charges = self._charge_list[self._residue_pointer[resid-1]-1:self._residue_pointer[resid]-1]/18.2223
return self.charges
def bondterms(self, resid):
begin = self._residue_pointer[resid-1]
end = self._residue_pointer[resid]
self.bondterms = np.array([])
for i in range(0, len(self._bond_inc_h_list), 3):
atom_a_id = self._bond_inc_h_list[i] / 3 + 1
atom_b_id = self._bond_inc_h_list[i+1] / 3 + 1
bond_id = self._bond_inc_h_list[i+2]
if atom_a_id >= begin and atom_a_id < end and atom_b_id >= begin:
self.bondterms = np.append(self.bondterms, str(self._atom_name[atom_a_id-1]) + '(' + str(atom_a_id) + ')-' + str(self._atom_name[atom_b_id-1]) + '(' + str(atom_b_id) + ') ' + str(self._bond_const_list[bond_id-1]) + " " + str(self._bond_value_list[bond_id-1]))
for i in range(0, len(self._bond_wo_h_list), 3):
atom_a_id = self._bond_wo_h_list[i] / 3 + 1
atom_b_id = self._bond_wo_h_list[i+1] / 3 + 1
bond_id = self._bond_wo_h_list[i+2]
if atom_a_id >= begin and atom_a_id < end and atom_b_id >= begin:
self.bondterms = np.append(self.bondterms, str(self._atom_name[atom_a_id-1]) + '(' + str(atom_a_id) + ')-' + str(self._atom_name[atom_b_id-1]) + '(' + str(atom_b_id) +') ' + str(self._bond_const_list[bond_id-1]) + " " + str(self._bond_value_list[bond_id-1]))
return self.bondterms
def angleterms(self, resid):
begin = self._residue_pointer[resid-1]
end = self._residue_pointer[resid]
self.angleterms = np.array([])
for i in range(0, len(self._angle_inc_h_list), 4):
atom_a_id = self._angle_inc_h_list[i] / 3 + 1
atom_b_id = self._angle_inc_h_list[i+1] / 3 + 1
atom_c_id = self._angle_inc_h_list[i+2] / 3 + 1
angle_id = self._angle_inc_h_list[i+3]
if atom_a_id >= begin and atom_a_id < end:
self.angleterms = np.append(self.angleterms, str(self._atom_name[atom_a_id-1]) + '(' + str(atom_a_id) + ')-' + str(self._atom_name[atom_b_id-1]) + '(' + str(atom_b_id) + ')-' + str(self._atom_name[atom_c_id-1]) + '(' + str(atom_c_id) + ') ' + str(self._angle_const_list[angle_id-1]) + " " + str(self._angle_value_list[angle_id-1]))
for i in range(0, len(self._angle_wo_h_list), 4):
atom_a_id = self._angle_wo_h_list[i] / 3 + 1
atom_b_id = self._angle_wo_h_list[i+1] / 3 + 1
atom_c_id = self._angle_wo_h_list[i+2] / 3 + 1
angle_id = self._angle_wo_h_list[i+3]
if atom_a_id >= begin and atom_a_id < end:
self.angleterms = np.append(self.angleterms, str(self._atom_name[atom_a_id-1]) + '(' + str(atom_a_id) + ')-' + str(self._atom_name[atom_b_id-1]) + '(' + str(atom_b_id) + ')-' + str(self._atom_name[atom_c_id-1]) + '(' + str(atom_c_id) + ') ' + str(self._angle_const_list[angle_id-1]) + " " + str(self._angle_value_list[angle_id-1]))
return self.angleterms
def dihedralterms(self, resid):
begin = self._residue_pointer[resid-1]
end = self._residue_pointer[resid]
self.dihedralterms = np.array([])
for i in range(0, len(self._dihedral_inc_h_list), 5):
atom_a_id = self._dihedral_inc_h_list[i] / 3 + 1
atom_b_id = self._dihedral_inc_h_list[i+1] / 3 + 1
atom_c_id = self._dihedral_inc_h_list[i+2] / 3 + 1
atom_d_id = self._dihedral_inc_h_list[i+3] / 3 + 1
dihedral_id = self._dihedral_inc_h_list[i+4]
if atom_a_id >= begin and atom_a_id < end:
self.dihedralterms = np.append(self.dihedralterms, str(self._atom_name[atom_a_id-1]) + '(' + str(atom_a_id) + ')-' + str(self._atom_name[atom_b_id-1]) + '(' + str(atom_b_id) + ')-' + str(self._atom_name[atom_c_id-1]) + '(' + str(atom_c_id) + ')-' + str(self._atom_name[atom_d_id-1]) + '(' + str(atom_d_id) + ') ' + str(self._dihedral_const_list[dihedral_id-1]) + " " + str(self._dihedral_phase_list[dihedral_id-1]) + " " + str(self._dihedral_peri_list[dihedral_id-1]))
for i in range(0, len(self._dihedral_wo_h_list), 5):
atom_a_id = self._dihedral_wo_h_list[i] / 3 + 1
atom_b_id = self._dihedral_wo_h_list[i+1] / 3 + 1
atom_c_id = self._dihedral_wo_h_list[i+2] / 3 + 1
atom_d_id = self._dihedral_wo_h_list[i+3] / 3 + 1
dihedral_id = self._dihedral_wo_h_list[i+4]
if atom_a_id >= begin and atom_a_id < end:
self.dihedralterms = np.append(self.dihedralterms, str(self._atom_name[atom_a_id-1]) + '(' + str(atom_a_id) + ')-' + str(self._atom_name[atom_b_id-1]) + '(' + str(atom_b_id) + ')-' + str(self._atom_name[atom_c_id-1]) + '(' + str(atom_c_id) + ')-' + str(self._atom_name[atom_d_id-1]) + '(' + str(atom_d_id) + ') ' + str(self._dihedral_const_list[dihedral_id-1]) + " " + str(self._dihedral_phase_list[dihedral_id-1]) + " " + str(self._dihedral_peri_list[dihedral_id-1]))
return self.dihedralterms
    def summary(self, resid):
        # Atom slice for this residue; residue_pointer entries are 1-based.
        first = self._residue_pointer[resid-1] - 1
        last = self._residue_pointer[resid] - 1
        # 18.2223 is the Amber charge scaling factor: prmtop charges are
        # stored premultiplied by sqrt of the Coulomb constant, so dividing
        # recovers units of the elementary charge.
        print "==================================="
        print "Summary of residue " + str(resid)
        print "-----------------------------------"
        print "Residue Name: " + str(self._residue_label[resid-1])
        print "Net Charge: " + str(sum(self._charge_list[first:last]/18.2223))
        print "Number of Atom(s): " + str(len(self._atom_name[first:last]))
        print "Atom(s): " + str(self._atom_name[first:last])
        print "Atom type: " + str(self._atom_type_name[first:last])
        print "Partial Charges: " + str(self._charge_list[first:last]/18.2223)
|
CTEricLai/amberfiles
|
MD_IO.py
|
Python
|
gpl-3.0
| 21,744
|
[
"Amber"
] |
651159d6e3afe9e407087cef59d23b40643f8f0ea3e813a494a8b3c5eb9cd381
|
import tomviz.operators
from vtk import vtkImageData, VTK_DOUBLE
class TestOperator(tomviz.operators.Operator):
def transform_scalars(self, data):
image_data = vtkImageData()
image_data.SetDimensions(3, 4, 5)
image_data.AllocateScalars(VTK_DOUBLE, 1)
dims = image_data.GetDimensions()
for z in range(dims[2]):
for y in range(dims[1]):
for x in range(dims[0]):
image_data.SetScalarComponentFromDouble(x, y, z, 0, 2.0)
self.progress.data = image_data
|
mathturtle/tomviz
|
tests/cxx/fixtures/update_data.py
|
Python
|
bsd-3-clause
| 553
|
[
"VTK"
] |
99c33b4f34baf2d8f0c5307568c81ac1d6039a3bb6b15801862f02e423860573
|
import math
from chainer.functions.activation import softplus
from chainer.functions.math import exponential
from chainer.functions.math import sum
from chainer import variable
def gaussian_kl_divergence(mean, ln_var):
"""Computes the KL-divergence of Gaussian variables from the standard one.
Given two variable ``mean`` representing :math:`\\mu` and ``ln_var``
representing :math:`\\log(\\sigma^2)`, this function returns a variable
representing the KL-divergence between the given multi-dimensional Gaussian
:math:`N(\\mu, S)` and the standard Gaussian :math:`N(0, I)`
.. math::
D_{\\mathbf{KL}}(N(\\mu, S) \\| N(0, I)),
where :math:`S` is a diagonal matrix such that :math:`S_{ii} = \\sigma_i^2`
and :math:`I` is an identity matrix.
Args:
mean (~chainer.Variable): A variable representing mean of given
gaussian distribution, :math:`\\mu`.
ln_var (~chainer.Variable): A variable representing logarithm of
variance of given gaussian distribution, :math:`\\log(\\sigma^2)`.
Returns:
~chainer.Variable: A variable representing KL-divergence between
given gaussian distribution and the standard gaussian.
"""
assert isinstance(mean, variable.Variable)
assert isinstance(ln_var, variable.Variable)
J = mean.data.size
var = exponential.exp(ln_var)
return (sum.sum(mean * mean) + sum.sum(var) - sum.sum(ln_var) - J) * 0.5
def bernoulli_nll(x, y):
"""Computes the negative log-likelihood of a Bernoulli distribution.
This function calculates the negative log-likelihood of a Bernoulli
distribution.
.. math::
-B(x; p) = -\\sum_i {x_i \\log(p_i) + (1 - x_i)\\log(1 - p_i)},
where :math:`p = \\sigma(y)`, and :math:`\\sigma(\\cdot)` is a sigmoid
function.
.. note::
As this function uses a sigmoid function, you can pass a result of
fully-connected layer (that means :class:`Linear`) to this function
directly.
Args:
x (~chainer.Variable): Input variable.
y (~chainer.Variable): A variable representing the parameter of
Bernoulli distribution.
Returns:
~chainer.Variable: A variable representing negative log-likelihood.
"""
assert isinstance(x, variable.Variable)
assert isinstance(y, variable.Variable)
return sum.sum(softplus.softplus(y)) - sum.sum(x * y)
def gaussian_nll(x, mean, ln_var):
"""Computes the negative log-likelihood of a Gaussian distribution.
Given two variable ``mean`` representing :math:`\\mu` and ``ln_var``
representing :math:`\\log(\\sigma^2)`, this function returns the negative
log-likelihood of :math:`x` on a Gaussian distribution :math:`N(\\mu, S)`,
.. math::
-\\log N(x; \\mu, \\sigma^2) =
\\log\\left(\\sqrt{(2\\pi)^D |S|}\\right) +
\\frac{1}{2}(x - \\mu)^\\top S^{-1}(x - \\mu),
where :math:`D` is a dimension of :math:`x` and :math:`S` is a diagonal
matrix where :math:`S_{ii} = \\sigma_i^2`.
Args:
x (~chainer.Variable): Input variable.
mean (~chainer.Variable): A variable representing mean of a Gaussian
distribution, :math:`\\mu`.
ln_var (~chainer.Variable): A variable representing logarithm of
variance of a Gaussian distribution, :math:`\\log(\\sigma^2)`.
Returns:
~chainer.Variable: A variable representing the negative log-likelihood.
"""
assert isinstance(x, variable.Variable)
assert isinstance(mean, variable.Variable)
assert isinstance(ln_var, variable.Variable)
D = x.data.size
x_prec = exponential.exp(-ln_var)
x_diff = x - mean
x_power = (x_diff * x_diff) * x_prec * -0.5
return (sum.sum(ln_var) + D * math.log(2 * math.pi)) / 2 - sum.sum(x_power)
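# --- Hedged usage sketch (not part of the original module; assumes chainer
# and numpy are installed; shapes and values are illustrative only) ---
if __name__ == '__main__':
    import numpy as _np
    from chainer import Variable
    mu = Variable(_np.zeros((1, 3), dtype=_np.float32))
    ln_var = Variable(_np.zeros((1, 3), dtype=_np.float32))
    # KL(N(0, I) || N(0, I)) should be exactly zero.
    print(gaussian_kl_divergence(mu, ln_var).data)
    x = Variable(_np.zeros((1, 3), dtype=_np.float32))
    # NLL of x = mu under unit variance reduces to D/2 * log(2*pi) for D = 3.
    print(gaussian_nll(x, mu, ln_var).data)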
|
AlpacaDB/chainer
|
chainer/functions/loss/vae.py
|
Python
|
mit
| 3,825
|
[
"Gaussian"
] |
18c1f38edb66ef5e79b1c69281f48c9237ad354eac6f7726c5a2675fbc01f88b
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import subprocess
import os
import vtk
from PyQt5 import QtCore, QtWidgets
from .ExodusPlugin import ExodusPlugin
from .VTKWindowPlugin import VTKWindowPlugin
import mooseutils
class ExternalVTKWindowPlugin(VTKWindowPlugin):
"""
VTK window for external gold/diff use, it handles storing size and de-selecting main window check boxes.
"""
def __init__(self, toggle, size=None, text=None):
super(ExternalVTKWindowPlugin, self).__init__(size=size)
self.setWindowFlags(QtCore.Qt.SubWindow | QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowTitleHint | \
QtCore.Qt.WindowMinMaxButtonsHint | QtCore.Qt.WindowCloseButtonHint)
self._widget_size = None
# The toggle button that controls the window
self._toggle = toggle
# Add text annotation
self._text = None
if text:
self.setWindowTitle(text)
    def onJobStart(self, *args):
"""
Ignores the job start time.
"""
pass
def sizeHint(self, *args):
"""
Return the saved size.
"""
if self._widget_size:
return self._widget_size
else:
return super(ExternalVTKWindowPlugin, self).size()
def closeEvent(self, *args):
"""
Store the size of the window.
"""
self._widget_size = self.size()
self._toggle.setCheckState(QtCore.Qt.Unchecked)
self._toggle.clicked.emit(False)
class GoldDiffPlugin(QtWidgets.QGroupBox, ExodusPlugin):
"""
Plugin for toggling the Gold/Diff VTK windows.
"""
windowRequiresUpdate = QtCore.pyqtSignal()
cameraChanged = QtCore.pyqtSignal(tuple, tuple, tuple)
def __init__(self, size=None):
super(GoldDiffPlugin, self).__init__()
self.MainLayout = QtWidgets.QHBoxLayout(self)
self.GoldToggle = QtWidgets.QCheckBox("Gold")
self.DiffToggle = QtWidgets.QCheckBox("Exodiff")
self.LinkToggle = QtWidgets.QCheckBox("Link Camera(s)")
self.MainLayout.addWidget(self.GoldToggle)
self.MainLayout.addWidget(self.DiffToggle)
self.MainLayout.addWidget(self.LinkToggle)
self.GoldVTKWindow = ExternalVTKWindowPlugin(self.GoldToggle, size=size, text='GOLD')
        self.DiffVTKWindow = None  # created below only if the exodiff executable is found
# Locate MOOSE exodiff program
self._exodiff = None
moose_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', '..'))
exodiff = os.path.join(os.getenv('MOOSE_DIR', moose_dir), 'framework', 'contrib', 'exodiff', 'exodiff')
if os.path.isfile(exodiff):
self.DiffVTKWindow = ExternalVTKWindowPlugin(self.DiffToggle, size=size, text='EXODIFF')
self._exodiff = exodiff
self.setup()
self._gold_observer = None
self._diff_observer = None
self._main_observer = None
def _loadPlugin(self):
"""
Loads plugin state.
"""
self.load(self.GoldToggle)
self.load(self.DiffToggle)
self.load(self.LinkToggle)
def onSetVariable(self, *args):
"""
Update variable for open Gold/Diff windows.
"""
super(GoldDiffPlugin, self).onSetVariable(*args)
if self.hasGoldWindow():
self.GoldVTKWindow.onSetVariable(*args)
if self.hasDiffWindow():
self.DiffVTKWindow.onSetVariable(*args)
def onSetComponent(self, *args):
"""
Update component for open Gold/Diff windows.
"""
super(GoldDiffPlugin, self).onSetComponent(*args)
if self.hasGoldWindow():
self.GoldVTKWindow.onSetComponent(*args)
if self.hasDiffWindow():
self.DiffVTKWindow.onSetComponent(*args)
def onReaderOptionsChanged(self, options):
"""
Pass on the reader options to the gold/diff window(s).
"""
self.updateOptions()
if self.hasGoldWindow():
self.GoldVTKWindow.onReaderOptionsChanged(options)
if self.hasDiffWindow():
self.DiffVTKWindow.onReaderOptionsChanged(options)
def onResultOptionsChanged(self, options):
"""
Pass on the result options to the gold/diff window(s).
"""
self.updateOptions()
if self.hasGoldWindow():
self.GoldVTKWindow.onResultOptionsChanged(options)
if self.hasDiffWindow():
self.DiffVTKWindow.onResultOptionsChanged(options)
def onWindowOptionsChanged(self, options):
"""
Pass on the window options to the gold/diff window(s).
"""
self.updateOptions()
if self.hasGoldWindow():
self.GoldVTKWindow.onWindowOptionsChanged(options)
if self.hasDiffWindow():
self.DiffVTKWindow.onWindowOptionsChanged(options)
def onCameraChanged(self, *args):
"""
Slot for when camera is changed.
"""
link = self.LinkToggle.isChecked()
if link and self.hasGoldWindow():
self.GoldVTKWindow.onCameraChanged(*args)
if link and self.hasDiffWindow():
self.DiffVTKWindow.onCameraChanged(*args)
def hasGoldWindow(self):
"""
Return True if the Gold window is open.
"""
return self.GoldToggle.isChecked() and self.GoldVTKWindow.isVisible()
def hasDiffWindow(self):
"""
Return True if the Diff window is open.
"""
diff = self.DiffToggle.isChecked() if self._exodiff else False
return diff and self.DiffVTKWindow.isVisible()
def updateOptions(self):
"""
Control the Gold/Diff VTK windows.
"""
value = mooseutils.gold(self._filename) is not None
self.setVisible(value)
self.setEnabled(value)
if not value:
self.GoldToggle.setChecked(False)
self.DiffToggle.setChecked(False)
# Gold window toggle
gold = self.GoldToggle.isChecked() if self.GoldVTKWindow else False
goldname = mooseutils.gold(self._filename)
if gold and (not self.GoldVTKWindow.isVisible()):
self.GoldVTKWindow.show()
self.GoldVTKWindow.onSetFilename(goldname)
self.GoldVTKWindow.onSetVariable(self._variable)
self.GoldVTKWindow.onSetComponent(self._component)
self.GoldVTKWindow.onWindowRequiresUpdate()
elif (not gold) and self.GoldVTKWindow and self.GoldVTKWindow.isVisible():
self.GoldVTKWindow.hide()
# Diff Window toggle
diff = self.DiffToggle.isChecked() if self._exodiff else False
if diff and (not self.DiffVTKWindow.isVisible()):
diffname = self._filename + '.diff'
cmd = [self._exodiff, '-map', '-F', '1e-10', '-t', '5.5e-06',
os.path.abspath(self._filename),
os.path.abspath(goldname),
os.path.abspath(diffname)]
subprocess.call(cmd)
self.DiffVTKWindow.show()
self.DiffVTKWindow.onSetFilename(diffname)
self.DiffVTKWindow.onSetVariable(self._variable)
self.DiffVTKWindow.onSetComponent(self._component)
self.DiffVTKWindow.onWindowRequiresUpdate()
elif (not diff) and (self.DiffVTKWindow is not None) and self.DiffVTKWindow.isVisible():
self.DiffVTKWindow.hide()
# Camera linkage
link = self.LinkToggle.isChecked()
if link:
if gold and (self._gold_observer is None):
self._gold_observer = self.GoldVTKWindow._window.getVTKInteractor().AddObserver("RenderEvent", self._callbackGoldRenderEvent)
if diff and (self._diff_observer is None):
self._diff_observer = self.DiffVTKWindow._window.getVTKInteractor().AddObserver("RenderEvent", self._callbackDiffRenderEvent)
else:
if self._gold_observer is not None:
self.GoldVTKWindow._window.getVTKInteractor().RemoveObserver(self._gold_observer)
self._gold_observer = None
if self._diff_observer is not None:
self.DiffVTKWindow._window.getVTKInteractor().RemoveObserver(self._diff_observer)
self._diff_observer = None
def _setupGoldToggle(self, qobject):
"""
The setup method for GoldToggle widget.
Args:
qobject: The widget being setup.
"""
qobject.clicked.connect(self._callbackGoldToggle)
def _callbackGoldToggle(self, value):
"""
Callback for GoldToggle widget.
Args:
value[bool]: True/False indicating the toggle state of the widget.
"""
self.store(self.GoldToggle)#, key=(self._filename, None, None))
self.updateOptions()
self.windowRequiresUpdate.emit()
def _setupDiffToggle(self, qobject):
"""
The setup method for DiffToggle widget.
Args:
qobject: The widget being setup.
"""
self.DiffToggle.setEnabled(bool(self._exodiff))
if self._exodiff:
qobject.clicked.connect(self._callbackDiffToggle)
def _callbackDiffToggle(self, value):
"""
Callback for DiffToggle widget.
Args:
value[bool]: True/False indicating the toggle state of the widget.
"""
self.store(self.DiffToggle)#, key=(self._filename, None, None))
self.updateOptions()
self.windowRequiresUpdate.emit()
def _setupLinkToggle(self, qobject):
"""
Setup the camera link toggling.
"""
qobject.setCheckState(QtCore.Qt.Checked)
qobject.clicked.connect(self._callbackLinkToggle)
def _callbackLinkToggle(self, value):
"""
Connect/disconnect the cameras between windows.
NOTE: This doesn't get called (b/c the button is disabled) if VTKWindowPlugin does not exist
on the plugin manager, see initialization
"""
self.store(self.LinkToggle)#, key=(self._filename, None, None))
self.updateOptions()
self.windowRequiresUpdate.emit()
def _callbackGoldRenderEvent(self, *args):
"""
Called when the gold window RenderEvent occurs.
"""
camera = self.GoldVTKWindow._result.getVTKRenderer().GetActiveCamera()
view, position, focal = camera.GetViewUp(), camera.GetPosition(), camera.GetFocalPoint()
self.cameraChanged.emit(view, position, focal)
if self._exodiff and self.DiffVTKWindow.isVisible():
self.DiffVTKWindow.onCameraChanged(view, position, focal)
def _callbackDiffRenderEvent(self, *args):
"""
Called when the diff window RenderEvent occurs.
"""
camera = self.DiffVTKWindow._result.getVTKRenderer().GetActiveCamera()
view, position, focal = camera.GetViewUp(), camera.GetPosition(), camera.GetFocalPoint()
self.cameraChanged.emit(view, position, focal)
if self.GoldVTKWindow.isVisible():
self.GoldVTKWindow.onCameraChanged(view, position, focal)
def main(size=None):
"""
Run the VTKFilePlugin all by its lonesome.
"""
from ..ExodusPluginManager import ExodusPluginManager
from .VTKWindowPlugin import VTKWindowPlugin
from .FilePlugin import FilePlugin
widget = ExodusPluginManager(plugins=[lambda: VTKWindowPlugin(size=size), FilePlugin, lambda: GoldDiffPlugin(size=size)])
widget.show()
return widget, widget.VTKWindowPlugin
if __name__ == '__main__':
import sys
from peacock.utils import Testing
app = QtWidgets.QApplication(sys.argv)
filenames = Testing.get_chigger_input_list('mug_blocks_out.e', 'vector_out.e', 'displace.e')
widget, window = main()
widget.FilePlugin.onSetFilenames(filenames)
sys.exit(app.exec_())
|
nuclear-wizard/moose
|
python/peacock/ExodusViewer/plugins/GoldDiffPlugin.py
|
Python
|
lgpl-2.1
| 12,189
|
[
"MOOSE",
"VTK"
] |
1c82b21ca059c2554fdb0c91c04ef3063d3f58816d9dd1daed38adfc6e223d9e
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
import random
import numpy
from torch.utils.data import TensorDataset, DataLoader
import numpy as np
def loader_to_creator(loader):
    # Warning: this data creator does not respect changes to batch_size.
def data_creator(config, batch_size):
return loader
return data_creator
def np_to_creator(data):
def data_creator(config, batch_size):
return DataLoader(TensorDataset(torch.from_numpy(data[0]).float(),
torch.from_numpy(data[1]).float()),
batch_size=batch_size,
shuffle=True)
return data_creator
def set_pytorch_seed(seed):
if seed is not None and isinstance(seed, int):
torch.manual_seed(seed)
numpy.random.seed(seed)
random.seed(seed)
def xshard_to_np(shard, mode="fit", expand_dim=None):
if mode == "fit":
data_local = shard.collect()
return (np.concatenate([data_local[i]['x'] for i
in range(len(data_local))], axis=0),
np.concatenate([data_local[i]['y'] for i
in range(len(data_local))], axis=0))
if mode == "predict":
data_local = shard.collect()
return np.concatenate([data_local[i]['x'] for i
in range(len(data_local))], axis=0)
if mode == "yhat":
yhat = shard.collect()
yhat = np.concatenate([yhat[i]['prediction'] for i in range(len(yhat))], axis=0)
if len(expand_dim) >= 1:
yhat = np.expand_dims(yhat, axis=expand_dim)
return yhat
def np_to_xshard(x, prefix="x"):
from bigdl.orca.data import XShards
x = XShards.partition(x)
def transform_to_dict(train_data):
return {prefix: train_data}
return x.transform_shard(transform_to_dict)
def check_data(x, y, data_config):
assert data_config["past_seq_len"] == x.shape[-2], \
"The x shape should be (batch_size, past_seq_len, input_feature_num), "\
"Got past_seq_len of {} in config while x input shape of {}."\
.format(data_config["past_seq_len"], x.shape[-2])
assert data_config["future_seq_len"] == y.shape[-2], \
"The y shape should be (batch_size, future_seq_len, output_feature_num), "\
"Got future_seq_len of {} in config while y input shape of {}."\
.format(data_config["future_seq_len"], y.shape[-2])
assert data_config["input_feature_num"] == x.shape[-1],\
"The x shape should be (batch_size, past_seq_len, input_feature_num), "\
"Got input_feature_num of {} in config while x input shape of {}."\
.format(data_config["input_feature_num"], x.shape[-1])
assert data_config["output_feature_num"] == y.shape[-1], \
"The y shape should be (batch_size, future_seq_len, output_feature_num), "\
"Got output_feature_num of {} in config while y input shape of {}."\
.format(data_config["output_feature_num"], y.shape[-1])
|
intel-analytics/BigDL
|
python/chronos/src/bigdl/chronos/forecaster/utils.py
|
Python
|
apache-2.0
| 3,604
|
[
"ORCA"
] |
e596070464bcba6815c555ee26b2c7e14d88f120e98e3e507afe90b0f72e6c79
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
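    # Example (illustrative): partition(lambda n: n % 2 == 0, [1, 2, 3, 4])
    # returns ([2, 4], [1, 3]) -- items matching the predicate first,
    # then the rest.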
class servicecontrolCallTransformer(cst.CSTTransformer):
    CTRL_PARAMS: Tuple[str, ...] = ('retry', 'timeout', 'metadata')
    METHOD_TO_PARAMS: Dict[str, Tuple[str, ...]] = {
'allocate_quota': ('service_name', 'allocate_operation', 'service_config_id', ),
'check': ('service_name', 'operation', 'service_config_id', ),
'report': ('service_name', 'operations', 'service_config_id', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: a.keyword.value not in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=servicecontrolCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the servicecontrol client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
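# --- Hedged usage sketch (illustrative directory names) ---
#
#   python fixup_servicecontrol_v1_keywords.py \
#       --input-directory ./servicecontrol-v1-old \
#       --output-directory ./servicecontrol-v1-fixed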
|
googleapis/python-service-control
|
scripts/fixup_servicecontrol_v1_keywords.py
|
Python
|
apache-2.0
| 6,155
|
[
"VisIt"
] |
60d40f2d852da890de34a202482029e83ccdbeda70ef4faa2d69b2dc539e167e
|
#!/usr/bin/env python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
import sys
root = os.path.dirname(os.path.realpath(__file__))
# => Driver Code <= #
if __name__ == '__main__':
# > Working Dirname < #
if len(sys.argv) == 1:
dirname = '.'
elif len(sys.argv) == 2:
dirname = sys.argv[1]
else:
raise Exception('Usage: fsapt.py [dirname]')
# > Copy Files < #
os.system('cp %s/pymol/*pymol %s' % (root, dirname))
|
psi4/psi4
|
psi4/share/psi4/fsapt/copy_pymol.py
|
Python
|
lgpl-3.0
| 1,344
|
[
"Psi4",
"PyMOL"
] |
abf472fcec50500824b33e323e53debe10c90c8d24d5b8a961a3123eeacd3650
|
"Yang/Wu's OEP implementation, in PyQuante."
from math import sqrt
from PyQuante.NumWrap import zeros,matrixmultiply,transpose,dot,identity,\
array,solve
from PyQuante.Ints import getbasis, getints, getJ,get2JmK,getK
from PyQuante.LA2 import geigh,mkdens,trace2,simx
from PyQuante.hartree_fock import get_fock
from PyQuante.CGBF import three_center
from PyQuante.optimize import fminBFGS
from PyQuante.fermi_dirac import get_efermi, get_fermi_occs,mkdens_occs,\
get_entropy,mkdens_fermi
import logging
logger = logging.getLogger("pyquante")
gradcall=0
class EXXSolver:
"EXXSolver(solver)"
def __init__(self,solver):
# Solver is a pointer to a HF or a DFT calculation that has
# already converged
self.solver = solver
self.bfs = self.solver.bfs
self.nbf = len(self.bfs)
self.S = self.solver.S
self.h = self.solver.h
self.Ints = self.solver.Ints
self.molecule = self.solver.molecule
self.nel = self.molecule.get_nel()
self.nclosed, self.nopen = self.molecule.get_closedopen()
self.Enuke = self.molecule.get_enuke()
self.norb = self.nbf
self.orbs = self.solver.orbs
self.orbe = self.solver.orbe
self.Gij = []
for g in xrange(self.nbf):
gmat = zeros((self.nbf,self.nbf),'d')
self.Gij.append(gmat)
gbf = self.bfs[g]
for i in xrange(self.nbf):
ibf = self.bfs[i]
for j in xrange(i+1):
jbf = self.bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
D0 = mkdens(self.orbs,0,self.nclosed)
J0 = getJ(self.Ints,D0)
Vfa = (2.0*(self.nel-1.0)/self.nel)*J0
self.H0 = self.h + Vfa
self.b = zeros(self.nbf,'d')
return
def iterate(self,**opts):
self.iter = 0
self.etemp = opts.get("etemp",False)
logging.debug("iter Energy <b|b>")
logging.debug("---- ------ -----")
self.b = fminBFGS(self.get_energy,self.b,self.get_gradient,logger=logging)
return
def get_energy(self,b):
self.iter += 1
self.Hoep = get_Hoep(b,self.H0,self.Gij)
self.orbe,self.orbs = geigh(self.Hoep,self.S)
if self.etemp:
self.D,self.entropy = mkdens_fermi(self.nel,self.orbe,self.orbs,
self.etemp)
else:
self.D = mkdens(self.orbs,0,self.nclosed)
self.entropy=0
self.F = get_fock(self.D,self.Ints,self.h)
self.energy = trace2(self.h+self.F,self.D)+self.Enuke + self.entropy
if self.iter == 1 or self.iter % 10 == 0:
logging.debug("%4d %10.5f %10.5f" % (self.iter,self.energy,dot(b,b)))
return self.energy
def get_gradient(self,b):
energy = self.get_energy(b)
Fmo = simx(self.F,self.orbs)
bp = zeros(self.nbf,'d')
for g in xrange(self.nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = simx(self.Gij[g],self.orbs)
# Now sum the appropriate terms to get the b gradient
for i in xrange(self.nclosed):
for a in xrange(self.nclosed,self.norb):
bp[g] = bp[g] + Fmo[i,a]*Gmo[i,a]/(self.orbe[i]-self.orbe[a])
#logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
return bp
class UEXXSolver:
"EXXSolver(solver)"
def __init__(self,solver):
# Solver is a pointer to a UHF calculation that has
# already converged
self.solver = solver
self.bfs = self.solver.bfs
self.nbf = len(self.bfs)
self.S = self.solver.S
self.h = self.solver.h
self.Ints = self.solver.Ints
self.molecule = self.solver.molecule
self.nel = self.molecule.get_nel()
self.nalpha, self.nbeta = self.molecule.get_alphabeta()
self.Enuke = self.molecule.get_enuke()
self.norb = self.nbf
self.orbsa = self.solver.orbsa
self.orbsb = self.solver.orbsb
self.orbea = self.solver.orbea
self.orbeb = self.solver.orbeb
self.Gij = []
for g in xrange(self.nbf):
gmat = zeros((self.nbf,self.nbf),'d')
self.Gij.append(gmat)
gbf = self.bfs[g]
for i in xrange(self.nbf):
ibf = self.bfs[i]
for j in xrange(i+1):
jbf = self.bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
D0 = mkdens(self.orbsa,0,self.nalpha)+mkdens(self.orbsb,0,self.nbeta)
J0 = getJ(self.Ints,D0)
Vfa = ((self.nel-1.)/self.nel)*J0
self.H0 = self.h + Vfa
self.b = zeros(2*self.nbf,'d')
return
def iterate(self,**opts):
self.etemp = opts.get("etemp",False)
self.iter = 0
logging.debug("iter Energy <b|b>")
logging.debug("---- ------ -----")
self.b = fminBFGS(self.get_energy,self.b,self.get_gradient,logger=logging)
return
def get_energy(self,b):
self.iter += 1
ba = b[:self.nbf]
bb = b[self.nbf:]
self.Hoepa = get_Hoep(ba,self.H0,self.Gij)
self.Hoepb = get_Hoep(bb,self.H0,self.Gij)
self.orbea,self.orbsa = geigh(self.Hoepa,self.S)
self.orbeb,self.orbsb = geigh(self.Hoepb,self.S)
if self.etemp:
self.Da,entropya = mkdens_fermi(2*self.nalpha,self.orbea,self.orbsa,
self.etemp)
self.Db,entropyb = mkdens_fermi(2*self.nbeta,self.orbeb,self.orbsb,
self.etemp)
self.entropy = 0.5*(entropya+entropyb)
else:
self.Da = mkdens(self.orbsa,0,self.nalpha)
self.Db = mkdens(self.orbsb,0,self.nbeta)
self.entropy=0
J = getJ(self.Ints,self.Da+self.Db)
Ka = getK(self.Ints,self.Da)
Kb = getK(self.Ints,self.Db)
self.Fa = self.h + J - Ka
self.Fb = self.h + J - Kb
self.energy = 0.5*(trace2(self.h+self.Fa,self.Da) +
trace2(self.h+self.Fb,self.Db))\
+ self.Enuke + self.entropy
if self.iter == 1 or self.iter % 10 == 0:
logging.debug("%4d %10.5f %10.5f" % (self.iter,self.energy,dot(b,b)))
return self.energy
def get_gradient(self,b):
energy = self.get_energy(b)
Fmoa = simx(self.Fa,self.orbsa)
Fmob = simx(self.Fb,self.orbsb)
bp = zeros(2*self.nbf,'d')
for g in xrange(self.nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = simx(self.Gij[g],self.orbsa)
# Now sum the appropriate terms to get the b gradient
for i in xrange(self.nalpha):
for a in xrange(self.nalpha,self.norb):
bp[g] += Fmoa[i,a]*Gmo[i,a]/(self.orbea[i]-self.orbea[a])
for g in xrange(self.nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = simx(self.Gij[g],self.orbsb)
# Now sum the appropriate terms to get the b gradient
for i in xrange(self.nbeta):
for a in xrange(self.nbeta,self.norb):
bp[self.nbf+g] += Fmob[i,a]*Gmo[i,a]/(self.orbeb[i]-self.orbeb[a])
#logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
return bp
def exx(atoms,orbs,**opts):
return oep_hf(atoms,orbs,**opts)
def oep_hf(atoms,orbs,**opts):
"""oep_hf - Form the optimized effective potential for HF exchange.
See notes on options and other args in oep routine.
"""
return oep(atoms,orbs,get_exx_energy,get_exx_gradient,**opts)
def oep(atoms,orbs,energy_func,grad_func=None,**opts):
"""oep - Form the optimized effective potential for a given energy expression
oep(atoms,orbs,energy_func,grad_func=None,**opts)
atoms A Molecule object containing a list of the atoms
orbs A matrix of guess orbitals
energy_func The function that returns the energy for the given method
grad_func The function that returns the force for the given method
Options
-------
verbose False Output terse information to stdout (default)
True Print out additional information
ETemp False Use ETemp value for finite temperature DFT (default)
float Use (float) for the electron temperature
bfs None The basis functions to use. List of CGBF's
basis_data None The basis data to use to construct bfs
integrals None The one- and two-electron integrals to use
If not None, S,h,Ints
"""
verbose = opts.get('verbose',False)
ETemp = opts.get('ETemp',False)
opt_method = opts.get('opt_method','BFGS')
bfs = opts.get('bfs',None)
if not bfs:
basis = opts.get('basis',None)
bfs = getbasis(atoms,basis)
# The basis set for the potential can be set different from
# that used for the wave function
pbfs = opts.get('pbfs',None)
if not pbfs: pbfs = bfs
npbf = len(pbfs)
integrals = opts.get('integrals',None)
if integrals:
S,h,Ints = integrals
else:
S,h,Ints = getints(bfs,atoms)
nel = atoms.get_nel()
nocc,nopen = atoms.get_closedopen()
Enuke = atoms.get_enuke()
# Form the OEP using Yang/Wu, PRL 89 143002 (2002)
nbf = len(bfs)
norb = nbf
bp = zeros(nbf,'d')
bvec = opts.get('bvec',None)
if bvec:
assert len(bvec) == npbf
b = array(bvec)
else:
b = zeros(npbf,'d')
# Form and store all of the three-center integrals
# we're going to need.
# These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
# as opposed to MO)
# N^3 storage -- obviously you don't want to do this for
# very large systems
Gij = []
for g in xrange(npbf):
gmat = zeros((nbf,nbf),'d')
Gij.append(gmat)
gbf = pbfs[g]
for i in xrange(nbf):
ibf = bfs[i]
for j in xrange(i+1):
jbf = bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
# Compute the Fermi-Amaldi potential based on the LDA density.
# We're going to form this matrix from the Coulombic matrix that
# arises from the input orbitals. D0 and J0 refer to the density
# matrix and corresponding Coulomb matrix
D0 = mkdens(orbs,0,nocc)
J0 = getJ(Ints,D0)
Vfa = (2*(nel-1.)/nel)*J0
H0 = h + Vfa
b = fminBFGS(energy_func,b,grad_func,
(nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij),
logger=logging)
energy,orbe,orbs = energy_func(b,nbf,nel,nocc,ETemp,Enuke,
S,h,Ints,H0,Gij,return_flag=1)
return energy,orbe,orbs
def get_exx_energy(b,nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij,**opts):
"""Computes the energy for the OEP/HF functional
Options:
return_flag 0 Just return the energy
1 Return energy, orbe, orbs
2 Return energy, orbe, orbs, F
"""
return_flag = opts.get('return_flag',0)
Hoep = get_Hoep(b,H0,Gij)
orbe,orbs = geigh(Hoep,S)
if ETemp:
efermi = get_efermi(nel,orbe,ETemp)
occs = get_fermi_occs(efermi,orbe,ETemp)
D = mkdens_occs(orbs,occs)
entropy = get_entropy(occs,ETemp)
else:
D = mkdens(orbs,0,nocc)
F = get_fock(D,Ints,h)
energy = trace2(h+F,D)+Enuke
if ETemp: energy += entropy
iref = nel/2
gap = 627.51*(orbe[iref]-orbe[iref-1])
logging.debug("EXX Energy, B, Gap: %10.5f %10.5f %10.5f"
% (energy,sqrt(dot(b,b)),gap))
#logging.debug("%s" % orbe)
if return_flag == 1:
return energy,orbe,orbs
elif return_flag == 2:
return energy,orbe,orbs,F
return energy
def get_exx_gradient(b,nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij,**opts):
"""Computes the gradient for the OEP/HF functional.
return_flag 0 Just return gradient
1 Return energy,gradient
2 Return energy,gradient,orbe,orbs
"""
# Dump the gradient every 10 steps so we can restart...
global gradcall
gradcall += 1
#if gradcall % 5 == 0: logging.debug("B vector:\n%s" % b)
# Form the new potential and the new orbitals
energy,orbe,orbs,F = get_exx_energy(b,nbf,nel,nocc,ETemp,Enuke,
S,h,Ints,H0,Gij,return_flag=2)
Fmo = matrixmultiply(transpose(orbs),matrixmultiply(F,orbs))
norb = nbf
bp = zeros(nbf,'d') # dE/db
for g in xrange(nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = matrixmultiply(transpose(orbs),matrixmultiply(Gij[g],orbs))
# Now sum the appropriate terms to get the b gradient
for i in xrange(nocc):
for a in xrange(nocc,norb):
bp[g] = bp[g] + Fmo[i,a]*Gmo[i,a]/(orbe[i]-orbe[a])
#logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
return_flag = opts.get('return_flag',0)
if return_flag == 1:
return energy,bp
elif return_flag == 2:
return energy,bp,orbe,orbs
return bp
def get_Hoep(b,H0,Gij):
Hoep = H0
# Add the contributions from the gaussian potential functions
# H[ij] += b[g]*<ibf|g|jbf>
for g in xrange(len(b)):
Hoep = Hoep + b[g]*Gij[g]
return Hoep
# Here's a much faster way to do this. Haven't figured out how to
# do it for more generic functions like OEP-GVB
def oep_hf_an(atoms,orbs,**opts):
"""oep_hf - Form the optimized effective potential for HF exchange.
Implementation of Wu and Yang's Approximate Newton Scheme
from J. Theor. Comp. Chem. 2, 627 (2003).
oep_hf(atoms,orbs,**opts)
atoms A Molecule object containing a list of the atoms
orbs A matrix of guess orbitals
Options
-------
bfs None The basis functions to use for the wfn
pbfs None The basis functions to use for the pot
basis_data None The basis data to use to construct bfs
integrals None The one- and two-electron integrals to use
If not None, S,h,Ints
"""
maxiter = opts.get('maxiter',100)
tol = opts.get('tol',1e-5)
bfs = opts.get('bfs',None)
if not bfs:
basis = opts.get('basis',None)
bfs = getbasis(atoms,basis)
# The basis set for the potential can be set different from
# that used for the wave function
pbfs = opts.get('pbfs',None)
if not pbfs: pbfs = bfs
npbf = len(pbfs)
integrals = opts.get('integrals',None)
if integrals:
S,h,Ints = integrals
else:
S,h,Ints = getints(bfs,atoms)
nel = atoms.get_nel()
nocc,nopen = atoms.get_closedopen()
Enuke = atoms.get_enuke()
# Form the OEP using Yang/Wu, PRL 89 143002 (2002)
nbf = len(bfs)
norb = nbf
bp = zeros(nbf,'d')
bvec = opts.get('bvec',None)
if bvec:
assert len(bvec) == npbf
b = array(bvec)
else:
b = zeros(npbf,'d')
# Form and store all of the three-center integrals
# we're going to need.
# These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
# as opposed to MO)
# N^3 storage -- obviously you don't want to do this for
# very large systems
Gij = []
for g in xrange(npbf):
gmat = zeros((nbf,nbf),'d')
Gij.append(gmat)
gbf = pbfs[g]
for i in xrange(nbf):
ibf = bfs[i]
for j in xrange(i+1):
jbf = bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
# Compute the Fermi-Amaldi potential based on the LDA density.
# We're going to form this matrix from the Coulombic matrix that
# arises from the input orbitals. D0 and J0 refer to the density
# matrix and corresponding Coulomb matrix
D0 = mkdens(orbs,0,nocc)
J0 = getJ(Ints,D0)
Vfa = (2*(nel-1.)/nel)*J0
H0 = h + Vfa
b = zeros(nbf,'d')
eold = 0
for iter in xrange(maxiter):
Hoep = get_Hoep(b,H0,Gij)
orbe,orbs = geigh(Hoep,S)
D = mkdens(orbs,0,nocc)
Vhf = get2JmK(Ints,D)
energy = trace2(2*h+Vhf,D)+Enuke
if abs(energy-eold) < tol:
break
else:
eold = energy
logging.debug("OEP AN Opt: %d %f" % (iter,energy))
dV_ao = Vhf-Vfa
dV = matrixmultiply(transpose(orbs),matrixmultiply(dV_ao,orbs))
X = zeros((nbf,nbf),'d')
c = zeros(nbf,'d')
Gkt = zeros((nbf,nbf),'d')
for k in xrange(nbf):
# This didn't work; in fact, it made things worse:
Gk = matrixmultiply(transpose(orbs),matrixmultiply(Gij[k],orbs))
for i in xrange(nocc):
for a in xrange(nocc,norb):
c[k] += dV[i,a]*Gk[i,a]/(orbe[i]-orbe[a])
for l in xrange(nbf):
Gl = matrixmultiply(transpose(orbs),matrixmultiply(Gij[l],orbs))
for i in xrange(nocc):
for a in xrange(nocc,norb):
X[k,l] += Gk[i,a]*Gl[i,a]/(orbe[i]-orbe[a])
# This should actually be a pseudoinverse...
b = solve(X,c)
logger.info("Final OEP energy = %f" % energy)
return energy,orbe,orbs
def oep_uhf_an(atoms,orbsa,orbsb,**opts):
"""oep_hf - Form the optimized effective potential for HF exchange.
Implementation of Wu and Yang's Approximate Newton Scheme
from J. Theor. Comp. Chem. 2, 627 (2003).
oep_uhf(atoms,orbs,**opts)
atoms A Molecule object containing a list of the atoms
orbs A matrix of guess orbitals
Options
-------
bfs None The basis functions to use for the wfn
pbfs None The basis functions to use for the pot
basis_data None The basis data to use to construct bfs
integrals None The one- and two-electron integrals to use
If not None, S,h,Ints
"""
maxiter = opts.get('maxiter',100)
tol = opts.get('tol',1e-5)
ETemp = opts.get('ETemp',False)
bfs = opts.get('bfs',None)
if not bfs:
basis = opts.get('basis',None)
bfs = getbasis(atoms,basis)
# The basis set for the potential can be set different from
# that used for the wave function
pbfs = opts.get('pbfs',None)
if not pbfs: pbfs = bfs
npbf = len(pbfs)
integrals = opts.get('integrals',None)
if integrals:
S,h,Ints = integrals
else:
S,h,Ints = getints(bfs,atoms)
nel = atoms.get_nel()
nclosed,nopen = atoms.get_closedopen()
nalpha,nbeta = nclosed+nopen,nclosed
Enuke = atoms.get_enuke()
# Form the OEP using Yang/Wu, PRL 89 143002 (2002)
nbf = len(bfs)
norb = nbf
ba = zeros(npbf,'d')
bb = zeros(npbf,'d')
# Form and store all of the three-center integrals
# we're going to need.
# These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
# as opposed to MO)
# N^3 storage -- obviously you don't want to do this for
# very large systems
Gij = []
for g in xrange(npbf):
gmat = zeros((nbf,nbf),'d')
Gij.append(gmat)
gbf = pbfs[g]
for i in xrange(nbf):
ibf = bfs[i]
for j in xrange(i+1):
jbf = bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
# Compute the Fermi-Amaldi potential based on the LDA density.
# We're going to form this matrix from the Coulombic matrix that
# arises from the input orbitals. D0 and J0 refer to the density
# matrix and corresponding Coulomb matrix
D0 = mkdens(orbsa,0,nalpha)+mkdens(orbsb,0,nbeta)
J0 = getJ(Ints,D0)
Vfa = ((nel-1.)/nel)*J0
H0 = h + Vfa
eold = 0
for iter in xrange(maxiter):
Hoepa = get_Hoep(ba,H0,Gij)
        Hoepb = get_Hoep(bb,H0,Gij)  # fixed: the beta potential must use bb, not ba
orbea,orbsa = geigh(Hoepa,S)
orbeb,orbsb = geigh(Hoepb,S)
if ETemp:
efermia = get_efermi(2*nalpha,orbea,ETemp)
occsa = get_fermi_occs(efermia,orbea,ETemp)
Da = mkdens_occs(orbsa,occsa)
efermib = get_efermi(2*nbeta,orbeb,ETemp)
occsb = get_fermi_occs(efermib,orbeb,ETemp)
Db = mkdens_occs(orbsb,occsb)
entropy = 0.5*(get_entropy(occsa,ETemp)+get_entropy(occsb,ETemp))
else:
Da = mkdens(orbsa,0,nalpha)
Db = mkdens(orbsb,0,nbeta)
J = getJ(Ints,Da) + getJ(Ints,Db)
Ka = getK(Ints,Da)
Kb = getK(Ints,Db)
energy = (trace2(2*h+J-Ka,Da)+trace2(2*h+J-Kb,Db))/2\
+Enuke
if ETemp: energy += entropy
if abs(energy-eold) < tol:
break
else:
eold = energy
logging.debug("OEP AN Opt: %d %f" % (iter,energy))
# Do alpha and beta separately
# Alphas
dV_ao = J-Ka-Vfa
dV = matrixmultiply(orbsa,matrixmultiply(dV_ao,transpose(orbsa)))
X = zeros((nbf,nbf),'d')
c = zeros(nbf,'d')
for k in xrange(nbf):
Gk = matrixmultiply(orbsa,matrixmultiply(Gij[k],
transpose(orbsa)))
for i in xrange(nalpha):
for a in xrange(nalpha,norb):
c[k] += dV[i,a]*Gk[i,a]/(orbea[i]-orbea[a])
for l in xrange(nbf):
Gl = matrixmultiply(orbsa,matrixmultiply(Gij[l],
transpose(orbsa)))
for i in xrange(nalpha):
for a in xrange(nalpha,norb):
X[k,l] += Gk[i,a]*Gl[i,a]/(orbea[i]-orbea[a])
# This should actually be a pseudoinverse...
ba = solve(X,c)
# Betas
dV_ao = J-Kb-Vfa
dV = matrixmultiply(orbsb,matrixmultiply(dV_ao,transpose(orbsb)))
X = zeros((nbf,nbf),'d')
c = zeros(nbf,'d')
for k in xrange(nbf):
Gk = matrixmultiply(orbsb,matrixmultiply(Gij[k],
transpose(orbsb)))
for i in xrange(nbeta):
for a in xrange(nbeta,norb):
c[k] += dV[i,a]*Gk[i,a]/(orbeb[i]-orbeb[a])
for l in xrange(nbf):
Gl = matrixmultiply(orbsb,matrixmultiply(Gij[l],
transpose(orbsb)))
for i in xrange(nbeta):
for a in xrange(nbeta,norb):
X[k,l] += Gk[i,a]*Gl[i,a]/(orbeb[i]-orbeb[a])
# This should actually be a pseudoinverse...
bb = solve(X,c)
logger.info("Final OEP energy = %f" % energy)
return energy,(orbea,orbeb),(orbsa,orbsb)
def test_old():
from PyQuante.Molecule import Molecule
from PyQuante.Ints import getbasis,getints
from PyQuante.hartree_fock import rhf
logging.basicConfig(level=logging.DEBUG,format="%(message)s")
#mol = Molecule('HF',[('H',(0.,0.,0.)),('F',(0.,0.,0.898369))],
# units='Angstrom')
mol = Molecule('LiH',[(1,(0,0,1.5)),(3,(0,0,-1.5))],units = 'Bohr')
bfs = getbasis(mol)
S,h,Ints = getints(bfs,mol)
print "after integrals"
E_hf,orbe_hf,orbs_hf = rhf(mol,bfs=bfs,integrals=(S,h,Ints),DoAveraging=True)
print "RHF energy = ",E_hf
E_exx,orbe_exx,orbs_exx = exx(mol,orbs_hf,bfs=bfs,integrals=(S,h,Ints))
return
def test():
from PyQuante import Molecule, HFSolver, DFTSolver, UHFSolver
logging.basicConfig(level=logging.DEBUG,format="%(message)s")
mol = Molecule("He",[(2,(0,0,0))])
solver = HFSolver(mol)
solver.iterate()
print "HF energy = ",solver.energy
dft_solver = DFTSolver(mol)
dft_solver.iterate()
print "DFT energy = ",dft_solver.energy
oep = EXXSolver(solver)
# Testing 0 temp
oep.iterate()
# Testing finite temp
oep.iterate(etemp=40000)
return
def utest():
from PyQuante import Molecule, HFSolver, DFTSolver, UHFSolver
logging.basicConfig(level=logging.DEBUG,format="%(message)s")
mol = Molecule("He",[(2,(0,0,0))])
mol = Molecule("Li",[(3,(0,0,0))],multiplicity=2)
solver = UHFSolver(mol)
solver.iterate()
print "HF energy = ",solver.energy
dft_solver = DFTSolver(mol)
dft_solver.iterate()
print "DFT energy = ",dft_solver.energy
oep = UEXXSolver(solver)
# Testing 0 temp
oep.iterate()
# Testing finite temp
oep.iterate(etemp=10000)
return
if __name__ == '__main__':
test()
utest()
|
gabrielelanaro/pyquante
|
PyQuante/OEP.py
|
Python
|
bsd-3-clause
| 25,758
|
[
"Gaussian"
] |
20bac74ef41451d292aea47ac1f5c936f5812ac07e6c7713d28d86e0aa55b43e
|
"""
Purpose: To snag which residues need to be converted to DNA from RNA
Syntax: python R2Dparser.py ~/Location/outputFileName
*Replace Location with location of the output file from charmm that lists the residues needing to be converted
Output: a file named patch.out with the patch expressions for in xrange(1,10):
pass atoms from RNA to DNA
Note: The outputFileName from charmm must be
Written by: Connor Fourt
Last Updated: May 23 2014
Written: May 23 2014
"""
from __future__ import print_function
from sys import argv
import re
#taking in arguments and assigning them to i
i = argv
#making filenames
inputFileName = i[1]
outputFileName = "patch.out"
#priming files
f = open(inputFileName, 'r')
g = open(outputFileName, 'w')
f.seek(0) #Reset to beginning of file
#setting variables
patchesFound = 0
previousPatchFound = 0
lineNumber =0
segid = raw_input("What is the segid? (should be defined at the top of your charmm input file)> ")
# Apply the default before appending the separator space; the original check
# ran after the space was appended and so could never trigger.
if len(segid) == 0:
    segid = "segid"
segid = segid + ' '
#programmatic sugar
print ('\nReading:', inputFileName)
print ('Writing to: ', outputFileName + '\n')
#console output
print('COPY BELOW THIS LINE'+ '\n')
print('-'*50 + '\n')
#finding patches and writing them to a file
for line in f:
lineNumber = lineNumber + 1
match = re.search(r'\d+\s+(\d+)\s+(\w\w\w)', line)
numberMatch = re.search(r'9999',line)
if match and numberMatch and match.group(1) and not previousPatchFound == match.group(1) :
#updating the patch found variable so that no duplicates are found for a single residue
previousPatchFound = match.group(1)
atomNumber = match.group(1)
residue = match.group(2)
    #if the residue has a cytosine or thymine base, use the pyrimidine patch
if match.group(2) == "CYT" or match.group(2) == "THY":
lineToWrite = ("patch deo1 " + str(segid) + str(match.group(1)) + '\n')
g.write(lineToWrite)
print (lineToWrite.rstrip('\n'))
patchesFound = patchesFound + 1
    #if the residue has a guanine or adenine base, use the purine patch
elif match.group(2) == "GUA" or match.group(2) == "ADE":
lineToWrite = ("patch deo2 " + str(segid) + str(match.group(1))+ '\n')
g.write(lineToWrite)
print (lineToWrite.rstrip('\n'))
patchesFound = patchesFound + 1
print ()
#Prepending header and directions to patch.out
g.close()
g = open(outputFileName, 'r')
temp = g.read()
g.close()
g = open(outputFileName, 'w')
g.write(str(patchesFound) + ' patches found from ' + inputFileName + '\n'*2)
g.write('! deo1 is for pyrimidines' + '\n')
g.write('! deo2 is for purines' + '\n'*2)
g.write('COPY BELOW THIS LINE'+ '\n')
g.write('-'*50 + '\n'*2)
g.write(temp)
print ('Finished finding ' + str(patchesFound) + ' patches.')
print ('This can also be found in the file "patch.out" \n')
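# Hedged note (illustrative): each patch line written to patch.out has the form
#   patch deo1 <segid> <number>   (pyrimidines: CYT/THY)
#   patch deo2 <segid> <number>   (purines: GUA/ADE)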
|
cfourt/R2DParser
|
R2Dparser.py
|
Python
|
mit
| 2,816
|
[
"CHARMM"
] |
d48ad072984ff4ccb6f4f13cc058cce65bfb9395d786ef85f2118eb43e5b5c33
|
#!/usr/bin/env python
#
'''
Perform analysis on whole 2D data sets.
'''
import time
import matplotlib
matplotlib.use('agg')
import grid_cell_model.visitors as vis
import grid_cell_model.visitors.spikes
import grid_cell_model.visitors.bumps
import grid_cell_model.visitors.signals
import grid_cell_model.visitors.plotting
import grid_cell_model.visitors.plotting.spikes
import grid_cell_model.visitors.plotting.grids
import grid_cell_model.visitors.plotting.grids_ipc
from grid_cell_model.parameters import JobTrialSpace2D
from grid_cell_model.submitting import flagparse
import common.analysis as common
###############################################################################
parser = flagparse.FlagParser()
parser.add_argument('--row', type=int, required=True)
parser.add_argument('--col', type=int, required=True)
parser.add_argument('--shapeRows', type=int, required=True)
parser.add_argument('--shapeCols', type=int, required=True)
parser.add_argument('--forceUpdate', type=int, required=True)
parser.add_argument("--output_dir", type=str, required=True)
parser.add_argument("--job_num", type=int) # unused
parser.add_argument("--type", type=str, choices=common.allowedTypes, required=True, nargs="+")
parser.add_argument("--bumpSpeedMax", type=float)
o = parser.parse_args()
###############################################################################
startT = time.time()
shape = (o.shapeRows, o.shapeCols)
dataPoints = [(o.row, o.col)]
trialNums = None
sp = JobTrialSpace2D(shape, o.output_dir, dataPoints=dataPoints)
forceUpdate = bool(o.forceUpdate)
# Common parameters
isBump_win_dt = 125.
isBump_tstart = 0.
isBump_tend = None
isBump_readme = 'Bump position estimation. Whole simulation'
# Create visitors
if common.bumpType in o.type:
bumpVisitor = vis.bumps.BumpFittingVisitor(
forceUpdate=forceUpdate,
tstart='full',
readme='Bump fitting. Whole simulation, starting at the start of theta stimulation.',
bumpERoot='bump_e_full',
bumpIRoot='bump_i_full')
FRVisitor = vis.spikes.FiringRateVisitor(winLen=2., # ms
winDt=.5, # ms
forceUpdate=forceUpdate)
FRPlotter = vis.plotting.spikes.FiringRatePlotter(rootDir='pop_fr_plots')
isBumpVisitor = vis.bumps.BumpPositionVisitor(
tstart=isBump_tstart,
tend=isBump_tend,
win_dt=isBump_win_dt,
readme=isBump_readme,
forceUpdate=forceUpdate)
sp.visit(bumpVisitor)
sp.visit(isBumpVisitor)
sp.visit(FRVisitor)
#sp.visit(FRPlotter)
if common.gammaType in o.type:
monName = 'stateMonF_e'
stateList = ['I_clamp_GABA_A']
statsVisitor_e = vis.spikes.SpikeStatsVisitor("spikeMon_e",
forceUpdate=forceUpdate)
ACVisitor = vis.signals.AutoCorrelationVisitor(monName, stateList,
forceUpdate=forceUpdate)
sp.visit(ACVisitor)
sp.visit(statsVisitor_e)
if common.velocityType in o.type:
speedEstimator = vis.bumps.SpeedEstimator(
forceUpdate=forceUpdate,
axis='vertical',
win_dt=50.0)
gainEstimator = vis.bumps.VelocityGainEstimator(
o.bumpSpeedMax,
forceUpdate=forceUpdate,
maxFitRangeIdx=10)
speedPlotter = vis.bumps.SpeedPlotter(plotFittedLine=True)
sp.visit(speedEstimator, trialList='all-at-once')
sp.visit(gainEstimator, trialList='all-at-once')
sp.visit(speedPlotter, trialList='all-at-once')
if common.gridsType in o.type:
po = vis.plotting.grids.GridPlotVisitor.PlotOptions()
gridVisitor = vis.plotting.grids.GridPlotVisitor(o.output_dir,
spikeType='E',
plotOptions=po,
minGridnessT=300e3,
forceUpdate=o.forceUpdate)
gridVisitor_i = vis.plotting.grids.IGridPlotVisitor(o.output_dir,
plotOptions=po,
minGridnessT=300e3,
forceUpdate=o.forceUpdate)
isBumpVisitor = vis.bumps.BumpPositionVisitor(tstart=isBump_tstart,
tend=isBump_tend,
win_dt=isBump_win_dt,
readme=isBump_readme,
forceUpdate=forceUpdate,
bumpERoot='bump_e_isBump')
#ISIVisitor = plotting_visitors.ISIPlotVisitor(o.output_dir,
# spikeType = spikeType,
# nRows = 5, nCols = 5, range=[0, 1000], bins=40,
# ISINWindows=20)
FRVisitor = vis.spikes.FiringRateVisitor(winLen=2., # ms
winDt=.5, # ms
forceUpdate=forceUpdate,
sliding_analysis=False)
sp.visit(gridVisitor)
sp.visit(gridVisitor_i)
#sp.visit(isBumpVisitor)
#sp.visit(ISIVisitor)
sp.visit(FRVisitor)
if common.gridsIPCType in o.type:
# This is solely for the purpose of analyzing simulations where a
# population of place cells is connected to I cells.
po = vis.plotting.grids_ipc.GridPlotVisitor.PlotOptions()
ipc_gridVisitor = vis.plotting.grids_ipc.GridPlotVisitor(
o.output_dir,
spikeType='E',
plotOptions=po,
minGridnessT=300e3,
forceUpdate=o.forceUpdate)
ipc_gridVisitor_i = vis.plotting.grids_ipc.IGridPlotVisitor(
o.output_dir,
plotOptions=po,
minGridnessT=300e3,
forceUpdate=o.forceUpdate)
ipc_FRVisitor = vis.spikes.FiringRateVisitor(
winLen=2., # ms
winDt=.5, # ms
forceUpdate=forceUpdate,
sliding_analysis=False)
sp.visit(ipc_gridVisitor)
sp.visit(ipc_gridVisitor_i)
sp.visit(ipc_FRVisitor)
if common.posType in o.type:
bumpPosVisitor = vis.bumps.BumpPositionVisitor(
tstart=0,
tend=None,
win_dt=125.0,
readme='Bump position estimation. Whole simulation.',
forceUpdate=forceUpdate)
sp.visit(bumpPosVisitor)
print('Total time: %.3f s' % (time.time() - startT))
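# --- Hedged usage sketch (not part of the original script; flag values are
# illustrative, and the '--type' value assumes common.allowedTypes contains
# 'grids') ---
#
#   python analysis_EI.py --row 0 --col 0 --shapeRows 31 --shapeCols 31 \
#       --forceUpdate 0 --output_dir output/ --type grids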
|
MattNolanLab/ei-attractor
|
grid_cell_model/simulations/007_noise/analysis_EI.py
|
Python
|
gpl-3.0
| 6,633
|
[
"VisIt"
] |
6ca2cb0bd9bdddd6111074929cc0b64d4f27003d84bf9278567a031cbaf47158
|
"""0MQ Error classes and functions."""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class ZMQBaseError(Exception):
"""Base exception class for 0MQ errors in Python."""
pass
class ZMQError(ZMQBaseError):
"""Wrap an errno style error.
Parameters
----------
errno : int
The ZMQ errno or None. If None, then ``zmq_errno()`` is called and
used.
msg : string
Description of the error or None.
"""
errno = None
def __init__(self, errno=None, msg=None):
"""Wrap an errno style error.
Parameters
----------
errno : int
The ZMQ errno or None. If None, then ``zmq_errno()`` is called and
used.
msg : string
Description of the error or None.
"""
from zmq.backend import strerror, zmq_errno
if errno is None:
errno = zmq_errno()
if isinstance(errno, int):
self.errno = errno
if msg is None:
self.strerror = strerror(errno)
else:
self.strerror = msg
else:
if msg is None:
self.strerror = str(errno)
else:
self.strerror = msg
# flush signals, because there could be a SIGINT
# waiting to pounce, resulting in uncaught exceptions.
# Doing this here means getting SIGINT during a blocking
# libzmq call will raise a *catchable* KeyboardInterrupt
# PyErr_CheckSignals()
def __str__(self):
return self.strerror
def __repr__(self):
return "ZMQError('%s')"%self.strerror
class ZMQBindError(ZMQBaseError):
"""An error for ``Socket.bind_to_random_port()``.
See Also
--------
.Socket.bind_to_random_port
"""
pass
class NotDone(ZMQBaseError):
"""Raised when timeout is reached while waiting for 0MQ to finish with a Message
See Also
--------
.MessageTracker.wait : object for tracking when ZeroMQ is done
"""
pass
class ContextTerminated(ZMQError):
"""Wrapper for zmq.ETERM
.. versionadded:: 13.0
"""
pass
class Again(ZMQError):
"""Wrapper for zmq.EAGAIN
.. versionadded:: 13.0
"""
pass
def _check_rc(rc, errno=None):
"""internal utility for checking zmq return condition
and raising the appropriate Exception class
"""
if rc < 0:
from zmq.backend import zmq_errno
if errno is None:
errno = zmq_errno()
from zmq import EAGAIN, ETERM
if errno == EAGAIN:
raise Again(errno)
elif errno == ETERM:
raise ContextTerminated(errno)
else:
raise ZMQError(errno)
_zmq_version_info = None
_zmq_version = None
class ZMQVersionError(NotImplementedError):
"""Raised when a feature is not provided by the linked version of libzmq.
.. versionadded:: 14.2
"""
min_version = None
def __init__(self, min_version, msg='Feature'):
global _zmq_version
if _zmq_version is None:
from zmq import zmq_version
_zmq_version = zmq_version()
self.msg = msg
self.min_version = min_version
self.version = _zmq_version
def __repr__(self):
return "ZMQVersionError('%s')" % str(self)
def __str__(self):
return "%s requires libzmq >= %s, have %s" % (self.msg, self.min_version, self.version)
def _check_version(min_version_info, msg='Feature'):
"""Check for libzmq
raises ZMQVersionError if current zmq version is not at least min_version
min_version_info is a tuple of integers, and will be compared against zmq.zmq_version_info().
"""
global _zmq_version_info
if _zmq_version_info is None:
from zmq import zmq_version_info
_zmq_version_info = zmq_version_info()
if _zmq_version_info < min_version_info:
min_version = '.'.join(str(v) for v in min_version_info)
raise ZMQVersionError(min_version, msg)
__all__ = [
'ZMQBaseError',
'ZMQBindError',
'ZMQError',
'NotDone',
'ContextTerminated',
'Again',
'ZMQVersionError',
]
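# Usage sketch (added for illustration, not part of the original module): how
# the exception classes and version check are typically exercised. Assumes
# pyzmq is installed; the endpoint and timeout below are hypothetical.
if __name__ == '__main__':
    import zmq
    _check_version((3, 2), 'Example feature')  # ZMQVersionError if libzmq < 3.2
    ctx = zmq.Context()
    sock = ctx.socket(zmq.REQ)
    sock.setsockopt(zmq.RCVTIMEO, 100)  # ms; a timed-out recv raises Again
    sock.connect('tcp://127.0.0.1:5555')
    sock.send(b'ping')
    try:
        sock.recv()
    except Again:
        print('no reply within the timeout (EAGAIN)')
    except ContextTerminated:
        print('context terminated while blocking (ETERM)')
    finally:
        sock.close(linger=0)
        ctx.term()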
|
ellisonbg/pyzmq
|
zmq/error.py
|
Python
|
lgpl-3.0
| 4,885
|
[
"Brian"
] |
0619ba7be8c53172d88a870168209db8c98ae9235f6e585ab7ec5705d01bce68
|
from ase import Atom, Atoms
from gpaw import GPAW
L = 6
name='N2'
a = Atoms([Atom('N', (L/2.+1.098/2.,L/2.,L/2.)),
Atom('N', (L/2.-1.098/2.,L/2.,L/2.))], cell=(L, L, L), pbc=False)
calc = GPAW(h=0.22, xc='PBE', convergence={'eigenstates': 1.0e-7},
            stencils=(3, 3), txt=name + '.txt',
            eigensolver='rmm-diis')
a.set_calculator(calc)
e_n2 = a.get_potential_energy()
n2t = calc.get_xc_difference('TPSS')
n2rt = calc.get_xc_difference('revTPSS')
#print >> open('NSCF.txt','a'), name+ "TPSS Energy", e_n2 + n2t, name+ "revTPSS Energy", e_n2 + n2rt
a.calc.set(xc='TPSS')
e_n2t = a.get_potential_energy()
a.calc.set(xc='revTPSS')
e_n2rt = a.get_potential_energy()
name='N'
b = Atoms([Atom('N', (L/2.,L/2.,L/2.),magmom=3)], cell=(L, L, L), pbc=False)
calc = GPAW(h=0.22, xc='PBE', convergence={'eigenstates': 1.0e-7},
            stencils=(3, 3), txt=name + '.txt',
            eigensolver='rmm-diis',
            fixmom=True, hund=True)
b.set_calculator(calc)
e_n = b.get_potential_energy()
nt = calc.get_xc_difference('TPSS')
nrt = calc.get_xc_difference('revTPSS')
#print >> open('NSCF.txt','a'), name+ "TPSS Energy", e_n + nt, name+ "revTPSS Energy", e_n + nrt
b.calc.set(xc='TPSS')
e_nt = b.get_potential_energy()
b.calc.set(xc='revTPSS')
e_nrt = b.get_potential_energy()
print 'Atm. Experiment ', -228.5
print 'Atm. PBE ', (e_n2-2*e_n)*23.06
print 'Atm. TPSS(nsc) ', ((e_n2+n2t)-2*(e_n+nt))*23.06
print 'Atm. TPSS ', (e_n2t-2*e_nt)*23.06
print 'Atm. revTPSS(nsc)', ((e_n2+n2rt)-2*(e_n+nrt))*23.06
print 'Atm. revTPSS ', (e_n2rt-2*e_nrt)*23.06
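# Note (added for illustration): get_xc_difference('TPSS') re-evaluates the XC
# energy non-self-consistently on the converged PBE density, so e_n2 + n2t
# approximates the TPSS total energy without a new SCF cycle ("nsc" rows).
# The factor 23.06 converts eV to kcal/mol (1 eV ~= 23.06 kcal/mol), making
# the atomization energies comparable to the quoted experimental -228.5.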
|
robwarm/gpaw-symm
|
gpaw/test/big/miscellaneous/revtpss_tpss_scf.py
|
Python
|
gpl-3.0
| 1,567
|
[
"ASE",
"GPAW"
] |
543caf77697aa7c6d47201c4f359dc89421c1c2cd16305cb7d6a4c35880a018f
|
"""
10 nov. 2014
iterative mapping copied from hiclib
"""
import os
import tempfile
import gzip
import pysam
import gem
from warnings import warn
N_WINDOWS = 0
def get_intersection(fname1, fname2, out_path, verbose=False):
"""
Merges the two files corresponding to each reads sides. Reads found in both
files are merged and written in an output file.
:param fname1: path to a tab separated file generated by the function
:func:`pytadbit.parsers.sam_parser.parse_sam`
:param fname2: path to a tab separated file generated by the function
:func:`pytadbit.parsers.sam_parser.parse_sam`
    :param out_path: path to an outfile. It will be written in a similar format
       as the inputs
    :param False verbose: if True, print the number of read pairs found
    """
reads_fh = open(out_path, 'w')
reads1 = open(fname1)
line1 = reads1.next()
header1 = ''
while line1.startswith('#'):
header1 += line1
line1 = reads1.next()
read1 = line1.split('\t', 1)[0]
reads2 = open(fname2)
line2 = reads2.next()
header2 = ''
while line2.startswith('#'):
header2 += line2
line2 = reads2.next()
read2 = line2.split('\t', 1)[0]
if header1 != header2:
        raise Exception('input files seem to be mapped onto different chromosomes\n')
# writes header in output
reads_fh.write(header1)
# writes common reads
count = 0
try:
while True:
if read1 == read2:
count += 1
reads_fh.write(line1.strip() + '\t' +
line2.split('\t', 1)[1])
line1 = reads1.next()
read1 = line1.split('\t', 1)[0]
line2 = reads2.next()
read2 = line2.split('\t', 1)[0]
elif line1 > line2:
line2 = reads2.next()
read2 = line2.split('\t', 1)[0]
else:
line1 = reads1.next()
read1 = line1.split('\t', 1)[0]
except StopIteration:
pass
reads_fh.close()
if verbose:
        print 'Found %d pairs of reads mapping uniquely' % count
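# Usage sketch (hypothetical paths, added for illustration): merge the two
# per-end tables produced by parse_sam into a single file of read pairs.
#
#     get_intersection('reads1.tsv', 'reads2.tsv', 'both_ends.tsv',
#                      verbose=True)
#
# Both inputs must share identical headers and be sorted by read ID,
# since the merge walks the two files in lock-step.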
def trimming(raw_seq_len, seq_start, min_seq_len):
return seq_start, raw_seq_len - seq_start - min_seq_len
def iterative_mapping(gem_index_path, fastq_path, out_sam_path,
range_start, range_stop, **kwargs):
"""
:param gem_index_path: path to index file created from a reference genome
using gem-index tool
    :param fastq_path: path to a FASTQ file (e.g. 152 bases: the first 76 from
       one end, the next 76 from the other end, both read from left to right)
:param out_sam_path: path to a directory where to store mapped reads in SAM/
BAM format (see option output_is_bam).
:param range_start: list of integers representing the start position of each
read fragment to be mapped (starting at 1 includes the first nucleotide
of the read).
:param range_stop: list of integers representing the end position of each
read fragment to be mapped.
    :param True single_end: when FASTQ contains paired-end flags
:param 4 nthreads: number of threads to use for mapping (number of CPUs)
:param 0.04 max_edit_distance: The maximum number of edit operations allowed
while verifying candidate matches by dynamic programming.
:param 0.04 mismatches: The maximum number of nucleotide substitutions
allowed while mapping each k-mer. It is always guaranteed that, however
other options are chosen, all the matches up to the specified number of
substitutions will be found by the program.
    :param -1 max_reads_per_chunk: maximum number of reads to process at a time.
       If -1, all reads will be processed in one run (more RAM needed).
:param False output_is_bam: Use binary (compressed) form of generated
out-files with mapped reads (recommended to save disk space).
:param /tmp temp_dir: important to change. Intermediate FASTQ files will be
written there.
:returns: a list of paths to generated outfiles. To be passed to
:func:`pytadbit.parsers.sam_parser.parse_sam`
"""
gem_index_path = os.path.abspath(os.path.expanduser(gem_index_path))
fastq_path = os.path.abspath(os.path.expanduser(fastq_path))
out_sam_path = os.path.abspath(os.path.expanduser(out_sam_path))
single_end = kwargs.get('single_end' , True)
max_edit_distance = kwargs.get('max_edit_distance' , 0.04)
mismatches = kwargs.get('mismatches' , 0.04)
nthreads = kwargs.get('nthreads' , 4)
max_reads_per_chunk = kwargs.get('max_reads_per_chunk' , -1)
out_files = kwargs.get('out_files' , [])
output_is_bam = kwargs.get('output_is_bam' , False)
temp_dir = os.path.abspath(os.path.expanduser(
kwargs.get('temp_dir', tempfile.gettempdir())))
# check kwargs
for kw in kwargs:
if not kw in ['single_end', 'nthreads', 'max_edit_distance',
'mismatches', 'max_reads_per_chunk',
'out_files', 'output_is_bam', 'temp_dir']:
            warn('WARNING: %s is not a usual keyword, misspelled?' % kw)
# check windows:
    if (len(set(zip(range_start, range_stop))) < len(range_start) or
            len(range_start) != len(range_stop)):
        raise Exception('ERROR: range_start and range_stop should have the ' +
                        'same sizes and windows should be unique.')
if any([i >= j for i, j in zip(range_start, range_stop)]):
raise Exception('ERROR: start positions should always be lower than ' +
'stop positions.')
if any([i <= 0 for i in range_start]):
raise Exception('ERROR: start positions should be strictly positive.')
# create directories
for rep in [temp_dir, os.path.split(out_sam_path)[0]]:
try:
os.mkdir(rep)
except OSError, error:
if error.strerror != 'File exists':
raise error
#get the length of a read
if fastq_path.endswith('.gz'):
fastqh = gzip.open(fastq_path)
else:
fastqh = open(fastq_path)
# get the length from the length of the second line, which is the sequence
# can not use the "length" keyword, as it is not always present
try:
_ = fastqh.next()
raw_seq_len = len(fastqh.next().strip())
fastqh.close()
except StopIteration:
raise IOError('ERROR: problem reading %s\n' % fastq_path)
    global N_WINDOWS
    if not N_WINDOWS:
        N_WINDOWS = len(range_start)
# Split input files if required and apply iterative mapping to each
# segment separately.
if max_reads_per_chunk > 0:
kwargs['max_reads_per_chunk'] = -1
print 'Split input file %s into chunks' % fastq_path
chunked_files = _chunk_file(
fastq_path,
os.path.join(temp_dir, os.path.split(fastq_path)[1]),
max_reads_per_chunk * 4)
print '%d chunks obtained' % len(chunked_files)
for i, fastq_chunk_path in enumerate(chunked_files):
            N_WINDOWS = 0
print 'Run iterative_mapping recursively on %s' % fastq_chunk_path
out_files.extend(iterative_mapping(
gem_index_path, fastq_chunk_path,
out_sam_path + '.%d' % (i + 1), range_start[:], range_stop[:],
**kwargs))
for i, fastq_chunk_path in enumerate(chunked_files):
# Delete chunks only if the file was really chunked.
if len(chunked_files) > 1:
print 'Remove the chunks: %s' % ' '.join(chunked_files)
os.remove(fastq_chunk_path)
return out_files
    # start and end positions according to the sequence in the file
    # (1 is subtracted below to convert from 1-based to 0-based coordinates)
try:
seq_end = range_stop.pop(0)
seq_beg = range_start.pop(0)
except IndexError:
return out_files
# define what we trim
seq_len = seq_end - seq_beg
trim_5, trim_3 = trimming(raw_seq_len, seq_beg - 1, seq_len - 1)
# output
local_out_sam = out_sam_path + '.%d:%d-%d' % (
N_WINDOWS - len(range_stop), seq_beg, seq_end)
out_files.append(local_out_sam)
# input
inputf = gem.files.open(fastq_path)
# trimming
trimmed = gem.filter.run_filter(
inputf, ['--hard-trim', '%d,%d' % (trim_5, trim_3)],
threads=nthreads, paired=not single_end)
# mapping
mapped = gem.mapper(trimmed, gem_index_path, min_decoded_strata=0,
max_decoded_matches=2, unique_mapping=False,
max_edit_distance=max_edit_distance,
mismatches=mismatches,
output=temp_dir + '/test.map',
threads=nthreads)
# convert to sam/bam
if output_is_bam:
sam = gem.gem2sam(mapped, index=gem_index_path, threads=nthreads,
single_end=single_end)
_ = gem.sam2bam(sam, output=local_out_sam, threads=nthreads)
else:
sam = gem.gem2sam(mapped, index=gem_index_path, output=local_out_sam,
threads=nthreads, single_end=single_end)
# Recursively go to the next iteration.
unmapped_fastq_path = os.path.split(fastq_path)[1]
if unmapped_fastq_path[-1].isdigit():
unmapped_fastq_path = unmapped_fastq_path.rsplit('.', 1)[0]
unmapped_fastq_path = os.path.join(
temp_dir, unmapped_fastq_path + '.%d:%d-%d' % (
N_WINDOWS - len(range_stop), seq_beg, seq_end))
_filter_unmapped_fastq(fastq_path, local_out_sam, unmapped_fastq_path)
out_files.extend(iterative_mapping(gem_index_path, unmapped_fastq_path,
out_sam_path,
range_start, range_stop, **kwargs))
os.remove(unmapped_fastq_path)
return out_files
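# Usage sketch (hypothetical paths, added for illustration): map windows of
# 25, 50 and 75 bases, each pass re-mapping only the reads that were not
# uniquely mapped with the previous, shorter window:
#
#     sams = iterative_mapping('/data/hg19.gem', '/data/reads_1.fastq',
#                              '/data/out/reads_1.sam',
#                              range_start=[1, 1, 1],
#                              range_stop=[25, 50, 75],
#                              nthreads=8, temp_dir='/data/tmp')
#
# sams is a list of SAM/BAM paths, one per window, to be passed to
# pytadbit.parsers.sam_parser.parse_sam.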
def _line_count(path):
'''Count the number of lines in a file. The function was posted by
Mikola Kharechko on Stackoverflow.
'''
f = _gzopen(path)
lines = 0
buf_size = 1024 * 1024
read_f = f.read # loop optimization
buf = read_f(buf_size)
while buf:
lines += buf.count('\n')
buf = read_f(buf_size)
return lines
def _chunk_file(in_path, out_basename, max_num_lines):
'''Slice lines from a large file.
The line numbering is as in Python slicing notation.
'''
num_lines = _line_count(in_path)
if num_lines <= max_num_lines:
return [in_path, ]
out_paths = []
for i, line in enumerate(_gzopen(in_path)):
if i % max_num_lines == 0:
out_path = out_basename + '.%d' % (i // max_num_lines + 1)
out_paths.append(out_path)
            out_file = open(out_path, 'w')
out_file.write(line)
return out_paths
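# Note (added for illustration): FASTQ records are 4 lines each, which is why
# iterative_mapping above chunks at max_reads_per_chunk * 4 lines per file.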
def _filter_fastq(ids, in_fastq, out_fastq):
'''Filter FASTQ sequences by their IDs.
    Read entries from **in_fastq** and store in **out_fastq** only those
    whose ID is in **ids**.
'''
out_file = open(out_fastq, 'w')
in_file = _gzopen(in_fastq)
while True:
line = in_file.readline()
if not line:
break
if not line.startswith('@'):
raise Exception(
'{0} does not comply with the FASTQ standards.'.format(in_fastq))
fastq_entry = [line, in_file.readline(),
in_file.readline(), in_file.readline()]
read_id = line.split()[0][1:]
if read_id.endswith('/1') or read_id.endswith('/2'):
read_id = read_id[:-2]
if read_id in ids:
out_file.writelines(fastq_entry)
def _filter_unmapped_fastq(in_fastq, in_sam, nonunique_fastq):
'''Read raw sequences from **in_fastq** and alignments from
    **in_sam**, and save the non-uniquely aligned and unmapped sequences
    to **nonunique_fastq**.
'''
samfile = pysam.Samfile(in_sam)
nonunique_ids = set()
for read in samfile:
tags_dict = dict(read.tags)
read_id = read.qname
# If exists, the option 'XS' contains the score of the second
# best alignment. Therefore, its presence means non-unique alignment.
if 'XS' in tags_dict or read.is_unmapped or (
'NH' in tags_dict and int(tags_dict['NH']) > 1):
nonunique_ids.add(read_id)
        # UNMAPPED reads should be included: they still have a ~5% chance of
        # being mapped with larger fragments, so do not do this:
# if 'XS' in tags_dict or (
# 'NH' in tags_dict and int(tags_dict['NH']) > 1):
# nonunique_ids.add(read_id)
_filter_fastq(nonunique_ids, in_fastq, nonunique_fastq)
def _gzopen(path):
if path.endswith('.gz'):
return gzip.open(path)
else:
return open(path)
|
yuanbaowen521/tadbit
|
_pytadbit/mapping/mapper.py
|
Python
|
gpl-3.0
| 12,840
|
[
"pysam"
] |
7e62fc19d819e7a6fcd8a56faa149f4fd6ab00a5b2df7bc4eee077b77ec7ab36
|
"""
Courseware view functions
"""
import logging
import urllib
import json
import cgi
from datetime import datetime
from django.utils import translation
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from django.conf import settings
from django.core.context_processors import csrf
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.utils.timezone import UTC
from django.views.decorators.http import require_GET, require_POST, require_http_methods
from django.http import Http404, HttpResponse, HttpResponseBadRequest
from django.shortcuts import redirect
from certificates import api as certs_api
from edxmako.shortcuts import render_to_response, render_to_string, marketing_link
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from django.db import transaction
from markupsafe import escape
from courseware import grades
from courseware.access import has_access, in_preview_mode, _adjust_start_date_for_beta_testers
from courseware.access_response import StartDateError
from courseware.courses import (
get_courses, get_course, get_course_by_id,
get_studio_url, get_course_with_access,
sort_by_announcement,
sort_by_start_date,
UserNotEnrolled)
from courseware.masquerade import setup_masquerade
from openedx.core.djangoapps.credit.api import (
get_credit_requirement_status,
is_user_eligible_for_credit,
is_credit_course
)
from courseware.models import StudentModuleHistory
from courseware.model_data import FieldDataCache, ScoresClient
from .module_render import toc_for_course, get_module_for_descriptor, get_module, get_module_by_usage_id
from .entrance_exams import (
course_has_entrance_exam,
get_entrance_exam_content,
get_entrance_exam_score,
user_must_complete_entrance_exam,
user_has_passed_entrance_exam
)
from courseware.user_state_client import DjangoXBlockUserStateClient
from course_modes.models import CourseMode
from open_ended_grading import open_ended_notifications
from open_ended_grading.views import StaffGradingTab, PeerGradingTab, OpenEndedGradingTab
from student.models import UserTestGroup, CourseEnrollment
from student.views import is_course_blocked
from util.cache import cache, cache_if_anonymous
from util.date_utils import strftime_localized
from xblock.fragment import Fragment
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem
from xmodule.tabs import CourseTabList
from xmodule.x_module import STUDENT_VIEW
import shoppingcart
from shoppingcart.models import CourseRegistrationCode
from shoppingcart.utils import is_shopping_cart_enabled
from opaque_keys import InvalidKeyError
from util.milestones_helpers import get_prerequisite_courses_display
from microsite_configuration import microsite
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.keys import CourseKey, UsageKey
from instructor.enrollment import uses_shib
from util.db import commit_on_success_with_read_committed
import survey.utils
import survey.views
from util.views import ensure_valid_course_key
from eventtracking import tracker
import analytics
from courseware.url_helpers import get_redirect_url
log = logging.getLogger("edx.courseware")
template_imports = {'urllib': urllib}
CONTENT_DEPTH = 2
def user_groups(user):
"""
TODO (vshnayder): This is not used. When we have a new plan for groups, adjust appropriately.
"""
if not user.is_authenticated():
return []
# TODO: Rewrite in Django
key = 'user_group_names_{user.id}'.format(user=user)
cache_expiration = 60 * 60 # one hour
# Kill caching on dev machines -- we switch groups a lot
group_names = cache.get(key)
if settings.DEBUG:
group_names = None
if group_names is None:
group_names = [u.name for u in UserTestGroup.objects.filter(users=user)]
cache.set(key, group_names, cache_expiration)
return group_names
@ensure_csrf_cookie
@cache_if_anonymous()
def courses(request):
"""
Render "find courses" page. The course selection work is done in courseware.courses.
"""
courses_list = []
course_discovery_meanings = getattr(settings, 'COURSE_DISCOVERY_MEANINGS', {})
if not settings.FEATURES.get('ENABLE_COURSE_DISCOVERY'):
courses_list = get_courses(request.user, request.META.get('HTTP_HOST'))
if microsite.get_value("ENABLE_COURSE_SORTING_BY_START_DATE",
settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"]):
courses_list = sort_by_start_date(courses_list)
else:
courses_list = sort_by_announcement(courses_list)
return render_to_response(
"courseware/courses.html",
{'courses': courses_list, 'course_discovery_meanings': course_discovery_meanings}
)
def render_accordion(user, request, course, chapter, section, field_data_cache):
"""
Draws navigation bar. Takes current position in accordion as
parameter.
If chapter and section are '' or None, renders a default accordion.
course, chapter, and section are the url_names.
Returns the html string
"""
# grab the table of contents
toc = toc_for_course(user, request, course, chapter, section, field_data_cache)
context = dict([
('toc', toc),
('course_id', course.id.to_deprecated_string()),
('csrf', csrf(request)['csrf_token']),
('due_date_display_format', course.due_date_display_format)
] + template_imports.items())
return render_to_string('courseware/accordion.html', context)
def get_current_child(xmodule, min_depth=None):
"""
Get the xmodule.position's display item of an xmodule that has a position and
    children. If the xmodule has no position, or the position is out of bounds,
    return the first child with children extending down to min_depth.
For example, if chapter_one has no position set, with two child sections,
section-A having no children and section-B having a discussion unit,
`get_current_child(chapter, min_depth=1)` will return section-B.
Returns None only if there are no children at all.
"""
def _get_default_child_module(child_modules):
"""Returns the first child of xmodule, subject to min_depth."""
if not child_modules:
default_child = None
elif not min_depth > 0:
default_child = child_modules[0]
else:
content_children = [child for child in child_modules if
child.has_children_at_depth(min_depth - 1) and child.get_display_items()]
default_child = content_children[0] if content_children else None
return default_child
if not hasattr(xmodule, 'position'):
return None
if xmodule.position is None:
return _get_default_child_module(xmodule.get_display_items())
else:
# position is 1-indexed.
pos = xmodule.position - 1
children = xmodule.get_display_items()
if 0 <= pos < len(children):
child = children[pos]
elif len(children) > 0:
# module has a set position, but the position is out of range.
# return default child.
child = _get_default_child_module(children)
else:
child = None
return child
def redirect_to_course_position(course_module, content_depth):
"""
Return a redirect to the user's current place in the course.
If this is the user's first time, redirects to COURSE/CHAPTER/SECTION.
    If this isn't the user's first time, redirects to COURSE/CHAPTER,
and the view will find the current section and display a message
about reusing the stored position.
If there is no current position in the course or chapter, then selects
the first child.
"""
urlargs = {'course_id': course_module.id.to_deprecated_string()}
chapter = get_current_child(course_module, min_depth=content_depth)
if chapter is None:
# oops. Something bad has happened.
raise Http404("No chapter found when loading current position in course")
urlargs['chapter'] = chapter.url_name
if course_module.position is not None:
return redirect(reverse('courseware_chapter', kwargs=urlargs))
# Relying on default of returning first child
section = get_current_child(chapter, min_depth=content_depth - 1)
if section is None:
raise Http404("No section found when loading current position in course")
urlargs['section'] = section.url_name
return redirect(reverse('courseware_section', kwargs=urlargs))
def save_child_position(seq_module, child_name):
"""
child_name: url_name of the child
"""
for position, c in enumerate(seq_module.get_display_items(), start=1):
if c.location.name == child_name:
# Only save if position changed
if position != seq_module.position:
seq_module.position = position
# Save this new position to the underlying KeyValueStore
seq_module.save()
def save_positions_recursively_up(user, request, field_data_cache, xmodule, course=None):
"""
Recurses up the course tree starting from a leaf
Saving the position property based on the previous node as it goes
"""
current_module = xmodule
while current_module:
parent_location = modulestore().get_parent_location(current_module.location)
parent = None
if parent_location:
parent_descriptor = modulestore().get_item(parent_location)
parent = get_module_for_descriptor(
user,
request,
parent_descriptor,
field_data_cache,
current_module.location.course_key,
course=course
)
if parent and hasattr(parent, 'position'):
save_child_position(parent, current_module.location.name)
current_module = parent
def chat_settings(course, user):
"""
Returns a dict containing the settings required to connect to a
Jabber chat server and room.
"""
domain = getattr(settings, "JABBER_DOMAIN", None)
if domain is None:
log.warning('You must set JABBER_DOMAIN in the settings to '
'enable the chat widget')
return None
return {
'domain': domain,
# Jabber doesn't like slashes, so replace with dashes
'room': "{ID}_class".format(ID=course.id.replace('/', '-')),
'username': "{USER}@{DOMAIN}".format(
USER=user.username, DOMAIN=domain
),
# TODO: clearly this needs to be something other than the username
# should also be something that's not necessarily tied to a
# particular course
'password': "{USER}@{DOMAIN}".format(
USER=user.username, DOMAIN=domain
),
}
@login_required
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@ensure_valid_course_key
@commit_on_success_with_read_committed
def index(request, course_id, chapter=None, section=None,
position=None):
"""
Displays courseware accordion and associated content. If course, chapter,
and section are all specified, renders the page, or returns an error if they
are invalid.
If section is not specified, displays the accordion opened to the right chapter.
If neither chapter or section are specified, redirects to user's most recent
chapter, or the first chapter if this is the user's first visit.
Arguments:
- request : HTTP request
- course_id : course id (str: ORG/course/URL_NAME)
- chapter : chapter url_name (str)
- section : section url_name (str)
- position : position in module, eg of <sequential> module (str)
Returns:
- HTTPresponse
"""
course_key = CourseKey.from_string(course_id)
user = User.objects.prefetch_related("groups").get(id=request.user.id)
redeemed_registration_codes = CourseRegistrationCode.objects.filter(
course_id=course_key,
registrationcoderedemption__redeemed_by=request.user
)
# Redirect to dashboard if the course is blocked due to non-payment.
if is_course_blocked(request, redeemed_registration_codes, course_key):
# registration codes may be generated via Bulk Purchase Scenario
# we have to check only for the invoice generated registration codes
# that their invoice is valid or not
log.warning(
u'User %s cannot access the course %s because payment has not yet been received',
user,
course_key.to_deprecated_string()
)
return redirect(reverse('dashboard'))
request.user = user # keep just one instance of User
with modulestore().bulk_operations(course_key):
return _index_bulk_op(request, course_key, chapter, section, position)
# pylint: disable=too-many-statements
def _index_bulk_op(request, course_key, chapter, section, position):
"""
Render the index page for the specified course.
"""
    # Verify that position, a string, is in fact an int
if position is not None:
try:
int(position)
except ValueError:
raise Http404(u"Position {} is not an integer!".format(position))
course = get_course_with_access(request.user, 'load', course_key, depth=2)
staff_access = has_access(request.user, 'staff', course)
masquerade, user = setup_masquerade(request, course_key, staff_access, reset_masquerade_data=True)
registered = registered_for_course(course, user)
if not registered:
# TODO (vshnayder): do course instructors need to be registered to see course?
log.debug(u'User %s tried to view course %s but is not enrolled', user, course.location.to_deprecated_string())
return redirect(reverse('about_course', args=[course_key.to_deprecated_string()]))
# see if all pre-requisites (as per the milestones app feature) have been fulfilled
# Note that if the pre-requisite feature flag has been turned off (default) then this check will
# always pass
if not has_access(user, 'view_courseware_with_prerequisites', course):
# prerequisites have not been fulfilled therefore redirect to the Dashboard
log.info(
u'User %d tried to view course %s '
u'without fulfilling prerequisites',
user.id, unicode(course.id))
return redirect(reverse('dashboard'))
# Entrance Exam Check
# If the course has an entrance exam and the requested chapter is NOT the entrance exam, and
# the user hasn't yet met the criteria to bypass the entrance exam, redirect them to the exam.
if chapter and course_has_entrance_exam(course):
chapter_descriptor = course.get_child_by(lambda m: m.location.name == chapter)
if chapter_descriptor and not getattr(chapter_descriptor, 'is_entrance_exam', False) \
and user_must_complete_entrance_exam(request, user, course):
log.info(u'User %d tried to view course %s without passing entrance exam', user.id, unicode(course.id))
return redirect(reverse('courseware', args=[unicode(course.id)]))
# check to see if there is a required survey that must be taken before
# the user can access the course.
if survey.utils.must_answer_survey(course, user):
return redirect(reverse('course_survey', args=[unicode(course.id)]))
try:
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course_key, user, course, depth=2)
course_module = get_module_for_descriptor(
user, request, course, field_data_cache, course_key, course=course
)
if course_module is None:
log.warning(u'If you see this, something went wrong: if we got this'
u' far, should have gotten a course module for this user')
return redirect(reverse('about_course', args=[course_key.to_deprecated_string()]))
studio_url = get_studio_url(course, 'course')
context = {
'csrf': csrf(request)['csrf_token'],
'accordion': render_accordion(user, request, course, chapter, section, field_data_cache),
'COURSE_TITLE': course.display_name_with_default,
'course': course,
'init': '',
'fragment': Fragment(),
'staff_access': staff_access,
'studio_url': studio_url,
'masquerade': masquerade,
'xqa_server': settings.FEATURES.get('XQA_SERVER', "http://your_xqa_server.com"),
}
now = datetime.now(UTC())
effective_start = _adjust_start_date_for_beta_testers(user, course, course_key)
if not in_preview_mode() and staff_access and now < effective_start:
# Disable student view button if user is staff and
# course is not yet visible to students.
context['disable_student_access'] = True
has_content = course.has_children_at_depth(CONTENT_DEPTH)
if not has_content:
# Show empty courseware for a course with no units
return render_to_response('courseware/courseware.html', context)
elif chapter is None:
# Check first to see if we should instead redirect the user to an Entrance Exam
if course_has_entrance_exam(course):
exam_chapter = get_entrance_exam_content(request, course)
if exam_chapter:
exam_section = None
if exam_chapter.get_children():
exam_section = exam_chapter.get_children()[0]
if exam_section:
return redirect('courseware_section',
course_id=unicode(course_key),
chapter=exam_chapter.url_name,
section=exam_section.url_name)
# passing CONTENT_DEPTH avoids returning 404 for a course with an
# empty first section and a second section with content
return redirect_to_course_position(course_module, CONTENT_DEPTH)
# Only show the chat if it's enabled by the course and in the
# settings.
show_chat = course.show_chat and settings.FEATURES['ENABLE_CHAT']
if show_chat:
context['chat'] = chat_settings(course, request.user)
# If we couldn't load the chat settings, then don't show
# the widget in the courseware.
if context['chat'] is None:
show_chat = False
context['show_chat'] = show_chat
chapter_descriptor = course.get_child_by(lambda m: m.location.name == chapter)
if chapter_descriptor is not None:
save_child_position(course_module, chapter)
else:
raise Http404('No chapter descriptor found with name {}'.format(chapter))
chapter_module = course_module.get_child_by(lambda m: m.location.name == chapter)
if chapter_module is None:
# User may be trying to access a chapter that isn't live yet
if masquerade and masquerade.role == 'student': # if staff is masquerading as student be kinder, don't 404
log.debug('staff masquerading as student: no chapter %s', chapter)
return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
raise Http404
if course_has_entrance_exam(course):
# Message should not appear outside the context of entrance exam subsection.
        # If section is None, we also don't need to show the message on the welcome-back screen.
if getattr(chapter_module, 'is_entrance_exam', False) and section is not None:
context['entrance_exam_current_score'] = get_entrance_exam_score(request, course)
context['entrance_exam_passed'] = user_has_passed_entrance_exam(request, course)
if section is not None:
section_descriptor = chapter_descriptor.get_child_by(lambda m: m.location.name == section)
if section_descriptor is None:
# Specifically asked-for section doesn't exist
if masquerade and masquerade.role == 'student': # don't 404 if staff is masquerading as student
log.debug('staff masquerading as student: no section %s', section)
return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
raise Http404
## Allow chromeless operation
if section_descriptor.chrome:
chrome = [s.strip() for s in section_descriptor.chrome.lower().split(",")]
if 'accordion' not in chrome:
context['disable_accordion'] = True
if 'tabs' not in chrome:
context['disable_tabs'] = True
if section_descriptor.default_tab:
context['default_tab'] = section_descriptor.default_tab
# cdodge: this looks silly, but let's refetch the section_descriptor with depth=None
# which will prefetch the children more efficiently than doing a recursive load
section_descriptor = modulestore().get_item(section_descriptor.location, depth=None)
# Load all descendants of the section, because we're going to display its
# html, which in general will need all of its children
field_data_cache.add_descriptor_descendents(
section_descriptor, depth=None
)
section_module = get_module_for_descriptor(
user,
request,
section_descriptor,
field_data_cache,
course_key,
position,
course=course
)
if section_module is None:
# User may be trying to be clever and access something
# they don't have access to.
raise Http404
# Save where we are in the chapter.
save_child_position(chapter_module, section)
section_render_context = {'activate_block_id': request.GET.get('activate_block_id')}
context['fragment'] = section_module.render(STUDENT_VIEW, section_render_context)
context['section_title'] = section_descriptor.display_name_with_default
else:
# section is none, so display a message
studio_url = get_studio_url(course, 'course')
prev_section = get_current_child(chapter_module)
if prev_section is None:
# Something went wrong -- perhaps this chapter has no sections visible to the user.
# Clearing out the last-visited state and showing "first-time" view by redirecting
# to courseware.
course_module.position = None
course_module.save()
return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
prev_section_url = reverse('courseware_section', kwargs={
'course_id': course_key.to_deprecated_string(),
'chapter': chapter_descriptor.url_name,
'section': prev_section.url_name
})
context['fragment'] = Fragment(content=render_to_string(
'courseware/welcome-back.html',
{
'course': course,
'studio_url': studio_url,
'chapter_module': chapter_module,
'prev_section': prev_section,
'prev_section_url': prev_section_url
}
))
result = render_to_response('courseware/courseware.html', context)
except Exception as e:
# Doesn't bar Unicode characters from URL, but if Unicode characters do
# cause an error it is a graceful failure.
if isinstance(e, UnicodeEncodeError):
raise Http404("URL contains Unicode characters")
if isinstance(e, Http404):
# let it propagate
raise
# In production, don't want to let a 500 out for any reason
if settings.DEBUG:
raise
else:
log.exception(
u"Error in index view: user=%s, effective_user=%s, course=%s, chapter=%s section=%s position=%s",
request.user, user, course, chapter, section, position
)
try:
result = render_to_response('courseware/courseware-error.html', {
'staff_access': staff_access,
'course': course
})
except:
                # Let the exception propagate, relying on global config to
                # at least return a nice error message
log.exception("Error while rendering courseware-error page")
raise
return result
@ensure_csrf_cookie
@ensure_valid_course_key
def jump_to_id(request, course_id, module_id):
"""
This entry point allows for a shorter version of a jump to where just the id of the element is
passed in. This assumes that id is unique within the course_id namespace
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
items = modulestore().get_items(course_key, qualifiers={'name': module_id})
if len(items) == 0:
raise Http404(
u"Could not find id: {0} in course_id: {1}. Referer: {2}".format(
module_id, course_id, request.META.get("HTTP_REFERER", "")
))
if len(items) > 1:
log.warning(
u"Multiple items found with id: %s in course_id: %s. Referer: %s. Using first: %s",
module_id,
course_id,
request.META.get("HTTP_REFERER", ""),
items[0].location.to_deprecated_string()
)
return jump_to(request, course_id, items[0].location.to_deprecated_string())
@ensure_csrf_cookie
def jump_to(_request, course_id, location):
"""
Show the page that contains a specific location.
If the location is invalid or not in any class, return a 404.
Otherwise, delegates to the index view to figure out whether this user
has access, and what they should see.
"""
try:
course_key = CourseKey.from_string(course_id)
usage_key = UsageKey.from_string(location).replace(course_key=course_key)
except InvalidKeyError:
raise Http404(u"Invalid course_key or usage_key")
try:
redirect_url = get_redirect_url(course_key, usage_key)
except ItemNotFoundError:
raise Http404(u"No data at this location: {0}".format(usage_key))
except NoPathToItem:
raise Http404(u"This location is not in any class: {0}".format(usage_key))
return redirect(redirect_url)
@ensure_csrf_cookie
@ensure_valid_course_key
def course_info(request, course_id):
"""
Display the course's info.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
with modulestore().bulk_operations(course_key):
course = get_course_by_id(course_key, depth=2)
access_response = has_access(request.user, 'load', course, course_key)
if not access_response:
# The user doesn't have access to the course. If they're
# denied permission due to the course not being live yet,
# redirect to the dashboard page.
if isinstance(access_response, StartDateError):
start_date = strftime_localized(course.start, 'SHORT_DATE')
params = urllib.urlencode({'notlive': start_date})
return redirect('{0}?{1}'.format(reverse('dashboard'), params))
# Otherwise, give a 404 to avoid leaking info about access
# control.
raise Http404("Course not found.")
staff_access = has_access(request.user, 'staff', course)
masquerade, user = setup_masquerade(request, course_key, staff_access, reset_masquerade_data=True)
# If the user needs to take an entrance exam to access this course, then we'll need
# to send them to that specific course module before allowing them into other areas
if user_must_complete_entrance_exam(request, user, course):
return redirect(reverse('courseware', args=[unicode(course.id)]))
# check to see if there is a required survey that must be taken before
# the user can access the course.
if request.user.is_authenticated() and survey.utils.must_answer_survey(course, user):
return redirect(reverse('course_survey', args=[unicode(course.id)]))
studio_url = get_studio_url(course, 'course_info')
# link to where the student should go to enroll in the course:
        # about page if there is no marketing site, SITE_NAME if there is
url_to_enroll = reverse(course_about, args=[course_id])
if settings.FEATURES.get('ENABLE_MKTG_SITE'):
url_to_enroll = marketing_link('COURSES')
show_enroll_banner = request.user.is_authenticated() and not CourseEnrollment.is_enrolled(user, course.id)
context = {
'request': request,
'course_id': course_key.to_deprecated_string(),
'cache': None,
'course': course,
'staff_access': staff_access,
'masquerade': masquerade,
'studio_url': studio_url,
'show_enroll_banner': show_enroll_banner,
'url_to_enroll': url_to_enroll,
}
now = datetime.now(UTC())
effective_start = _adjust_start_date_for_beta_testers(user, course, course_key)
if not in_preview_mode() and staff_access and now < effective_start:
# Disable student view button if user is staff and
# course is not yet visible to students.
context['disable_student_access'] = True
return render_to_response('courseware/info.html', context)
@ensure_csrf_cookie
@ensure_valid_course_key
def static_tab(request, course_id, tab_slug):
"""
Display the courses tab with the given name.
Assumes the course_id is in a valid format.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
tab = CourseTabList.get_tab_by_slug(course.tabs, tab_slug)
if tab is None:
raise Http404
contents = get_static_tab_contents(
request,
course,
tab
)
if contents is None:
raise Http404
return render_to_response('courseware/static_tab.html', {
'course': course,
'tab': tab,
'tab_contents': contents,
})
@ensure_csrf_cookie
@ensure_valid_course_key
def syllabus(request, course_id):
"""
Display the course's syllabus.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
staff_access = bool(has_access(request.user, 'staff', course))
return render_to_response('courseware/syllabus.html', {
'course': course,
'staff_access': staff_access,
})
def registered_for_course(course, user):
"""
Return True if user is registered for course, else False
"""
if user is None:
return False
if user.is_authenticated():
return CourseEnrollment.is_enrolled(user, course.id)
else:
return False
def get_cosmetic_display_price(course, registration_price):
"""
Return Course Price as a string preceded by correct currency, or 'Free'
"""
currency_symbol = settings.PAID_COURSE_REGISTRATION_CURRENCY[1]
price = course.cosmetic_display_price
if registration_price > 0:
price = registration_price
if price:
# Translators: This will look like '$50', where {currency_symbol} is a symbol such as '$' and {price} is a
# numerical amount in that currency. Adjust this display as needed for your language.
return _("{currency_symbol}{price}").format(currency_symbol=currency_symbol, price=price)
else:
# Translators: This refers to the cost of the course. In this case, the course costs nothing so it is free.
return _('Free')
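# Example (illustrative): with PAID_COURSE_REGISTRATION_CURRENCY set to
# ['usd', '$'], get_cosmetic_display_price(course, 50) returns u'$50';
# a course with no cosmetic price and a zero registration price yields
# the localized string 'Free'.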
@ensure_csrf_cookie
@cache_if_anonymous()
def course_about(request, course_id):
"""
Display the course's about page.
Assumes the course_id is in a valid format.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
with modulestore().bulk_operations(course_key):
permission_name = microsite.get_value(
'COURSE_ABOUT_VISIBILITY_PERMISSION',
settings.COURSE_ABOUT_VISIBILITY_PERMISSION
)
course = get_course_with_access(request.user, permission_name, course_key)
if microsite.get_value('ENABLE_MKTG_SITE', settings.FEATURES.get('ENABLE_MKTG_SITE', False)):
return redirect(reverse('info', args=[course.id.to_deprecated_string()]))
registered = registered_for_course(course, request.user)
staff_access = bool(has_access(request.user, 'staff', course))
studio_url = get_studio_url(course, 'settings/details')
if has_access(request.user, 'load', course):
course_target = reverse('info', args=[course.id.to_deprecated_string()])
else:
course_target = reverse('about_course', args=[course.id.to_deprecated_string()])
show_courseware_link = bool(
(
has_access(request.user, 'load', course)
and has_access(request.user, 'view_courseware_with_prerequisites', course)
)
or settings.FEATURES.get('ENABLE_LMS_MIGRATION')
)
# Note: this is a flow for payment for course registration, not the Verified Certificate flow.
registration_price = 0
in_cart = False
reg_then_add_to_cart_link = ""
_is_shopping_cart_enabled = is_shopping_cart_enabled()
if _is_shopping_cart_enabled:
registration_price = CourseMode.min_course_price_for_currency(course_key,
settings.PAID_COURSE_REGISTRATION_CURRENCY[0])
if request.user.is_authenticated():
cart = shoppingcart.models.Order.get_cart_for_user(request.user)
in_cart = shoppingcart.models.PaidCourseRegistration.contained_in_order(cart, course_key) or \
shoppingcart.models.CourseRegCodeItem.contained_in_order(cart, course_key)
reg_then_add_to_cart_link = "{reg_url}?course_id={course_id}&enrollment_action=add_to_cart".format(
reg_url=reverse('register_user'), course_id=urllib.quote(str(course_id)))
course_price = get_cosmetic_display_price(course, registration_price)
can_add_course_to_cart = _is_shopping_cart_enabled and registration_price
# Used to provide context to message to student if enrollment not allowed
can_enroll = bool(has_access(request.user, 'enroll', course))
invitation_only = course.invitation_only
is_course_full = CourseEnrollment.objects.is_course_full(course)
# Register button should be disabled if one of the following is true:
# - Student is already registered for course
# - Course is already full
# - Student cannot enroll in course
        active_reg_button = not (registered or is_course_full or not can_enroll)
is_shib_course = uses_shib(course)
# get prerequisite courses display names
pre_requisite_courses = get_prerequisite_courses_display(course)
return render_to_response('courseware/course_about.html', {
'course': course,
'staff_access': staff_access,
'studio_url': studio_url,
'registered': registered,
'course_target': course_target,
'is_cosmetic_price_enabled': settings.FEATURES.get('ENABLE_COSMETIC_DISPLAY_PRICE'),
'course_price': course_price,
'in_cart': in_cart,
'reg_then_add_to_cart_link': reg_then_add_to_cart_link,
'show_courseware_link': show_courseware_link,
'is_course_full': is_course_full,
'can_enroll': can_enroll,
'invitation_only': invitation_only,
'active_reg_button': active_reg_button,
'is_shib_course': is_shib_course,
# We do not want to display the internal courseware header, which is used when the course is found in the
            # context. This value is therefore explicitly set to render the appropriate header.
'disable_courseware_header': True,
'can_add_course_to_cart': can_add_course_to_cart,
'cart_link': reverse('shoppingcart.views.show_cart'),
'pre_requisite_courses': pre_requisite_courses
})
@ensure_csrf_cookie
@cache_if_anonymous('org')
@ensure_valid_course_key
def mktg_course_about(request, course_id):
"""This is the button that gets put into an iframe on the Drupal site."""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
try:
permission_name = microsite.get_value(
'COURSE_ABOUT_VISIBILITY_PERMISSION',
settings.COURSE_ABOUT_VISIBILITY_PERMISSION
)
course = get_course_with_access(request.user, permission_name, course_key)
except (ValueError, Http404):
# If a course does not exist yet, display a "Coming Soon" button
return render_to_response(
'courseware/mktg_coming_soon.html', {'course_id': course_key.to_deprecated_string()}
)
registered = registered_for_course(course, request.user)
if has_access(request.user, 'load', course):
course_target = reverse('info', args=[course.id.to_deprecated_string()])
else:
course_target = reverse('about_course', args=[course.id.to_deprecated_string()])
allow_registration = bool(has_access(request.user, 'enroll', course))
show_courseware_link = bool(has_access(request.user, 'load', course) or
settings.FEATURES.get('ENABLE_LMS_MIGRATION'))
course_modes = CourseMode.modes_for_course_dict(course.id)
context = {
'course': course,
'registered': registered,
'allow_registration': allow_registration,
'course_target': course_target,
'show_courseware_link': show_courseware_link,
'course_modes': course_modes,
}
# The edx.org marketing site currently displays only in English.
# To avoid displaying a different language in the register / access button,
# we force the language to English.
# However, OpenEdX installations with a different marketing front-end
# may want to respect the language specified by the user or the site settings.
force_english = settings.FEATURES.get('IS_EDX_DOMAIN', False)
if force_english:
translation.activate('en-us')
if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
# Drupal will pass organization names using a GET parameter, as follows:
# ?org=Harvard
# ?org=Harvard,MIT
# If no full names are provided, the marketing iframe won't show the
# email opt-in checkbox.
org = request.GET.get('org')
if org:
org_list = org.split(',')
# HTML-escape the provided organization names
org_list = [cgi.escape(org) for org in org_list]
if len(org_list) > 1:
if len(org_list) > 2:
# Translators: The join of three or more institution names (e.g., Harvard, MIT, and Dartmouth).
org_name_string = _("{first_institutions}, and {last_institution}").format(
first_institutions=u", ".join(org_list[:-1]),
last_institution=org_list[-1]
)
else:
# Translators: The join of two institution names (e.g., Harvard and MIT).
org_name_string = _("{first_institution} and {second_institution}").format(
first_institution=org_list[0],
second_institution=org_list[1]
)
else:
org_name_string = org_list[0]
context['checkbox_label'] = ungettext(
"I would like to receive email from {institution_series} and learn about its other programs.",
"I would like to receive email from {institution_series} and learn about their other programs.",
len(org_list)
).format(institution_series=org_name_string)
try:
return render_to_response('courseware/mktg_course_about.html', context)
finally:
# Just to be safe, reset the language if we forced it to be English.
if force_english:
translation.deactivate()
@login_required
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@transaction.commit_manually
@ensure_valid_course_key
def progress(request, course_id, student_id=None):
"""
Wraps "_progress" with the manual_transaction context manager just in case
there are unanticipated errors.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
with modulestore().bulk_operations(course_key):
with grades.manual_transaction():
return _progress(request, course_key, student_id)
def _progress(request, course_key, student_id):
"""
Unwrapped version of "progress".
User progress. We show the grade bar and every problem score.
Course staff are allowed to see the progress of students in their class.
"""
course = get_course_with_access(request.user, 'load', course_key, depth=None, check_if_enrolled=True)
# check to see if there is a required survey that must be taken before
# the user can access the course.
if survey.utils.must_answer_survey(course, request.user):
return redirect(reverse('course_survey', args=[unicode(course.id)]))
staff_access = bool(has_access(request.user, 'staff', course))
if student_id is None or student_id == request.user.id:
# always allowed to see your own profile
student = request.user
else:
# Requesting access to a different student's profile
if not staff_access:
raise Http404
try:
student = User.objects.get(id=student_id)
# Check for ValueError if 'student_id' cannot be converted to integer.
except (ValueError, User.DoesNotExist):
raise Http404
# NOTE: To make sure impersonation by instructor works, use
# student instead of request.user in the rest of the function.
# The pre-fetching of groups is done to make auth checks not require an
# additional DB lookup (this kills the Progress page in particular).
student = User.objects.prefetch_related("groups").get(id=student.id)
field_data_cache = grades.field_data_cache_for_grading(course, student)
scores_client = ScoresClient.from_field_data_cache(field_data_cache)
courseware_summary = grades.progress_summary(
student, request, course, field_data_cache=field_data_cache, scores_client=scores_client
)
grade_summary = grades.grade(
student, request, course, field_data_cache=field_data_cache, scores_client=scores_client
)
studio_url = get_studio_url(course, 'settings/grading')
if courseware_summary is None:
        # This means the student didn't have access to the course (which the instructor requested)
raise Http404
# checking certificate generation configuration
show_generate_cert_btn = certs_api.cert_generation_enabled(course_key)
context = {
'course': course,
'courseware_summary': courseware_summary,
'studio_url': studio_url,
'grade_summary': grade_summary,
'staff_access': staff_access,
'student': student,
'passed': is_course_passed(course, grade_summary),
'show_generate_cert_btn': show_generate_cert_btn,
'credit_course_requirements': _credit_course_requirements(course_key, student),
}
if show_generate_cert_btn:
context.update(certs_api.certificate_downloadable_status(student, course_key))
# showing the certificate web view button if feature flags are enabled.
if certs_api.has_html_certificates_enabled(course_key, course):
if certs_api.get_active_web_certificate(course) is not None:
context.update({
'show_cert_web_view': True,
'cert_web_view_url': u'{url}'.format(
url=certs_api.get_certificate_url(
user_id=student.id,
course_id=unicode(course.id)
)
)
})
else:
context.update({
'is_downloadable': False,
'is_generating': True,
'download_url': None
})
with grades.manual_transaction():
response = render_to_response('courseware/progress.html', context)
return response
def _credit_course_requirements(course_key, student):
"""Return information about which credit requirements a user has satisfied.
Arguments:
course_key (CourseKey): Identifier for the course.
student (User): Currently logged in user.
Returns: dict
"""
# If credit eligibility is not enabled or this is not a credit course,
# short-circuit and return `None`. This indicates that credit requirements
# should NOT be displayed on the progress page.
if not (settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY", False) and is_credit_course(course_key)):
return None
# Retrieve the status of the user for each eligibility requirement in the course.
# For each requirement, the user's status is either "satisfied", "failed", or None.
# In this context, `None` means that we don't know the user's status, either because
# the user hasn't done something (for example, submitting photos for verification)
# or we're waiting on more information (for example, a response from the photo
# verification service).
requirement_statuses = get_credit_requirement_status(course_key, student.username)
# If the user has been marked as "eligible", then they are *always* eligible
# unless someone manually intervenes. This could lead to some strange behavior
# if the requirements change post-launch. For example, if the user was marked as eligible
# for credit, then a new requirement was added, the user will see that they're eligible
# AND that one of the requirements is still pending.
# We're assuming here that (a) we can mitigate this by properly training course teams,
# and (b) it's a better user experience to allow students who were at one time
# marked as eligible to continue to be eligible.
# If we need to, we can always manually move students back to ineligible by
# deleting CreditEligibility records in the database.
if is_user_eligible_for_credit(student.username, course_key):
eligibility_status = "eligible"
# If the user has *failed* any requirements (for example, if a photo verification is denied),
# then the user is NOT eligible for credit.
elif any(requirement['status'] == 'failed' for requirement in requirement_statuses):
eligibility_status = "not_eligible"
# Otherwise, the user may be eligible for credit, but the user has not
# yet completed all the requirements.
else:
eligibility_status = "partial_eligible"
return {
'eligibility_status': eligibility_status,
'requirements': requirement_statuses,
}
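# Example return value (illustrative; the requirement fields sketched here are
# an assumption about get_credit_requirement_status, and only 'status' is
# relied on above):
#
#     {
#         'eligibility_status': 'partial_eligible',
#         'requirements': [
#             {'namespace': 'grade', 'name': 'grade', 'status': 'satisfied'},
#             {'namespace': 'reverification', 'name': 'midterm', 'status': None},
#         ],
#     }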
@login_required
@ensure_valid_course_key
def submission_history(request, course_id, student_username, location):
"""Render an HTML fragment (meant for inclusion elsewhere) that renders a
history of all state changes made by this user for this problem location.
Right now this only works for problems because that's all
StudentModuleHistory records.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
try:
usage_key = course_key.make_usage_key_from_deprecated_string(location)
except (InvalidKeyError, AssertionError):
return HttpResponse(escape(_(u'Invalid location.')))
course = get_course_with_access(request.user, 'load', course_key)
staff_access = bool(has_access(request.user, 'staff', course))
# Permission Denied if they don't have staff access and are trying to see
# somebody else's submission history.
if (student_username != request.user.username) and (not staff_access):
raise PermissionDenied
user_state_client = DjangoXBlockUserStateClient()
try:
history_entries = list(user_state_client.get_history(student_username, usage_key))
except DjangoXBlockUserStateClient.DoesNotExist:
return HttpResponse(escape(_(u'User {username} has never accessed problem {location}').format(
username=student_username,
location=location
)))
# This is ugly, but until we have a proper submissions API that we can use to provide
# the scores instead, it will have to do.
scores = list(StudentModuleHistory.objects.filter(
student_module__module_state_key=usage_key,
student_module__student__username=student_username,
student_module__course_id=course_key
).order_by('-id'))
if len(scores) != len(history_entries):
log.warning(
"Mismatch when fetching scores for student "
"history for course %s, user %s, xblock %s. "
"%d scores were found, and %d history entries were found. "
"Matching scores to history entries by date for display.",
course_id,
student_username,
location,
len(scores),
len(history_entries),
)
scores_by_date = {
score.created: score
for score in scores
}
scores = [
scores_by_date[history.updated]
for history in history_entries
]
context = {
'history_entries': history_entries,
'scores': scores,
'username': student_username,
'location': location,
'course_id': course_key.to_deprecated_string()
}
return render_to_response('courseware/submission_history.html', context)
def notification_image_for_tab(course_tab, user, course):
"""
Returns the notification image path for the given course_tab if applicable, otherwise None.
"""
tab_notification_handlers = {
StaffGradingTab.type: open_ended_notifications.staff_grading_notifications,
PeerGradingTab.type: open_ended_notifications.peer_grading_notifications,
OpenEndedGradingTab.type: open_ended_notifications.combined_notifications
}
if course_tab.name in tab_notification_handlers:
notifications = tab_notification_handlers[course_tab.name](course, user)
if notifications and notifications['pending_grading']:
return notifications['img_path']
return None
def get_static_tab_contents(request, course, tab):
"""
Returns the contents for the given static tab
"""
loc = course.id.make_usage_key(
tab.type,
tab.url_slug,
)
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, request.user, modulestore().get_item(loc), depth=0
)
tab_module = get_module(
request.user, request, loc, field_data_cache, static_asset_path=course.static_asset_path, course=course
)
logging.debug('course_module = %s', tab_module)
html = ''
if tab_module is not None:
try:
html = tab_module.render(STUDENT_VIEW).content
except Exception: # pylint: disable=broad-except
html = render_to_string('courseware/error-message.html', None)
log.exception(
u"Error rendering course=%s, tab=%s", course, tab['url_slug']
)
return html
@require_GET
@ensure_valid_course_key
def get_course_lti_endpoints(request, course_id):
"""
View that, given a course_id, returns a JSON object that enumerates all of the LTI endpoints for that course.
The LTI 2.0 result service spec at
http://www.imsglobal.org/lti/ltiv2p0/uml/purl.imsglobal.org/vocab/lis/v2/outcomes/Result/service.html
says "This specification document does not prescribe a method for discovering the endpoint URLs." This view
function implements one way of discovering these endpoints, returning a JSON array when accessed.
Arguments:
request (django request object): the HTTP request object that triggered this view function
course_id (unicode): id associated with the course
Returns:
(django response object): HTTP response. 404 if course is not found, otherwise 200 with JSON body.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
try:
course = get_course(course_key, depth=2)
except ValueError:
return HttpResponse(status=404)
anonymous_user = AnonymousUser()
anonymous_user.known = False # make these "noauth" requests like module_render.handle_xblock_callback_noauth
lti_descriptors = modulestore().get_items(course.id, qualifiers={'category': 'lti'})
lti_noauth_modules = [
get_module_for_descriptor(
anonymous_user,
request,
descriptor,
FieldDataCache.cache_for_descriptor_descendents(
course_key,
anonymous_user,
descriptor
),
course_key,
course=course
)
for descriptor in lti_descriptors
]
endpoints = [
{
'display_name': module.display_name,
'lti_2_0_result_service_json_endpoint': module.get_outcome_service_url(
service_name='lti_2_0_result_rest_handler') + "/user/{anon_user_id}",
'lti_1_1_result_service_xml_endpoint': module.get_outcome_service_url(
service_name='grade_handler'),
}
for module in lti_noauth_modules
]
return HttpResponse(json.dumps(endpoints), content_type='application/json')
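# --- Illustrative sketch (added for exposition, not part of the original
# module): the JSON body returned above is a list of objects shaped like
# the hypothetical example below; the URLs are placeholders. ---
_EXAMPLE_LTI_ENDPOINTS_RESPONSE = [
    {
        'display_name': 'Example LTI component',
        'lti_2_0_result_service_json_endpoint':
            'https://lms.example.com/handler/lti_2_0_result_rest_handler/user/{anon_user_id}',
        'lti_1_1_result_service_xml_endpoint':
            'https://lms.example.com/handler/grade_handler',
    },
]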
@login_required
def course_survey(request, course_id):
"""
URL endpoint to present a survey that is associated with a course_id
Note that the actual implementation of course survey is handled in the
views.py file in the Survey Djangoapp
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
redirect_url = reverse('info', args=[course_id])
# if there is no Survey associated with this course,
# then redirect to the course instead
if not course.course_survey_name:
return redirect(redirect_url)
return survey.views.view_student_survey(
request.user,
course.course_survey_name,
course=course,
redirect_url=redirect_url,
is_required=course.course_survey_required,
)
def is_course_passed(course, grade_summary=None, student=None, request=None):
"""
Check the user's course passing status. Return True if passed.
Arguments:
course : course object
grade_summary (dict) : contains student grade details.
student : user object
request (HttpRequest)
Returns:
bool: whether the user passed the course
"""
nonzero_cutoffs = [cutoff for cutoff in course.grade_cutoffs.values() if cutoff > 0]
success_cutoff = min(nonzero_cutoffs) if nonzero_cutoffs else None
if grade_summary is None:
grade_summary = grades.grade(student, request, course)
return success_cutoff and grade_summary['percent'] >= success_cutoff
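# --- Illustrative sketch (added for exposition, not part of the original
# module): how the passing threshold above is derived from a hypothetical
# course's grade_cutoffs mapping. ---
def _example_success_cutoff():
    """Return the passing threshold for a toy grade_cutoffs dict."""
    grade_cutoffs = {'A': 0.9, 'B': 0.7, 'Pass': 0.5}  # hypothetical values
    nonzero_cutoffs = [cutoff for cutoff in grade_cutoffs.values() if cutoff > 0]
    # The lowest non-zero cutoff is the bar to pass: 0.5 here, so a
    # grade summary 'percent' of 0.5 or above counts as passing.
    return min(nonzero_cutoffs) if nonzero_cutoffs else None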
@require_POST
def generate_user_cert(request, course_id):
"""Start generating a new certificate for the user.
Certificate generation is allowed if:
* The user has passed the course, and
* The user does not already have a pending/completed certificate.
Note that if an error occurs during certificate generation
(for example, if the queue is down), then we simply mark the
certificate generation task status as "error" and re-run
the task with a management command. To students, the certificate
will appear to be "generating" until it is re-run.
Args:
request (HttpRequest): The POST request to this view.
course_id (unicode): The identifier for the course.
Returns:
HttpResponse: 200 on success, 400 if a new certificate cannot be generated.
"""
if not request.user.is_authenticated():
log.info(u"Anon user trying to generate certificate for %s", course_id)
return HttpResponseBadRequest(
_('You must be signed in to {platform_name} to create a certificate.').format(
platform_name=settings.PLATFORM_NAME
)
)
student = request.user
course_key = CourseKey.from_string(course_id)
course = modulestore().get_course(course_key, depth=2)
if not course:
return HttpResponseBadRequest(_("Course is not valid"))
if not is_course_passed(course, None, student, request):
return HttpResponseBadRequest(_("Your certificate will be available when you pass the course."))
certificate_status = certs_api.certificate_downloadable_status(student, course.id)
if certificate_status["is_downloadable"]:
return HttpResponseBadRequest(_("Certificate has already been created."))
elif certificate_status["is_generating"]:
return HttpResponseBadRequest(_("Certificate is being created."))
else:
# If the certificate is not already in-process or completed,
# then create a new certificate generation task.
# If the certificate cannot be added to the queue, this will
# mark the certificate with "error" status, so it can be re-run
# with a management command. From the user's perspective,
# it will appear that the certificate task was submitted successfully.
certs_api.generate_user_certificates(student, course.id, course=course, generation_mode='self')
_track_successful_certificate_generation(student.id, course.id)
return HttpResponse()
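# --- Illustrative sketch (added for exposition): the two flags consulted
# above arrive in a dict shaped roughly like this hypothetical example;
# see certs_api.certificate_downloadable_status for the real payload. ---
_EXAMPLE_CERTIFICATE_STATUS = {
    'is_downloadable': False,
    'is_generating': True,
}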
def _track_successful_certificate_generation(user_id, course_id): # pylint: disable=invalid-name
"""
Track a successful certificate generation event.
Arguments:
user_id (str): The ID of the user generating the certificate.
course_id (CourseKey): Identifier for the course.
Returns:
None
"""
if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'):
event_name = 'edx.bi.user.certificate.generate'
tracking_context = tracker.get_tracker().resolve_context()
analytics.track(
user_id,
event_name,
{
'category': 'certificates',
'label': unicode(course_id)
},
context={
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
@require_http_methods(["GET", "POST"])
def render_xblock(request, usage_key_string, check_if_enrolled=True):
"""
Returns an HttpResponse with HTML content for the xBlock with the given usage_key.
The returned HTML is a chromeless rendering of the xBlock (excluding content of the containing courseware).
"""
usage_key = UsageKey.from_string(usage_key_string)
usage_key = usage_key.replace(course_key=modulestore().fill_in_run(usage_key.course_key))
course_key = usage_key.course_key
with modulestore().bulk_operations(course_key):
# verify the user has access to the course, including enrollment check
try:
course = get_course_with_access(request.user, 'load', course_key, check_if_enrolled=check_if_enrolled)
except UserNotEnrolled:
raise Http404("Course not found.")
# get the block, which verifies whether the user has access to the block.
block, _ = get_module_by_usage_id(
request, unicode(course_key), unicode(usage_key), disable_staff_debug_info=True, course=course
)
context = {
'fragment': block.render('student_view', context=request.GET),
'course': course,
'disable_accordion': True,
'allow_iframing': True,
'disable_header': True,
'disable_window_wrap': True,
'disable_preview_menu': True,
'staff_access': bool(has_access(request.user, 'staff', course)),
'xqa_server': settings.FEATURES.get('XQA_SERVER', 'http://your_xqa_server.com'),
}
return render_to_response('courseware/courseware-chromeless.html', context)
| jazztpt/edx-platform | lms/djangoapps/courseware/views.py | Python | agpl-3.0 | 61,937 | ["VisIt"] | 80e1932cce279ba4e24f5fb4a79c177d6de5cf19737305c501da8c89dfd76718 |
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises, assert_equal,
assert_warns)
from numpy import random
from numpy.compat import asbytes
import sys
class TestSeed(TestCase):
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = np.random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = np.random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = np.random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32-bit integer
assert_raises(TypeError, np.random.RandomState, -0.5)
assert_raises(ValueError, np.random.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32-bit integer
assert_raises(TypeError, np.random.RandomState, [-0.5])
assert_raises(ValueError, np.random.RandomState, [-1])
assert_raises(ValueError, np.random.RandomState, [4294967296])
assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
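# --- Illustrative sketch (added for exposition): the boundary seed values
# the tests above exercise -- seeds must be unsigned 32-bit integers, or
# sequences of them. ---
def _example_valid_seeds():
    np.random.RandomState(0)  # smallest valid scalar seed
    np.random.RandomState(2**32 - 1)  # largest valid scalar seed (4294967295)
    np.random.RandomState([0, 2**32 - 1])  # array seeds mix several values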
class TestBinomial(TestCase):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
np.testing.assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(TestCase):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, np.random.multinomial, 1, p,
np.float(1))
class TestSetState(TestCase):
def setUp(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
self.state = self.prng.get_state()
def test_basic(self):
old = self.prng.tomaxint(16)
self.prng.set_state(self.state)
new = self.prng.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.prng.standard_normal(size=3)
self.prng.set_state(self.state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.prng.standard_normal()
state = self.prng.get_state()
old = self.prng.standard_normal(size=3)
self.prng.set_state(state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
old_state = self.state[:-2]
x1 = self.prng.standard_normal(size=16)
self.prng.set_state(old_state)
x2 = self.prng.standard_normal(size=16)
self.prng.set_state(self.state)
x3 = self.prng.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.prng.negative_binomial(0.5, 0.5)
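# --- Illustrative sketch (added for exposition): the layout of the
# get_state() tuple that test_backwards_compatibility slices above. ---
def _example_state_layout():
    state = np.random.RandomState(1234567890).get_state()
    algo, keys, pos, has_gauss, cached_gaussian = state  # 5-element tuple
    # state[:-2] drops the cached-Gaussian fields, reproducing the older
    # format that set_state() still accepts.
    return algo, keys.shape, pos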
class TestRandomDist(TestCase):
# Make sure the random distributions return the correct values for a
# given seed
def setUp(self):
self.seed = 1234567890
def test_rand(self):
np.random.seed(self.seed)
actual = np.random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
np.random.seed(self.seed)
actual = np.random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_randint(self):
np.random.seed(self.seed)
actual = np.random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
np.testing.assert_array_equal(actual, desired)
def test_random_integers(self):
np.random.seed(self.seed)
actual = np.random.random_integers(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
np.testing.assert_array_equal(actual, desired)
def test_random_sample(self):
np.random.seed(self.seed)
actual = np.random.random_sample((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
np.testing.assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
np.testing.assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
np.testing.assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
np.testing.assert_array_equal(actual, desired)
def test_choice_noninteger(self):
np.random.seed(self.seed)
actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
np.testing.assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2, replace=False,
p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(np.random.choice(6, s, replace=True).shape, s)
assert_equal(np.random.choice(6, s, replace=False).shape, s)
assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s)
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
desired = asbytes('\x82Ui\x9e\xff\x97+Wf\xa5')
np.testing.assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays, and multidimensional versions of both:
for conv in [lambda x: x,
np.asarray,
lambda x: [(i, i) for i in x],
lambda x: np.asarray([(i, i) for i in x])]:
np.random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
np.random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
np.testing.assert_array_equal(actual, desired)
def test_shuffle_flexible(self):
# gh-4270
arr = [(0, 1), (2, 3)]
dt = np.dtype([('a', np.int32, 1), ('b', np.int32, 1)])
nparr = np.array(arr, dtype=dt)
a, b = nparr[0].copy(), nparr[1].copy()
for i in range(50):
np.random.shuffle(nparr)
assert_(a in nparr)
assert_(b in nparr)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5,4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
ma = np.ma.count_masked(a)
mb = np.ma.count_masked(b)
for i in range(50):
np.random.shuffle(a)
self.assertEqual(ma, np.ma.count_masked(a))
np.random.shuffle(b)
self.assertEqual(mb, np.ma.count_masked(b))
def test_beta(self):
np.random.seed(self.seed)
actual = np.random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
np.testing.assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
np.testing.assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, np.float(1))
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
np.testing.assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
np.testing.assert_array_equal(actual, desired)
# Test nbad = 0
actual = np.random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
np.testing.assert_array_equal(actual, desired)
actual = np.random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
np.testing.assert_array_equal(actual, desired)
# Test ngood = 0
actual = np.random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
np.testing.assert_array_equal(actual, desired)
actual = np.random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
np.testing.assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
np.random.seed(self.seed)
actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
np.testing.assert_array_almost_equal(actual, desired, decimal=13)
def test_logseries(self):
np.random.seed(self.seed)
actual = np.random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
np.testing.assert_array_equal(actual, desired)
def test_multinomial(self):
np.random.seed(self.seed)
actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
np.testing.assert_array_equal(actual, desired)
def test_multivariate_normal(self):
np.random.seed(self.seed)
mean = (.123456789, 10)
# Hmm... not even symmetric.
cov = [[1, 0], [1, 0]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
desired = np.array([[[-1.47027513018564449, 10.],
[-1.65915081534845532, 10.]],
[[-2.29186329304599745, 10.],
[-1.77505606019580053, 10.]],
[[-0.54970369430044119, 10.],
[0.29768848031692957, 10.]]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = np.random.multivariate_normal(mean, cov)
desired = np.array([-0.79441224511977482, 10.])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance raises warning
mean = [0, 0]
cov = [[1, 1 + 1e-10], [1 + 1e-10, 1]]
assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
np.testing.assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[ 1.47145377828516666, 0.15052899268012659],
[ 0.00943803056963588, 1.02647251615666169],
[ 0.332334982684171 , 0.15451287602753125]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
np.random.seed(self.seed)
actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_pareto(self):
np.random.seed(self.seed)
actual = np.random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# http://mail.scipy.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
np.random.seed(self.seed)
actual = np.random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
np.testing.assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg]*10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig]*10)
def test_power(self):
np.random.seed(self.seed)
actual = np.random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
np.random.seed(self.seed)
actual = np.random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_cauchy(self):
np.random.seed(self.seed)
actual = np.random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
np.random.seed(self.seed)
actual = np.random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
np.random.seed(self.seed)
actual = np.random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_normal(self):
np.random.seed(self.seed)
actual = np.random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
np.random.seed(self.seed)
actual = np.random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
np.random.seed(self.seed)
actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
np.random.seed(self.seed)
actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = np.random.uniform
np.testing.assert_raises(OverflowError, func, -np.inf, 0)
np.testing.assert_raises(OverflowError, func, 0, np.inf)
np.testing.assert_raises(OverflowError, func, fmin, fmax)
# (fmax / 1e17) - fmin is within range, so this should not throw
np.random.uniform(low=fmin, high=fmax / 1e17)
def test_vonmises(self):
np.random.seed(self.seed)
actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
np.random.seed(self.seed)
r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
np.testing.assert_(np.isfinite(r).all())
def test_wald(self):
np.random.seed(self.seed)
actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
np.random.seed(self.seed)
actual = np.random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_zipf(self):
np.random.seed(self.seed)
actual = np.random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
np.testing.assert_array_equal(actual, desired)
class TestThread(object):
# make sure each state produces the same sequence even in threads
def setUp(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(np.random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(np.random.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if (np.intp().dtype.itemsize == 4 and sys.platform == "win32"):
np.testing.assert_array_almost_equal(out1, out2)
else:
np.testing.assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1/6.]*6, size=10000)
self.check_function(gen_random, sz=(10000,6))
if __name__ == "__main__":
run_module_suite()
| ViralLeadership/numpy | numpy/random/tests/test_random.py | Python | bsd-3-clause | 32,289 | ["Gaussian"] | cbbbedcef5557e284b16c58b96216d8deba0cbb40a1e274378dfbd36c4ba499c |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# managejobs - simple job management interface
# Copyright (C) 2003-2014 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Simple front end to job management"""
import shared.returnvalues as returnvalues
from shared.functional import validate_input_and_cert
from shared.init import initialize_main_variables, find_entry
def signature():
"""Signature of the main function"""
defaults = {}
return ['html_form', defaults]
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id, op_header=False)
status = returnvalues.OK
defaults = signature()[1]
(validate_status, accepted) = validate_input_and_cert(
user_arguments_dict,
defaults,
output_objects,
client_id,
configuration,
allow_rejects=False,
)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
title_entry = find_entry(output_objects, 'title')
title_entry['text'] = 'Manage jobs'
output_objects.append({'object_type': 'header', 'text'
: 'Manage Jobs'})
output_objects.append({'object_type': 'sectionheader', 'text'
: 'View status of all submitted jobs'})
output_objects.append({'object_type': 'html_form', 'text'
: """
<form method="post" action="jobstatus.py">
Sort by modification time: <input type="radio" name="flags" value="sv" />yes
<input type="radio" name="flags" checked="checked" value="vi" />no<br />
<input type="hidden" name="job_id" value="*" />
<input type="hidden" name="output_format" value="html" />
<input type="submit" value="Show All" />
</form>
"""})
output_objects.append({'object_type': 'sectionheader', 'text'
: 'View status of individual jobs'})
output_objects.append({'object_type': 'html_form', 'text'
: """
Filter job IDs (* and ? wildcards are supported)<br />
<form method="post" action="jobstatus.py">
Job ID: <input type="text" name="job_id" value="*" size="30" /><br />
Show only <input type="text" name="max_jobs" size="6" value="5" /> first matching jobs<br />
Sort by modification time: <input type="radio" name="flags" checked="checked" value="vsi" />yes
<input type="radio" name="flags" value="vi" />no<br />
<input type="hidden" name="output_format" value="html" />
<input type="submit" value="Show" />
</form>
"""})
output_objects.append({'object_type': 'sectionheader', 'text'
: 'Resubmit job'})
output_objects.append({'object_type': 'html_form', 'text'
: """
<form method="post" action="resubmit.py">
Job ID: <input type="text" name="job_id" size="30" /><br />
<input type="hidden" name="output_format" value="html" />
<input type="submit" value="Submit" />
</form>
"""})
output_objects.append({'object_type': 'sectionheader', 'text'
: 'Freeze pending job'})
output_objects.append({'object_type': 'html_form', 'text'
: """
<form method="post" action="jobaction.py">
Job ID: <input type="text" name="job_id" size="30" /><br />
<input type="hidden" name="action" value="freeze" />
<input type="hidden" name="output_format" value="html" />
<input type="submit" value="Freeze job" />
</form>
"""})
output_objects.append({'object_type': 'sectionheader', 'text'
: 'Thaw frozen job'})
output_objects.append({'object_type': 'html_form', 'text'
: """
<form method="post" action="jobaction.py">
Job ID: <input type="text" name="job_id" size="30" /><br />
<input type="hidden" name="action" value="thaw" />
<input type="hidden" name="output_format" value="html" />
<input type="submit" value="Thaw job" />
</form>
"""})
output_objects.append({'object_type': 'sectionheader', 'text'
: 'Cancel pending or executing job'})
output_objects.append({'object_type': 'html_form', 'text'
: """
<form method="post" action="jobaction.py">
Job ID: <input type="text" name="job_id" size="30" /><br />
<input type="hidden" name="action" value="cancel" />
<input type="hidden" name="output_format" value="html" />
<input type="submit" value="Cancel job" />
</form>
"""})
output_objects.append({'object_type': 'sectionheader', 'text'
: 'Request live I/O'})
output_objects.append({'object_type': 'html_form', 'text'
: """
<form method="post" action="liveio.py">
Job ID: <input type="text" name="job_id" size="30" /><br />
<input type="hidden" name="output_format" value="html" />
<input type="submit" value="Request" />
</form>
<br />
"""})
return (output_objects, status)
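# --- Illustrative sketch (added for exposition): the minimal shape of the
# (output_objects, status) pair built by main() above; the text values are
# placeholders. ---
_EXAMPLE_OUTPUT_OBJECTS = [
    {'object_type': 'header', 'text': 'Manage Jobs'},
    {'object_type': 'sectionheader', 'text': 'Resubmit job'},
    {'object_type': 'html_form', 'text': '<form method="post" action="resubmit.py">...</form>'},
]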
| heromod/migrid | mig/shared/functionality/managejobs.py | Python | gpl-2.0 | 5,682 | ["Brian"] | 5750eebfa90e8df250ebd120cf069620f0dc5dc5bbf81364a3e1ccd13f1cf5a8 |
from __future__ import division, absolute_import, print_function
import warnings
import sys
import collections
import operator
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean, any, sum
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from numpy.core.multiarray import _insert, add_docstring
from numpy.core.multiarray import digitize, bincount, interp as compiled_interp
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
from numpy.compat import long
from numpy.compat.py3k import basestring
# Force range to be a generator, for np.delete's usage.
if sys.version_info[0] < 3:
range = xrange
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
]
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try:
iter(y)
except:
return 0
return 1
def _hist_optim_numbins_estimator(a, estimator):
"""
A helper function, called from histogram, to estimate the optimal number of bins.
estimator: str
If estimator is one of ['auto', 'fd', 'scott', 'rice', 'sturges'], this function
will choose the appropriate estimator and return its estimate for the optimal
number of bins.
"""
assert isinstance(estimator, basestring)
# private function should not be called otherwise
if a.size == 0:
return 1
def sturges(x):
"""
Sturges Estimator
A very simplistic estimator based on the assumption of normality of the data.
Poor performance for non-normal data, especially obvious for large X.
Depends only on size of the data.
"""
return np.ceil(np.log2(x.size)) + 1
def rice(x):
"""
Rice Estimator
Another simple estimator, with no normality assumption.
It has better performance for large data, but tends to overestimate the number of bins.
The number of bins is proportional to the cube root of data size (asymptotically optimal)
Depends only on size of the data
"""
return np.ceil(2 * x.size ** (1.0 / 3))
def scott(x):
"""
Scott Estimator
The binwidth is proportional to the standard deviation of the data and
inversely proportional to the cube root of data size (asymptotically optimal)
"""
h = 3.5 * x.std() * x.size ** (-1.0 / 3)
if h > 0:
return np.ceil(x.ptp() / h)
return 1
def fd(x):
"""
Freedman Diaconis rule using Inter Quartile Range (IQR) for binwidth
Considered a variation of the Scott rule with more robustness as the IQR
is less affected by outliers than the standard deviation. However, the IQR depends on
fewer points than the sd, so it is less accurate, especially for long-tailed distributions.
If the IQR is 0, we return 1 for the number of bins.
Binwidth is inversely proportional to the cube root of data size (asymptotically optimal)
"""
iqr = np.subtract(*np.percentile(x, [75, 25]))
if iqr > 0:
h = (2 * iqr * x.size ** (-1.0 / 3))
return np.ceil(x.ptp() / h)
# If iqr is 0, default number of bins is 1
return 1
def auto(x):
"""
The FD estimator is usually the most robust method, but its estimate tends to be too small
for small X. The Sturges estimator is quite good for small (<1000) datasets and is
the default in R.
This method gives good off-the-shelf behaviour.
"""
return max(fd(x), sturges(x))
optimal_numbins_methods = {'sturges': sturges, 'rice': rice, 'scott': scott,
'fd': fd, 'auto': auto}
try:
estimator_func = optimal_numbins_methods[estimator.lower()]
except KeyError:
raise ValueError("{0} not a valid method for `bins`".format(estimator))
else:
# these methods return floats, np.histogram requires an int
return int(estimator_func(a))
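# --- Illustrative sketch (added for exposition): comparing the estimators
# above through the public np.histogram interface (string `bins` support
# added in 1.11.0), on a toy standard-normal sample. ---
def _example_compare_bin_estimators():
    rng = np.random.RandomState(0)  # deterministic toy data
    x = rng.normal(size=1000)
    counts = {}
    for method in ('sturges', 'rice', 'scott', 'fd', 'auto'):
        _, edges = np.histogram(x, bins=method)
        counts[method] = len(edges) - 1  # number of bins actually used
    return counts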
def histogram(a, bins=10, range=None, normed=False, weights=None,
density=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
.. versionadded:: 1.11.0
If `bins` is a string from the list below, `histogram` will use the method
chosen to calculate the optimal number of bins (see Notes for more detail
on the estimators). For visualisation, we suggest using the 'auto' option.
'auto'
Maximum of the 'sturges' and 'fd' estimators. Provides good all round performance
'fd' (Freedman Diaconis Estimator)
Robust (resilient to outliers) estimator that takes into account data
variability and data size .
'scott'
Less robust estimator that that takes into account data
variability and data size.
'rice'
Estimator does not take variability into account, only data size.
Commonly overestimates number of bins required.
'sturges'
R's default method, only accounts for data size. Only optimal for
Gaussian data and underestimates the number of bins for large non-Gaussian datasets.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the density keyword
instead.
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that this latter behavior is
known to be buggy with unequal bin widths; use `density` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the `normed` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
.. versionadded:: 1.11.0
The methods to estimate the optimal number of bins are well founded in the literature,
and are inspired by the choices R provides for histogram visualisation.
Note that having the number of bins proportional to :math:`n^{1/3}` is asymptotically optimal,
which is why it appears in most estimators.
These are simply plug-in methods that give good starting points for number of bins.
In the equations below, :math:`h` is the binwidth and :math:`n_h` is the number of bins
'Auto' (maximum of the 'Sturges' and 'FD' estimators)
A compromise to get a good value. For small datasets the sturges
value will usually be chosen, while larger datasets will usually default to FD.
Avoids the overly conservative behaviour of FD and Sturges for small and
large datasets respectively. Switchover point is usually x.size~1000.
'FD' (Freedman Diaconis Estimator)
.. math:: h = 2 \\frac{IQR}{n^{1/3}}
The binwidth is proportional to the interquartile range (IQR)
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good
for large datasets. The IQR is very robust to outliers.
'Scott'
.. math:: h = \\frac{3.5\\sigma}{n^{1/3}}
The binwidth is proportional to the standard deviation (sd) of the data
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good
for large datasets. The sd is not very robust to outliers. Values
are very similar to the Freedman Diaconis Estimator in the absence of outliers.
'Rice'
.. math:: n_h = \\left\\lceil 2n^{1/3} \\right\\rceil
The number of bins is only proportional to cube root of a.size.
It tends to overestimate the number of bins
and it does not take into account data variability.
'Sturges'
.. math:: n_h = \\left\\lceil \\log _{2}n+1 \\right\\rceil
The number of bins is one plus the base-2 log of a.size.
This estimator assumes normality of data and is too conservative for larger,
non-normal datasets. This is the default method in R's `hist`.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
.. versionadded:: 1.11.0
Automated Bin Selection Methods example, using 2 peak random data with 2000 points
>>> import matplotlib.pyplot as plt
>>> rng = np.random.RandomState(10) # deterministic random data
>>> a = np.hstack((rng.normal(size = 1000), rng.normal(loc = 5, scale = 2, size = 1000)))
>>> plt.hist(a, bins = 'auto')  # plt.hist passes its arguments to np.histogram
>>> plt.title("Histogram with 'auto' bins")
>>> plt.show()
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise AttributeError(
'max must be larger than min in range parameter.')
if isinstance(bins, basestring):
bins = _hist_optim_numbins_estimator(a, bins)
# if `bins` is a string for an automatic method,
# this will replace it with the number of bins calculated
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = np.dtype(np.intp)
else:
ntype = weights.dtype
# We set a block size, as this allows us to iterate over chunks when
# computing histograms, to minimize memory usage.
BLOCK = 65536
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError(
'`bins` should be a positive integer.')
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
range = (0, 1)
else:
range = (a.min(), a.max())
mn, mx = [mi + 0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
# At this point, if the weights are not integer, floating point, or
# complex, we have to use the slow algorithm.
if weights is not None and not (np.can_cast(weights.dtype, np.double) or
np.can_cast(weights.dtype, np.complex)):
bins = linspace(mn, mx, bins + 1, endpoint=True)
if not iterable(bins):
# We now convert values of a to bin indices, under the assumption of
# equal bin widths (which is valid here).
# Initialize empty histogram
n = np.zeros(bins, ntype)
# Pre-compute histogram scaling factor
norm = bins / (mx - mn)
# We iterate over blocks here for two reasons: the first is that for
# large arrays, it is actually faster (for example for a 10^8 array it
# is 2x as fast) and it results in a memory footprint 3x lower in the
# limit of large arrays.
for i in arange(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
if weights is None:
tmp_w = None
else:
tmp_w = weights[i:i + BLOCK]
# Only include values in the right range
keep = (tmp_a >= mn)
keep &= (tmp_a <= mx)
if not np.logical_and.reduce(keep):
tmp_a = tmp_a[keep]
if tmp_w is not None:
tmp_w = tmp_w[keep]
tmp_a = tmp_a.astype(float)
tmp_a -= mn
tmp_a *= norm
# Compute the bin indices, and for values that lie exactly on mx we
# need to subtract one
indices = tmp_a.astype(np.intp)
indices[indices == bins] -= 1
# We now compute the histogram using bincount
if ntype.kind == 'c':
n.real += np.bincount(indices, weights=tmp_w.real, minlength=bins)
n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=bins)
else:
n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype)
# We now compute the bin edges since these are returned
bins = linspace(mn, mx, bins + 1, endpoint=True)
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise AttributeError(
'bins must increase monotonically.')
# Initialize empty histogram
n = np.zeros(bins.shape, ntype)
if weights is None:
for i in arange(0, len(a), BLOCK):
sa = sort(a[i:i+BLOCK])
n += np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
tmp_w = weights[i:i+BLOCK]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero, ], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
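# --- Illustrative sketch (added for exposition): accumulating bincount
# over BLOCK-sized slices, as histogram does above, produces exactly the
# same counts as a single pass, so the chunking only trades speed/memory. ---
def _example_blocked_bincount():
    rng = np.random.RandomState(1)
    indices = rng.randint(0, 10, size=200000)
    full = np.bincount(indices, minlength=10)
    blocked = np.zeros(10, np.intp)
    for i in range(0, len(indices), 65536):  # same BLOCK size as above
        blocked += np.bincount(indices[i:i + 65536], minlength=10)
    return np.array_equal(full, blocked)  # True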
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_volume``.
weights : (N,) array_like, optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise AttributeError(
'The dimension of bins must be equal to the dimension of the '
'sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# avoid rounding issues for comparisons when dealing with inexact types
if np.issubdtype(sample.dtype, np.inexact):
edge_dt = sample.dtype
else:
edge_dt = float
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError(
"Element at index %s in `bins` should be a positive "
"integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
else:
edges[i] = asarray(bins[i], edge_dt)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError(
"Found bin edge of size <= 0. Did you specify `bins` with"
"non-monotonic sequence?")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
on_edge = (around(sample[:, i], decimal) ==
around(edges[i][-1], decimal))
# Shift these points one bin to the left.
Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1, -1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : array_type or double
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
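A sketch of `returned`, which also yields the sum of the weights:
>>> np.average(range(1, 11), weights=range(10, 0, -1), returned=True)
(4.0, 55.0)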
"""
if not isinstance(a, np.matrix):
a = np.asarray(a)
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else:
a = a + 0.0
wgt = np.asarray(weights)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis, dtype=np.result_type(a.dtype, wgt.dtype))
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
def asarray_chkfinite(a, dtype=None, order=None):
"""Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major (C-style) or
column-major (Fortran-style) memory representation.
Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains NaNs or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print('ValueError')
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., alpha=1)``, then each function is called as
``f(x, alpha=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have a default value of 0.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
        |--
        |funclist[0](x[condlist[0]])
  out = |funclist[1](x[condlist[1]])
        |...
        |funclist[n2](x[condlist[n2]])
        |--
Examples
--------
Define the signum function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.linspace(-2.5, 2.5, 6)
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
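Given one extra function, it supplies the value wherever no condition
holds (a sketch of the default case):
>>> np.piecewise(x, [x < -1, x > 1], [-1, 1, 0])
array([-1., -1.,  0.,  0.,  1.,  1.])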
"""
x = asanyarray(x)
n2 = len(funclist)
if (isscalar(condlist) or not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray))):
condlist = [condlist]
condlist = array(condlist, dtype=bool)
n = len(condlist)
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
zerod = False
if x.ndim == 0:
x = x[None]
zerod = True
if condlist.shape[-1] != 1:
condlist = condlist.T
if n == n2 - 1: # compute the "otherwise" condition.
totlist = np.logical_or.reduce(condlist, axis=0)
condlist = np.vstack([condlist, ~totlist])
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not isinstance(item, collections.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
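Entries where no condition holds take `default` (a sketch):
>>> np.select(condlist, choicelist, default=-1)
array([ 0,  1,  2, -1, -1, -1, 36, 49, 64, 81])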
"""
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError(
'list of cases must be same length as list of conditions')
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
# 2014-02-24, 1.9
warnings.warn("select with an empty condition list is not possible"
"and will be deprecated",
DeprecationWarning)
return np.asarray(default)[()]
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
# need to get the result type before broadcasting for correct scalar
# behaviour
dtype = np.result_type(*choicelist)
# Convert conditions to arrays and broadcast conditions and choices
# as the shape is needed for the result. Doing it separately optimizes
# for example when all choices are scalars.
condlist = np.broadcast_arrays(*condlist)
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
if np.issubdtype(cond.dtype, np.integer):
# A previous implementation accepted int ndarrays accidentally.
# Supported here deliberately, but deprecated.
condlist[i] = condlist[i].astype(bool)
deprecated_ints = True
else:
raise ValueError(
'invalid entry in choicelist: should be boolean ndarray')
if deprecated_ints:
# 2014-02-24, 1.9
msg = "select condlists containing integer ndarrays is deprecated " \
"and will be removed in the future. Use `.astype(bool)` to " \
"convert to bools."
warnings.warn(msg, DeprecationWarning)
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
result_shape = condlist[0].shape
else:
result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
result = np.full(result_shape, choicelist[-1], dtype)
# Use np.copyto to burn each choicelist array onto result, using the
# corresponding condlist as a boolean mask. This is done in reverse
# order since the first choice should take precedence.
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
np.copyto(result, choice, where=cond)
return result
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :meth:ndarray.copy are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
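With the default ``order='K'``, the memory layout of `a` is preserved
where possible (a sketch):
>>> a = np.array([[1, 2], [3, 4]], order='F')
>>> np.copy(a).flags['F_CONTIGUOUS']
True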
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
in the interior and either first differences or second order accurate
one-sides (forward or backwards) differences at the boundaries. The
returned gradient hence has the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
varargs : scalar or list of scalar, optional
N scalars specifying the sample distances for each dimension,
i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
A single scalar specifies the sample distance for all dimensions.
If `axis` is given, the number of varargs must equal the number of axes.
edge_order : {1, 2}, optional
Gradient is calculated using N\ :sup:`th` order accurate differences
at the boundaries. Default: 1.
.. versionadded:: 1.9.1
axis : None or int or tuple of ints, optional
Gradient is calculated only along the given axis or axes
The default (axis = None) is to calculate the gradient for all the axes of the input array.
axis may be negative, in which case it counts from the last to the first axis.
.. versionadded:: 1.11.0
Returns
-------
gradient : list of ndarray
Each element of `list` has the same shape as `f` giving the derivative
of `f` with respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
For two dimensional arrays, the return will be two arrays ordered by
axis. In this example the first array stands for the gradient in
rows and the second one in columns direction:
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
>>> x = np.array([0, 1, 2, 3, 4])
>>> dx = np.gradient(x)
>>> y = x**2
>>> np.gradient(y, dx, edge_order=2)
array([-0., 2., 4., 6., 8.])
The axis keyword can be used to specify a subset of axes of which the gradient is calculated
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0)
array([[ 2., 2., -1.],
[ 2., 2., -1.]])
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
axes = kwargs.pop('axis', None)
if axes is None:
axes = tuple(range(N))
# check axes to have correct type and no duplicate entries
if isinstance(axes, int):
axes = (axes,)
if not isinstance(axes, tuple):
raise TypeError("A tuple of integers or a single integer is required")
# normalize axis values:
axes = tuple(x + N if x < 0 else x for x in axes)
if max(axes) >= N or min(axes) < 0:
raise ValueError("'axis' entry is out of bounds")
if len(set(axes)) != len(axes):
raise ValueError("duplicate value in 'axis'")
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == len(axes):
dx = list(varargs)
else:
raise TypeError(
"invalid number of arguments")
edge_order = kwargs.pop('edge_order', 1)
if kwargs:
raise TypeError('"{}" are not valid keyword arguments.'.format(
'", "'.join(kwargs.keys())))
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second order-accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
slice4 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M':
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm':
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
# Convert datetime64 data into ints. Make dummy variable `y`
# that is a view of ints if the data is datetime64, otherwise
# just set y equal to the array `f`.
if f.dtype.char in ["M", "m"]:
y = f.view('int64')
else:
y = f
for i, axis in enumerate(axes):
if y.shape[axis] < 2:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least two elements are required.")
# Numerical differentiation: 1st order edges, 2nd order interior
if y.shape[axis] == 2 or edge_order == 1:
# Use first order differences for time data
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (y[1] - y[0])
out[slice1] = (y[slice2] - y[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (y[-1] - y[-2])
out[slice1] = (y[slice2] - y[slice3])
# Numerical differentiation: 2nd order edges, 2nd order interior
else:
# Use second order differences where possible
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
# 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
slice4[axis] = -3
# 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3]) / 2.0
out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
# divide by step size
out /= dx[i]
outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if len(axes) == 1:
return outvals[0]
else:
return outvals
def diff(a, n=1, axis=-1):
"""
Calculate the n-th discrete difference along given axis.
The first difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
The n-th differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d, cumsum
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
def interp(x, xp, fp, left=None, right=None, period=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing if argument
`period` is not specified. Otherwise, `xp` is internally sorted after
normalizing the periodic boundaries with ``xp = xp % period``.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
Value to return for `x > xp[-1]`, default is `fp[-1]`.
period : None or float, optional
A period for the x-coordinates. This parameter allows the proper
interpolation of angular x-coordinates. Parameters `left` and `right`
are ignored if `period` is specified.
.. versionadded:: 1.10.0
Returns
-------
y : float or ndarray
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
If `xp` or `fp` are not 1-D sequences
If `period == 0`
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
Interpolation with periodic x-coordinates:
>>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
>>> xp = [190, -190, 350, -350]
>>> fp = [5, 10, 3, 4]
>>> np.interp(x, xp, fp, period=360)
array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
"""
if period is None:
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
else:
if period == 0:
raise ValueError("period must be a non-zero value")
period = abs(period)
left = None
right = None
return_array = True
if isinstance(x, (float, int, number)):
return_array = False
x = [x]
x = np.asarray(x, dtype=np.float64)
xp = np.asarray(xp, dtype=np.float64)
fp = np.asarray(fp, dtype=np.float64)
if xp.ndim != 1 or fp.ndim != 1:
raise ValueError("Data points must be 1-D sequences")
if xp.shape[0] != fp.shape[0]:
raise ValueError("fp and xp are not of the same length")
# normalizing periodic boundaries
x = x % period
xp = xp % period
asort_xp = np.argsort(xp)
xp = xp[asort_xp]
fp = fp[asort_xp]
xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
fp = np.concatenate((fp[-1:], fp, fp[0:1]))
if return_array:
return compiled_interp(x, xp, fp, left, right)
else:
return compiled_interp(x, xp, fp, left, right).item()
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : ndarray or scalar
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
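As noted above, a jump larger than `discont` but smaller than ``pi``
is left untouched (a sketch):
>>> np.unwrap([0.0, 2.5], discont=1.0)
array([ 0. ,  2.5])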
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
ddmod = mod(dd + pi, 2*pi) - pi
_nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
ph_correct = ddmod - dd
_nx.copyto(ph_correct, 0, where=abs(dd) < discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a, copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.:
break
else:
first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.:
break
else:
last = last - 1
return filt[first:last]
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True], tmp[1:] != tmp[:-1]))
return tmp[idx]
except AttributeError:
items = sorted(set(x))
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Note that `place` does the exact opposite of `extract`.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress, place
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : array_like
Array to put data into.
mask : array_like
Boolean mask array. Must have the same size as `arr`.
vals : 1-D sequence
Values to put into `arr`. Only the first N elements are used, where
N is the number of True values in `mask`. If `vals` is smaller
than N it will be repeated.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
return _insert(arr, mask, vals)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from io import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the
first argument will be used to determine the number of outputs. The
results of this call will be cached if `cache` is `True` to prevent
calling the function twice. However, to implement the cache, the
original function must be wrapped which will slow down subsequent
calls, so only do this if your function is expensive.
The new keyword argument interface and `excluded` argument support
further degrade performance.
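A minimal sketch of `cache` (the first call is cached and reused only
when determining the output type):
>>> vadd1 = np.vectorize(lambda t: t + 1, cache=True)
>>> vadd1([1, 2])
array([2, 3])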
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
cache=False):
self.pyfunc = pyfunc
self.cache = cache
self._ufunc = None # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError(
"Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError(
"Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError('args can not be empty')
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
_res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(_a, copy=False, subok=True, dtype=object)
for _a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
_res = array(outputs,
copy=False, subok=True, dtype=otypes[0])
else:
_res = tuple([array(_x, copy=False, subok=True, dtype=_t)
for _x, _t in zip(outputs, otypes)])
return _res
def cov(m, y=None, rowvar=1, bias=0, ddof=None, fweights=None, aweights=None):
"""
Estimate a covariance matrix, given data and weights.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
See the notes for an outline of the algorithm.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same form
as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` corresponds to the
number of observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using the
keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
If not ``None`` the default value implied by `bias` is overridden.
Note that ``ddof=1`` will return the unbiased estimate, even if both
`fweights` and `aweights` are specified, and ``ddof=0`` will return
the simple average. See the notes for the details. The default value
is ``None``.
.. versionadded:: 1.5
fweights : array_like, int, optional
1-D array of integer frequency weights; the number of times each
observation vector should be repeated.
.. versionadded:: 1.10
aweights : array_like, optional
1-D array of observation vector weights. These relative weights are
typically large for observations considered "important" and smaller for
observations considered less "important". If ``ddof=0`` the array of
weights can be used to assign probabilities to observation vectors.
.. versionadded:: 1.10
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Notes
-----
Assume that the observations are in the columns of the observation
array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
steps to compute the weighted covariance are as follows::
>>> w = f * a
>>> v1 = np.sum(w)
>>> v2 = np.sum(w * a)
>>> m -= np.sum(m * w, axis=1, keepdims=True) / v1
>>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)
Note that when ``a == 1``, the normalization factor
``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
as it should.
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print(np.cov(X))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print(np.cov(x, y))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print(np.cov(x))
11.71
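Frequency weights behave like repeated observations (a sketch):
>>> x = np.array([[0., 1., 2.], [2., 1., 0.]])
>>> np.allclose(np.cov(x, fweights=[1, 2, 1]),
...             np.cov(np.array([[0., 1., 1., 2.], [2., 1., 1., 0.]])))
True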
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = np.asarray(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = np.asarray(y)
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if rowvar == 0 and X.shape[0] != 1:
X = X.T
if X.shape[0] == 0:
return np.array([]).reshape(0, 0)
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=dtype)
if rowvar == 0 and y.shape[0] != 1:
y = y.T
X = np.vstack((X, y))
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
# Get the product of frequencies and weights
w = None
if fweights is not None:
fweights = np.asarray(fweights, dtype=np.float)
if not np.all(fweights == np.around(fweights)):
raise TypeError(
"fweights must be integer")
if fweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional fweights")
if fweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and fweights")
if any(fweights < 0):
raise ValueError(
"fweights cannot be negative")
w = fweights
if aweights is not None:
aweights = np.asarray(aweights, dtype=np.float)
if aweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional aweights")
if aweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and aweights")
if any(aweights < 0):
raise ValueError(
"aweights cannot be negative")
if w is None:
w = aweights
else:
w *= aweights
avg, w_sum = average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
# Determine the normalization
if w is None:
fact = X.shape[1] - ddof
elif ddof == 0:
fact = w_sum
elif aweights is None:
fact = w_sum - ddof
else:
fact = w_sum - ddof*sum(w*aweights)/w_sum
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
fact = 0.0
X -= avg[:, None]
if w is None:
X_T = X.T
else:
X_T = (X*w).T
c = dot(X, X_T.conj())
c *= 1. / np.float64(fact)
return c.squeeze()
def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue):
"""
Return Pearson product-moment correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `R`, and the
covariance matrix, `C`, is
.. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `R` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
ddof : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
Returns
-------
R : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
Notes
-----
This function accepts but discards arguments `bias` and `ddof`. This is
for backwards compatibility with previous versions of this function. These
arguments had no effect on the return values of the function and can be
safely ignored in this and previous versions of numpy.
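Examples
--------
Two perfectly anti-correlated variables (a sketch):
>>> np.corrcoef(np.array([[0, 1, 2], [2, 1, 0]]))
array([[ 1., -1.],
       [-1.,  1.]])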
"""
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
warnings.warn('bias and ddof have no effect and are deprecated',
DeprecationWarning)
c = cov(x, y, rowvar)
try:
d = diag(c)
except ValueError: # scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
d = sqrt(d)
# calculate "c / multiply.outer(d, d)" row-wise ... for memory and speed
for i in range(0, d.size):
c[i,:] /= (d * d[i])
return c
def blackman(M):
"""
Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/(M-1)) + 0.08 \\cos(4\\pi n/(M-1))
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
apodization (which means"removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
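# Editor's sketch (hypothetical helper, not part of the numpy API): unlike the
# Hann window, the Hamming window does not taper to zero at its endpoints but
# to 0.54 - 0.46 = 0.08, which is the main practical difference between them.
def _window_endpoints_demo(M=12):
    # returns (0.0, 0.08) for any M > 1
    return hanning(M)[0], hamming(M)[0]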
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1
]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1
]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
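# Editor's sketch: _chbevl evaluates a Chebyshev series via the Clenshaw
# recurrence.  For the [0, 8] branch of i0 the argument x is mapped into
# [-2, 2] as x/2 - 2 before evaluating the _i0A coefficients (see _i0_1
# below).  The helper name is hypothetical and not part of the numpy API.
def _chbevl_demo(x=1.0):
    # exp(x) * _chbevl(x/2 - 2, _i0A) approximates I0(x); I0(1.0) ~ 1.2660658
    return exp(x) * _chbevl(x/2.0 - 2, _i0A)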
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is
partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
polynomial expansions are employed in each interval. Relative error on
the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x < 0)
x[ind] = -x[ind]
ind = (x <= 8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
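# Editor's sketch (hypothetical helper, not part of the numpy API): i0 splits
# its domain at x = 8; the two Chebyshev expansions agree at the seam to
# within the documented relative error, which the demo below illustrates.
def _i0_branch_demo():
    # both values approximate I0(8) and should agree to ~1e-15 relative
    return _i0_1(8.0), _i0_2(8.0)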
def kaiser(M, beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple
approximation to the DPSS window based on Bessel functions. The Kaiser
window is a very good approximation to the Digital Prolate Spheroidal
    Sequence, or Slepian window, which is the window that maximizes the
    energy in the main lobe relative to the total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0, M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
        Array (possibly multi-dimensional) of values for which to
        calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a Lanczos resampling
filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.linspace(-4, 4, 401)
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
y = pi * where(x == 0, 1.0e-20, x)
return sin(y)/y
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a, subok=True, copy=True)
b.sort(0)
return b
def _ureduce(a, func, **kwargs):
"""
Internal Function.
Call `func` with `a` as first argument swapping the axes to use extended
axis on functions that don't support it natively.
Returns result and a.shape with axis dims set to 1.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
func : callable
        Reduction function capable of receiving an axis argument.
        It is called with `a` as first argument followed by `kwargs`.
kwargs : keyword arguments
additional keyword arguments to pass to `func`.
Returns
-------
result : tuple
Result of func(a, **kwargs) and a.shape with axis dims set to 1
which can be used to reshape the result to the same shape a ufunc with
keepdims=True would produce.
"""
a = np.asanyarray(a)
axis = kwargs.get('axis', None)
if axis is not None:
keepdim = list(a.shape)
nd = a.ndim
try:
axis = operator.index(axis)
if axis >= nd or axis < -nd:
raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
keepdim[axis] = 1
except TypeError:
sax = set()
for x in axis:
if x >= nd or x < -nd:
raise IndexError("axis %d out of bounds (%d)" % (x, nd))
if x in sax:
raise ValueError("duplicate value in axis")
sax.add(x % nd)
keepdim[x] = 1
keep = sax.symmetric_difference(frozenset(range(nd)))
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
else:
keepdim = [1] * a.ndim
r = func(a, **kwargs)
return r, keepdim
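# Editor's sketch (hypothetical helper, not part of the numpy API): a typical
# use of _ureduce is to emulate keepdims over an extended (tuple) axis for a
# reduction that natively understands only a single axis.
def _ureduce_keepdims_demo(a, axis=None):
    r, k = _ureduce(np.asanyarray(a), func=np.sum, axis=axis)
    # reshaping with the returned dims reproduces np.sum(a, axis=axis, keepdims=True)
    return np.asanyarray(r).reshape(k)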
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the medians are computed. The default (axis=None)
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape and buffer length as the expected output, but the
type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve the
contents of the input array. Treat the input as undefined, but it
will probably be fully or partially sorted. Default is False. Note
that, if `overwrite_input` is True and the input is not already an
ndarray, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
median : ndarray
A new array holding the result (unless `out` is specified, in which
case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, percentile
Notes
-----
Given a vector V of length N, the median of V is the middle value of
a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
odd. When N is even, it is the average of the two middle values of
``V_sorted``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
def _median(a, axis=None, out=None, overwrite_input=False):
    # can't reasonably be implemented in terms of percentile as we have to
    # call mean so as not to break astropy
a = np.asanyarray(a)
# Set the partition indexes
if axis is None:
sz = a.size
else:
sz = a.shape[axis]
if sz % 2 == 0:
szh = sz // 2
kth = [szh - 1, szh]
else:
kth = [(sz - 1) // 2]
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
kth.append(-1)
if overwrite_input:
if axis is None:
part = a.ravel()
part.partition(kth)
else:
a.partition(kth, axis=axis)
part = a
else:
part = partition(a, kth, axis=axis)
if part.shape == ():
# make 0-D arrays work
return part.item()
if axis is None:
axis = 0
indexer = [slice(None)] * part.ndim
index = part.shape[axis] // 2
if part.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact) and sz > 0:
# warn and return nans like mean would
rout = mean(part[indexer], axis=axis, out=out)
part = np.rollaxis(part, axis, part.ndim)
n = np.isnan(part[..., -1])
if rout.ndim == 0:
            if n:
warnings.warn("Invalid value encountered in median",
RuntimeWarning)
if out is not None:
out[...] = a.dtype.type(np.nan)
rout = out
else:
rout = a.dtype.type(np.nan)
elif np.count_nonzero(n.ravel()) > 0:
warnings.warn("Invalid value encountered in median for" +
" %d results" % np.count_nonzero(n.ravel()),
RuntimeWarning)
rout[n] = np.nan
return rout
else:
# if there are no nans
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(part[indexer], axis=axis, out=out)
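# Editor's sketch (hypothetical helper, not part of the numpy API): the nan
# handling in _median works because appending -1 to `kth` makes partition
# place the maximum -- and NaN compares as larger than any number -- in the
# last slot, so inspecting the last element along the axis detects NaNs.
def _median_nan_demo():
    part = partition(np.array([1.0, np.nan, 3.0]), [1, -1])
    return np.isnan(part[-1])  # True: the NaN was moved to the last slot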
def percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
        Percentile to compute, which must be between 0 and 100 inclusive.
axis : int or sequence of int, optional
Axis along which the percentiles are computed. The default (None)
is to compute the percentiles along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
percentile. This will save memory when you do not need to preserve
the contents of the input array. In this case you should not make
any assumptions about the content of the passed in array `a` after
this function completes -- treat it as undefined. Default is False.
Note that, if the `a` input is not already an array this parameter
will have no effect, `a` will be converted to an array internally
regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
.. versionadded:: 1.9.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original array `a`.
.. versionadded:: 1.9.0
Returns
-------
percentile : scalar or ndarray
        If a single percentile `q` is given and axis=None, a scalar is
        returned. If multiple percentiles `q` are given, an array holding
        the results is returned, with the results listed along the first
        axis. If `out` is specified, that array is returned instead. If
        the input contains integers, or floats of smaller precision than
        64, then the output data-type is float64. Otherwise, the output
        data-type is the same as that of the input.
See Also
--------
mean, median
Notes
-----
Given a vector V of length N, the q-th percentile of V is the q-th ranked
value in a sorted copy of V. The values and distances of the two
nearest neighbors as well as the `interpolation` parameter will
determine the percentile if the normalized ranking does not match q
exactly. This function is the same as the median if ``q=50``, the same
as the minimum if ``q=0`` and the same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
    >>> np.percentile(a, 50)
    3.5
    >>> np.percentile(a, 50, axis=0)
    array([ 6.5, 4.5, 2.5])
    >>> np.percentile(a, 50, axis=1)
    array([ 7., 2.])
    >>> m = np.percentile(a, 50, axis=0)
    >>> out = np.zeros_like(m)
    >>> np.percentile(a, 50, axis=0, out=m)
    array([ 6.5, 4.5, 2.5])
    >>> m
    array([ 6.5, 4.5, 2.5])
    >>> b = a.copy()
    >>> np.percentile(b, 50, axis=1, overwrite_input=True)
    array([ 7., 2.])
    >>> assert not np.all(a==b)
    >>> b = a.copy()
    >>> np.percentile(b, 50, axis=None, overwrite_input=True)
    3.5
"""
q = array(q, dtype=np.float64, copy=True)
r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims:
if q.ndim == 0:
return r.reshape(k)
else:
return r.reshape([len(q)] + k)
else:
return r
def _percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
a = asarray(a)
if q.ndim == 0:
# Do not allow 0-d arrays because following code fails for scalar
zerod = True
q = q[None]
else:
zerod = False
# avoid expensive reductions, relevant for arrays with < O(1000) elements
if q.size < 10:
for i in range(q.size):
if q[i] < 0. or q[i] > 100.:
raise ValueError("Percentiles must be in the range [0,100]")
q[i] /= 100.
else:
# faster than any()
if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
raise ValueError("Percentiles must be in the range [0,100]")
q /= 100.
    # prepare a for partitioning
if overwrite_input:
if axis is None:
ap = a.ravel()
else:
ap = a
else:
if axis is None:
ap = a.flatten()
else:
ap = a.copy()
if axis is None:
axis = 0
Nx = ap.shape[axis]
indices = q * (Nx - 1)
# round fractional indices according to interpolation method
if interpolation == 'lower':
indices = floor(indices).astype(intp)
elif interpolation == 'higher':
indices = ceil(indices).astype(intp)
elif interpolation == 'midpoint':
indices = floor(indices) + 0.5
elif interpolation == 'nearest':
indices = around(indices).astype(intp)
elif interpolation == 'linear':
pass # keep index as fraction and interpolate
else:
raise ValueError(
"interpolation can only be 'linear', 'lower' 'higher', "
"'midpoint', or 'nearest'")
n = np.array(False, dtype=bool) # check for nan's flag
if indices.dtype == intp: # take the points along axis
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = concatenate((indices, [-1]))
ap.partition(indices, axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = indices[:-1]
n = np.isnan(ap[-1:, ...])
if zerod:
indices = indices[0]
r = take(ap, indices, axis=axis, out=out)
else: # weight the points above and below the indices
indices_below = floor(indices).astype(intp)
indices_above = indices_below + 1
indices_above[indices_above > Nx - 1] = Nx - 1
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = concatenate((indices_above, [-1]))
weights_above = indices - indices_below
weights_below = 1.0 - weights_above
weights_shape = [1, ] * ap.ndim
weights_shape[axis] = len(indices)
weights_below.shape = weights_shape
weights_above.shape = weights_shape
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
weights_below = np.rollaxis(weights_below, axis, 0)
weights_above = np.rollaxis(weights_above, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = indices_above[:-1]
n = np.isnan(ap[-1:, ...])
x1 = take(ap, indices_below, axis=axis) * weights_below
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with qth is first
x1 = np.rollaxis(x1, axis, 0)
x2 = np.rollaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
x2 = x2.squeeze(0)
if out is not None:
r = add(x1, x2, out=out)
else:
r = add(x1, x2)
if np.any(n):
warnings.warn("Invalid value encountered in median",
RuntimeWarning)
if zerod:
if ap.ndim == 1:
if out is not None:
out[...] = a.dtype.type(np.nan)
r = out
else:
r = a.dtype.type(np.nan)
else:
r[..., n.squeeze(0)] = a.dtype.type(np.nan)
else:
if r.ndim == 1:
r[:] = a.dtype.type(np.nan)
else:
r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan)
return r
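# Editor's sketch (hypothetical helper, not part of the numpy API): the
# 'linear' interpolation rule implemented above, written out for a 1-D
# already-sorted input with q in [0, 100].
def _percentile_linear_demo(sorted_1d, q):
    idx = (q / 100.0) * (len(sorted_1d) - 1)  # fractional index into the data
    lo = int(floor(idx))
    hi = min(lo + 1, len(sorted_1d) - 1)
    frac = idx - lo
    return sorted_1d[lo] + frac * (sorted_1d[hi] - sorted_1d[lo])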
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
Specify the axis.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
    Image [2]_ illustrates the trapezoidal rule -- the y-axis locations of
    the points are taken from the `y` array; by default the x-axis
    distances between points are 1.0, but they can alternatively be
    provided through the `x` array or the `dx` scalar. The return value
    equals the combined area under the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
try:
ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError:
# Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
# always succeeds
def add_newdoc(place, obj, doc):
"""Adds documentation to obj which is in module place.
    If doc is a string, add it to obj as a docstring.
    If doc is a tuple, then the first element is interpreted as
    an attribute of obj and the second as the docstring:
    (method, docstring).
    If doc is a list, then each element of the list should be a
    sequence of length two --> [(method1, docstring1),
    (method2, docstring2), ...]
This routine never raises an error.
    This routine cannot modify read-only docstrings, such as those
    of new-style classes or built-in functions. Because this
    routine never raises an error, the caller must check manually
    that the docstrings were changed.
"""
try:
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
add_docstring(new, doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new, doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new, val[0]), val[1].strip())
except:
pass
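# Editor's sketch (hypothetical targets, wrapped in a function so nothing runs
# at import time): the three accepted forms of the `doc` argument.
def _add_newdoc_demo():
    # string form: replace the docstring of the object itself
    add_newdoc('numpy.core', 'dot', "hypothetical docstring")
    # tuple form: document a single attribute of the object
    add_newdoc('numpy.core', 'ndarray', ('sort', "hypothetical method docstring"))
    # list form: several (attribute, docstring) pairs at once
    add_newdoc('numpy.core', 'ndarray', [('sort', "doc 1"), ('mean', "doc 2")])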
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
If False, a view into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
        For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing
keyword argument. Giving the string 'ij' returns a meshgrid with
matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
In the 2-D case with inputs of length M and N, the outputs are of shape
(N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
with inputs of length M, N and P, outputs are of shape (N, M, P) for
'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
illustrated by the following code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Notes
-----
Often it is preferable to use a boolean mask. For example:
>>> mask = np.ones(len(arr), dtype=bool)
>>> mask[[0,2,4]] = False
>>> result = arr[mask,...]
Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
use of `mask`.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
if ndim == 0:
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from delete and raise an error", DeprecationWarning)
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
start, stop, step = obj.indices(N)
xr = range(start, stop, step)
numtodel = len(xr)
if numtodel <= 0:
if wrap:
return wrap(arr.copy())
else:
return arr.copy()
# Invert if step is negative:
if step < 0:
step = -step
start = xr[-1]
stop = xr[0] + 1
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
        # copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
keep = ones(stop-start, dtype=bool)
keep[:stop-start:step] = False
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(start, stop)
arr = arr[slobj2]
slobj2[axis] = keep
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
else:
return new
_obj = obj
obj = np.asarray(obj)
# After removing the special handling of booleans and out of
# bounds values, the conversion to the array can be removed.
if obj.dtype == bool:
warnings.warn(
"in the future insert will treat boolean arrays and array-likes "
"as boolean index instead of casting it to integer", FutureWarning)
obj = obj.astype(intp)
if isinstance(_obj, (int, long, integer)):
# optimization for a single value
obj = obj.item()
if (obj < -N or obj >= N):
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (obj < 0):
obj += N
newshape[axis] -= 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1, None)
new[slobj] = arr[slobj2]
else:
if obj.size == 0 and not isinstance(_obj, np.ndarray):
obj = obj.astype(intp)
if not np.can_cast(obj, intp, 'same_kind'):
# obj.size = 1 special case always failed and would just
# give superfluous warnings.
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in delete will result in an "
"error in the future", DeprecationWarning)
obj = obj.astype(intp)
keep = ones(N, dtype=bool)
# Test if there are out of bound indices, this is deprecated
inside_bounds = (obj < N) & (obj >= -N)
if not inside_bounds.all():
# 2013-09-24, 1.9
warnings.warn(
"in the future out of bounds indices will raise an error "
"instead of being ignored by `numpy.delete`.",
DeprecationWarning)
obj = obj[inside_bounds]
positive_indices = obj >= 0
if not positive_indices.all():
warnings.warn(
"in the future negative indices will not be ignored by "
"`numpy.delete`.", FutureWarning)
obj = obj[positive_indices]
keep[obj, ] = False
slobj[axis] = keep
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
.. versionadded:: 1.8.0
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (similar to calling insert multiple
times).
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
`values` should be shaped so that ``arr[...,obj,...] = values``
is legal.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
concatenate : Join a sequence of arrays along an existing axis.
delete : Delete elements from an array.
Notes
-----
    Note that for higher dimensional inserts `obj=0` behaves very differently
    from `obj=[0]`, just as `arr[:,0,:] = values` is different from
`arr[:,[0],:] = values`.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
Difference between sequence and scalars:
>>> np.insert(a, [1], [[1],[2],[3]], axis=1)
array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
>>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
... np.insert(a, [1], [[1],[2],[3]], axis=1))
True
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
else:
if ndim > 0 and (axis < -ndim or axis >= ndim):
raise IndexError(
"axis %i is out of bounds for an array of "
"dimension %i" % (axis, ndim))
if (axis < 0):
axis += ndim
if (ndim == 0):
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from insert and raise an error", DeprecationWarning)
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
# turn it into a range object
indices = arange(*obj.indices(N), **{'dtype': intp})
else:
# need to copy obj, because indices will be changed in-place
indices = np.array(obj)
if indices.dtype == bool:
# See also delete
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
"integer", FutureWarning)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
# raise ValueError('boolean array argument obj to insert '
# 'must be one dimensional')
#indices = np.flatnonzero(obj)
elif indices.ndim > 1:
raise ValueError(
"index array argument obj to insert must be one dimensional "
"or scalar")
if indices.size == 1:
index = indices.item()
if index < -N or index > N:
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (index < 0):
index += N
# There are some object array corner cases here, but we cannot avoid
# that:
values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
if indices.ndim == 0:
            # broadcasting is very different here, since a[:,0,:] = ... behaves
            # very differently from a[:,[0],:] = ...!  This changes values so
            # that it works like the second case. (here a[:,0:1,:])
values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
numnew = values.shape[axis]
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, index)
new[slobj] = arr[slobj]
slobj[axis] = slice(index, index+numnew)
new[slobj] = values
slobj[axis] = slice(index+numnew, None)
slobj2 = [slice(None)] * ndim
slobj2[axis] = slice(index, None)
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
return new
elif indices.size == 0 and not isinstance(obj, np.ndarray):
# Can safely cast the empty list to intp
indices = indices.astype(intp)
if not np.can_cast(indices, intp, 'same_kind'):
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in insert will result in an "
"error in the future", DeprecationWarning)
indices = indices.astype(intp)
indices[indices < 0] += N
numnew = len(indices)
order = indices.argsort(kind='mergesort') # stable sort
indices[order] += np.arange(numnew)
newshape[axis] += numnew
old_mask = ones(newshape[axis], dtype=bool)
old_mask[indices] = False
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = indices
slobj2[axis] = old_mask
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
|
saquiba2/numpy2
|
numpy/lib/function_base.py
|
Python
|
bsd-3-clause
| 143,343
|
[
"Gaussian"
] |
9ee8721281153a1dc5a20f8bd8f0e0e56c2d30b2c76fdf9785e70c9c2de4dece
|
import os
import unittest
import netCDF4
from lxml import etree
from petulantbear.netcdf_etree import parse_nc_dataset_as_etree, dataset2ncml
namespaces = {
'x': 'http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2'
}
class TestPb(unittest.TestCase):
def setUp(self):
self.file = os.path.abspath(os.path.join(os.path.dirname(__file__), 'test.nc'))
def test_ncml_string(self):
with netCDF4.Dataset(self.file) as ds:
ncml = dataset2ncml(ds, url="file:{}".format(self.file))
root = etree.fromstring(ncml)
assert isinstance(root, etree._Element)
def test_dimension(self):
with netCDF4.Dataset(self.file, 'a') as ds:
root = parse_nc_dataset_as_etree(ds)
dim = root[0]
assert dim.attrib['name'] == 'bad_name'
def test_variable(self):
with netCDF4.Dataset(self.file, 'a') as ds:
root = parse_nc_dataset_as_etree(ds)
vs = root.xpath('/x:netcdf/x:variable', namespaces=namespaces)
assert len(vs) > 0
for v in vs:
assert v.attrib['name']
v = root.xpath("x:variable[@name='var4']", namespaces=namespaces)[0]
assert v.attrib['name'] == 'var4'
v = root.xpath("x:variable[@name='var4']/x:attribute[@name='foo']", namespaces=namespaces)[0]
assert v.attrib['name'] == 'foo'
assert v.attrib['value'] == 'bar'
def test_global(self):
with netCDF4.Dataset(self.file, 'a') as ds:
root = parse_nc_dataset_as_etree(ds)
g = root.xpath("/x:netcdf/x:attribute[@name='foo']", namespaces=namespaces)[0]
assert g.attrib['name'] == 'foo'
|
ioos/petulant-bear
|
petulantbear/test_pb.py
|
Python
|
gpl-3.0
| 1,718
|
[
"NetCDF"
] |
89f07c5fb6286bfb8f090ea4e65be4b42b88955c4b3b37e064e052216007a0e7
|
from __future__ import division
import imgaug as ia
from scipy.misc import imsave
from imgaug import augmenters as iaa
import numpy as np
import glob
import matplotlib.image as img
import matplotlib.pyplot as plt
import cv2
images = img.imread('/home/ahmed/Pictures/cogedis/cogedis_words_3/0d167103-1b66-4373-9a78-5b21f50f9abb.png')
#plt.imshow(image)
#plt.show()
st = lambda aug: iaa.Sometimes(0.3, aug)
# Define our sequence of augmentation steps that will be applied to every image
# All augmenters with per_channel=0.5 will sample one value _per image_
# in 50% of all cases. In all other cases they will sample new values
# _per channel_.
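# Editor's sketch: iaa.Sometimes(p, aug) applies `aug` to roughly p*100% of the
# images and leaves the rest untouched, so st(iaa.GaussianBlur(1.0)) below is
# equivalent to iaa.Sometimes(0.3, iaa.GaussianBlur(1.0)).  The name
# `example_blur_aug` is illustrative only.
example_blur_aug = st(iaa.GaussianBlur(1.0))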
#seq_flipud=iaa.Sequential([st(iaa.GaussianBlur((1.5)))])
'''
seq = iaa.Sequential([
iaa.Flipud(0.5), # vertically flip 50% of all images
        st(iaa.GaussianBlur((1.5))), # blur images with a fixed sigma of 1.5
st(iaa.Sharpen(alpha=(1.0), lightness=(0.5))), # sharpen images
st(iaa.Emboss(alpha=(1.0), strength=(1.0))), # emboss images
# search either for all edges or for directed edges
st(iaa.AdditiveGaussianNoise(loc=0, scale=(0.2), per_channel=0.5)), # add gaussian noise to images
        st(iaa.Dropout((0.1), per_channel=0.5)), # randomly remove 10% of the pixels
st(iaa.Invert(0.25, per_channel=True)), # invert color channels
#st(iaa.Add((-10, 10), per_channel=0.5)), # change brightness of images (by -10 to 10 of original value)
#st(iaa.Multiply((0.5, 1.5), per_channel=0.5)), # change brightness of images (50-150% of original value)
#st(iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5)), # improve or worsen the contrast
st(iaa.Affine(
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis
translate_px={"x": (-16, 16), "y": (-16, 16)}, # translate by -16 to +16 pixels (per axis)
            rotate=(-90, 90), # rotate by -90 to +90 degrees
)),
st(iaa.ElasticTransformation(alpha=(3.0), sigma=0.25)) # apply elastic transformations with random strengths
],
    random_order=False # apply all of the above in the listed order
)
#images_aug = seq.augment_images(images)
'''
'''
gaussianBlur=iaa.Sequential([st(iaa.GaussianBlur(1.5))])
g=gaussianBlur.show_grid(images,cols=1,rows=1)
sharpen=iaa.Sequential([st(iaa.Sharpen(alpha=1.0,lightness=1))])
s=sharpen.show_grid(images,cols=1,rows=1)
images_aug = sharpen.augment_images(images)
'''
#imsave('/home/ahmed/Pictures/cogedis/cogedis_words_3/0aa3a241-1d17-4173-958f-41da009281c9_sharpen.png',sharpen.show_grid(images,cols=1,rows=1))
gaussianBlur=iaa.Sequential([st(iaa.GaussianBlur(1.0))])
#images_aug=gaussianBlur.augment_image(images)
additive=st(iaa.AdditiveGaussianNoise(loc=0, scale=(0.1), per_channel=0.7))
img_aug = additive.augment_image(images)  # renamed to avoid shadowing matplotlib.image (imported as img)
#images_aug = sharpen.augment_images(images)
print(img_aug.shape)
plt.imshow(img_aug)
#plt.savefig('/home/ahmed/Pictures/cogedis/cogedis_words_3/0aa3a241-1d17-4173-958f-41da009281c9_blur.png')
plt.show()
print("ok")
|
ahmedmazari-dhatim/CRNN-for-sequence-recognition-
|
data_aug_2.py
|
Python
|
mit
| 3,073
|
[
"Gaussian"
] |
e968fd0d05b894e40871636233f07a4119a01c3422bf52625d3e502cc1032fc0
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
###########################################################################
# ESPResSo++ #
# Test script for Converting GROMACS tabulated file #
# #
###########################################################################
import sys
import time
import os
import espresso
import mpi4py.MPI as MPI
import math
import logging
import os
from espresso import Real3D, Int3D
from espresso.tools.convert import gromacs
# Input values for system
N = 10 # box size
size = (float(N), float(N), float(N))
numParticles = N**3 # number of particles
nsteps = 1000 # number of steps
cutoff = 2.5 # cutoff for LJ potential
tabfile = "pot-lj-esp.tab" # filename for tabulated potential
skin = 0.3 # skin for Verlet lists
spline = 2 # interpolation spline type
# parameters to convert GROMACS tabulated potential file
filein = "table6-12.xvg" # gromacs tabulated file to be converted
fileout = "pot-lj-gro.tab" # filename of output file
sigma = 1.0
epsilon = 1.0
c6 = 4.0
c12 = 4.0
files = [tabfile, fileout] # run simulation on these files
######################################################################
## IT SHOULD BE UNNECESSARY TO MAKE MODIFICATIONS BELOW THIS LINE ##
######################################################################
print '\n-- GROMACS Tabulated File Conversion Test -- \n'
print 'Steps: %3s' % nsteps
print 'Particles: %3s' % numParticles
print 'Cutoff: %3s' % cutoff
# writes the tabulated potential file
def writeTabFile(pot, name, N, low=0.0, high=2.5, body=2):
outfile = open(name, "w")
delta = (high - low) / (N - 1)
for i in range(N):
r = low + i * delta
energy = pot.computeEnergy(r)
if body == 2:# this is for 2-body potentials
force = pot.computeForce(Real3D(r, 0.0, 0.0))[0]
else: # this is for 3- and 4-body potentials
force = pot.computeForce(r)
outfile.write("%15.8g %15.8g %15.8g\n"%(r, energy, force))
outfile.close()
# write the espresso++ tabulated file for a LJ potential
print 'Generating potential file ... (%2s)' % tabfile
potLJ = espresso.interaction.LennardJones(epsilon=1.0, sigma=1.0, shift=0.0, cutoff=cutoff)
writeTabFile(potLJ, tabfile, N=1500, low=0.01, high=potLJ.cutoff)
# convert gromacs tabulated file to espresso++ format
print 'Converting GROMACS file to ESPResSo++ file ... (%2s -> %2s)' % (filein, fileout)
gromacs.convertTable(filein, fileout, sigma, epsilon, c6, c12)
#exit() # exit if you just want to convert a file
# compute the number of cells on each node
def calcNumberCells(size, nodes, cutoff):
ncells = 1
while size / (ncells * nodes) >= cutoff:
ncells = ncells + 1
return ncells - 1
#start_time = time.clock()
# run simulation for all tabulated potential files
for potfile in files:
print '\nUsing file: %0s'% potfile
# set up system
system = espresso.System()
system.rng = espresso.esutil.RNG()
system.bc = espresso.bc.OrthorhombicBC(system.rng, size)
system.skin = skin
comm = MPI.COMM_WORLD
nodeGrid = Int3D(1, 1, comm.size)
cellGrid = Int3D(
calcNumberCells(size[0], nodeGrid[0], cutoff),
calcNumberCells(size[1], nodeGrid[1], cutoff),
calcNumberCells(size[2], nodeGrid[2], cutoff)
)
system.storage = espresso.storage.DomainDecomposition(system, nodeGrid, cellGrid)
pid = 0
for i in range(N):
for j in range(N):
for k in range(N):
m = (i + 2*j + 3*k) % 11
r = 0.45 + m * 0.01
x = (i + r) / N * size[0]
y = (j + r) / N * size[1]
z = (k + r) / N * size[2]
x = 1.0 * i
y = 1.0 * j
z = 1.0 * k
system.storage.addParticle(pid, Real3D(x, y, z))
# not yet: dd.setVelocity(id, (1.0, 0.0, 0.0))
pid = pid + 1
system.storage.decompose()
# integrator
integrator = espresso.integrator.VelocityVerlet(system)
integrator.dt = 0.005
# now build Verlet List
# ATTENTION: you must not add the skin explicitly here
logging.getLogger("Interpolation").setLevel(logging.INFO)
vl = espresso.VerletList(system, cutoff = cutoff + system.skin)
potTab = espresso.interaction.Tabulated(itype=spline, filename=potfile, cutoff=cutoff)
# ATTENTION: auto shift was enabled
interTab = espresso.interaction.VerletListTabulated(vl)
interTab.setPotential(type1=0, type2=0, potential=potTab)
system.addInteraction(interTab)
temp = espresso.analysis.Temperature(system)
press = espresso.analysis.Pressure(system)
temperature = temp.compute()
p = press.compute()
Ek = 0.5 * temperature * (3 * numParticles)
Ep = interTab.computeEnergy()
print 'Start %5s: tot energy = %10.3f pot = %10.3f kin = %10.3f temp = %10.3f p = %10.3f' \
% ("", Ek + Ep, Ep, Ek, temperature, p)
# langevin thermostat
langevin = espresso.integrator.LangevinThermostat(system)
integrator.addExtension(langevin)
langevin.gamma = 1.0
langevin.temperature = 1.0
integrator.run(nsteps)
temperature = temp.compute()
p = press.compute()
Ek = 0.5 * temperature * (3 * numParticles)
Ep = interTab.computeEnergy()
print 'Step %6d: tot energy = %10.3f pot = %10.3f kin = %10.3f temp = %10.3f p = %10.3f' % \
(nsteps, Ek + Ep, Ep, Ek, temperature, p)
os.system('rm '+potfile) # remove file
print '\nDone.'
|
BackupTheBerlios/espressopp
|
examples/convert_gromacs_tables/convert_gromacs_table.py
|
Python
|
gpl-3.0
| 6,132
|
[
"ESPResSo",
"Gromacs"
] |
1a0c42e3a52bf92f71c4fa3d145d70a1e04459450b9f0167ebb30c1fe933d78b
|
import types
import time
import random
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities import DEncode
from DIRAC.Core.Base.ExecutorMindHandler import ExecutorMindHandler
random.seed()
class PingPongMindHandler( ExecutorMindHandler ):
MSG_DEFINITIONS = { 'StartReaction' : { 'numBounces' : ( types.IntType, types.LongType ) } }
auth_msg_StartReaction = [ 'all' ]
def msg_StartReaction( self, msgObj ):
bouncesLeft = msgObj.numBounces
    taskid = time.time() + random.random()
    taskData = { 'bouncesLeft' : bouncesLeft }
    return self.executeTask( taskid, taskData )
auth_startPingOfDeath = [ 'all' ]
types_startPingOfDeath = [ types.IntType ]
def export_startPingOfDeath( self, numBounces ):
taskData = { 'bouncesLeft' : numBounces }
gLogger.info( "START TASK = %s" % taskData )
return self.executeTask( int( time.time() + random.random() ), taskData )
@classmethod
def exec_executorConnected( cls, trid, eTypes ):
"""
This function will be called any time an executor reactor connects
eTypes is a list of executor modules the reactor runs
"""
gLogger.info( "EXECUTOR CONNECTED OF TYPE %s" % eTypes )
return S_OK()
@classmethod
def exec_executorDisconnected( cls, trid ):
"""
This function will be called any time an executor disconnects
"""
return S_OK()
@classmethod
def exec_dispatch( cls, taskid, taskData, pathExecuted ):
"""
Before a task can be executed, the mind has to know which executor module can process it
"""
gLogger.info( "IN DISPATCH %s" % taskData )
if taskData[ 'bouncesLeft' ] > 0:
gLogger.info( "SEND TO PLACE" )
return S_OK( "Test/PingPongExecutor" )
return S_OK()
@classmethod
def exec_prepareToSend( cls, taskId, taskData, trid ):
"""
"""
return S_OK()
@classmethod
def exec_serializeTask( cls, taskData ):
gLogger.info( "SERIALIZE %s" % taskData )
return S_OK( DEncode.encode( taskData ) )
@classmethod
def exec_deserializeTask( cls, taskStub ):
gLogger.info( "DESERIALIZE %s" % taskStub )
return S_OK( DEncode.decode( taskStub )[0] )
@classmethod
def exec_taskProcessed( cls, taskid, taskData, eType ):
"""
This function will be called when a task has been processed and by which executor module
"""
gLogger.info( "PROCESSED %s" % taskData )
taskData[ 'bouncesLeft' ] -= 1
return cls.executeTask( taskid, taskData )
@classmethod
def exec_taskError( cls, taskid, taskData, errorMsg ):
print "OOOOOO THERE WAS AN ERROR!!", errorMsg
return S_OK()
@classmethod
def exec_taskFreeze( cls, taskid, taskData, eType ):
"""
A task can be frozen either because there are no executors connected that can handle it
    or because an executor has requested it.
"""
print "OOOOOO THERE WAS A TASK FROZEN"
return S_OK()
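# Editor's sketch (assumed DIRAC client usage, not taken from this file): an
# exported method such as export_startPingOfDeath is normally reached through
# an RPC client pointed at the service; the service name used here is
# hypothetical.
#
# from DIRAC.Core.DISET.RPCClient import RPCClient
# result = RPCClient( "Test/PingPongMind" ).startPingOfDeath( 10 )
# if not result[ 'OK' ]:
#   gLogger.error( result[ 'Message' ] )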
|
chaen/DIRACDocs
|
source/DeveloperGuide/AddingNewComponents/DevelopingExecutors/PingPongMindHandler.py
|
Python
|
gpl-3.0
| 2,911
|
[
"DIRAC"
] |
7f5e409acfaab5d8d25bfca0a17abae440bcfdf1aee9bbe1768d00dacd932e44
|
####
# This sample is published as part of the blog article at www.toptal.com/blog
# Visit www.toptal.com/blog and subscribe to our newsletter to read great posts
####
import logging
from pathlib import Path
from time import time
from functools import partial
from concurrent.futures import ProcessPoolExecutor
from PIL import Image
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def create_thumbnail(size, path):
"""
Creates a thumbnail of an image with the same name as image but with _thumbnail appended before the
suffix.
>>> create_thumbnail((128, 128), 'image.jpg')
A new thumbnail image is created with the name image_thumbnail.jpg
:param size: A tuple of the width and height of the image
:param path: The path to the image file
:return: None
"""
path = Path(path)
name = path.stem + '_thumbnail' + path.suffix
thumbnail_path = path.with_name(name)
image = Image.open(path)
image.thumbnail(size)
image.save(thumbnail_path)
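# Editor's sketch: partial(create_thumbnail, (128, 128)) used in main() below
# freezes the first positional argument, i.e. it behaves like this
# hypothetical wrapper:
def thumbnail_128_equivalent(path):
    return create_thumbnail((128, 128), path)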

def main():
    ts = time()
    # Partially apply create_thumbnail, fixing the size to 128x128 and leaving
    # a function of a single argument.
    thumbnail_128 = partial(create_thumbnail, (128, 128))
    # Create the executor in a with block so shutdown() is called when the block is exited.
    with ProcessPoolExecutor() as executor:
        # Consume the result iterator so exceptions raised in the worker processes
        # are re-raised here instead of being silently discarded.
        list(executor.map(thumbnail_128, Path('images').iterdir()))
    logger.info('Took %s', time() - ts)


if __name__ == '__main__':
    main()
|
volker48/python-concurrency
|
processpool_thumbnails.py
|
Python
|
mit
| 1,589
|
[
"VisIt"
] |
65ff044506a7534458bfcb9251ec2dd186ad6d0f556e5965aa95d081680c87b0
|
# Copyright (c) 2005 Gavin E. Crooks <gec@threeplusone.com>
# Copyright (c) 2006, The Regents of the University of California, through
# Lawrence Berkeley National Laboratory (subject to receipt of any required
# approvals from the U.S. Dept. of Energy). All rights reserved.
# This software is distributed under the new BSD Open Source License.
# <http://www.opensource.org/licenses/bsd-license.html>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# (1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and or other materials provided with the distribution.
#
# (3) Neither the name of the University of California, Lawrence Berkeley
# National Laboratory, U.S. Dept. of Energy nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
""" Sequence file reading and writing.
Biological sequence data is stored and transmitted using a wide variety of
different file formats. This package provides convenient methods to read and
write several of these file fomats.
CoreBio is often capable of guessing the correct file type, either from the
file extension or the structure of the file:
>>> import corebio.seq_io
>>> afile = open("test_corebio/data/cap.fa")
>>> seqs = corebio.seq_io.read(afile)
Alternatively, each sequence file type has a separate module named FILETYPE_io
(e.g. fasta_io, clustal_io).
>>> import corebio.seq_io.fasta_io
>>> afile = open("test_corebio/data/cap.fa")
>>> seqs = corebio.seq_io.fasta_io.read( afile )
Sequence data can also be written back to files:
>>> fout = open("out.fa", "w")
>>> corebio.seq_io.fasta_io.write( fout, seqs )
Supported File Formats
----------------------
Module Name Extension read write features
---------------------------------------------------------------------------
array_io array, flatfile yes yes none
Each IO module defines one or more of the following functions and variables:
read(afile, alphabet=None)
Read a file of sequence data and return a SeqList, a collection
of Seq's (Alphabetic strings) and features.
read_seq(afile, alphabet=None)
Read a single sequence from a file.
iter_seq(afile, alphabet =None)
Iterate over the sequences in a file.
index(afile, alphabet = None)
Instead of loading all of the sequences into memory, scan the file and
return an index map that will load sequences on demand. Typically not
implemented for formats with interleaved sequences.
write(afile, seqlist)
Write a collection of sequences to the specifed file.
write_seq(afile, seq)
Write one sequence to the file. Only implemented for non-interleaved,
headerless formats, such as fasta and plain.
example
A string containing a short example of the file format
names
A list of synonyms for the file format. E.g. for fasta_io, ( 'fasta',
'pearson', 'fa'). The first entry is the preferred format name.
extensions
A list of file name extensions used for this file format. e.g.
fasta_io.extensions is ('fa', 'fasta', 'fast', 'seq', 'fsa', 'fst', 'nt',
'aa','fna','mpfa'). The preferred or standard extension is first in the
list.
Attributes :
- formats -- Available seq_io format parsers
- format_names -- A map between format names and format parsers.
- format_extensions -- A map between filename extensions and parsers.
"""
# Dev. References :
#
# - http://iubio.bio.indiana.edu/soft/molbio/readseq/java/Readseq2-help.html
# - http://www.ebi.ac.uk/help/formats_frame.html
# - http://www.cmbi.kun.nl/bioinf/tools/crab_pir.html
# - http://bioperl.org/HOWTOs/html/SeqIO.html
# - http://emboss.sourceforge.net/docs/themes/SequenceFormats.html
# - http://www.cse.ucsc.edu/research/compbio/a2m-desc.html (a2m)
# - http://www.genomatix.de/online_help/help/sequence_formats.html
from weblogoMod.corebio.seq import *

import array_io

__all__ = [
    'array_io',
    'read',
]

_parsers = [array_io]


def _get_parsers(lines):
    # Return a copy so callers can reorder the candidates without mutating the
    # module-level registry. (Read-only access needs no `global` statement.)
    return list(_parsers)


def read(lines, alphabet=None):
    """ Read a sequence file and attempt to guess its format.

    First the filename extension (if available) is used to infer the format.
    If that fails, then we attempt to parse the file using several common
    formats.

    Note: `lines` cannot be an unseekable stream such as sys.stdin.

    returns:
        SeqList
    raises:
        ValueError - If the file cannot be parsed.
        ValueError - Sequences do not conform to the alphabet.
    """
    alphabet = Alphabet(alphabet)
    parsers = _get_parsers(lines)
    for p in parsers:
        try:
            return p.read(lines, alphabet)
        except ValueError:
            pass
    names = ", ".join([p.names[0] for p in parsers])
    raise ValueError("Cannot parse sequence file: Tried %s " % names)
|
NarlikarLab/DIVERSITY
|
weblogoMod/corebio/seq_io/__init__.py
|
Python
|
gpl-3.0
| 6,149
|
[
"BioPerl"
] |
23b0c49167146c0fb0f72649d432785ecd7e4b9ac56fdd5953d9c3cf0c8b78d1
|
"""
This is a simple script that verifies several ways of accessing numpy arrays
and ensures that their memory is properly cleaned.
"""
from addons import *
import psi4
import numpy as np
# If it's too small, something odd happens with the memory manager
mat_size = 10000
def snapshot_memory():
import memory_profiler as mp
return mp.memory_usage()[0] * 1048576

def check_leak(func, tol=1.e6):
    """Run func and raise MemoryError if more than tol bytes remain allocated."""
    start = snapshot_memory()
    func()
    diff = abs(start - snapshot_memory())
    # A megabyte of slack is excusable due to various GC bookkeeping costs.
    if diff > tol:
        raise MemoryError("Function did not correctly clean up")
    else:
        print("Function %s: PASSED" % func.__name__)
    return True

def build_mat():
    # Allocation only: the Matrix is freed when the local reference is dropped.
    mat = psi4.core.Matrix(mat_size, mat_size)
    return mat


def build_view_mat():
    # Keep both the Matrix and a numpy view of its data alive.
    mat = psi4.core.Matrix(mat_size, mat_size)
    view = mat.np
    return mat, view

def build_viewh_mat():
    mat = psi4.core.Matrix(mat_size, mat_size)
    # Exercise the per-irrep accessor (the original block duplicated build_view_mat;
    # mat.nph yields one view per irrep, so take the first and only block here).
    view = mat.nph[0]
    return mat, view

def build_view_set_mat():
    # Write through the view to make sure the shared buffer stays valid.
    mat = psi4.core.Matrix(mat_size, mat_size)
    view = mat.np
    view[:] = 5
    return mat, view


def build_arr_mat():
    # np.asarray shares memory with the Matrix rather than copying it.
    mat = psi4.core.Matrix(mat_size, mat_size)
    view = np.asarray(mat)
    return mat, view


def build_copy_mat():
    # np.array copies by default, so the Matrix and the array are independent.
    mat = psi4.core.Matrix(mat_size, mat_size)
    view = np.array(mat)
    return mat, view

@using_memory_profiler
def test_build_mat():
    assert check_leak(build_mat)


@using_memory_profiler
def test_build_view_mat():
    assert check_leak(build_view_mat)


@using_memory_profiler
def test_build_viewh_mat():
    assert check_leak(build_viewh_mat)


@using_memory_profiler
def test_build_view_set_mat():
    assert check_leak(build_view_set_mat)


@using_memory_profiler
def test_build_arr_mat():
    assert check_leak(build_arr_mat)


@using_memory_profiler
def test_build_copy_mat():
    assert check_leak(build_copy_mat)

@using_memory_profiler
def test_totals():
    start = snapshot_memory()
    check_leak(build_mat)
    check_leak(build_view_mat)
    check_leak(build_viewh_mat)
    check_leak(build_view_set_mat)
    check_leak(build_arr_mat)
    check_leak(build_copy_mat)

    # Double check the aggregate footprint across all builders.
    diff = abs(start - snapshot_memory())
    if diff > 1.e6:
        raise MemoryError("\nA function leaked %d bytes of memory!" % diff)
    else:
        print("\nNo leaks detected!")
|
amjames/psi4
|
tests/pytest/test_np_views.py
|
Python
|
lgpl-3.0
| 2,399
|
[
"Psi4"
] |
3c499b2b45ee81881646c7201d3bedfd9bfd97332d03d5ce12f65a8907ce0ecd
|