text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
#!/usr/bin/env python
#
# $File: simpleExample.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim

# A diploid population of 1000 individuals with two loci.
pop = sim.Population(size=1000, loci=2)

# Initial genotypes follow the pattern [1, 2, 2, 1]; sexes are assigned
# at initialization.
founder_ops = [
    sim.InitSex(),
    sim.InitGenotype(genotype=[1, 2, 2, 1]),
]

# Every 10 generations: recompute LD between the two loci and print it.
reporting_ops = [
    sim.Stat(LD=[0, 1], step=10),
    sim.PyEval(r"'%.2f\n' % LD[0][1]", step=10),
]

pop.evolve(
    initOps=founder_ops,
    matingScheme=sim.RandomMating(ops=sim.Recombinator(rates=0.01)),
    postOps=reporting_ops,
    gen=100,
)
|
BoPeng/simuPOP
|
docs/simpleExample.py
|
Python
|
gpl-2.0
| 1,449
|
[
"VisIt"
] |
618013f12a94c6226a0c19b449afaebd628da175d6d0a153fb0e23895f68f574
|
# Copyright 2012-2014 Brian May
#
# This file is part of python-tldap.
#
# python-tldap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-tldap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-tldap If not, see <http://www.gnu.org/licenses/>.
""" Contains ConnectionHandler which represents a list of connections. """
import sys
from threading import local
# Alias used to look up the default LDAP connection when no explicit
# alias is supplied by the caller.
DEFAULT_LDAP_ALIAS = "default"
def load_backend(backend_name):
    """Import and return the LDAP backend module named *backend_name*.

    ``importlib.import_module`` returns the leaf module of a dotted path
    directly, which is exactly what the original
    ``__import__(name); sys.modules[name]`` two-step achieved.
    """
    import importlib
    return importlib.import_module(backend_name)
class ConnectionHandler(object):
    """Registry of configured LDAP connections, opened lazily per thread."""

    def __init__(self, databases):
        self.databases = databases
        # thread-local storage: each thread gets its own connection objects
        self._connections = local()

    def __getitem__(self, alias):
        # Reuse a connection already opened on this thread, if any.
        try:
            return getattr(self._connections, alias)
        except AttributeError:
            pass
        settings = self.databases[alias]
        engine = load_backend(settings['ENGINE'])
        wrapper = engine.LDAPwrapper(settings)
        setattr(self._connections, alias, wrapper)
        return wrapper

    def __iter__(self):
        return iter(self.databases)

    def all(self):
        """ Return list of all connections. """
        return [self[name] for name in self]
|
Karaage-Cluster/python-tldap
|
tldap/utils.py
|
Python
|
gpl-3.0
| 1,651
|
[
"Brian"
] |
48d29ad5c570eb7097ff56dc57628738eaedb0ba8eba753328b71ad4843ca796
|
#!/usr/bin/env python2.7
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
#
r"""
mx is a command line tool for managing the development of Java code organized as suites of projects.
Version 1.x supports a single suite of projects.
Full documentation can be found at https://wiki.openjdk.java.net/display/Graal/The+mx+Tool
"""
import sys, os, errno, time, subprocess, shlex, types, StringIO, zipfile, signal, xml.sax.saxutils, tempfile, fnmatch, platform
import textwrap
import socket
import tarfile
import hashlib
import xml.parsers.expat
import shutil, re, xml.dom.minidom
import pipes
import difflib
from collections import Callable
from threading import Thread
from argparse import ArgumentParser, REMAINDER
from os.path import join, basename, dirname, exists, getmtime, isabs, expandvars, isdir, isfile
# Support for Python 2.6
def check_output(*popenargs, **kwargs):
    """Backport of subprocess.check_output for Python 2.6.

    Runs the command, captures its stdout, and raises CalledProcessError
    (with the captured output attached) on a non-zero exit status.
    """
    proc = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    stdout, _ = proc.communicate()
    status = proc.poll()
    if not status:
        return stdout
    command = kwargs.get("args")
    if command is None:
        command = popenargs[0]
    failure = subprocess.CalledProcessError(status, command)
    # CalledProcessError in 2.6 has no 'output' argument; attach it manually.
    failure.output = stdout
    raise failure
# Support for jython
def is_jython():
    """Report whether the current interpreter is Jython."""
    return sys.platform[:4] == 'java'
# multiprocessing does not exist on Jython; only import it elsewhere.
if not is_jython():
    import multiprocessing

def cpu_count():
    """Number of available CPUs, working on both CPython and Jython."""
    if not is_jython():
        return multiprocessing.cpu_count()
    # On Jython, ask the JVM directly.
    from java.lang import Runtime
    return Runtime.getRuntime().availableProcessors()
# Monkey-patch features missing from older standard libraries.  The
# original code used bare 'except:' clauses, which also swallow
# SystemExit and KeyboardInterrupt; only AttributeError (attribute not
# present on this Python version) is intended here.
try:
    subprocess.check_output
except AttributeError:
    subprocess.check_output = check_output

# zipfile.ZipFile only became a context manager in Python 2.7.
try:
    zipfile.ZipFile.__enter__
except AttributeError:
    zipfile.ZipFile.__enter__ = lambda self: self
    zipfile.ZipFile.__exit__ = lambda self, t, value, traceback: self.close()
# Global registries and shared state, populated as suite definitions are
# loaded and command-line options are parsed.
_projects = dict()            # project name -> Project
_libs = dict()                # library name -> Library
_jreLibs = dict()             # JRE library name -> JreLibrary
_dists = dict()               # distribution name -> Distribution
_suites = dict()              # suite name -> suite object (presumably; set elsewhere)
_annotationProcessors = None  # lazily computed cache (set elsewhere in the file)
_primary_suite_path = None
_primary_suite = None
_opts = None                  # parsed command-line options; .user_home is read below
_java_homes = None
_warn = False
"""
A distribution is a jar or zip file containing the output from one or more Java projects.
"""
class Distribution:
    """A jar or zip file containing the output from one or more Java
    projects, plus the logic to (re)build that archive from its
    dependencies."""

    def __init__(self, suite, name, path, sourcesPath, deps, mainClass, excludedDependencies, distDependencies, javaCompliance):
        self.suite = suite
        self.name = name
        # Normalize the archive path to OS separators and make it absolute
        # relative to the suite directory.
        self.path = path.replace('/', os.sep)
        self.path = _make_absolute(self.path, suite.dir)
        self.sourcesPath = _make_absolute(sourcesPath.replace('/', os.sep), suite.dir) if sourcesPath else None
        self.deps = deps
        # Callbacks invoked (with this distribution) after make_archive().
        self.update_listeners = set()
        self.mainClass = mainClass
        self.excludedDependencies = excludedDependencies
        self.distDependencies = distDependencies
        self.javaCompliance = JavaCompliance(javaCompliance) if javaCompliance else None

    def sorted_deps(self, includeLibs=False, transitive=False):
        """Return this distribution's dependencies, optionally including
        libraries and the (deduplicated) dependencies of the distributions
        it depends on; excluded dependencies are filtered out."""
        deps = []
        if transitive:
            for depDist in [distribution(name) for name in self.distDependencies]:
                for d in depDist.sorted_deps(includeLibs=includeLibs, transitive=True):
                    if d not in deps:
                        deps.append(d)
        try:
            excl = [dependency(d) for d in self.excludedDependencies]
        except SystemExit as e:
            # dependency() aborts (SystemExit) on unknown names; re-report
            # with the distribution context.
            abort('invalid excluded dependency for {0} distribution: {1}'.format(self.name, e))
        return deps + [d for d in sorted_deps(self.deps, includeLibs=includeLibs) if d not in excl]

    def __str__(self):
        return self.name

    def add_update_listener(self, listener):
        # Listener is called with this distribution from notify_updated().
        self.update_listeners.add(listener)

    """
    Gets the directory in which the IDE project configuration
    for this distribution is generated. If this is a distribution
    derived from a project defining an annotation processor, then
    None is return to indicate no IDE configuration should be
    created for this distribution.
    """
    def get_ide_project_dir(self):
        if hasattr(self, 'definingProject') and self.definingProject.definedAnnotationProcessorsDist == self:
            return None
        if hasattr(self, 'subDir'):
            return join(self.suite.dir, self.subDir, self.name + '.dist')
        else:
            return join(self.suite.dir, self.name + '.dist')

    def make_archive(self):
        """Build the distribution's jar/zip (and its sources archive) from
        the class files, resources and library jars of its dependencies,
        merging META-INF/services entries along the way."""
        # are sources combined into main archive?
        unified = self.path == self.sourcesPath
        with Archiver(self.path) as arc:
            with Archiver(None if unified else self.sourcesPath) as srcArcRaw:
                srcArc = arc if unified else srcArcRaw
                # service interface name -> list of provider class names,
                # merged from all dependencies and written out at the end.
                services = {}

                def overwriteCheck(zf, arcname, source):
                    # Track which input produced each archive entry so that a
                    # second writer of the same entry is detected (and only
                    # warned about for regular files, not directories).
                    if not hasattr(zf, '_provenance'):
                        zf._provenance = {}
                    existingSource = zf._provenance.get(arcname, None)
                    isOverwrite = False
                    if existingSource and existingSource != source:
                        if arcname[-1] != os.path.sep:
                            logv('warning: ' + self.path + ': avoid overwrite of ' + arcname + '\n new: ' + source + '\n old: ' + existingSource)
                            isOverwrite = True
                    zf._provenance[arcname] = source
                    return isOverwrite

                if self.mainClass:
                    manifest = "Manifest-Version: 1.0\nMain-Class: %s\n\n" % (self.mainClass)
                    if not overwriteCheck(arc.zf, "META-INF/MANIFEST.MF", "project files"):
                        arc.zf.writestr("META-INF/MANIFEST.MF", manifest)
                for dep in self.sorted_deps(includeLibs=True):
                    # Skip anything already provided by a distribution we
                    # depend on.
                    isCoveredByDependecy = False
                    for d in self.distDependencies:
                        if dep in _dists[d].sorted_deps(includeLibs=True, transitive=True):
                            logv("Excluding {0} from {1} because it's provided by the dependency {2}".format(dep.name, self.path, d))
                            isCoveredByDependecy = True
                            break
                    if isCoveredByDependecy:
                        continue
                    if dep.isLibrary():
                        l = dep
                        # merge library jar into distribution jar
                        logv('[' + self.path + ': adding library ' + l.name + ']')
                        lpath = l.get_path(resolve=True)
                        libSourcePath = l.get_source_path(resolve=True)
                        if lpath:
                            with zipfile.ZipFile(lpath, 'r') as lp:
                                for arcname in lp.namelist():
                                    # Collect service registrations for merging
                                    # instead of copying them verbatim.
                                    if arcname.startswith('META-INF/services/') and not arcname == 'META-INF/services/':
                                        service = arcname[len('META-INF/services/'):]
                                        assert '/' not in service
                                        services.setdefault(service, []).extend(lp.read(arcname).splitlines())
                                    else:
                                        if not overwriteCheck(arc.zf, arcname, lpath + '!' + arcname):
                                            arc.zf.writestr(arcname, lp.read(arcname))
                        if srcArc.zf and libSourcePath:
                            with zipfile.ZipFile(libSourcePath, 'r') as lp:
                                for arcname in lp.namelist():
                                    if not overwriteCheck(srcArc.zf, arcname, lpath + '!' + arcname):
                                        srcArc.zf.writestr(arcname, lp.read(arcname))
                    elif dep.isProject():
                        p = dep
                        if self.javaCompliance:
                            if p.javaCompliance > self.javaCompliance:
                                abort("Compliance level doesn't match: Distribution {0} requires {1}, but {2} is {3}.".format(self.name, self.javaCompliance, p.name, p.javaCompliance))
                        # skip a Java project if its Java compliance level is "higher" than the configured JDK
                        jdk = java(p.javaCompliance)
                        assert jdk
                        logv('[' + self.path + ': adding project ' + p.name + ']')
                        outputDir = p.output_dir()
                        for root, _, files in os.walk(outputDir):
                            relpath = root[len(outputDir) + 1:]
                            if relpath == join('META-INF', 'services'):
                                # One file per service: each line names a provider.
                                for service in files:
                                    with open(join(root, service), 'r') as fp:
                                        services.setdefault(service, []).extend([provider.strip() for provider in fp.readlines()])
                            elif relpath == join('META-INF', 'providers'):
                                # Inverse layout: one file per provider, listing
                                # the services it implements.
                                for provider in files:
                                    with open(join(root, provider), 'r') as fp:
                                        for service in fp:
                                            services.setdefault(service.strip(), []).append(provider)
                            else:
                                for f in files:
                                    arcname = join(relpath, f).replace(os.sep, '/')
                                    if not overwriteCheck(arc.zf, arcname, join(root, f)):
                                        arc.zf.write(join(root, f), arcname)
                        if srcArc.zf:
                            sourceDirs = p.source_dirs()
                            if p.source_gen_dir():
                                sourceDirs.append(p.source_gen_dir())
                            for srcDir in sourceDirs:
                                for root, _, files in os.walk(srcDir):
                                    relpath = root[len(srcDir) + 1:]
                                    for f in files:
                                        if f.endswith('.java'):
                                            arcname = join(relpath, f).replace(os.sep, '/')
                                            if not overwriteCheck(srcArc.zf, arcname, join(root, f)):
                                                srcArc.zf.write(join(root, f), arcname)
                # Write the merged service registrations last.
                for service, providers in services.iteritems():
                    arcname = 'META-INF/services/' + service
                    arc.zf.writestr(arcname, '\n'.join(providers))
        self.notify_updated()

    def notify_updated(self):
        # Inform registered listeners that the archive was rebuilt.
        for l in self.update_listeners:
            l(self)
"""
A dependency is a library or project specified in a suite.
"""
class Dependency:
    """Common base for libraries and projects declared in a suite.

    Two dependencies are considered the same entity iff their names are
    equal: comparison, hashing and printing all delegate to the name.
    """

    def __init__(self, suite, name):
        self.name = name
        self.suite = suite

    def __cmp__(self, other):
        # Python 2 ordering hook: dependencies sort alphabetically by name.
        return cmp(self.name, other.name)

    def __str__(self):
        return self.name

    def __eq__(self, other):
        return other.name == self.name

    def __ne__(self, other):
        return other.name != self.name

    def __hash__(self):
        return hash(self.name)

    def isLibrary(self):
        """True when this dependency is a Library."""
        return isinstance(self, Library)

    def isJreLibrary(self):
        """True when this dependency is a JreLibrary."""
        return isinstance(self, JreLibrary)

    def isProject(self):
        """True when this dependency is a Project."""
        return isinstance(self, Project)
class Project(Dependency):
    """A Java (or native) project in a suite: a directory of source dirs
    whose compiled output can be archived and whose dependency graph,
    packages and annotation processors can be queried."""

    def __init__(self, suite, name, srcDirs, deps, javaCompliance, workingSets, d):
        Dependency.__init__(self, suite, name)
        self.srcDirs = srcDirs
        self.deps = deps
        self.checkstyleProj = name
        self.javaCompliance = JavaCompliance(javaCompliance) if javaCompliance is not None else None
        self.native = False
        self.workingSets = workingSets
        self.dir = d
        # The annotation processors defined by this project
        self.definedAnnotationProcessors = None
        self.definedAnnotationProcessorsDist = None
        # Verify that a JDK exists for this project if its compliance level is
        # less than the compliance level of the default JDK
        jdk = java(self.javaCompliance)
        if jdk is None and self.javaCompliance < java().javaCompliance:
            abort('Cannot find ' + str(self.javaCompliance) + ' JDK required by ' + name + '. ' +
                  'Specify it with --extra-java-homes option or EXTRA_JAVA_HOMES environment variable.')
        # Create directories for projects that don't yet exist
        if not exists(d):
            os.mkdir(d)
        for s in self.source_dirs():
            if not exists(s):
                os.mkdir(s)

    def all_deps(self, deps, includeLibs, includeSelf=True, includeJreLibs=False, includeAnnotationProcessors=False):
        """
        Add the transitive set of dependencies for this project, including
        libraries if 'includeLibs' is true, to the 'deps' list.
        """
        return sorted(self._all_deps_helper(deps, [], includeLibs, includeSelf, includeJreLibs, includeAnnotationProcessors))

    def _all_deps_helper(self, deps, dependants, includeLibs, includeSelf=True, includeJreLibs=False, includeAnnotationProcessors=False):
        # 'dependants' is the DFS path from the root project; finding self
        # on it means the project graph contains a cycle.
        if self in dependants:
            abort(str(self) + 'Project dependency cycle found:\n ' +
                  '\n |\n V\n '.join(map(str, dependants[dependants.index(self):])) +
                  '\n |\n V\n ' + self.name)
        childDeps = list(self.deps)
        if includeAnnotationProcessors and len(self.annotation_processors()) > 0:
            childDeps = self.annotation_processors() + childDeps
        if self in deps:
            return deps
        for name in childDeps:
            assert name != self.name
            dep = dependency(name)
            if not dep in deps:
                if dep.isProject():
                    dep._all_deps_helper(deps, dependants + [self], includeLibs=includeLibs, includeJreLibs=includeJreLibs, includeAnnotationProcessors=includeAnnotationProcessors)
                # NOTE(review): 'dep.isProject' below is missing its call
                # parentheses, so the bound method is always truthy and this
                # branch runs for every non-project dep regardless of the
                # includeLibs/includeJreLibs flags — confirm intent upstream.
                elif dep.isProject or (dep.isLibrary() and includeLibs) or (dep.isJreLibrary() and includeJreLibs):
                    dep.all_deps(deps, includeLibs=includeLibs, includeJreLibs=includeJreLibs, includeAnnotationProcessors=includeAnnotationProcessors)
        if not self in deps and includeSelf:
            deps.append(self)
        return deps

    def _compute_max_dep_distances(self, name, distances, dist):
        # Record, per dependency name, the longest path length from this
        # project to that dependency.
        currentDist = distances.get(name)
        if currentDist is None or currentDist < dist:
            distances[name] = dist
            p = project(name, False)
            if p is not None:
                for dep in p.deps:
                    self._compute_max_dep_distances(dep, distances, dist + 1)

    def canonical_deps(self):
        """
        Get the dependencies of this project that are not recursive (i.e. cannot be reached
        via other dependencies).
        """
        distances = dict()
        result = set()
        self._compute_max_dep_distances(self.name, distances, 0)
        for n, d in distances.iteritems():
            assert d > 0 or n == self.name
            # Distance 1 == directly reachable only, i.e. canonical.
            if d == 1:
                result.add(n)
        if len(result) == len(self.deps) and frozenset(self.deps) == result:
            return self.deps
        return result

    def max_depth(self):
        """
        Get the maximum canonical distance between this project and its most distant dependency.
        """
        distances = dict()
        self._compute_max_dep_distances(self.name, distances, 0)
        return max(distances.values())

    def source_dirs(self):
        """
        Get the directories in which the sources of this project are found.
        """
        return [join(self.dir, s) for s in self.srcDirs]

    def source_gen_dir(self):
        """
        Get the directory in which source files generated by the annotation processor are found/placed.
        """
        if self.native:
            return None
        return join(self.dir, 'src_gen')

    def output_dir(self):
        """
        Get the directory in which the class files of this project are found/placed.
        """
        if self.native:
            return None
        return join(self.dir, 'bin')

    def jasmin_output_dir(self):
        """
        Get the directory in which the Jasmin assembled class files of this project are found/placed.
        """
        if self.native:
            return None
        return join(self.dir, 'jasmin_classes')

    def append_to_classpath(self, cp, resolve):
        # Native projects contribute nothing to a Java classpath.
        if not self.native:
            cp.append(self.output_dir())

    def find_classes_with_matching_source_line(self, pkgRoot, function, includeInnerClasses=False):
        """
        Scan the sources of this project for Java source files containing a line for which
        'function' returns true. A map from class name to source file path for each existing class
        corresponding to a matched source file is returned.
        """
        result = dict()
        pkgDecl = re.compile(r"^package\s+([a-zA-Z_][\w\.]*)\s*;$")
        for srcDir in self.source_dirs():
            outputDir = self.output_dir()
            for root, _, files in os.walk(srcDir):
                for name in files:
                    if name.endswith('.java') and name != 'package-info.java':
                        matchFound = False
                        source = join(root, name)
                        with open(source) as f:
                            pkg = None
                            for line in f:
                                if line.startswith("package "):
                                    match = pkgDecl.match(line)
                                    if match:
                                        pkg = match.group(1)
                                if function(line.strip()):
                                    matchFound = True
                                # Stop scanning once both the package and a
                                # matching line have been seen.
                                if pkg and matchFound:
                                    break
                        if matchFound:
                            simpleClassName = name[:-len('.java')]
                            assert pkg is not None, 'could not find package statement in file ' + name
                            if pkgRoot is None or pkg.startswith(pkgRoot):
                                pkgOutputDir = join(outputDir, pkg.replace('.', os.path.sep))
                                if exists(pkgOutputDir):
                                    for e in os.listdir(pkgOutputDir):
                                        if includeInnerClasses:
                                            # Also match Inner$Classes of the
                                            # top-level class.
                                            if e.endswith('.class') and (e.startswith(simpleClassName) or e.startswith(simpleClassName + '$')):
                                                className = pkg + '.' + e[:-len('.class')]
                                                result[className] = source
                                        elif e == simpleClassName + '.class':
                                            className = pkg + '.' + simpleClassName
                                            result[className] = source
        return result

    def _init_packages_and_imports(self):
        # Lazily compute (once) the packages defined, extended and imported
        # by this project's Java sources; results are cached on self.
        if not hasattr(self, '_defined_java_packages'):
            packages = set()
            extendedPackages = set()
            depPackages = set()
            for d in self.all_deps([], includeLibs=False, includeSelf=False):
                depPackages.update(d.defined_java_packages())
            imports = set()
            importRe = re.compile(r'import\s+(?:static\s+)?([^;]+);')
            for sourceDir in self.source_dirs():
                for root, _, files in os.walk(sourceDir):
                    javaSources = [name for name in files if name.endswith('.java')]
                    if len(javaSources) != 0:
                        # Directory path relative to the source dir is the package name.
                        pkg = root[len(sourceDir) + 1:].replace(os.sep, '.')
                        if not pkg in depPackages:
                            packages.add(pkg)
                        else:
                            # A project extends a package already defined by one of it dependencies
                            extendedPackages.add(pkg)
                            imports.add(pkg)
                        for n in javaSources:
                            with open(join(root, n)) as fp:
                                content = fp.read()
                                imports.update(importRe.findall(content))
            self._defined_java_packages = frozenset(packages)
            self._extended_java_packages = frozenset(extendedPackages)
            importedPackages = set()
            for imp in imports:
                # Strip trailing name components until the import matches a
                # package defined by a dependency.
                name = imp
                while not name in depPackages and len(name) > 0:
                    lastDot = name.rfind('.')
                    if lastDot == -1:
                        name = None
                        break
                    name = name[0:lastDot]
                if name is not None:
                    importedPackages.add(name)
            self._imported_java_packages = frozenset(importedPackages)

    def defined_java_packages(self):
        """Get the immutable set of Java packages defined by the Java sources of this project"""
        self._init_packages_and_imports()
        return self._defined_java_packages

    def extended_java_packages(self):
        """Get the immutable set of Java packages extended by the Java sources of this project"""
        self._init_packages_and_imports()
        return self._extended_java_packages

    def imported_java_packages(self):
        """Get the immutable set of Java packages defined by other Java projects that are
        imported by the Java sources of this project."""
        self._init_packages_and_imports()
        return self._imported_java_packages

    """
    Gets the list of projects defining the annotation processors that will be applied
    when compiling this project. This includes the projects declared by the annotationProcessors property
    of this project and any of its project dependencies. It also includes
    any project dependencies that define an annotation processors.
    """
    def annotation_processors(self):
        if not hasattr(self, '_annotationProcessors'):
            aps = set()
            if hasattr(self, '_declaredAnnotationProcessors'):
                aps = set(self._declaredAnnotationProcessors)
                for ap in aps:
                    # Every declared processor project must publish a
                    # processor distribution.
                    if project(ap).definedAnnotationProcessorsDist is None:
                        config = join(project(ap).source_dirs()[0], 'META-INF', 'services', 'javax.annotation.processing.Processor')
                        if not exists(config):
                            TimeStampFile(config).touch()
                        abort('Project ' + ap + ' declared in annotationProcessors property of ' + self.name + ' does not define any annotation processors.\n' +
                              'Please specify the annotation processors in ' + config)
            allDeps = self.all_deps([], includeLibs=False, includeSelf=False, includeAnnotationProcessors=False)
            for p in allDeps:
                # Add an annotation processor dependency
                if p.definedAnnotationProcessorsDist is not None:
                    aps.add(p.name)
                # Inherit annotation processors from dependencies
                aps.update(p.annotation_processors())
            self._annotationProcessors = sorted(list(aps))
        return self._annotationProcessors

    """
    Gets the class path composed of the distribution jars containing the
    annotation processors that will be applied when compiling this project.
    """
    def annotation_processors_path(self):
        aps = [project(ap) for ap in self.annotation_processors()]
        # Libraries flagged with annotationProcessor == 'true' also
        # contribute to the processor path.
        libAps = [dep for dep in self.all_deps([], includeLibs=True, includeSelf=False) if dep.isLibrary() and hasattr(dep, 'annotationProcessor') and getattr(dep, 'annotationProcessor').lower() == 'true']
        if len(aps) + len(libAps):
            return os.pathsep.join([ap.definedAnnotationProcessorsDist.path for ap in aps if ap.definedAnnotationProcessorsDist] + [lib.get_path(False) for lib in libAps])
        return None

    def uses_annotation_processor_library(self):
        # True when any (transitive) library dependency declares itself an
        # annotation processor.
        for dep in self.all_deps([], includeLibs=True, includeSelf=False):
            if dep.isLibrary() and hasattr(dep, 'annotationProcessor'):
                return True
        return False

    def update_current_annotation_processors_file(self):
        """Persist the current annotation processor list for this project
        under <suite>/mx/currentAnnotationProcessors/<name>, returning True
        if the list changed since the last build."""
        aps = self.annotation_processors()
        outOfDate = False
        currentApsFile = join(self.suite.mxDir, 'currentAnnotationProcessors', self.name)
        currentApsFileExists = exists(currentApsFile)
        if currentApsFileExists:
            with open(currentApsFile) as fp:
                currentAps = [l.strip() for l in fp.readlines()]
                if currentAps != aps:
                    outOfDate = True
        if outOfDate or not currentApsFileExists:
            if not exists(dirname(currentApsFile)):
                os.mkdir(dirname(currentApsFile))
            with open(currentApsFile, 'w') as fp:
                for ap in aps:
                    print >> fp, ap
        return outOfDate

    def make_archive(self, path=None):
        """Zip this project's output directory into a jar (defaulting to
        <dir>/<name>.jar) and return the archive path."""
        outputDir = self.output_dir()
        if not path:
            path = join(self.dir, self.name + '.jar')
        with Archiver(path) as arc:
            for root, _, files in os.walk(outputDir):
                for f in files:
                    relpath = root[len(outputDir) + 1:]
                    arcname = join(relpath, f).replace(os.sep, '/')
                    arc.zf.write(join(root, f), arcname)
        return path
def _make_absolute(path, prefix):
"""
Makes 'path' absolute if it isn't already by prefixing 'prefix'
"""
if not isabs(path):
return join(prefix, path)
return path
def sha1OfFile(path):
    """Compute the hex SHA1 digest of the file at *path*, reading in 4 KB
    chunks so arbitrarily large files can be hashed."""
    digest = hashlib.sha1()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(4096), b''):
            digest.update(chunk)
    return digest.hexdigest()
def download_file_with_sha1(name, path, urls, sha1, sha1path, resolve, mustExist, sources=False, canSymlink=True):
    """Ensure the file at *path* exists and matches *sha1*, downloading it
    from *urls* via a shared cache directory when necessary.  The digest
    last verified is remembered in *sha1path*.  Returns *path*."""
    # Symlinking from the cache is only attempted on platforms that support it.
    canSymlink = canSymlink and not (get_os() == 'windows' or get_os() == 'cygwin')

    def _download_lib():
        # Download into a shared cache keyed by "<basename>_<sha1>", then
        # symlink or copy the cached file into place.
        cacheDir = _cygpathW2U(get_env('MX_CACHE_DIR', join(_opts.user_home, '.mx', 'cache')))
        if not exists(cacheDir):
            os.makedirs(cacheDir)
        base = basename(path)
        cachePath = join(cacheDir, base + '_' + sha1)
        if not exists(cachePath) or sha1OfFile(cachePath) != sha1:
            if exists(cachePath):
                log('SHA1 of ' + cachePath + ' does not match expected value (' + sha1 + ') - re-downloading')
            print 'Downloading ' + ("sources " if sources else "") + name + ' from ' + str(urls)
            download(cachePath, urls)
        d = dirname(path)
        if d != '' and not exists(d):
            os.makedirs(d)
        if canSymlink and 'symlink' in dir(os):
            if exists(path):
                os.unlink(path)
            os.symlink(cachePath, path)
        else:
            shutil.copy(cachePath, path)

    def _sha1Cached():
        # Read the previously recorded digest (first 40 hex chars).
        with open(sha1path, 'r') as f:
            return f.read()[0:40]

    def _writeSha1Cached():
        # Record the digest of the current file contents.
        with open(sha1path, 'w') as f:
            f.write(sha1OfFile(path))

    if resolve and mustExist and not exists(path):
        assert not len(urls) == 0, 'cannot find required library ' + name + ' ' + path
        _download_lib()
    if exists(path):
        if sha1 and not exists(sha1path):
            _writeSha1Cached()
        # Re-download when the recorded digest no longer matches the
        # expected one; abort if the fresh download still disagrees.
        if sha1 and sha1 != _sha1Cached():
            _download_lib()
            if sha1 != sha1OfFile(path):
                abort("SHA1 does not match for " + name + ". Broken download? SHA1 not updated in projects file?")
            _writeSha1Cached()
    return path
class BaseLibrary(Dependency):
    """Shared behaviour of Library and JreLibrary: records whether the
    library is optional and derives __ne__ from the subclass's __eq__."""

    def __init__(self, suite, name, optional):
        Dependency.__init__(self, suite, name)
        self.optional = optional

    def __ne__(self, other):
        # Invert __eq__ while propagating NotImplemented untouched.
        eq = self.__eq__(other)
        return eq if eq is NotImplemented else not eq
"""
A library that will be provided by the JRE but may be absent.
Any project or normal library that depends on a missing library
will be removed from the global project and library dictionaries
(i.e., _projects and _libs).
This mechanism exists primarily to be able to support code
that may use functionality in one JRE (e.g., Oracle JRE)
that is not present in another JRE (e.g., OpenJDK). A
motivating example is the Java Flight Recorder library
found in the Oracle JRE.
"""
class JreLibrary(BaseLibrary):
    """A library expected to be supplied by the JRE itself and which may
    be absent; equality is based on the jar name."""

    def __init__(self, suite, name, jar, optional):
        BaseLibrary.__init__(self, suite, name, optional)
        self.jar = jar

    def __eq__(self, other):
        if not isinstance(other, JreLibrary):
            return NotImplemented
        return self.jar == other.jar

    def is_present_in_jdk(self, jdk):
        # Delegate the lookup of the jar to the JDK abstraction.
        return jdk.containsJar(self.jar)

    def all_deps(self, deps, includeLibs, includeSelf=True, includeJreLibs=False, includeAnnotationProcessors=False):
        """Add this JRE library to 'deps' when requested and return the
        sorted result; JRE libraries carry no transitive dependencies."""
        if includeJreLibs and includeSelf and self not in deps:
            deps.append(self)
        return sorted(deps)
class Library(BaseLibrary):
    """An external jar dependency, identified by a local path plus the
    URLs and SHA1 digests needed to (re)download it and its sources."""

    def __init__(self, suite, name, path, optional, urls, sha1, sourcePath, sourceUrls, sourceSha1, deps):
        BaseLibrary.__init__(self, suite, name, optional)
        self.path = path.replace('/', os.sep)
        self.urls = urls
        self.sha1 = sha1
        self.sourcePath = sourcePath
        self.sourceUrls = sourceUrls
        # When sources live in the same file as the binary, the source
        # digest must equal (or default to) the binary digest.
        if sourcePath == path:
            assert sourceSha1 is None or sourceSha1 == sha1
            sourceSha1 = sha1
        self.sourceSha1 = sourceSha1
        self.deps = deps
        abspath = _make_absolute(path, self.suite.dir)
        if not optional and not exists(abspath):
            if not len(urls):
                abort('Non-optional library {0} must either exist at {1} or specify one or more URLs from which it can be retrieved'.format(name, abspath))

        def _checkSha1PropertyCondition(propName, cond, inputPath):
            # Abort with an actionable message (including the digest to
            # paste into the projects file, when computable) if a required
            # sha1 property is missing.
            if not cond:
                absInputPath = _make_absolute(inputPath, self.suite.dir)
                if exists(absInputPath):
                    abort('Missing "{0}" property for library {1}. Add the following line to projects file:\nlibrary@{2}@{3}={4}'.format(propName, name, name, propName, sha1OfFile(absInputPath)))
                abort('Missing "{0}" property for library {1}'.format(propName, name))

        _checkSha1PropertyCondition('sha1', sha1, path)
        _checkSha1PropertyCondition('sourceSha1', not sourcePath or sourceSha1, sourcePath)
        for url in urls:
            # A directory dependency must pair a trailing-separator path
            # with a trailing-slash URL.
            if url.endswith('/') != self.path.endswith(os.sep):
                abort('Path for dependency directory must have a URL ending with "/": path=' + self.path + ' url=' + url)

    def __eq__(self, other):
        # Libraries with URLs compare by URL list; purely local libraries
        # compare by path.
        if isinstance(other, Library):
            if len(self.urls) == 0:
                return self.path == other.path
            else:
                return self.urls == other.urls
        else:
            return NotImplemented

    def get_path(self, resolve):
        """Return the absolute local path of the jar, downloading it first
        when 'resolve' is true; returns None if the JDK already provides it."""
        path = _make_absolute(self.path, self.suite.dir)
        sha1path = path + '.sha1'
        includedInJDK = getattr(self, 'includedInJDK', None)
        if includedInJDK and java().javaCompliance >= JavaCompliance(includedInJDK):
            return None
        # Boot classpath agents must be real files, not symlinks into the cache.
        bootClassPathAgent = getattr(self, 'bootClassPathAgent').lower() == 'true' if hasattr(self, 'bootClassPathAgent') else False
        return download_file_with_sha1(self.name, path, self.urls, self.sha1, sha1path, resolve, not self.optional, canSymlink=not bootClassPathAgent)

    def get_source_path(self, resolve):
        """Return the absolute local path of the sources jar (or None when
        the library declares no sources)."""
        if self.sourcePath is None:
            return None
        path = _make_absolute(self.sourcePath, self.suite.dir)
        sha1path = path + '.sha1'
        return download_file_with_sha1(self.name, path, self.sourceUrls, self.sourceSha1, sha1path, resolve, len(self.sourceUrls) != 0, sources=True)

    def append_to_classpath(self, cp, resolve):
        path = self.get_path(resolve)
        if path and (exists(path) or not resolve):
            cp.append(path)

    def all_deps(self, deps, includeLibs, includeSelf=True, includeJreLibs=False, includeAnnotationProcessors=False):
        """
        Add the transitive set of dependencies for this library to the 'deps' list.
        """
        if not includeLibs:
            return sorted(deps)
        childDeps = list(self.deps)
        if self in deps:
            return sorted(deps)
        for name in childDeps:
            assert name != self.name
            dep = library(name)
            if not dep in deps:
                dep.all_deps(deps, includeLibs=includeLibs, includeJreLibs=includeJreLibs, includeAnnotationProcessors=includeAnnotationProcessors)
        if not self in deps and includeSelf:
            deps.append(self)
        return sorted(deps)
class HgConfig:
    """
    Encapsulates access to Mercurial (hg)
    """

    def __init__(self):
        self.missing = 'no hg executable found'
        self.has_hg = None  # tri-state: None = not probed yet

    def check(self, abortOnFail=True):
        """Probe once for a runnable 'hg' executable, caching the result;
        complain (warn or abort) when it is absent."""
        if self.has_hg is None:
            try:
                subprocess.check_output(['hg'])
                self.has_hg = True
            except OSError:
                self.has_hg = False
                warn(self.missing)
        if not self.has_hg:
            if abortOnFail:
                abort(self.missing)
            else:
                warn(self.missing)

    def tip(self, sDir, abortOnError=True):
        """Return the tip revision id of the repository at sDir, or None
        on failure when abortOnError is false."""
        try:
            return subprocess.check_output(['hg', 'tip', '-R', sDir, '--template', '{node}'])
        except OSError:
            warn(self.missing)
        except subprocess.CalledProcessError:
            if not abortOnError:
                return None
            abort('failed to get tip revision id')

    def isDirty(self, sDir, abortOnError=True):
        """Report whether the working copy at sDir has uncommitted changes."""
        try:
            return len(subprocess.check_output(['hg', 'status', '-R', sDir])) > 0
        except OSError:
            warn(self.missing)
        except subprocess.CalledProcessError:
            if not abortOnError:
                return None
            abort('failed to get status')
def _load_suite_dict(mxDir):
    """Load and merge the suite dictionaries defined in 'mxDir'.

    Imports mxDir/suite.py (which must define a variable 'suite'), then any
    mxDir/suite1.py, suite2.py, ... (each of which must define 'extra'),
    merging their sections into a single dictionary. All string values are
    environment-variable expanded. Returns (mergedDict, lastModulePath).
    """
    suffix = 1
    suite = None
    dictName = 'suite'

    def expand(value, context):
        """Recursively expand environment variables in all string values.

        'context' is the dotted key path, used only for error messages.
        """
        if isinstance(value, types.DictionaryType):
            for n, v in value.iteritems():
                value[n] = expand(v, context + [n])
        elif isinstance(value, types.ListType):
            for i in range(len(value)):
                value[i] = expand(value[i], context + [str(i)])
        else:
            if not isinstance(value, types.StringTypes):
                abort('value of ' + '.'.join(context) + ' is of unexpected type ' + str(type(value)))
            value = expandvars(value)
            # a remaining '$' or '%' means some variable did not resolve
            if '$' in value or '%' in value:
                abort('value of ' + '.'.join(context) + ' contains an undefined environment variable: ' + value)
        return value

    moduleName = 'suite'
    modulePath = join(mxDir, moduleName + '.py')
    while exists(modulePath):
        # Importing under a fixed module name could clash with an unrelated
        # module of the same name already loaded; warn if so.
        savedModule = sys.modules.get(moduleName)
        if savedModule:
            warn(modulePath + ' conflicts with ' + savedModule.__file__)
        # temporarily extend the Python path
        sys.path.insert(0, mxDir)

        snapshot = frozenset(sys.modules.keys())
        module = __import__(moduleName)

        if savedModule:
            # restore the old module into the module name space
            sys.modules[moduleName] = savedModule
        else:
            # remove moduleName from the module name space
            sys.modules.pop(moduleName)

        # For now fail fast if extra modules were loaded.
        # This can later be relaxed to simply remove the extra modules
        # from the sys.modules name space if necessary.
        extraModules = frozenset(sys.modules.keys()) - snapshot
        assert len(extraModules) == 0, 'loading ' + modulePath + ' caused extra modules to be loaded: ' + ', '.join([m for m in extraModules])

        # revert the Python path
        del sys.path[0]

        if not hasattr(module, dictName):
            abort(modulePath + ' must define a variable named "' + dictName + '"')
        d = expand(getattr(module, dictName), [dictName])
        # 'name'/'mxversion' are only legal in the primary suite.py;
        # 'distribution_extensions' only in the follow-on suiteN.py files.
        sections = ['projects', 'libraries', 'jrelibraries', 'distributions'] + (['distribution_extensions'] if suite else ['name', 'mxversion'])
        unknown = frozenset(d.keys()) - frozenset(sections)
        if unknown:
            abort(modulePath + ' defines unsupported suite sections: ' + ', '.join(unknown))

        if suite is None:
            suite = d
        else:
            # merge this module's sections into the accumulated dictionary,
            # rejecting redefinition of any existing entry
            for s in sections:
                existing = suite.get(s)
                additional = d.get(s)
                if additional:
                    if not existing:
                        suite[s] = additional
                    else:
                        conflicting = frozenset(additional.keys()) & frozenset(existing.keys())
                        if conflicting:
                            abort(modulePath + ' redefines: ' + ', '.join(conflicting))
                        existing.update(additional)
            distExtensions = d.get('distribution_extensions')
            if distExtensions:
                # extensions may only append to an existing distribution's
                # 'dependencies' list
                existing = suite['distributions']
                for n, attrs in distExtensions.iteritems():
                    original = existing.get(n)
                    if not original:
                        abort('cannot extend non-existing distribution ' + n)
                    for k, v in attrs.iteritems():
                        if k != 'dependencies':
                            abort('Only the dependencies of distribution ' + n + ' can be extended')
                        if not isinstance(v, types.ListType):
                            abort('distribution_extensions.' + n + '.dependencies must be a list')
                        original['dependencies'] += v

        dictName = 'extra'
        moduleName = 'suite' + str(suffix)
        modulePath = join(mxDir, moduleName + '.py')

        deprecatedModulePath = join(mxDir, 'projects' + str(suffix) + '.py')
        if exists(deprecatedModulePath):
            abort('Please rename ' + deprecatedModulePath + ' to ' + modulePath)

        suffix = suffix + 1

    return suite, modulePath
class Suite:
    """A suite: a directory tree of projects, libraries and distributions.

    The suite's metadata lives in its 'mxDir' (containing suite.py, an
    optional 'env' file and an optional mx_<name>.py commands module).
    Constructing a Suite registers it in the module-level _suites registry;
    _post_init() (called after command-line parsing) parses suite.py and
    populates the global _projects/_libs/_jreLibs/_dists registries.
    """
    def __init__(self, mxDir, primary, load=True):
        # mxDir: the suite's metadata directory; the suite root is its parent
        # primary: True for the suite in which mx was invoked
        # load: when False, only minimal state is set up (no env/commands)
        self.dir = dirname(mxDir)
        self.mxDir = mxDir
        self.projects = []
        self.libs = []
        self.jreLibs = []
        self.dists = []
        self.commands = None
        self.primary = primary
        self.requiredMxVersion = None
        self.name = _suitename(mxDir)  # validated in _load_projects
        if load:
            # just check that there are no imports
            self._load_imports()
            self._load_env()
            self._load_commands()
        _suites[self.name] = self

    def __str__(self):
        return self.name

    def _load_projects(self):
        """Parse mx/suite.py (if present) and populate self.projects,
        self.libs, self.jreLibs and self.dists from its sections."""
        suitePyFile = join(self.mxDir, 'suite.py')
        if not exists(suitePyFile):
            return
        suiteDict, _ = _load_suite_dict(self.mxDir)

        if suiteDict.get('name') is not None and suiteDict.get('name') != self.name:
            abort('suite name in project file does not match ' + _suitename(self.mxDir))

        if suiteDict.has_key('mxversion'):
            try:
                self.requiredMxVersion = VersionSpec(suiteDict['mxversion'])
            except AssertionError as ae:
                abort('Exception while parsing "mxversion" in project file: ' + str(ae))

        libsMap = suiteDict['libraries']
        jreLibsMap = suiteDict['jrelibraries']
        projsMap = suiteDict['projects']
        distsMap = suiteDict['distributions']

        def pop_list(attrs, name, context):
            # Pop a list-valued attribute, treating an absent/empty value as []
            v = attrs.pop(name, None)
            if not v:
                return []
            if not isinstance(v, list):
                abort('Attribute "' + name + '" for ' + context + ' must be a list')
            return v

        for name, attrs in sorted(projsMap.iteritems()):
            context = 'project ' + name
            srcDirs = pop_list(attrs, 'sourceDirs', context)
            deps = pop_list(attrs, 'dependencies', context)
            ap = pop_list(attrs, 'annotationProcessors', context)
            javaCompliance = attrs.pop('javaCompliance', None)
            subDir = attrs.pop('subDir', None)
            if subDir is None:
                d = join(self.dir, name)
            else:
                d = join(self.dir, subDir, name)
            workingSets = attrs.pop('workingSets', None)
            p = Project(self, name, srcDirs, deps, javaCompliance, workingSets, d)
            p.checkstyleProj = attrs.pop('checkstyle', name)
            p.native = attrs.pop('native', '') == 'true'
            if not p.native and p.javaCompliance is None:
                abort('javaCompliance property required for non-native project ' + name)
            if len(ap) > 0:
                p._declaredAnnotationProcessors = ap
            # remaining attributes become plain instance attributes
            p.__dict__.update(attrs)
            self.projects.append(p)

        for name, attrs in sorted(jreLibsMap.iteritems()):
            jar = attrs.pop('jar')
            # JRE libraries are optional by default
            optional = attrs.pop('optional', 'true') != 'false'
            l = JreLibrary(self, name, jar, optional)
            self.jreLibs.append(l)

        for name, attrs in sorted(libsMap.iteritems()):
            context = 'library ' + name
            # A name of the form "libname|os-platform|architecture" restricts
            # the library to that platform/architecture combination.
            if "|" in name:
                if name.count('|') != 2:
                    abort("Format error in library name: " + name + "\nsyntax: libname|os-platform|architecture")
                name, platform, architecture = name.split("|")
                if platform != get_os() or architecture != get_arch():
                    continue
            path = attrs.pop('path')
            urls = pop_list(attrs, 'urls', context)
            sha1 = attrs.pop('sha1', None)
            sourcePath = attrs.pop('sourcePath', None)
            sourceUrls = pop_list(attrs, 'sourceUrls', context)
            sourceSha1 = attrs.pop('sourceSha1', None)
            deps = pop_list(attrs, 'dependencies', context)
            # Add support optional libraries once we have a good use case
            optional = False
            l = Library(self, name, path, optional, urls, sha1, sourcePath, sourceUrls, sourceSha1, deps)
            l.__dict__.update(attrs)
            self.libs.append(l)

        for name, attrs in sorted(distsMap.iteritems()):
            context = 'distribution ' + name
            path = attrs.pop('path')
            sourcesPath = attrs.pop('sourcesPath', None)
            deps = pop_list(attrs, 'dependencies', context)
            mainClass = attrs.pop('mainClass', None)
            exclDeps = pop_list(attrs, 'exclude', context)
            distDeps = pop_list(attrs, 'distDependencies', context)
            javaCompliance = attrs.pop('javaCompliance', None)
            d = Distribution(self, name, path, sourcesPath, deps, mainClass, exclDeps, distDeps, javaCompliance)
            d.__dict__.update(attrs)
            self.dists.append(d)

        # Create a distribution for each project that defines annotation processors
        for p in self.projects:
            annotationProcessors = None
            for srcDir in p.source_dirs():
                configFile = join(srcDir, 'META-INF', 'services', 'javax.annotation.processing.Processor')
                if exists(configFile):
                    with open(configFile) as fp:
                        annotationProcessors = [ap.strip() for ap in fp]
                        if len(annotationProcessors) != 0:
                            for ap in annotationProcessors:
                                if not ap.startswith(p.name):
                                    abort(ap + ' in ' + configFile + ' does not start with ' + p.name)
            if annotationProcessors:
                dname = p.name.replace('.', '_').upper()
                apDir = join(p.dir, 'ap')
                path = join(apDir, p.name + '.jar')
                sourcesPath = None
                deps = [p.name]
                mainClass = None
                exclDeps = []
                distDeps = []
                javaCompliance = None
                d = Distribution(self, dname, path, sourcesPath, deps, mainClass, exclDeps, distDeps, javaCompliance)
                d.subDir = os.path.relpath(os.path.dirname(p.dir), self.dir)
                self.dists.append(d)
                p.definedAnnotationProcessors = annotationProcessors
                p.definedAnnotationProcessorsDist = d
                d.definingProject = p

                # Restrict exported annotation processors to those explicitly defined by the project
                def _refineAnnotationProcessorServiceConfig(dist):
                    aps = dist.definingProject.definedAnnotationProcessors
                    apsJar = dist.path
                    config = 'META-INF/services/javax.annotation.processing.Processor'
                    with zipfile.ZipFile(apsJar, 'r') as zf:
                        currentAps = zf.read(config).split()
                    if currentAps != aps:
                        logv('[updating ' + config + ' in ' + apsJar + ']')
                        with Archiver(apsJar) as arc:
                            with zipfile.ZipFile(apsJar, 'r') as lp:
                                for arcname in lp.namelist():
                                    if arcname == config:
                                        arc.zf.writestr(arcname, '\n'.join(aps))
                                    else:
                                        arc.zf.writestr(arcname, lp.read(arcname))
                d.add_update_listener(_refineAnnotationProcessorServiceConfig)
                # NOTE(review): 'd' already appears to have been appended to
                # self.dists above — this second append looks like a duplicate
                # that will trigger the 'distribution redefined' warning in
                # _post_init; confirm intended.
                self.dists.append(d)

        if self.name is None:
            abort('Missing "suite=<name>" in ' + suitePyFile)

    def _commands_name(self):
        # name of this suite's Python commands module, e.g. mx_my_suite
        return 'mx_' + self.name.replace('-', '_')

    def _find_commands(self, name):
        """Return 'name' if mxDir/<name>.py exists, else None."""
        commandsPath = join(self.mxDir, name + '.py')
        if exists(commandsPath):
            return name
        else:
            return None

    def _load_commands(self):
        """Import the suite's commands module (if any) and run its mx_init."""
        commandsName = self._find_commands(self._commands_name())
        if commandsName is None:
            # backwards compatibility
            commandsName = self._find_commands('commands')
        if commandsName is not None:
            if commandsName in sys.modules:
                abort(commandsName + '.py in suite ' + self.name + ' duplicates ' + sys.modules[commandsName].__file__)
            # temporarily extend the Python path
            sys.path.insert(0, self.mxDir)
            mod = __import__(commandsName)

            self.commands = sys.modules.pop(commandsName)
            sys.modules[commandsName] = self.commands

            # revert the Python path
            del sys.path[0]

            if not hasattr(mod, 'mx_init'):
                abort(commandsName + '.py in suite ' + self.name + ' must define an mx_init(suite) function')
            if hasattr(mod, 'mx_post_parse_cmd_line'):
                self.mx_post_parse_cmd_line = mod.mx_post_parse_cmd_line

            mod.mx_init(self)
            self.commands = mod

    def _load_imports(self):
        # suite imports were a multi-suite feature; reject them here
        if exists(join(self.mxDir, 'imports')):
            abort('multiple suites are not supported in this version of mx')

    def _load_env(self):
        """Read mxDir/env (key=value per line, '#' comments) into os.environ."""
        e = join(self.mxDir, 'env')
        if exists(e):
            with open(e) as f:
                lineNum = 0
                for line in f:
                    lineNum = lineNum + 1
                    line = line.strip()
                    if len(line) != 0 and line[0] != '#':
                        if not '=' in line:
                            abort(e + ':' + str(lineNum) + ': line does not match pattern "key=value"')
                        key, value = line.split('=', 1)
                        os.environ[key.strip()] = expandvars_in_property(value.strip())

    def _post_init(self, opts):
        """Parse suite.py, register contents globally and prune entries whose
        requirements (optional libraries, JRE libraries, Java compliance)
        cannot be satisfied. Called once after command-line parsing."""
        self._load_projects()
        if self.requiredMxVersion is None:
            warn("This suite does not express any required mx version. Consider adding 'mxversion=<version>' to your projects file.")
        elif self.requiredMxVersion > version:
            abort("This suite requires mx version " + str(self.requiredMxVersion) + " while your current mx version is " + str(version) + ". Please update mx.")
        # set the global data structures, checking for conflicts unless _check_global_structures is False
        for p in self.projects:
            existing = _projects.get(p.name)
            if existing is not None:
                abort('cannot override project ' + p.name + ' in ' + p.dir + " with project of the same name in " + existing.dir)
            if not p.name in _opts.ignored_projects:
                _projects[p.name] = p
        for l in self.libs:
            existing = _libs.get(l.name)
            # Check that suites that define same library are consistent
            if existing is not None and existing != l:
                abort('inconsistent library redefinition of ' + l.name + ' in ' + existing.suite.dir + ' and ' + l.suite.dir)
            _libs[l.name] = l
        for l in self.jreLibs:
            existing = _jreLibs.get(l.name)
            # Check that suites that define same library are consistent
            if existing is not None and existing != l:
                abort('inconsistent JRE library redefinition of ' + l.name + ' in ' + existing.suite.dir + ' and ' + l.suite.dir)
            _jreLibs[l.name] = l
        for d in self.dists:
            existing = _dists.get(d.name)
            if existing is not None:
                # allow redefinition, so use path from existing
                # abort('cannot redefine distribution ' + d.name)
                warn('distribution ' + d.name + ' redefined')
                d.path = existing.path
            _dists[d.name] = d

        # Remove projects and libraries that (recursively) depend on an optional library
        # whose artifact does not exist or on a JRE library that is not present in the
        # JDK for a project. Also remove projects whose Java compliance requirement
        # cannot be satisfied by the configured JDKs.
        #
        # Removed projects and libraries are also removed from
        # distributions in they are listed as dependencies.
        for d in sorted_deps(includeLibs=True):
            if d.isLibrary():
                if d.optional:
                    # Temporarily make the library non-optional so get_path
                    # aborts (SystemExit) instead of returning when missing.
                    try:
                        d.optional = False
                        path = d.get_path(resolve=True)
                    except SystemExit:
                        path = None
                    finally:
                        d.optional = True
                    if not path:
                        logv('[omitting optional library {0} as {1} does not exist]'.format(d, d.path))
                        del _libs[d.name]
                        self.libs.remove(d)
            elif d.isProject():
                if java(d.javaCompliance) is None:
                    logv('[omitting project {0} as Java compliance {1} cannot be satisfied by configured JDKs]'.format(d, d.javaCompliance))
                    del _projects[d.name]
                    self.projects.remove(d)
                else:
                    # iterate over a copy since removal may mutate d.deps' owner
                    for name in list(d.deps):
                        jreLib = _jreLibs.get(name)
                        if jreLib:
                            if not jreLib.is_present_in_jdk(java(d.javaCompliance)):
                                if jreLib.optional:
                                    logv('[omitting project {0} as dependency {1} is missing]'.format(d, name))
                                    del _projects[d.name]
                                    self.projects.remove(d)
                                else:
                                    abort('JRE library {0} required by {1} not found'.format(jreLib, d))
                        elif not dependency(name, fatalIfMissing=False):
                            logv('[omitting project {0} as dependency {1} is missing]'.format(d, name))
                            del _projects[d.name]
                            self.projects.remove(d)
        # drop removed dependencies from all distributions
        for dist in _dists.itervalues():
            for name in list(dist.deps):
                if not dependency(name, fatalIfMissing=False):
                    logv('[omitting {0} from distribution {1}]'.format(name, dist))
                    dist.deps.remove(name)

        if hasattr(self, 'mx_post_parse_cmd_line'):
            self.mx_post_parse_cmd_line(opts)
class XMLElement(xml.dom.minidom.Element):
    """Element subclass whose writexml emits text-only elements compactly.

    When an element's only child is a Text node (and the owning document does
    not set padTextNodeWithoutSiblings), it is written as <tag>text</tag> on
    a single line instead of minidom's default newline/indent padding.
    """
    def writexml(self, writer, indent="", addindent="", newl=""):
        writer.write(indent + "<" + self.tagName)

        # emit attributes in sorted (deterministic) order
        attrs = self._get_attributes()
        a_names = attrs.keys()
        a_names.sort()

        for a_name in a_names:
            writer.write(" %s=\"" % a_name)
            xml.dom.minidom._write_data(writer, attrs[a_name].value)
            writer.write("\"")
        if self.childNodes:
            if not self.ownerDocument.padTextNodeWithoutSiblings and len(self.childNodes) == 1 and isinstance(self.childNodes[0], xml.dom.minidom.Text):
                # if the only child of an Element node is a Text node, then the
                # text is printed without any indentation or new line padding
                writer.write(">")
                self.childNodes[0].writexml(writer)
                writer.write("</%s>%s" % (self.tagName, newl))
            else:
                writer.write(">%s" % (newl))
                for node in self.childNodes:
                    node.writexml(writer, indent + addindent, addindent, newl)
                writer.write("%s</%s>%s" % (indent, self.tagName, newl))
        else:
            # no children: self-closing form
            writer.write("/>%s" % (newl))
class XMLDoc(xml.dom.minidom.Document):
    """Builder-style XML document that renders text-only elements compactly.

    open/close/element return self so calls can be chained; xml() serializes
    the completed document (all opened elements must have been closed).
    """

    def __init__(self):
        xml.dom.minidom.Document.__init__(self)
        # the element new children are appended to; starts as the document
        self.current = self
        self.padTextNodeWithoutSiblings = False

    def createElement(self, tagName):
        # overridden so elements use XMLElement's compact text rendering
        element = XMLElement(tagName)
        element.ownerDocument = self
        return element

    def comment(self, txt):
        """Append an XML comment to the currently open element."""
        self.current.appendChild(self.createComment(txt))

    def open(self, tag, attributes=None, data=None):
        """Open a child element 'tag'; subsequent children nest inside it."""
        element = self.createElement(tag)
        for key, value in (attributes or {}).items():
            element.setAttribute(key, value)
        if data is not None:
            element.appendChild(self.createTextNode(data))
        self.current.appendChild(element)
        self.current = element
        return self

    def close(self, tag):
        """Close the currently open element, which must be named 'tag'."""
        assert self.current != self
        assert tag == self.current.tagName, str(tag) + ' != ' + self.current.tagName
        self.current = self.current.parentNode
        return self

    def element(self, tag, attributes=None, data=None):
        """Append a complete (immediately closed) child element."""
        return self.open(tag, attributes, data).close(tag)

    def xml(self, indent='', newl='', escape=False, standalone=None):
        """Serialize to UTF-8 XML.

        When 'escape' is true, the whole result is XML-escaped (for embedding
        as an attribute value); 'standalone' adds a standalone declaration.
        """
        assert self.current == self
        result = self.toprettyxml(indent, newl, encoding="UTF-8")
        if escape:
            entities = {'"': "&quot;", "'": "&apos;", '\n': '&#10;'}
            result = xml.sax.saxutils.escape(result, entities)
        if standalone is not None:
            result = result.replace('encoding="UTF-8"?>', 'encoding="UTF-8" standalone="' + str(standalone) + '"?>')
        return result
def get_jython_os():
    """Canonical OS name when running under Jython.

    Derived from java.lang.System properties; aborts on an unknown OS.
    """
    from java.lang import System as System
    os_name = System.getProperty('os.name').lower()
    if System.getProperty('isCygwin'):
        return 'cygwin'
    for prefix, canonical in [('mac', 'darwin'), ('linux', 'linux'), ('sunos', 'solaris'), ('win', 'windows')]:
        if os_name.startswith(prefix):
            return canonical
    abort('Unknown operating system ' + os_name)
def get_os():
    """
    Get a canonical form of sys.platform.
    """
    if is_jython():
        return get_jython_os()
    # map sys.platform prefixes to mx's canonical OS names
    for prefix, canonical in [('darwin', 'darwin'), ('linux', 'linux'), ('sunos', 'solaris'), ('win32', 'windows'), ('cygwin', 'cygwin')]:
        if sys.platform.startswith(prefix):
            return canonical
    abort('Unknown operating system ' + sys.platform)
def _cygpathU2W(p):
"""
Translate a path from unix-style to windows-style.
This method has no effects on other platforms than cygwin.
"""
if p is None or get_os() != "cygwin":
return p
return subprocess.check_output(['cygpath', '-a', '-w', p]).strip()
def _cygpathW2U(p):
"""
Translate a path from windows-style to unix-style.
This method has no effects on other platforms than cygwin.
"""
if p is None or get_os() != "cygwin":
return p
return subprocess.check_output(['cygpath', '-a', '-u', p]).strip()
def _separatedCygpathU2W(p):
"""
Translate a group of paths, separated by a path separator.
unix-style to windows-style.
This method has no effects on other platforms than cygwin.
"""
if p is None or p == "" or get_os() != "cygwin":
return p
return ';'.join(map(_cygpathU2W, p.split(os.pathsep)))
def _separatedCygpathW2U(p):
"""
Translate a group of paths, separated by a path separator.
windows-style to unix-style.
This method has no effects on other platforms than cygwin.
"""
if p is None or p == "" or get_os() != "cygwin":
return p
return os.pathsep.join(map(_cygpathW2U, p.split(';')))
def get_arch():
    """Canonical architecture name of the host ('amd64' or 'sparcv9'); aborts otherwise."""
    machine = platform.uname()[4]
    if machine in ('amd64', 'AMD64', 'x86_64', 'i86pc'):
        return 'amd64'
    if machine in ('sun4v', 'sun4u'):
        return 'sparcv9'
    if machine == 'i386' and get_os() == 'darwin':
        # A 32-bit Python on a 64-bit capable Mac (Snow Leopard and earlier)
        # reports i386; ask the kernel whether 64-bit is supported.
        try:
            if subprocess.check_output(['sysctl', '-n', 'hw.cpu64bit_capable']).strip() == '1':
                return 'amd64'
        except OSError:
            # sysctl is not available
            pass
    abort('unknown or unsupported architecture: os=' + get_os() + ', machine=' + machine)
def _loadSuite(mxDir, primary=False):
    """
    Load the suite rooted at 'mxDir', returning the already-loaded instance
    if one exists for that directory.
    """
    for existing in _suites.itervalues():
        if existing.mxDir == mxDir:
            return existing
    # create the new suite (its constructor registers it in _suites)
    return Suite(mxDir, primary)
def suites(opt_limit_to_suite=False):
    """
    Get the list of all loaded suites.

    Note: 'opt_limit_to_suite' is currently unused; it is accepted for
    symmetry with projects(opt_limit_to_suite).
    """
    return _suites.values()
def suite(name, fatalIfMissing=True):
    """
    Get the suite for a given name, aborting if it is not loaded and
    'fatalIfMissing' is true (otherwise returning None).
    """
    found = _suites.get(name)
    if found is not None:
        return found
    if fatalIfMissing:
        abort('suite named ' + name + ' not found')
    return None
def projects_from_names(projectNames):
    """
    Get the list of projects corresponding to projectNames; all projects if None.
    """
    if projectNames is None:
        return projects()
    return [project(n) for n in projectNames]
def projects(opt_limit_to_suite=False):
    """
    Get the list of all loaded projects, sorted by name, optionally limited
    by the --suite option when opt_limit_to_suite is True.
    """
    ordered = sorted(_projects.values(), key=lambda p: p.name)
    return _projects_opt_limit_to_suites(ordered) if opt_limit_to_suite else ordered
def projects_opt_limit_to_suites():
    """
    Get the list of all loaded projects, limited by the --suite option.

    Equivalent to projects(opt_limit_to_suite=True).
    """
    return projects(True)
def _projects_opt_limit_to_suites(projects):
    # Hook for restricting a project list to the suites selected on the
    # command line; currently a no-op that returns the list unchanged.
    # (Note: the parameter shadows the module-level projects() function.)
    return projects
def annotation_processors():
    """
    Get the list of all loaded projects that define an annotation processor.

    The result is computed once and cached in the module-level
    _annotationProcessors variable.
    """
    global _annotationProcessors
    if _annotationProcessors is None:
        found = set()
        for proj in projects():
            # only keep processor names that resolve to a loaded project
            found.update(ap for ap in proj.annotation_processors() if project(ap, False))
        _annotationProcessors = list(found)
    return _annotationProcessors
def distribution(name, fatalIfMissing=True):
    """
    Get the distribution for a given name. This will abort if the named
    distribution does not exist and 'fatalIfMissing' is true; otherwise
    None is returned for a missing distribution.
    """
    found = _dists.get(name)
    if found is not None:
        return found
    if fatalIfMissing:
        abort('distribution named ' + name + ' not found')
    return None
def dependency(name, fatalIfMissing=True):
    """
    Get the project or (JRE) library for a given name, searching projects
    first, then libraries, then JRE libraries. Aborts if nothing is found
    and 'fatalIfMissing' is true; otherwise returns None.
    """
    for registry in (_projects, _libs, _jreLibs):
        found = registry.get(name)
        if found is not None:
            return found
    if fatalIfMissing:
        if name in _opts.ignored_projects:
            abort('project named ' + name + ' is ignored')
        abort('project or library named ' + name + ' not found')
    return None
def project(name, fatalIfMissing=True):
    """
    Get the project for a given name. This will abort if the named project
    does not exist and 'fatalIfMissing' is true; otherwise returns None.
    """
    found = _projects.get(name)
    if found is not None:
        return found
    if fatalIfMissing:
        if name in _opts.ignored_projects:
            abort('project named ' + name + ' is ignored')
        abort('project named ' + name + ' not found')
    return None
def library(name, fatalIfMissing=True):
    """
    Gets the library for a given name. This will abort if the named library
    does not exist and 'fatalIfMissing' is true; otherwise returns None.
    """
    found = _libs.get(name)
    if found is not None:
        return found
    if fatalIfMissing:
        if _projects.get(name):
            abort(name + ' is a project, not a library')
        abort('library named ' + name + ' not found')
    return None
def _as_classpath(deps, resolve):
    """Join the classpath entries of 'deps' (plus any --cp-pfx/--cp-sfx
    values) into a single os.pathsep-separated string."""
    cp = []
    if _opts.cp_prefix is not None:
        cp.append(_opts.cp_prefix)
    for dep in deps:
        dep.append_to_classpath(cp, resolve)
    if _opts.cp_suffix is not None:
        cp.append(_opts.cp_suffix)
    return os.pathsep.join(cp)
def classpath(names=None, resolve=True, includeSelf=True, includeBootClasspath=False):
    """
    Get the class path for a list of given dependencies and distributions, resolving each entry in the
    path (e.g. downloading a missing library) if 'resolve' is true.

    :param names: a name or list of names of projects, libraries or distributions
        (None selects all dependencies and all distributions)
    :param includeSelf: whether each named project itself (not just its
        dependencies) is included
    :param includeBootClasspath: prepend the primary JDK's boot class path
    """
    if names is None:
        deps = sorted_deps(includeLibs=True)
        dists = list(_dists.values())
    else:
        deps = []
        dists = []
        if isinstance(names, types.StringTypes):
            names = [names]
        for n in names:
            dep = dependency(n, fatalIfMissing=False)
            if dep:
                dep.all_deps(deps, True, includeSelf)
            else:
                # Fix: look up non-fatally so the combined error message below
                # is actually reachable (distribution(n) would have aborted
                # first with a narrower 'distribution ... not found' message).
                dist = distribution(n, fatalIfMissing=False)
                if not dist:
                    abort('project, library or distribution named ' + n + ' not found')
                dists.append(dist)

    if len(dists):
        distsDeps = set()
        for d in dists:
            distsDeps.update(d.sorted_deps())

        # remove deps covered by a dist that will be on the class path
        deps = [d for d in deps if d not in distsDeps]

    result = _as_classpath(deps, resolve)

    # prepend distributions
    if len(dists):
        distsCp = os.pathsep.join(dist.path for dist in dists)
        if len(result):
            result = distsCp + os.pathsep + result
        else:
            result = distsCp
    if includeBootClasspath:
        result = os.pathsep.join([java().bootclasspath(), result])
    return result
def classpath_walk(names=None, resolve=True, includeSelf=True, includeBootClasspath=False):
    """
    Walks the resources available in a given classpath, yielding a tuple for each resource
    where the first member of the tuple is a directory path or ZipFile object for a
    classpath entry and the second member is the qualified path of the resource relative
    to the classpath entry.

    The parameters are forwarded to classpath() to compute the entries walked.
    Non-existent entries are skipped; entries that are neither directories nor
    .jar/.zip files are ignored.
    """
    cp = classpath(names, resolve, includeSelf, includeBootClasspath)
    for entry in cp.split(os.pathsep):
        if not exists(entry):
            continue
        if isdir(entry):
            # yield both directories and files, with paths relative to 'entry'
            for root, dirs, files in os.walk(entry):
                for d in dirs:
                    entryPath = join(root[len(entry) + 1:], d)
                    yield entry, entryPath
                for f in files:
                    entryPath = join(root[len(entry) + 1:], f)
                    yield entry, entryPath
        elif entry.endswith('.jar') or entry.endswith('.zip'):
            # NOTE: the yielded ZipFile is closed when iteration leaves this
            # 'with' block, so callers must read entries as they are yielded
            with zipfile.ZipFile(entry, 'r') as zf:
                for zi in zf.infolist():
                    entryPath = zi.filename
                    yield zf, entryPath
def sorted_deps(projectNames=None, includeLibs=False, includeJreLibs=False, includeAnnotationProcessors=False):
    """
    Gets projects and libraries sorted such that dependencies
    are before the projects that depend on them. Unless 'includeLibs' is
    true, libraries are omitted from the result.
    """
    selected = projects_from_names(projectNames)
    return sorted_project_deps(selected, includeLibs=includeLibs, includeJreLibs=includeJreLibs, includeAnnotationProcessors=includeAnnotationProcessors)
def sorted_dists():
    """
    Gets distributions sorted such that each distribution comes after
    any distributions it depends upon.
    """
    dists = []
    def add_dist(dist):
        # depth-first post-order insertion of dist's distDependencies
        if not dist in dists:
            for depDist in [distribution(name) for name in dist.distDependencies]:
                add_dist(depDist)
            # re-check: a dependency cycle may have appended 'dist' during
            # the recursive calls above
            if not dist in dists:
                dists.append(dist)

    for d in _dists.itervalues():
        add_dist(d)
    return dists
def sorted_project_deps(projects, includeLibs=False, includeJreLibs=False, includeAnnotationProcessors=False):
    """Accumulate the transitive dependencies of 'projects' in topological
    order (dependencies before dependents) via each project's all_deps."""
    result = []
    for proj in projects:
        proj.all_deps(result, includeLibs=includeLibs, includeJreLibs=includeJreLibs, includeAnnotationProcessors=includeAnnotationProcessors)
    return result
def _handle_lookup_java_home(jdk):
    # Resolve the primary JDK directory from --java-home/JAVA_HOME,
    # prompting interactively if neither is set (single selection).
    return _handle_lookup_jdk(jdk, 'JAVA_HOME', '--java-home', False)
def _handle_lookup_extra_java_homes(jdk):
    # Resolve the secondary JDK directories from --extra-java-homes/
    # EXTRA_JAVA_HOMES, prompting interactively if unset (multi-select).
    return _handle_lookup_jdk(jdk, 'EXTRA_JAVA_HOMES', '--extra-java-homes', True)
def _handle_lookup_jdk(jdk, varName, flagName, allowMultiple):
    """Resolve a JDK location from the command line, the environment or
    interactive selection (in that order of precedence).

    'jdk' is the command-line value (may be None/''); 'varName' is the
    environment variable consulted next; 'flagName' is used in error
    messages; 'allowMultiple' allows selecting several JDKs joined with
    os.pathsep. The chosen value may be persisted to the primary suite's
    'env' file. Aborts when unset and stdout is not a terminal.
    """
    if jdk != None and jdk != '':
        return jdk
    jdk = os.environ.get(varName)
    if jdk != None and jdk != '':
        return jdk

    # Not set anywhere and we cannot prompt without a terminal
    if not sys.stdout.isatty():
        abort('Could not find bootstrap {0}. Use {1} option or ensure {2} environment variable is set.'.format(varName, flagName, varName))

    # Collect JDK candidates from the platform's conventional install roots
    candidateJdks = []
    if get_os() == 'darwin':
        base = '/Library/Java/JavaVirtualMachines'
        candidateJdks = [join(base, n, 'Contents/Home') for n in os.listdir(base) if exists(join(base, n, 'Contents/Home'))]
    elif get_os() == 'linux':
        base = '/usr/lib/jvm'
        candidateJdks = [join(base, n) for n in os.listdir(base) if exists(join(base, n, 'jre/lib/rt.jar'))]
    elif get_os() == 'solaris':
        base = '/usr/jdk/instances'
        candidateJdks = [join(base, n) for n in os.listdir(base) if exists(join(base, n, 'jre/lib/rt.jar'))]
    elif get_os() == 'windows':
        base = r'C:\Program Files\Java'
        candidateJdks = [join(base, n) for n in os.listdir(base) if exists(join(base, n, r'jre\lib\rt.jar'))]

    javaHome = None
    if len(candidateJdks) != 0:
        log('Missing value for {0}.'.format(varName))
        javaHome = select_items(candidateJdks + ['<other>'], allowMultiple=allowMultiple)
        if javaHome == '<other>':
            javaHome = None
        if javaHome != None and allowMultiple:
            javaHome = os.pathsep.join(javaHome)

    # Fall back to manual entry, validating that jre/lib/rt.jar exists
    while javaHome is None:
        javaHome = raw_input('Enter path of JDK for {0}: '.format(varName))
        rtJarPath = join(javaHome, 'jre', 'lib', 'rt.jar')
        if not exists(rtJarPath):
            log('Does not appear to be a valid JDK as ' + rtJarPath + ' does not exist')
            javaHome = None
        else:
            break

    # Offer to persist the choice in the primary suite's env file
    envPath = join(_primary_suite.mxDir, 'env')
    if ask_yes_no('Persist this setting by adding "{0}={1}" to {2}'.format(varName, javaHome, envPath), 'y'):
        with open(envPath, 'a') as fp:
            print >> fp, varName + '=' + javaHome

    return javaHome
class ArgParser(ArgumentParser):
    """Command-line parser for mx's global options.

    Appends the list of registered commands to the help output and treats
    everything after the global options as the command and its arguments.
    """
    # Override parent to append the list of available commands
    def format_help(self):
        return ArgumentParser.format_help(self) + _format_commands()

    def __init__(self):
        self.java_initialized = False
        # this doesn't resolve the right way, but can't figure out how to override _handle_conflict_resolve in _ActionsContainer
        ArgumentParser.__init__(self, prog='mx', conflict_handler='resolve')

        self.add_argument('-v', action='store_true', dest='verbose', help='enable verbose output')
        self.add_argument('-V', action='store_true', dest='very_verbose', help='enable very verbose output')
        self.add_argument('-w', action='store_true', dest='warn', help='enable warning messages')
        self.add_argument('-p', '--primary-suite-path', help='set the primary suite directory', metavar='<path>')
        self.add_argument('--dbg', type=int, dest='java_dbg_port', help='make Java processes wait on <port> for a debugger', metavar='<port>')
        self.add_argument('-d', action='store_const', const=8000, dest='java_dbg_port', help='alias for "-dbg 8000"')
        self.add_argument('--backup-modified', action='store_true', help='backup generated files if they pre-existed and are modified')
        self.add_argument('--cp-pfx', dest='cp_prefix', help='class path prefix', metavar='<arg>')
        self.add_argument('--cp-sfx', dest='cp_suffix', help='class path suffix', metavar='<arg>')
        self.add_argument('--J', dest='java_args', help='Java VM arguments (e.g. --J @-dsa)', metavar='@<args>')
        self.add_argument('--Jp', action='append', dest='java_args_pfx', help='prefix Java VM arguments (e.g. --Jp @-dsa)', metavar='@<args>', default=[])
        self.add_argument('--Ja', action='append', dest='java_args_sfx', help='suffix Java VM arguments (e.g. --Ja @-dsa)', metavar='@<args>', default=[])
        self.add_argument('--user-home', help='users home directory', metavar='<path>', default=os.path.expanduser('~'))
        self.add_argument('--java-home', help='primary JDK directory (must be JDK 7 or later)', metavar='<path>')
        self.add_argument('--extra-java-homes', help='secondary JDK directories separated by "' + os.pathsep + '"', metavar='<path>')
        self.add_argument('--ignore-project', action='append', dest='ignored_projects', help='name of project to ignore', metavar='<name>', default=[])
        self.add_argument('--kill-with-sigquit', action='store_true', dest='killwithsigquit', help='send sigquit first before killing child processes')
        if get_os() != 'windows':
            # Time outs are (currently) implemented with Unix specific functionality
            self.add_argument('--timeout', help='timeout (in seconds) for command', type=int, default=0, metavar='<secs>')
            self.add_argument('--ptimeout', help='timeout (in seconds) for subprocesses', type=int, default=0, metavar='<secs>')

    def _parse_cmd_line(self, args=None):
        """Parse the global options, set the module-level _opts, resolve the
        JDK settings and return (opts, commandAndArgs)."""
        # NOTE(review): an explicit non-None 'args' value is never forwarded
        # to parse_args() below, so it is effectively ignored — confirm intended.
        if args is None:
            args = sys.argv[1:]

        # everything after the global options is the command and its args
        self.add_argument('commandAndArgs', nargs=REMAINDER, metavar='command args...')

        opts = self.parse_args()

        global _opts
        _opts = opts

        # Give the timeout options a default value to avoid the need for hasattr() tests
        opts.__dict__.setdefault('timeout', 0)
        opts.__dict__.setdefault('ptimeout', 0)

        if opts.very_verbose:
            opts.verbose = True

        opts.java_home = _handle_lookup_java_home(opts.java_home)
        opts.extra_java_homes = _handle_lookup_extra_java_homes(opts.extra_java_homes)

        if opts.user_home is None or opts.user_home == '':
            abort('Could not find user home. Use --user-home option or ensure HOME environment variable is set.')

        # propagate the resolved settings to child processes
        os.environ['JAVA_HOME'] = opts.java_home
        os.environ['HOME'] = opts.user_home

        opts.ignored_projects = opts.ignored_projects + os.environ.get('IGNORED_PROJECTS', '').split(',')

        commandAndArgs = opts.__dict__.pop('commandAndArgs')
        return opts, commandAndArgs

    def _handle_conflict_resolve(self, action, conflicting_actions):
        # treat option conflicts as hard errors instead of silently resolving
        self._handle_conflict_error(action, conflicting_actions)
def _format_commands():
    """Return a formatted listing of all registered mx commands, each with
    the first line of its handler's docstring as a summary."""
    lines = ['\navailable commands:\n']
    for cmd in sorted(_commands.iterkeys()):
        handler = _commands[cmd][0]
        summary = (handler.__doc__ or '').split('\n', 1)[0]
        lines.append(' {0:<20} {1}'.format(cmd, summary))
    return '\n'.join(lines) + '\n\n'
def java(requiredCompliance=None):
    """
    Get a JavaConfig object containing Java commands launch details.
    If requiredCompliance is None, the compliance level specified by --java-home/JAVA_HOME
    is returned. Otherwise, the JavaConfig exactly matching requiredCompliance is returned
    or None if there is no exact match.
    """
    assert _java_homes
    if not requiredCompliance:
        return _java_homes[0]
    return next((jh for jh in _java_homes if jh.javaCompliance == requiredCompliance), None)
def run_java(args, nonZeroIsFatal=True, out=None, err=None, cwd=None, addDefaultArgs=True, javaConfig=None):
    """Run the java executable described by 'javaConfig' (defaulting to the
    primary JDK) with 'args'; remaining parameters are forwarded to run()."""
    config = javaConfig or java()
    cmd = config.format_cmd(args, addDefaultArgs)
    return run(cmd, nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, cwd=cwd)
def _kill_process_group(pid, sig):
if not sig:
sig = signal.SIGKILL
pgid = os.getpgid(pid)
try:
os.killpg(pgid, sig)
return True
except:
log('Error killing subprocess ' + str(pgid) + ': ' + str(sys.exc_info()[1]))
return False
def _waitWithTimeout(process, args, timeout):
    """Wait for 'process' to terminate, aborting if it runs longer than
    'timeout' seconds. Returns the process's return code (negative signal
    number if it was killed by a signal). 'args' is only used in the
    timeout error message. Unix-only (uses os.waitpid/WNOHANG).
    """
    def _waitpid(pid):
        # non-blocking waitpid, retrying when interrupted by a signal
        while True:
            try:
                return os.waitpid(pid, os.WNOHANG)
            except OSError, e:
                if e.errno == errno.EINTR:
                    continue
                raise

    def _returncode(status):
        # convert a waitpid status word into a Popen-style return code
        if os.WIFSIGNALED(status):
            return -os.WTERMSIG(status)
        elif os.WIFEXITED(status):
            return os.WEXITSTATUS(status)
        else:
            # Should never happen
            raise RuntimeError("Unknown child exit status!")

    end = time.time() + timeout
    delay = 0.0005
    while True:
        (pid, status) = _waitpid(process.pid)
        if pid == process.pid:
            return _returncode(status)
        remaining = end - time.time()
        if remaining <= 0:
            abort('Process timed out after {0} seconds: {1}'.format(timeout, ' '.join(args)))
        # exponential backoff, capped at 50ms and at the remaining time
        delay = min(delay * 2, remaining, .05)
        time.sleep(delay)
# Makes the current subprocess accessible to the abort() function
# This is a list of tuples of the subprocess.Popen or
# multiprocessing.Process object and args.
# Maintained via _addSubprocess()/_removeSubprocess().
_currentSubprocesses = []
def _addSubprocess(p, args):
    # Register subprocess 'p' (with its command line 'args') so abort() can
    # terminate it; returns the registry entry for later _removeSubprocess.
    entry = (p, args)
    _currentSubprocesses.append(entry)
    return entry
def _removeSubprocess(entry):
    """Unregister an entry previously returned by _addSubprocess().

    Tolerates None and entries that were already removed.
    """
    if entry is None:
        return
    try:
        _currentSubprocesses.remove(entry)
    except ValueError:
        # list.remove is the only statement here and it only raises
        # ValueError; the previous bare 'except' could also swallow
        # KeyboardInterrupt/SystemExit. The membership pre-check was
        # redundant with this handler.
        pass
def waitOn(p):
    """Wait for subprocess 'p' to terminate and return its exit code.

    On Windows a short polling loop is used instead of wait() so that
    signals (e.g. CTRL-C) still get handled.
    """
    if get_os() != 'windows':
        return p.wait()
    retcode = None
    while retcode is None:
        retcode = p.poll()
        time.sleep(0.05)
    return retcode
def run(args, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, env=None):
    """
    Run a command in a subprocess, wait for it to complete and return the exit status of the process.
    If the exit status is non-zero and `nonZeroIsFatal` is true, then mx is exited with
    the same exit status.
    Each line of the standard output and error streams of the subprocess are redirected to
    out and err if they are callable objects.
    """
    assert isinstance(args, types.ListType), "'args' must be a list: " + str(args)
    for arg in args:
        assert isinstance(arg, types.StringTypes), 'argument is not a string: ' + str(arg)
    if env is None:
        env = os.environ.copy()
    # Ideally the command line could be communicated directly in an environment
    # variable. However, since environment variables share the same resource
    # space as the command line itself (on Unix at least), this would cause the
    # limit to be exceeded too easily.
    with tempfile.NamedTemporaryFile(suffix='', prefix='mx_subprocess_command.', mode='w', delete=False) as fp:
        subprocessCommandFile = fp.name
        for arg in args:
            # TODO: handle newlines in args once there's a use case
            assert '\n' not in arg
            print >> fp, arg
    env['MX_SUBPROCESS_COMMAND_FILE'] = subprocessCommandFile
    if _opts.verbose:
        if _opts.very_verbose:
            log('Environment variables:')
            for key in sorted(env.keys()):
                log(' ' + key + '=' + env[key])
        log(' '.join(map(pipes.quote, args)))
    # a global process timeout (--ptimeout) applies unless overridden per call
    if timeout is None and _opts.ptimeout != 0:
        timeout = _opts.ptimeout
    sub = None
    try:
        # On Unix, the new subprocess should be in a separate group so that a timeout alarm
        # can use os.killpg() to kill the whole subprocess group
        preexec_fn = None
        creationflags = 0
        if not is_jython():
            if get_os() == 'windows':
                creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
            else:
                preexec_fn = os.setsid
        def redirect(stream, f):
            # pump each line of 'stream' through the callable 'f'
            for line in iter(stream.readline, ''):
                f(line)
            stream.close()
        # only create pipes when the caller supplied line-callbacks
        stdout = out if not callable(out) else subprocess.PIPE
        stderr = err if not callable(err) else subprocess.PIPE
        p = subprocess.Popen(args, cwd=cwd, stdout=stdout, stderr=stderr, preexec_fn=preexec_fn, creationflags=creationflags, env=env)
        sub = _addSubprocess(p, args)
        joiners = []
        if callable(out):
            t = Thread(target=redirect, args=(p.stdout, out))
            # Don't make the reader thread a daemon otherwise output can be droppped
            t.start()
            joiners.append(t)
        if callable(err):
            t = Thread(target=redirect, args=(p.stderr, err))
            # Don't make the reader thread a daemon otherwise output can be droppped
            t.start()
            joiners.append(t)
        while any([t.is_alive() for t in joiners]):
            # Need to use timeout otherwise all signals (including CTRL-C) are blocked
            # see: http://bugs.python.org/issue1167930
            for t in joiners:
                t.join(10)
        if timeout is None or timeout == 0:
            retcode = waitOn(p)
        else:
            if get_os() == 'windows':
                abort('Use of timeout not (yet) supported on Windows')
            retcode = _waitWithTimeout(p, args, timeout)
    except OSError as e:
        log('Error executing \'' + ' '.join(args) + '\': ' + str(e))
        if _opts.verbose:
            raise e
        abort(e.errno)
    except KeyboardInterrupt:
        abort(1)
    finally:
        _removeSubprocess(sub)
        os.remove(subprocessCommandFile)
    if retcode and nonZeroIsFatal:
        if _opts.verbose:
            if _opts.very_verbose:
                raise subprocess.CalledProcessError(retcode, ' '.join(args))
            else:
                log('[exit code: ' + str(retcode) + ']')
        abort(retcode)
    return retcode
def exe_suffix(name):
    """
    Gets the platform specific suffix for an executable
    """
    return name + '.exe' if get_os() == 'windows' else name
def add_lib_prefix(name):
    """
    Adds the platform specific library prefix to a name
    """
    # renamed local variable: 'os' previously shadowed the os module
    platform = get_os()
    if platform == 'linux' or platform == 'solaris' or platform == 'darwin':
        return 'lib' + name
    return name
def add_lib_suffix(name):
    """
    Adds the platform specific library suffix to a name
    """
    # renamed local variable: 'os' previously shadowed the os module
    platform = get_os()
    if platform == 'windows':
        return name + '.dll'
    if platform == 'linux' or platform == 'solaris':
        return name + '.so'
    if platform == 'darwin':
        return name + '.dylib'
    return name
"""
Utility for filtering duplicate lines.
"""
class DuplicateSuppressingStream:
    """Wraps an output stream and drops lines that were already written.

    When 'restrictTo' is given, only lines containing one of its strings
    are candidates for suppression; otherwise every line is.
    """
    def __init__(self, restrictTo=None, out=sys.stdout):
        self.restrictTo = restrictTo
        self.seen = set()
        self.out = out
        self.currentFilteredLineCount = 0
        self.currentFilteredTime = None
    def isSuppressionCandidate(self, line):
        """Return True when 'line' may be suppressed if repeated."""
        if not self.restrictTo:
            return True
        return any(marker in line for marker in self.restrictTo)
    def write(self, line):
        """Forward 'line' to the underlying stream unless it is a repeat."""
        if self.isSuppressionCandidate(line) and line in self.seen:
            # suppress the repeat; once a minute report how many were dropped
            self.currentFilteredLineCount += 1
            now = time.time()
            if self.currentFilteredTime is None:
                self.currentFilteredTime = now
            elif now - self.currentFilteredTime > 1 * 60:
                self.out.write(" Filtered " + str(self.currentFilteredLineCount) + " repeated lines...\n")
                self.currentFilteredTime = now
            return
        self.seen.add(line)
        self.currentFilteredLineCount = 0
        self.out.write(line)
        self.currentFilteredTime = None
"""
A JavaCompliance simplifies comparing Java compliance values extracted from a JDK version string.
"""
class JavaCompliance:
    """Comparable wrapper around the Java compliance level extracted from
    a JDK version string such as '1.8.0_25' (compliance value 8)."""
    def __init__(self, ver):
        match = re.match(r'1\.(\d+).*', ver)
        assert match is not None, 'not a recognized version string: ' + ver
        self.value = int(match.group(1))
    def __str__(self):
        return '1.' + str(self.value)
    def __cmp__(self, other):
        # plain version strings may be compared against directly
        if isinstance(other, types.StringType):
            other = JavaCompliance(other)
        return cmp(self.value, other.value)
    def __hash__(self):
        return hash(self.value)
"""
A version specification as defined in JSR-56
"""
class VersionSpec:
    """A JSR-56 version string (e.g. '1.8.0_25'), comparable component-wise."""
    def __init__(self, versionString):
        validChar = r'[\x21-\x25\x27-\x29\x2c\x2f-\x5e\x60-\x7f]'
        separator = r'[.\-_]'
        pattern = "^" + validChar + '+(' + separator + validChar + '+)*$'
        assert re.match(pattern, versionString) is not None, 'not a recognized version string: ' + versionString
        self.versionString = versionString
        # numeric components compare numerically, everything else lexically
        self.parts = [int(token) if token.isdigit() else token for token in re.split(separator, versionString)]
    def __str__(self):
        return self.versionString
    def __cmp__(self, other):
        return cmp(self.parts, other.parts)
def _filter_non_existant_paths(paths):
    """Drop entries that do not exist on disk from an os.pathsep-separated path list."""
    existing = [p for p in _separatedCygpathW2U(paths).split(os.pathsep) if exists(p)]
    return os.pathsep.join(existing)
"""
A JavaConfig object encapsulates info on how Java commands are run.
"""
class JavaConfig:
    """Encapsulates the version, tool paths and default VM arguments of a JDK."""
    def __init__(self, java_home, java_dbg_port):
        # java_home: root directory of the JDK installation.
        # java_dbg_port: when not None, JDWP debugging arguments for this
        # port are appended to the default VM arguments.
        self.jdk = java_home
        self.debug_port = java_dbg_port
        self.jar = exe_suffix(join(self.jdk, 'bin', 'jar'))
        self.java = exe_suffix(join(self.jdk, 'bin', 'java'))
        self.javac = exe_suffix(join(self.jdk, 'bin', 'javac'))
        self.javap = exe_suffix(join(self.jdk, 'bin', 'javap'))
        self.javadoc = exe_suffix(join(self.jdk, 'bin', 'javadoc'))
        self.pack200 = exe_suffix(join(self.jdk, 'bin', 'pack200'))
        self.toolsjar = join(self.jdk, 'lib', 'tools.jar')
        # class path info is computed lazily by _init_classpaths()
        self._bootclasspath = None
        self._extdirs = None
        self._endorseddirs = None
        if not exists(self.java):
            abort('Java launcher does not exist: ' + self.java)
        def delAtAndSplit(s):
            # strip a leading '@' then split like a shell command line
            return shlex.split(s.lstrip('@'))
        self.java_args = delAtAndSplit(_opts.java_args) if _opts.java_args else []
        self.java_args_pfx = sum(map(delAtAndSplit, _opts.java_args_pfx), [])
        self.java_args_sfx = sum(map(delAtAndSplit, _opts.java_args_sfx), [])
        # Prepend the -d64 VM option only if the java command supports it
        try:
            output = subprocess.check_output([self.java, '-d64', '-version'], stderr=subprocess.STDOUT)
            self.java_args = ['-d64'] + self.java_args
        except subprocess.CalledProcessError as e:
            # -d64 unsupported: fall back to plain '-version' to get output
            try:
                output = subprocess.check_output([self.java, '-version'], stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError as e:
                print e.output
                abort(e.returncode)
        def _checkOutput(out):
            return 'version' in out
        # hotspot can print a warning, e.g. if there's a .hotspot_compiler file in the cwd
        output = output.split('\n')
        version = None
        for o in output:
            if _checkOutput(o):
                assert version is None
                version = o
        # e.g. 'java version "1.7.0_55"' -> VersionSpec('1.7.0_55')
        self.version = VersionSpec(version.split()[2].strip('"'))
        self.javaCompliance = JavaCompliance(self.version.versionString)
        if self.debug_port is not None:
            self.java_args += ['-Xdebug', '-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=' + str(self.debug_port)]
    def _init_classpaths(self):
        # Compile (once per JDK version) and run a small Java helper that
        # dumps the JDK's boot class path, extension dirs and endorsed dirs
        # as a '|'-separated string ('null' meaning unavailable).
        myDir = dirname(__file__)
        outDir = join(dirname(__file__), '.jdk' + str(self.version))
        if not exists(outDir):
            os.makedirs(outDir)
        javaSource = join(myDir, 'ClasspathDump.java')
        if not exists(join(outDir, 'ClasspathDump.class')):
            subprocess.check_call([self.javac, '-d', _cygpathU2W(outDir), _cygpathU2W(javaSource)], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        self._bootclasspath, self._extdirs, self._endorseddirs = [x if x != 'null' else None for x in subprocess.check_output([self.java, '-cp', _cygpathU2W(outDir), 'ClasspathDump'], stderr=subprocess.PIPE).split('|')]
        if not self._bootclasspath or not self._extdirs or not self._endorseddirs:
            warn("Could not find all classpaths: boot='" + str(self._bootclasspath) + "' extdirs='" + str(self._extdirs) + "' endorseddirs='" + str(self._endorseddirs) + "'")
        # keep only entries that actually exist on disk
        self._bootclasspath = _filter_non_existant_paths(self._bootclasspath)
        self._extdirs = _filter_non_existant_paths(self._extdirs)
        self._endorseddirs = _filter_non_existant_paths(self._endorseddirs)
    def __repr__(self):
        return "JavaConfig(" + str(self.jdk) + ", " + str(self.debug_port) + ")"
    def __str__(self):
        return "Java " + str(self.version) + " (" + str(self.javaCompliance) + ") from " + str(self.jdk)
    def __hash__(self):
        return hash(self.jdk)
    def __cmp__(self, other):
        # order by compliance, then by full version, then by install path
        if isinstance(other, JavaConfig):
            compilanceCmp = cmp(self.javaCompliance, other.javaCompliance)
            if compilanceCmp:
                return compilanceCmp
            versionCmp = cmp(self.version, other.version)
            if versionCmp:
                return versionCmp
            return cmp(self.jdk, other.jdk)
        raise TypeError()
    def format_cmd(self, args, addDefaultArgs):
        # full command line: launcher plus (optionally) the default VM args
        if addDefaultArgs:
            return [self.java] + self.processArgs(args)
        else:
            return [self.java] + args
    def processArgs(self, args):
        # prefix/default/suffix VM arguments surround the caller-supplied ones
        return self.java_args_pfx + self.java_args + self.java_args_sfx + args
    def bootclasspath(self):
        if self._bootclasspath is None:
            self._init_classpaths()
        return _separatedCygpathU2W(self._bootclasspath)
    def extdirs(self):
        if self._extdirs is None:
            self._init_classpaths()
        return _separatedCygpathU2W(self._extdirs)
    def endorseddirs(self):
        if self._endorseddirs is None:
            self._init_classpaths()
        return _separatedCygpathU2W(self._endorseddirs)
    def containsJar(self, jar):
        # True when 'jar' (a base file name) is on this JDK's boot class
        # path or in one of its extension/endorsed directories.
        if self._bootclasspath is None:
            self._init_classpaths()
        for e in self._bootclasspath.split(os.pathsep):
            if basename(e) == jar:
                return True
        for d in self._extdirs.split(os.pathsep):
            if len(d) and jar in os.listdir(d):
                return True
        for d in self._endorseddirs.split(os.pathsep):
            if len(d) and jar in os.listdir(d):
                return True
        return False
def check_get_env(key):
    """Return the value of environment variable 'key', aborting with a
    useful message when it is not set."""
    value = get_env(key)
    if value is not None:
        return value
    abort('Required environment variable ' + key + ' must be set')
def get_env(key, default=None):
    """Return the value of environment variable 'key', or 'default' when unset."""
    return os.environ.get(key, default)
def logv(msg=None):
    """Write 'msg' to the console only when verbose mode (-v) is enabled."""
    if not _opts.verbose:
        return
    log(msg)
def log(msg=None):
    """
    Write a message to the console.
    All script output goes through this method thus allowing a subclass
    to redirect it.
    """
    # A None message prints just a newline (mirrors 'print' with no args).
    if msg is None:
        print
    else:
        print msg
def expand_project_in_class_path_arg(cpArg):
    """Expand '@project' entries in a class path argument into the named
    project's full class path; plain entries pass through unchanged."""
    expanded = []
    for entry in cpArg.split(os.pathsep):
        if entry.startswith('@'):
            expanded.extend(classpath(entry[1:]).split(os.pathsep))
        else:
            expanded.append(entry)
    return os.pathsep.join(expanded)
def expand_project_in_args(args):
    """Expand '@project' entries in the value following the first '-cp' or
    '-classpath' flag in 'args' (the list is mutated in place)."""
    for i, arg in enumerate(args):
        if arg == '-cp' or arg == '-classpath':
            if i + 1 < len(args):
                args[i + 1] = expand_project_in_class_path_arg(args[i + 1])
            # only the first class path flag is processed
            return
def gmake_cmd():
    """Return the name of a GNU make executable found on the path.

    Tries 'make', 'gmake' and 'gnumake' in turn; aborts when none of them
    identifies itself as GNU make.
    """
    for a in ['make', 'gmake', 'gnumake']:
        try:
            output = subprocess.check_output([a, '--version'])
            if 'GNU' in output:
                return a
        except (OSError, subprocess.CalledProcessError):
            # executable missing or exited non-zero - try the next candidate.
            # (The previous bare 'except' also swallowed KeyboardInterrupt.)
            pass
    abort('Could not find a GNU make executable on the current path.')
def expandvars_in_property(value):
    """Return 'value' with environment variables expanded, aborting when
    any '$'/'%' variable reference remains unresolved."""
    expanded = expandvars(value)
    unresolved = '$' in expanded or '%' in expanded
    if unresolved:
        abort('Property contains an undefined environment variable: ' + value)
    return expanded
def _send_sigquit():
    """Send SIGQUIT to every registered child java process (so the VMs dump
    their thread stacks), pausing briefly after each one."""
    for p, args in _currentSubprocesses:
        # only java launchers are signalled; identify them by executable name
        isJava = bool(args) and args[0].split(os.sep)[-1] == "java"
        if p is not None and isJava:
            if get_os() == 'windows':
                log("mx: implement me! want to send SIGQUIT to my child process")
            else:
                _kill_process_group(p.pid, sig=signal.SIGQUIT)
            time.sleep(0.1)
def abort(codeOrMessage):
    """
    Aborts the program with a SystemExit exception.
    If 'codeOrMessage' is a plain integer, it specifies the system exit status;
    if it is None, the exit status is zero; if it has another type (such as a string),
    the object's value is printed and the exit status is one.
    """
    # optionally let java children dump their stacks before being killed
    if _opts and _opts.killwithsigquit:
        _send_sigquit()
    def is_alive(p):
        # 'p' is either a subprocess.Popen or a multiprocessing.Process
        if isinstance(p, subprocess.Popen):
            return p.poll() is None
        assert is_jython() or isinstance(p, multiprocessing.Process), p
        return p.is_alive()
    # terminate all still-running registered child processes
    for p, args in _currentSubprocesses:
        if is_alive(p):
            try:
                if get_os() == 'windows':
                    p.terminate()
                else:
                    _kill_process_group(p.pid, signal.SIGKILL)
            except BaseException as e:
                # only report the failure if the process survived the attempt
                if is_alive(p):
                    log('error while killing subprocess {0} "{1}": {2}'.format(p.pid, ' '.join(args), e))
    if _opts and _opts.verbose:
        import traceback
        traceback.print_stack()
    raise SystemExit(codeOrMessage)
def download(path, urls, verbose=False):
    """
    Attempts to downloads content for each URL in a list, stopping after the first successful download.
    If the content cannot be retrieved from any URL, the program is aborted. The downloaded content
    is written to the file indicated by 'path'.

    When 'verbose' is True (or stderr is a terminal), the Java download
    helper reports progress.
    """
    d = dirname(path)
    if d != '' and not exists(d):
        os.makedirs(d)
    assert not path.endswith(os.sep)
    # compile the Java download helper on demand
    myDir = dirname(__file__)
    javaSource = join(myDir, 'URLConnectionDownload.java')
    javaClass = join(myDir, 'URLConnectionDownload.class')
    if not exists(javaClass) or getmtime(javaClass) < getmtime(javaSource):
        subprocess.check_call([java().javac, '-d', _cygpathU2W(myDir), _cygpathU2W(javaSource)])
    # BUG FIX: the 'verbose' parameter used to be clobbered by 'verbose = []'
    # and thus silently ignored; it is now honored alongside the tty check.
    verboseArgs = []
    if verbose or sys.stderr.isatty():
        verboseArgs.append("-v")
    if run([java().java, '-cp', _cygpathU2W(myDir), 'URLConnectionDownload', _cygpathU2W(path)] + verboseArgs + urls, nonZeroIsFatal=False) == 0:
        return
    abort('Could not download to ' + path + ' from any of the following URLs:\n\n ' +
          '\n '.join(urls) + '\n\nPlease use a web browser to do the download manually')
def update_file(path, content):
    """
    Updates a file with some given content if the content differs from what's in
    the file already. The return value indicates if the file was updated.
    """
    existed = exists(path)
    try:
        if existed:
            with open(path, 'rb') as f:
                if f.read() == content:
                    # nothing to do - file already has the desired content
                    return False
            if _opts.backup_modified:
                shutil.move(path, path + '.orig')
        with open(path, 'wb') as f:
            f.write(content)
        log(('modified ' if existed else 'created ') + path)
        return True
    except IOError as e:
        abort('Error while writing to ' + path + ': ' + str(e))
# Builtin commands
def _defaultEcjPath():
return get_env('JDT', join(_primary_suite.mxDir, 'ecj.jar'))
class JavaCompileTask:
    """A unit of Java compilation work for one project, executable serially
    or as a separate process during a parallel build."""
    def __init__(self, args, proj, reason, javafilelist, jdk, outputDir, jdtJar, deps):
        # reason: human-readable explanation of why the project is rebuilt
        # deps: JavaCompileTasks that must complete before this one runs
        self.proj = proj
        self.reason = reason
        self.javafilelist = javafilelist
        self.deps = deps
        self.jdk = jdk
        self.outputDir = outputDir
        self.done = False
        self.jdtJar = jdtJar
        self.args = args
    def __str__(self):
        return self.proj.name
    def logCompilation(self, compiler):
        # announce which compiler is used and why the project is rebuilt
        log('Compiling Java sources for {0} with {1}... [{2}]'.format(self.proj.name, compiler, self.reason))
    def execute(self):
        """Compile the project's sources with javac, error-prone or the
        Eclipse batch compiler (JDT), depending on configuration."""
        # The source file list is passed via an @-argfile to avoid
        # exceeding command line length limits.
        argfileName = join(self.proj.dir, 'javafilelist.txt')
        argfile = open(argfileName, 'wb')
        argfile.write('\n'.join(map(_cygpathU2W, self.javafilelist)))
        argfile.close()
        processorArgs = []
        processorPath = self.proj.annotation_processors_path()
        if processorPath:
            # recreate the source-gen dir so stale generated files disappear
            genDir = self.proj.source_gen_dir()
            if exists(genDir):
                shutil.rmtree(genDir)
            os.mkdir(genDir)
            processorArgs += ['-processorpath', _separatedCygpathU2W(join(processorPath)), '-s', _cygpathU2W(genDir)]
        else:
            processorArgs += ['-proc:none']
        args = self.args
        jdk = self.jdk
        outputDir = _cygpathU2W(self.outputDir)
        compliance = str(jdk.javaCompliance)
        cp = _separatedCygpathU2W(classpath(self.proj.name, includeSelf=True))
        toBeDeleted = [argfileName]
        try:
            if not self.jdtJar:
                mainJava = java()
                if not args.error_prone:
                    # plain javac (or a user-supplied alternative launcher)
                    javac = args.alt_javac if args.alt_javac else mainJava.javac
                    self.logCompilation('javac' if not args.alt_javac else args.alt_javac)
                    javacCmd = [javac, '-g', '-J-Xmx1g', '-source', compliance, '-target', compliance, '-classpath', cp, '-d', outputDir, '-bootclasspath', jdk.bootclasspath(), '-endorseddirs', jdk.endorseddirs(), '-extdirs', jdk.extdirs()]
                    if jdk.debug_port is not None:
                        javacCmd += ['-J-Xdebug', '-J-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=' + str(jdk.debug_port)]
                    javacCmd += processorArgs
                    javacCmd += ['@' + _cygpathU2W(argfile.name)]
                    if not args.warnAPI:
                        javacCmd.append('-XDignore.symbol.file')
                    run(javacCmd)
                else:
                    # javac wrapped by the error-prone static checker
                    self.logCompilation('javac (with error-prone)')
                    javaArgs = ['-Xmx1g']
                    javacArgs = ['-g', '-source', compliance, '-target', compliance, '-classpath', cp, '-d', outputDir, '-bootclasspath', jdk.bootclasspath(), '-endorseddirs', jdk.endorseddirs(), '-extdirs', jdk.extdirs()]
                    javacArgs += processorArgs
                    javacArgs += ['@' + argfile.name]
                    if not args.warnAPI:
                        javacArgs.append('-XDignore.symbol.file')
                    run_java(javaArgs + ['-cp', os.pathsep.join([mainJava.toolsjar, args.error_prone]), 'com.google.errorprone.ErrorProneCompiler'] + javacArgs)
            else:
                # Eclipse batch compiler (JDT)
                self.logCompilation('JDT')
                jdtVmArgs = ['-Xmx1g', '-jar', _cygpathU2W(self.jdtJar)]
                jdtArgs = ['-' + compliance,
                         '-cp', cp, '-g', '-enableJavadoc',
                         '-d', outputDir,
                         '-bootclasspath', jdk.bootclasspath(),
                         '-endorseddirs', jdk.endorseddirs(),
                         '-extdirs', jdk.extdirs()]
                jdtArgs += processorArgs
                jdtProperties = join(self.proj.dir, '.settings', 'org.eclipse.jdt.core.prefs')
                rootJdtProperties = join(self.proj.suite.mxDir, 'eclipse-settings', 'org.eclipse.jdt.core.prefs')
                if not exists(jdtProperties) or os.path.getmtime(jdtProperties) < os.path.getmtime(rootJdtProperties):
                    # Try to fix a missing properties file by running eclipseinit
                    _eclipseinit_project(self.proj)
                if not exists(jdtProperties):
                    log('JDT properties file {0} not found'.format(jdtProperties))
                else:
                    # possibly rewrite the severity settings before compiling
                    with open(jdtProperties) as fp:
                        origContent = fp.read()
                        content = origContent
                        if self.proj.uses_annotation_processor_library():
                            # unfortunately, the command line compiler doesn't let us ignore warnings for generated files only
                            content = content.replace('=warning', '=ignore')
                        elif args.jdt_warning_as_error:
                            content = content.replace('=warning', '=error')
                        if not args.jdt_show_task_tags:
                            content = content + '\norg.eclipse.jdt.core.compiler.problem.tasks=ignore'
                    if origContent != content:
                        jdtPropertiesTmp = jdtProperties + '.tmp'
                        with open(jdtPropertiesTmp, 'w') as fp:
                            fp.write(content)
                        toBeDeleted.append(jdtPropertiesTmp)
                        jdtArgs += ['-properties', _cygpathU2W(jdtPropertiesTmp)]
                    else:
                        jdtArgs += ['-properties', _cygpathU2W(jdtProperties)]
                jdtArgs.append('@' + _cygpathU2W(argfile.name))
                run_java(jdtVmArgs + jdtArgs)
            # Create annotation processor jar for a project that defines annotation processors
            if self.proj.definedAnnotationProcessorsDist:
                self.proj.definedAnnotationProcessorsDist.make_archive()
        finally:
            # Do not clean up temp files if verbose as there's
            # a good chance the user wants to copy and paste the
            # Java compiler command directly
            if not _opts.verbose:
                for n in toBeDeleted:
                    os.remove(n)
            self.done = True
def build(args, parser=None):
    """compile the Java and C sources, linking the latter
    Compile all the Java source code using the appropriate compilers
    and linkers for the various source code types."""
    suppliedParser = parser is not None
    if not suppliedParser:
        parser = ArgumentParser(prog='mx build')
    # NOTE(review): the next line is redundant - 'parser' can no longer be
    # None after the check above; left unchanged to keep behavior identical.
    parser = parser if parser is not None else ArgumentParser(prog='mx build')
    parser.add_argument('-f', action='store_true', dest='force', help='force build (disables timestamp checking)')
    parser.add_argument('-c', action='store_true', dest='clean', help='removes existing build output')
    parser.add_argument('-p', action='store_true', dest='parallelize', help='parallelizes Java compilation if possible')
    parser.add_argument('--source', dest='compliance', help='Java compliance level for projects without an explicit one')
    parser.add_argument('--Wapi', action='store_true', dest='warnAPI', help='show warnings about using internal APIs')
    parser.add_argument('--projects', action='store', help='comma separated projects to build (omit to build all projects)')
    parser.add_argument('--only', action='store', help='comma separated projects to build, without checking their dependencies (omit to build all projects)')
    parser.add_argument('--no-java', action='store_false', dest='java', help='do not build Java projects')
    parser.add_argument('--no-native', action='store_false', dest='native', help='do not build native projects')
    parser.add_argument('--jdt-warning-as-error', action='store_true', help='convert all Eclipse batch compiler warnings to errors')
    parser.add_argument('--jdt-show-task-tags', action='store_true', help='show task tags as Eclipse batch compiler warnings')
    parser.add_argument('--alt-javac', dest='alt_javac', help='path to alternative javac executable', metavar='<path>')
    compilerSelect = parser.add_mutually_exclusive_group()
    compilerSelect.add_argument('--error-prone', dest='error_prone', help='path to error-prone.jar', metavar='<path>')
    compilerSelect.add_argument('--jdt', help='path to ecj.jar, the Eclipse batch compiler', default=_defaultEcjPath(), metavar='<path>')
    compilerSelect.add_argument('--force-javac', action='store_true', dest='javac', help='use javac whether ecj.jar is found or not')
    if suppliedParser:
        parser.add_argument('remainder', nargs=REMAINDER, metavar='...')
    args = parser.parse_args(args)
    if is_jython():
        # the multiprocessing module is unavailable under Jython
        if args.parallelize:
            logv('[multiprocessing not available in jython]')
            args.parallelize = False
    # resolve which compiler to use; a missing default ecj.jar silently
    # falls back to javac
    jdtJar = None
    if not args.javac and args.jdt is not None:
        if not args.jdt.endswith('.jar'):
            abort('Path for Eclipse batch compiler does not look like a jar file: ' + args.jdt)
        jdtJar = args.jdt
        if not exists(jdtJar):
            if os.path.abspath(jdtJar) == os.path.abspath(_defaultEcjPath()) and get_env('JDT', None) is None:
                # Silently ignore JDT if default location is used but does not exist
                jdtJar = None
            else:
                abort('Eclipse batch compiler jar does not exist: ' + args.jdt)
    # determine the set of projects to build (in dependency order)
    if args.only is not None:
        # N.B. This build will not include dependencies including annotation processor dependencies
        sortedProjects = [project(name) for name in args.only.split(',')]
    else:
        if args.projects is not None:
            projectNames = args.projects.split(',')
        else:
            projectNames = None
        projects = _projects_opt_limit_to_suites(projects_from_names(projectNames))
        # N.B. Limiting to a suite only affects the starting set of projects. Dependencies in other suites will still be compiled
        sortedProjects = sorted_project_deps(projects, includeAnnotationProcessors=True)
    if args.java:
        ideinit([], refreshOnly=True, buildProcessorJars=False)
    tasks = {}
    updatedAnnotationProcessorDists = set()
    for p in sortedProjects:
        if p.native:
            # native projects are delegated to GNU make
            if args.native:
                log('Calling GNU make {0}...'.format(p.dir))
                if args.clean:
                    run([gmake_cmd(), 'clean'], cwd=p.dir)
                run([gmake_cmd()], cwd=p.dir)
            continue
        else:
            if not args.java:
                continue
            if exists(join(p.dir, 'plugin.xml')):  # eclipse plugin project
                continue
        # skip building this Java project if its Java compliance level is "higher" than the configured JDK
        requiredCompliance = p.javaCompliance if p.javaCompliance else JavaCompliance(args.compliance) if args.compliance else None
        jdk = java(requiredCompliance)
        assert jdk
        outputDir = p.output_dir()
        sourceDirs = p.source_dirs()
        # work out whether (and why) this project needs rebuilding
        buildReason = None
        if args.force:
            buildReason = 'forced build'
        elif args.clean:
            buildReason = 'clean'
        taskDeps = []
        for dep in p.all_deps([], includeLibs=False, includeAnnotationProcessors=True):
            taskDep = tasks.get(dep.name)
            if taskDep:
                if not buildReason:
                    buildReason = dep.name + ' rebuilt'
                taskDeps.append(taskDep)
        javafilelist = []
        nonjavafiletuples = []
        for sourceDir in sourceDirs:
            for root, _, files in os.walk(sourceDir):
                javafiles = [join(root, name) for name in files if name.endswith('.java') and name != 'package-info.java']
                javafilelist += javafiles
                nonjavafiletuples += [(sourceDir, [join(root, name) for name in files if not name.endswith('.java')])]
                if not buildReason:
                    # rebuild when any class file is missing or stale
                    for javafile in javafiles:
                        classfile = TimeStampFile(outputDir + javafile[len(sourceDir):-len('java')] + 'class')
                        if not classfile.exists() or classfile.isOlderThan(javafile):
                            buildReason = 'class file(s) out of date'
                            break
        apsOutOfDate = p.update_current_annotation_processors_file()
        if apsOutOfDate:
            buildReason = 'annotation processor(s) changed'
        if not buildReason:
            logv('[all class files for {0} are up to date - skipping]'.format(p.name))
            _handleNonJavaFiles(outputDir, p, False, nonjavafiletuples)
            continue
        _handleNonJavaFiles(outputDir, p, True, nonjavafiletuples)
        if len(javafilelist) == 0:
            logv('[no Java sources for {0} - skipping]'.format(p.name))
            continue
        javafilelist = sorted(javafilelist)
        task = JavaCompileTask(args, p, buildReason, javafilelist, jdk, outputDir, jdtJar, taskDeps)
        if p.definedAnnotationProcessorsDist:
            updatedAnnotationProcessorDists.add(p.definedAnnotationProcessorsDist)
        tasks[p.name] = task
        if args.parallelize:
            # Best to initialize class paths on main process
            jdk.bootclasspath()
            task.proc = None
        else:
            task.execute()
    if args.parallelize:
        # schedule the compile tasks across up to cpu_count() processes,
        # respecting inter-task dependencies
        def joinTasks(tasks):
            failed = []
            for t in tasks:
                t.proc.join()
                _removeSubprocess(t.sub)
                if t.proc.exitcode != 0:
                    failed.append(t)
            return failed
        def checkTasks(tasks):
            active = []
            for t in tasks:
                if t.proc.is_alive():
                    active.append(t)
                else:
                    if t.proc.exitcode != 0:
                        return ([], joinTasks(tasks))
            return (active, [])
        def remainingDepsDepth(task):
            # depth of the longest chain of unfinished dependencies
            if task._d is None:
                incompleteDeps = [d for d in task.deps if d.proc is None or d.proc.is_alive()]
                if len(incompleteDeps) == 0:
                    task._d = 0
                else:
                    task._d = max([remainingDepsDepth(t) for t in incompleteDeps]) + 1
            return task._d
        def compareTasks(t1, t2):
            # schedule shallow-dependency, cheap tasks first
            d = remainingDepsDepth(t1) - remainingDepsDepth(t2)
            if d == 0:
                t1Work = (1 + len(t1.proj.annotation_processors())) * len(t1.javafilelist)
                t2Work = (1 + len(t2.proj.annotation_processors())) * len(t2.javafilelist)
                d = t1Work - t2Work
            return d
        def sortWorklist(tasks):
            for t in tasks:
                t._d = None
            return sorted(tasks, compareTasks)
        cpus = cpu_count()
        worklist = sortWorklist(tasks.values())
        active = []
        failed = []
        while len(worklist) != 0:
            while True:
                active, failed = checkTasks(active)
                if len(failed) != 0:
                    assert not active, active
                    break
                if len(active) == cpus:
                    # Sleep for 1 second
                    time.sleep(1)
                else:
                    break
            if len(failed) != 0:
                break
            def executeTask(task):
                # Clear sub-process list cloned from parent process
                del _currentSubprocesses[:]
                task.execute()
            def depsDone(task):
                for d in task.deps:
                    if d.proc is None or d.proc.exitcode is None:
                        return False
                return True
            for task in worklist:
                if depsDone(task):
                    worklist.remove(task)
                    task.proc = multiprocessing.Process(target=executeTask, args=(task,))
                    task.proc.start()
                    active.append(task)
                    task.sub = _addSubprocess(task.proc, ['JavaCompileTask', str(task)])
                if len(active) == cpus:
                    break
            worklist = sortWorklist(worklist)
        failed += joinTasks(active)
        if len(failed):
            for t in failed:
                log('Compiling {0} failed'.format(t.proj.name))
            abort('{0} Java compilation tasks failed'.format(len(failed)))
    if args.java:
        # refresh all distribution archives except those already rebuilt
        # as annotation processor jars
        for dist in sorted_dists():
            if dist not in updatedAnnotationProcessorDists:
                archive(['@' + dist.name])
    if suppliedParser:
        return args
    return None
def _handleNonJavaFiles(outputDir, p, clean, nonjavafiletuples):
    """Copy the non-Java resources of project 'p' into 'outputDir' and
    assemble any Jasmin (.jasm) sources, optionally cleaning first.

    nonjavafiletuples is a list of (sourceDir, [file paths]) tuples.
    """
    if exists(outputDir):
        if clean:
            log('Cleaning {0}...'.format(outputDir))
            shutil.rmtree(outputDir)
            os.mkdir(outputDir)
    else:
        os.mkdir(outputDir)
    genDir = p.source_gen_dir()
    if genDir != '' and exists(genDir) and clean:
        log('Cleaning {0}...'.format(genDir))
        for f in os.listdir(genDir):
            shutil.rmtree(join(genDir, f))
    # Copy all non Java resources or assemble Jasmin files
    jasminAvailable = None
    for nonjavafiletuple in nonjavafiletuples:
        sourceDir = nonjavafiletuple[0]
        nonjavafilelist = nonjavafiletuple[1]
        for src in nonjavafilelist:
            if src.endswith('.jasm'):
                # the output class file name comes from the '.class' directive
                className = None
                with open(src) as f:
                    for line in f:
                        if line.startswith('.class '):
                            className = line.split()[-1]
                            break
                if className is not None:
                    jasminOutputDir = p.jasmin_output_dir()
                    classFile = join(jasminOutputDir, className.replace('/', os.sep) + '.class')
                    if exists(dirname(classFile)) and (not exists(classFile) or os.path.getmtime(classFile) < os.path.getmtime(src)):
                        if jasminAvailable is None:
                            # probe once for the jasmin assembler
                            try:
                                with open(os.devnull) as devnull:
                                    subprocess.call('jasmin', stdout=devnull, stderr=subprocess.STDOUT)
                                jasminAvailable = True
                            except OSError:
                                jasminAvailable = False
                        if jasminAvailable:
                            log('Assembling Jasmin file ' + src)
                            run(['jasmin', '-d', jasminOutputDir, src])
                        else:
                            log('The jasmin executable could not be found - skipping ' + src)
                            # touch the class file so the skip is not retried on every build
                            with file(classFile, 'a'):
                                os.utime(classFile, None)
                else:
                    # fixed typo: message previously read 'could not file'
                    log('could not find .class directive in Jasmin source: ' + src)
            else:
                # plain resource: copy when missing or older than the source
                dst = join(outputDir, src[len(sourceDir) + 1:])
                if not exists(dirname(dst)):
                    os.makedirs(dirname(dst))
                if exists(dirname(dst)) and (not exists(dst) or os.path.getmtime(dst) < os.path.getmtime(src)):
                    shutil.copyfile(src, dst)
def _chunk_files_for_command_line(files, limit=None, pathFunction=None):
"""
Returns a generator for splitting up a list of files into chunks such that the
size of the space separated file paths in a chunk is less than a given limit.
This is used to work around system command line length limits.
"""
chunkSize = 0
chunkStart = 0
if limit is None:
commandLinePrefixAllowance = 3000
if get_os() == 'windows':
# The CreateProcess function on Windows limits the length of a command line to
# 32,768 characters (http://msdn.microsoft.com/en-us/library/ms682425%28VS.85%29.aspx)
limit = 32768 - commandLinePrefixAllowance
else:
# Using just SC_ARG_MAX without extra downwards adjustment
# results in "[Errno 7] Argument list too long" on MacOS.
syslimit = os.sysconf('SC_ARG_MAX') - 20000
limit = syslimit - commandLinePrefixAllowance
for i in range(len(files)):
path = files[i] if pathFunction is None else pathFunction(files[i])
size = len(path) + 1
if chunkSize + size < limit:
chunkSize += size
else:
assert i > chunkStart
yield files[chunkStart:i]
chunkStart = i
chunkSize = 0
if chunkStart == 0:
assert chunkSize < limit
yield files
def eclipseformat(args):
    """run the Eclipse Code Formatter on the Java sources
    The exit code 1 denotes that at least one file was modified."""
    parser = ArgumentParser(prog='mx eclipseformat')
    parser.add_argument('-e', '--eclipse-exe', help='location of the Eclipse executable')
    parser.add_argument('-C', '--no-backup', action='store_false', dest='backup', help='do not save backup of modified files')
    parser.add_argument('--projects', action='store', help='comma separated projects to process (omit to process all projects)')
    args = parser.parse_args(args)
    if args.eclipse_exe is None:
        # fall back to the ECLIPSE_EXE environment variable when -e is not given
        args.eclipse_exe = os.environ.get('ECLIPSE_EXE')
    if args.eclipse_exe is None:
        abort('Could not find Eclipse executable. Use -e option or ensure ECLIPSE_EXE environment variable is set.')
    # Maybe an Eclipse installation dir was specified - look for the executable in it
    if isdir(args.eclipse_exe):
        args.eclipse_exe = join(args.eclipse_exe, exe_suffix('eclipse'))
        warn("The eclipse-exe was a directory, now using " + args.eclipse_exe)
    if not os.path.isfile(args.eclipse_exe):
        abort('File does not exist: ' + args.eclipse_exe)
    if not os.access(args.eclipse_exe, os.X_OK):
        abort('Not an executable file: ' + args.eclipse_exe)
    # regenerate the Eclipse project configurations (the formatter reads each
    # project's .settings preferences below)
    eclipseinit([], buildProcessorJars=False)
    # build list of projects to be processed
    projects = sorted_deps()
    if args.projects is not None:
        projects = [project(name) for name in args.projects.split(',')]
    class Batch:
        # A group of Java sources sharing the same JDT core formatter
        # preferences, Java compliance level and trailing-whitespace policy.
        def __init__(self, settingsDir, javaCompliance):
            self.path = join(settingsDir, 'org.eclipse.jdt.core.prefs')
            self.javaCompliance = javaCompliance
            self.javafiles = list()
            with open(join(settingsDir, 'org.eclipse.jdt.ui.prefs')) as fp:
                jdtUiPrefs = fp.read()
            self.removeTrailingWhitespace = 'sp_cleanup.remove_trailing_whitespaces_all=true' in jdtUiPrefs
            if self.removeTrailingWhitespace:
                # sanity check that the related clean-up settings are consistent
                assert 'sp_cleanup.remove_trailing_whitespaces=true' in jdtUiPrefs and 'sp_cleanup.remove_trailing_whitespaces_ignore_empty=false' in jdtUiPrefs
        def settings(self):
            # key used to coalesce batches: formatter prefs + JVM + whitespace policy
            with open(self.path) as fp:
                return fp.read() + java(self.javaCompliance).java + str(self.removeTrailingWhitespace)
    class FileInfo:
        # Snapshot of a source file's content and timestamps, used to detect
        # (and report) changes made by the formatter.
        def __init__(self, path):
            self.path = path
            with open(path) as fp:
                self.content = fp.read()
            self.times = (os.path.getatime(path), os.path.getmtime(path))
        def update(self, removeTrailingWhitespace):
            # Returns True if the on-disk file now differs from the snapshot;
            # otherwise restores the original access/modification times.
            with open(self.path) as fp:
                content = fp.read()
            if self.content != content:
                # Only apply *after* formatting to match the order in which the IDE does it
                if removeTrailingWhitespace:
                    content, n = re.subn(r'[ \t]+$', '', content, flags=re.MULTILINE)
                    if n != 0 and self.content == content:
                        # undo on-disk changes made by the Eclipse formatter
                        with open(self.path, 'w') as fp:
                            fp.write(content)
                if self.content != content:
                    self.diff = difflib.unified_diff(self.content.splitlines(1), content.splitlines(1))
                    self.content = content
                    return True
            # reset access and modification time of file
            os.utime(self.path, self.times)
    modified = list()
    batches = dict() # all sources with the same formatting settings are formatted together
    for p in projects:
        if p.native:
            continue
        sourceDirs = p.source_dirs()
        batch = Batch(join(p.dir, '.settings'), p.javaCompliance)
        if not exists(batch.path):
            if _opts.verbose:
                log('[no Eclipse Code Formatter preferences at {0} - skipping]'.format(batch.path))
            continue
        for sourceDir in sourceDirs:
            for root, _, files in os.walk(sourceDir):
                for f in [join(root, name) for name in files if name.endswith('.java')]:
                    batch.javafiles.append(FileInfo(f))
        if len(batch.javafiles) == 0:
            logv('[no Java sources in {0} - skipping]'.format(p.name))
            continue
        # merge this batch into an existing one with identical settings, if any
        res = batches.setdefault(batch.settings(), batch)
        if res is not batch:
            res.javafiles = res.javafiles + batch.javafiles
    log("we have: " + str(len(batches)) + " batches")
    for batch in batches.itervalues():
        # command lines are length-limited, so run the formatter in chunks
        for chunk in _chunk_files_for_command_line(batch.javafiles, pathFunction=lambda f: f.path):
            run([args.eclipse_exe,
                '-nosplash',
                '-application',
                'org.eclipse.jdt.core.JavaCodeFormatter',
                '-vm', java(batch.javaCompliance).java,
                '-config', batch.path]
                + [f.path for f in chunk])
            for fi in chunk:
                if fi.update(batch.removeTrailingWhitespace):
                    modified.append(fi)
    log('{0} files were modified'.format(len(modified)))
    if len(modified) != 0:
        arcbase = _primary_suite.dir
        if args.backup:
            backup = os.path.abspath('eclipseformat.backup.zip')
            zf = zipfile.ZipFile(backup, 'w', zipfile.ZIP_DEFLATED)
        for fi in modified:
            name = os.path.relpath(fi.path, arcbase)
            log(' - {0}'.format(name))
            log('Changes:')
            log(''.join(fi.diff))
            if args.backup:
                # archive the *formatted* content (fi.content was updated above)
                arcname = name.replace(os.sep, '/')
                zf.writestr(arcname, fi.content)
        if args.backup:
            zf.close()
            log('Wrote backup of {0} modified files to {1}'.format(len(modified), backup))
        return 1
    return 0
def processorjars():
    """build the annotation processor jars of every loaded suite"""
    for current in suites(True):
        _processorjars_suite(current)
def _processorjars_suite(s):
    """build the annotation processor jars defined by suite 's'.
    Returns the paths of the resulting distribution archives (an empty list
    if the suite defines no annotation processors)."""
    candidates = [p for p in s.projects if p.definedAnnotationProcessors is not None]
    if not candidates:
        return []
    build(['--jdt-warning-as-error', '--projects', ",".join([p.name for p in candidates])])
    return [p.definedAnnotationProcessorsDist.path for p in s.projects if p.definedAnnotationProcessorsDist is not None]
def pylint(args):
    """run pylint (if available) over Python source files (found by 'hg locate' or by tree walk with -walk)"""
    parser = ArgumentParser(prog='mx pylint')
    parser.add_argument('--walk', action='store_true', help='use tree walk find .py files')
    args = parser.parse_args(args)
    rcfile = join(dirname(__file__), '.pylintrc')
    if not exists(rcfile):
        log('pylint configuration file does not exist: ' + rcfile)
        return
    try:
        # only pylint 1.1.x is supported - its options/output are what we expect below
        output = subprocess.check_output(['pylint', '--version'], stderr=subprocess.STDOUT)
        m = re.match(r'.*pylint (\d+)\.(\d+)\.(\d+).*', output, re.DOTALL)
        if not m:
            log('could not determine pylint version from ' + output)
            return
        major, minor, micro = (int(m.group(1)), int(m.group(2)), int(m.group(3)))
        if major != 1 or minor != 1:
            log('require pylint version = 1.1.x (got {0}.{1}.{2})'.format(major, minor, micro))
            return
    except BaseException:
        # a missing or broken pylint executable is treated as "not available"
        log('pylint is not available')
        return
    def findfiles_by_walk():
        # recursively collect .py files, pruning build output ('bin') and
        # downloaded libraries ('lib')
        result = []
        for suite in suites(True):
            for root, dirs, files in os.walk(suite.dir):
                for f in files:
                    if f.endswith('.py'):
                        pyfile = join(root, f)
                        result.append(pyfile)
                if 'bin' in dirs:
                    dirs.remove('bin')
                if 'lib' in dirs:
                    # avoids downloaded .py files
                    dirs.remove('lib')
        return result
    def findfiles_by_hg():
        # collect .py files that are under Mercurial version control
        result = []
        for suite in suites(True):
            versioned = subprocess.check_output(['hg', 'locate', '-f'], stderr=subprocess.STDOUT, cwd=suite.dir).split(os.linesep)
            for f in versioned:
                if f.endswith('.py') and exists(f):
                    result.append(f)
        return result
    # Perhaps we should just look in suite.mxDir directories for .py files?
    if args.walk:
        pyfiles = findfiles_by_walk()
    else:
        pyfiles = findfiles_by_hg()
    env = os.environ.copy()
    # make mx itself and each suite's mx dir importable for pylint
    pythonpath = dirname(__file__)
    for suite in suites(True):
        pythonpath = os.pathsep.join([pythonpath, suite.mxDir])
    env['PYTHONPATH'] = pythonpath
    for pyfile in pyfiles:
        log('Running pylint on ' + pyfile + '...')
        run(['pylint', '--reports=n', '--rcfile=' + rcfile, pyfile], env=env)
"""
Utility for creating and updating a zip file atomically.
"""
class Archiver:
    """Context manager that creates or updates the zip file at 'path' atomically.

    The archive is first written to a temporary file in the same directory and
    only moved over 'path' when the 'with' block completes without raising an
    exception; if the block raises, the partial archive is discarded and 'path'
    is left untouched. If 'path' is None, no archive is created and self.zf
    is None.
    """
    def __init__(self, path):
        self.path = path
    def __enter__(self):
        if self.path:
            if not isdir(dirname(self.path)):
                os.makedirs(dirname(self.path))
            # create the temp file in the target directory so the final move is a rename
            fd, tmp = tempfile.mkstemp(suffix='', prefix=basename(self.path) + '.', dir=dirname(self.path))
            self.tmpFd = fd
            self.tmpPath = tmp
            self.zf = zipfile.ZipFile(tmp, 'w')
        else:
            self.tmpFd = None
            self.tmpPath = None
            self.zf = None
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        if self.zf:
            self.zf.close()
            os.close(self.tmpFd)
            if exc_type is None:
                # Correct the permissions on the temporary file which is created with restrictive permissions
                os.chmod(self.tmpPath, 0o666 & ~currentUmask)
                # Atomic on Unix
                shutil.move(self.tmpPath, self.path)
            else:
                # an error occurred while populating the archive - do not
                # replace the target with a partial archive
                os.remove(self.tmpPath)
def _archive(args):
    """command-table wrapper for 'archive' that discards its result and
    reports success via a zero exit code"""
    archive(args)
    return 0
def archive(args):
    """create jar files for projects and distributions"""
    parser = ArgumentParser(prog='mx archive')
    parser.add_argument('names', nargs=REMAINDER, metavar='[<project>|@<distribution>]...')
    parsed = parser.parse_args(args)
    archives = []
    for name in parsed.names:
        if name.startswith('@'):
            # a leading '@' selects a distribution rather than a project
            dist = distribution(name[1:])
            dist.make_archive()
            archives.append(dist.path)
        else:
            archives.append(project(name).make_archive())
    logv("generated archives: " + str(archives))
    return archives
def canonicalizeprojects(args):
    """check all project specifications for canonical dependencies
    The exit code of this command reflects how many projects have non-canonical dependencies."""
    nonCanonical = []
    for s in suites(True):
        for p in s.projects:
            for pkg in p.defined_java_packages():
                if not pkg.startswith(p.name):
                    abort('package in {0} does not have prefix matching project name: {1}'.format(p, pkg))
            # start with all declared project dependencies; each one from which
            # at least one imported package originates is discarded below,
            # leaving the dependencies that are never actually used
            ignoredDeps = set([name for name in p.deps if project(name, False) is not None])
            for pkg in p.imported_java_packages():
                for name in p.deps:
                    dep = project(name, False)
                    if dep is None:
                        # not a project (e.g. a library) - not subject to this check
                        ignoredDeps.discard(name)
                    else:
                        if pkg in dep.defined_java_packages():
                            ignoredDeps.discard(name)
                        if pkg in dep.extended_java_packages():
                            ignoredDeps.discard(name)
            if len(ignoredDeps) != 0:
                candidates = set()
                # Compute dependencies based on projects required by p
                for d in sorted_deps():
                    if not d.defined_java_packages().isdisjoint(p.imported_java_packages()):
                        candidates.add(d)
                # Remove non-canonical candidates
                for c in list(candidates):
                    candidates.difference_update(c.all_deps([], False, False))
                candidates = [d.name for d in candidates]
                abort('{0} does not use any packages defined in these projects: {1}\nComputed project dependencies: {2}'.format(
                    p, ', '.join(ignoredDeps), ','.join(candidates)))
            # a canonical project declares no more dependencies than it needs
            excess = frozenset(p.deps) - set(p.canonical_deps())
            if len(excess) != 0:
                nonCanonical.append(p)
    if len(nonCanonical) != 0:
        # print the suggested (canonical) dependency list for each offender
        for p in nonCanonical:
            canonicalDeps = p.canonical_deps()
            if len(canonicalDeps) != 0:
                log('Canonical dependencies for project ' + p.name + ' are: [')
                for d in canonicalDeps:
                    log(' "' + d + '",')
                log(' ],')
            else:
                log('Canonical dependencies for project ' + p.name + ' are: []')
    return len(nonCanonical)
class TimeStampFile:
    """Records the modification time of a file (None if it does not exist)
    and answers staleness queries against other files."""
    def __init__(self, path):
        self.path = path
        self.timestamp = os.path.getmtime(path) if exists(path) else None
    def isOlderThan(self, arg):
        """Return True if this file is older than 'arg', which may be another
        TimeStampFile, a single path or a list of paths."""
        # a file that does not exist (or has timestamp 0) is older than everything
        if not self.timestamp:
            return True
        if isinstance(arg, TimeStampFile):
            return False if arg.timestamp is None else arg.timestamp > self.timestamp
        files = arg if isinstance(arg, types.ListType) else [arg]
        return any(os.path.getmtime(f) > self.timestamp for f in files)
    def exists(self):
        """Return True if the underlying file currently exists."""
        return exists(self.path)
    def touch(self):
        """Update the file's modification time, creating it (and any missing
        parent directories) if necessary."""
        if exists(self.path):
            os.utime(self.path, None)
        else:
            if not isdir(dirname(self.path)):
                os.makedirs(dirname(self.path))
            file(self.path, 'a')
def checkstyle(args):
    """run Checkstyle on the Java sources
    Run Checkstyle over the Java sources. Any errors or warnings
    produced by Checkstyle result in a non-zero exit code."""
    parser = ArgumentParser(prog='mx checkstyle')
    parser.add_argument('-f', action='store_true', dest='force', help='force checking (disables timestamp checking)')
    args = parser.parse_args(args)
    totalErrors = 0
    for p in projects_opt_limit_to_suites():
        if p.native:
            continue
        sourceDirs = p.source_dirs()
        config = join(project(p.checkstyleProj).dir, '.checkstyle_checks.xml')
        if not exists(config):
            # fixed typo: 'foudn' -> 'found'
            logv('[No Checkstyle configuration found for {0} - skipping]'.format(p))
            continue
        # skip checking this Java project if its Java compliance level is "higher" than the configured JDK
        jdk = java(p.javaCompliance)
        assert jdk
        for sourceDir in sourceDirs:
            javafilelist = []
            for root, _, files in os.walk(sourceDir):
                javafilelist += [join(root, name) for name in files if name.endswith('.java') and name != 'package-info.java']
            if len(javafilelist) == 0:
                logv('[no Java sources in {0} - skipping]'.format(sourceDir))
                continue
            # a per-source-directory timestamp file records when these sources last checked clean
            timestamp = TimeStampFile(join(p.suite.mxDir, 'checkstyle-timestamps', sourceDir[len(p.suite.dir) + 1:].replace(os.sep, '_') + '.timestamp'))
            mustCheck = False
            if not args.force and timestamp.exists():
                mustCheck = timestamp.isOlderThan(javafilelist)
            else:
                mustCheck = True
            if not mustCheck:
                if _opts.verbose:
                    log('[all Java sources in {0} already checked - skipping]'.format(sourceDir))
                continue
            # honor the project's exclusion patterns, if any
            exclude = join(p.dir, '.checkstyle.exclude')
            if exists(exclude):
                with open(exclude) as f:
                    # Convert patterns to OS separators
                    patterns = [name.rstrip().replace('/', os.sep) for name in f.readlines()]
                def match(name):
                    # substring match against any exclusion pattern
                    for pat in patterns:
                        if pat in name:
                            if _opts.verbose:
                                log('excluding: ' + name)
                            return True
                    return False
                javafilelist = [name for name in javafilelist if not match(name)]
            auditfileName = join(p.dir, 'checkstyleOutput.txt')
            log('Running Checkstyle on {0} using {1}...'.format(sourceDir, config))
            try:
                # command lines are length-limited, so run Checkstyle in chunks
                for chunk in _chunk_files_for_command_line(javafilelist):
                    try:
                        run_java(['-Xmx1g', '-jar', library('CHECKSTYLE').get_path(True), '-f', 'xml', '-c', config, '-o', auditfileName] + chunk, nonZeroIsFatal=False)
                    finally:
                        if exists(auditfileName):
                            # parse the XML audit report and log each error found
                            errors = []
                            source = [None]
                            def start_element(name, attrs):
                                if name == 'file':
                                    source[0] = attrs['name']
                                elif name == 'error':
                                    errors.append('{0}:{1}: {2}'.format(source[0], attrs['line'], attrs['message']))
                            xp = xml.parsers.expat.ParserCreate()
                            xp.StartElementHandler = start_element
                            with open(auditfileName) as fp:
                                xp.ParseFile(fp)
                            if len(errors) != 0:
                                map(log, errors)
                                totalErrors = totalErrors + len(errors)
                            else:
                                # record a clean run so unchanged sources are skipped next time
                                timestamp.touch()
            finally:
                if exists(auditfileName):
                    os.unlink(auditfileName)
    return totalErrors
def clean(args, parser=None):
    """remove all class files, images, and executables
    Removes all files created by a build, including Java class files, executables, and
    generated images.
    """
    # a caller may pass in its own (pre-populated) parser; in that case the
    # parsed args are returned so the caller can process its extra options
    suppliedParser = parser is not None
    parser = parser if suppliedParser else ArgumentParser(prog='mx clean')
    parser.add_argument('--no-native', action='store_false', dest='native', help='do not clean native projects')
    parser.add_argument('--no-java', action='store_false', dest='java', help='do not clean Java projects')
    parser.add_argument('--no-dist', action='store_false', dest='dist', help='do not delete distributions')
    args = parser.parse_args(args)
    def _rmtree(dirPath):
        path = dirPath
        if get_os() == 'windows':
            # extended-length path prefix works around MAX_PATH limitations on Windows
            path = unicode("\\\\?\\" + dirPath)
        shutil.rmtree(path)
    def _rmIfExists(name):
        # remove a regular file, ignoring it if absent (or not a regular file)
        if name and os.path.isfile(name):
            os.unlink(name)
    for p in projects_opt_limit_to_suites():
        if p.native:
            if args.native:
                run([gmake_cmd(), '-C', p.dir, 'clean'])
        else:
            if args.java:
                genDir = p.source_gen_dir()
                if genDir != '' and exists(genDir):
                    log('Clearing {0}...'.format(genDir))
                    for f in os.listdir(genDir):
                        _rmtree(join(genDir, f))
                outputDir = p.output_dir()
                if outputDir != '' and exists(outputDir):
                    log('Removing {0}...'.format(outputDir))
                    _rmtree(outputDir)
            # drop the IDE config timestamps so configurations are regenerated
            for configName in ['netbeans-config.zip', 'eclipse-config.zip']:
                config = TimeStampFile(join(p.suite.mxDir, configName))
                if config.exists():
                    os.unlink(config.path)
    if args.java:
        if args.dist:
            for d in _dists.keys():
                log('Removing distribution {0}...'.format(d))
                _rmIfExists(distribution(d).path)
                _rmIfExists(distribution(d).sourcesPath)
    if suppliedParser:
        return args
def about(args):
    """show the 'man page' for mx"""
    # the module-level docstring serves as the man page
    print __doc__
def help_(args):
"""show help for a given command
With no arguments, print a list of commands and short help for each command.
Given a command name, print help for that command."""
if len(args) == 0:
_argParser.print_help()
return
name = args[0]
if not _commands.has_key(name):
hits = [c for c in _commands.iterkeys() if c.startswith(name)]
if len(hits) == 1:
name = hits[0]
elif len(hits) == 0:
abort('mx: unknown command \'{0}\'\n{1}use "mx help" for more options'.format(name, _format_commands()))
else:
abort('mx: command \'{0}\' is ambiguous\n {1}'.format(name, ' '.join(hits)))
value = _commands[name]
(func, usage) = value[:2]
doc = func.__doc__
if len(value) > 2:
docArgs = value[2:]
fmtArgs = []
for d in docArgs:
if isinstance(d, Callable):
fmtArgs += [d()]
else:
fmtArgs += [str(d)]
doc = doc.format(*fmtArgs)
print 'mx {0} {1}\n\n{2}\n'.format(name, usage, doc)
def projectgraph(args, suite=None):
    """create graph for project structure ("mx projectgraph | dot -Tpdf -oprojects.pdf" or "mx projectgraph --igv")"""
    parser = ArgumentParser(prog='mx projectgraph')
    parser.add_argument('--igv', action='store_true', help='output to IGV listening on 127.0.0.1:4444')
    parser.add_argument('--igv-format', action='store_true', help='output graph in IGV format')
    args = parser.parse_args(args)
    if args.igv or args.igv_format:
        # build the dependency graph in IGV's XML document format
        ids = {}
        nextToIndex = {}
        igv = XMLDoc()
        igv.open('graphDocument')
        igv.open('group')
        igv.open('properties')
        igv.element('p', {'name' : 'name'}, 'GraalProjectDependencies')
        igv.close('properties')
        igv.open('graph', {'name' : 'dependencies'})
        igv.open('nodes')
        # one node per project/library, identified by a dense integer id
        for p in sorted_deps(includeLibs=True, includeJreLibs=True):
            ident = len(ids)
            ids[p.name] = str(ident)
            igv.open('node', {'id' : str(ident)})
            igv.open('properties')
            igv.element('p', {'name' : 'name'}, p.name)
            igv.close('properties')
            igv.close('node')
        igv.close('nodes')
        igv.open('edges')
        for p in projects():
            fromIndex = 0
            for dep in p.canonical_deps():
                # toIndex numbers the incoming edges of each dependency node
                toIndex = nextToIndex.get(dep, 0)
                nextToIndex[dep] = toIndex + 1
                igv.element('edge', {'from' : ids[p.name], 'fromIndex' : str(fromIndex), 'to' : ids[dep], 'toIndex' : str(toIndex), 'label' : 'dependsOn'})
                fromIndex = fromIndex + 1
        igv.close('edges')
        igv.close('graph')
        igv.close('group')
        igv.close('graphDocument')
        if args.igv:
            # send the graph directly to a running IGV instance
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect(('127.0.0.1', 4444))
            s.send(igv.xml())
        else:
            print igv.xml(indent=' ', newl='\n')
        return
    # default output: Graphviz 'dot' format on stdout
    print 'digraph projects {'
    print 'rankdir=BT;'
    print 'node [shape=rect];'
    for p in projects():
        for dep in p.canonical_deps():
            print '"' + p.name + '"->"' + dep + '";'
        if hasattr(p, '_declaredAnnotationProcessors'):
            # dashed edges denote annotation processor dependencies
            for ap in p._declaredAnnotationProcessors:
                print '"' + p.name + '"->"' + ap + '" [style="dashed"];'
    print '}'
def _source_locator_memento(deps):
    """Build the XML 'sourceLookupDirector' memento telling the Eclipse
    debugger where to find the sources of the given projects/libraries.
    Returns the XMLDoc (not yet serialized)."""
    slm = XMLDoc()
    slm.open('sourceLookupDirector')
    slm.open('sourceContainers', {'duplicates' : 'false'})
    # track the highest Java compliance among the projects to pick a matching JRE
    javaCompliance = None
    for dep in deps:
        if dep.isLibrary():
            if hasattr(dep, 'eclipse.container'):
                memento = XMLDoc().element('classpathContainer', {'path' : getattr(dep, 'eclipse.container')}).xml(standalone='no')
                slm.element('classpathContainer', {'memento' : memento, 'typeId':'org.eclipse.jdt.launching.sourceContainer.classpathContainer'})
            elif dep.get_source_path(resolve=True):
                memento = XMLDoc().element('archive', {'detectRoot' : 'true', 'path' : dep.get_source_path(resolve=True)}).xml(standalone='no')
                slm.element('container', {'memento' : memento, 'typeId':'org.eclipse.debug.core.containerType.externalArchive'})
        elif dep.isProject():
            memento = XMLDoc().element('javaProject', {'name' : dep.name}).xml(standalone='no')
            slm.element('container', {'memento' : memento, 'typeId':'org.eclipse.jdt.launching.sourceContainer.javaProject'})
            if javaCompliance is None or dep.javaCompliance > javaCompliance:
                javaCompliance = dep.javaCompliance
    if javaCompliance:
        # request a JRE matching the highest compliance level seen above
        memento = XMLDoc().element('classpathContainer', {'path' : 'org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-' + str(javaCompliance)}).xml(standalone='no')
        slm.element('classpathContainer', {'memento' : memento, 'typeId':'org.eclipse.jdt.launching.sourceContainer.classpathContainer'})
    else:
        # no project deps: fall back to the workspace default JRE
        memento = XMLDoc().element('classpathContainer', {'path' : 'org.eclipse.jdt.launching.JRE_CONTAINER'}).xml(standalone='no')
        slm.element('classpathContainer', {'memento' : memento, 'typeId':'org.eclipse.jdt.launching.sourceContainer.classpathContainer'})
    slm.close('sourceContainers')
    slm.close('sourceLookupDirector')
    return slm
def make_eclipse_attach(suite, hostname, port, name=None, deps=None):
    """
    Creates an Eclipse launch configuration file for attaching to a Java process.
    'port' is expected as a string. Returns a tuple (changed, launchFile) where
    'changed' is the result of update_file and 'launchFile' is the path of the
    configuration under <suite.mxDir>/eclipse-launches.
    """
    if deps is None:
        deps = []
    slm = _source_locator_memento(deps)
    launch = XMLDoc()
    launch.open('launchConfiguration', {'type' : 'org.eclipse.jdt.launching.remoteJavaApplication'})
    launch.element('stringAttribute', {'key' : 'org.eclipse.debug.core.source_locator_id', 'value' : 'org.eclipse.jdt.launching.sourceLocator.JavaSourceLookupDirector'})
    launch.element('stringAttribute', {'key' : 'org.eclipse.debug.core.source_locator_memento', 'value' : '%s'})
    launch.element('booleanAttribute', {'key' : 'org.eclipse.jdt.launching.ALLOW_TERMINATE', 'value' : 'true'})
    launch.open('mapAttribute', {'key' : 'org.eclipse.jdt.launching.CONNECT_MAP'})
    launch.element('mapEntry', {'key' : 'hostname', 'value' : hostname})
    launch.element('mapEntry', {'key' : 'port', 'value' : port})
    launch.close('mapAttribute')
    launch.element('stringAttribute', {'key' : 'org.eclipse.jdt.launching.PROJECT_ATTR', 'value' : ''})
    launch.element('stringAttribute', {'key' : 'org.eclipse.jdt.launching.VM_CONNECTOR_ID', 'value' : 'org.eclipse.jdt.launching.socketAttachConnector'})
    launch.close('launchConfiguration')
    # the source locator memento is spliced into the '%s' placeholder above
    launch = launch.xml(newl='\n', standalone='no') % slm.xml(escape=True, standalone='no')
    if name is None:
        # prefix the launch name with the suite name when several suites are loaded
        if len(suites()) == 1:
            suitePrefix = ''
        else:
            suitePrefix = suite.name + '-'
        name = suitePrefix + 'attach-' + hostname + '-' + port
    eclipseLaunches = join(suite.mxDir, 'eclipse-launches')
    if not exists(eclipseLaunches):
        os.makedirs(eclipseLaunches)
    launchFile = join(eclipseLaunches, name + '.launch')
    return update_file(launchFile, launch), launchFile
def make_eclipse_launch(javaArgs, jre, name=None, deps=None):
    """
    Creates an Eclipse launch configuration file for running/debugging a Java command.
    'javaArgs' is a complete java command line (VM args, main class or -jar,
    program args); 'jre' names the Eclipse StandardVMType JRE to run with.
    Returns the result of update_file, or False if no main class or jar file
    could be identified in 'javaArgs'.
    """
    if deps is None:
        deps = []
    # split the java command line into VM arguments, main class and program arguments
    mainClass = None
    vmArgs = []
    appArgs = []
    cp = None
    argsCopy = list(reversed(javaArgs))
    while len(argsCopy) != 0:
        a = argsCopy.pop()
        if a == '-jar':
            # the jar stands in for the main class; everything after it is a program argument
            mainClass = '-jar'
            appArgs = list(reversed(argsCopy))
            break
        if a == '-cp' or a == '-classpath':
            assert len(argsCopy) != 0
            cp = argsCopy.pop()
            vmArgs.append(a)
            vmArgs.append(cp)
        elif a.startswith('-'):
            vmArgs.append(a)
        else:
            # first non-option argument is the main class; the rest are program arguments
            mainClass = a
            appArgs = list(reversed(argsCopy))
            break
    if mainClass is None:
        log('Cannot create Eclipse launch configuration without main class or jar file: java ' + ' '.join(javaArgs))
        return False
    if name is None:
        # derive a timestamped launch name from the main class or jar
        if mainClass == '-jar':
            name = basename(appArgs[0])
            if len(appArgs) > 1 and not appArgs[1].startswith('-'):
                name = name + '_' + appArgs[1]
        else:
            name = mainClass
        name = time.strftime('%Y-%m-%d-%H%M%S_' + name)
    if cp is not None:
        # map class path entries back to the projects/libraries that produced
        # them so their sources can be looked up while debugging
        for e in cp.split(os.pathsep):
            for s in suites():
                deps += [p for p in s.projects if e == p.output_dir()]
                deps += [l for l in s.libs if e == l.get_path(False)]
    slm = _source_locator_memento(deps)
    launch = XMLDoc()
    launch.open('launchConfiguration', {'type' : 'org.eclipse.jdt.launching.localJavaApplication'})
    launch.element('stringAttribute', {'key' : 'org.eclipse.debug.core.source_locator_id', 'value' : 'org.eclipse.jdt.launching.sourceLocator.JavaSourceLookupDirector'})
    launch.element('stringAttribute', {'key' : 'org.eclipse.debug.core.source_locator_memento', 'value' : '%s'})
    launch.element('stringAttribute', {'key' : 'org.eclipse.jdt.launching.JRE_CONTAINER', 'value' : 'org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/' + jre})
    launch.element('stringAttribute', {'key' : 'org.eclipse.jdt.launching.MAIN_TYPE', 'value' : mainClass})
    launch.element('stringAttribute', {'key' : 'org.eclipse.jdt.launching.PROGRAM_ARGUMENTS', 'value' : ' '.join(appArgs)})
    launch.element('stringAttribute', {'key' : 'org.eclipse.jdt.launching.PROJECT_ATTR', 'value' : ''})
    launch.element('stringAttribute', {'key' : 'org.eclipse.jdt.launching.VM_ARGUMENTS', 'value' : ' '.join(vmArgs)})
    launch.close('launchConfiguration')
    # the source locator memento is spliced into the '%s' placeholder above
    launch = launch.xml(newl='\n', standalone='no') % slm.xml(escape=True, standalone='no')
    eclipseLaunches = join('mx', 'eclipse-launches')
    if not exists(eclipseLaunches):
        os.makedirs(eclipseLaunches)
    return update_file(join(eclipseLaunches, name + '.launch'), launch)
def eclipseinit(args, buildProcessorJars=True, refreshOnly=False):
    """(re)generate Eclipse project configurations and working sets"""
    for currentSuite in suites(True):
        _eclipseinit_suite(args, currentSuite, buildProcessorJars, refreshOnly)
    generate_eclipse_workingsets()
def _check_ide_timestamp(suite, configZip, ide):
"""return True if and only if the projects file, eclipse-settings files, and mx itself are all older than configZip"""
suitePyFiles = [join(suite.mxDir, e) for e in os.listdir(suite.mxDir) if e.startswith('suite') and e.endswith('.py')]
if configZip.isOlderThan(suitePyFiles):
return False
# Assume that any mx change might imply changes to the generated IDE files
if configZip.isOlderThan(__file__):
return False
if ide == 'eclipse':
eclipseSettingsDir = join(suite.mxDir, 'eclipse-settings')
if exists(eclipseSettingsDir):
for name in os.listdir(eclipseSettingsDir):
path = join(eclipseSettingsDir, name)
if configZip.isOlderThan(path):
return False
return True
def _eclipseinit_project(p, files=None, libFiles=None):
    """Generate the Eclipse configuration files for project 'p': .classpath,
    .checkstyle, .project, .settings/* and .factorypath.
    If 'files' is a list, the paths of all generated files are appended to it;
    if 'libFiles' is a list, the paths of referenced library jars are appended
    to it.
    """
    assert java(p.javaCompliance)
    if not exists(p.dir):
        os.makedirs(p.dir)
    # --- .classpath ---
    out = XMLDoc()
    out.open('classpath')
    for src in p.srcDirs:
        srcDir = join(p.dir, src)
        if not exists(srcDir):
            os.mkdir(srcDir)
        out.element('classpathentry', {'kind' : 'src', 'path' : src})
    processorPath = p.annotation_processors_path()
    if processorPath:
        genDir = p.source_gen_dir()
        if not exists(genDir):
            os.mkdir(genDir)
        out.open('classpathentry', {'kind' : 'src', 'path' : 'src_gen'})
        if p.uses_annotation_processor_library():
            # ignore warnings produced by third-party annotation processors
            out.open('attributes')
            out.element('attribute', {'name' : 'ignore_optional_problems', 'value' : 'true'})
            out.close('attributes')
        out.close('classpathentry')
        if files:
            files.append(genDir)
    # Every Java program depends on a JRE
    out.element('classpathentry', {'kind' : 'con', 'path' : 'org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-' + str(p.javaCompliance)})
    if exists(join(p.dir, 'plugin.xml')): # eclipse plugin project
        out.element('classpathentry', {'kind' : 'con', 'path' : 'org.eclipse.pde.core.requiredPlugins'})
    # partition the transitive dependencies into Eclipse classpath containers,
    # plain jar libraries and workspace projects
    containerDeps = set()
    libraryDeps = set()
    projectDeps = set()
    for dep in p.all_deps([], True):
        if dep == p:
            continue
        if dep.isLibrary():
            if hasattr(dep, 'eclipse.container'):
                container = getattr(dep, 'eclipse.container')
                containerDeps.add(container)
                # the container subsumes the library and its transitive deps
                libraryDeps -= set(dep.all_deps([], True))
            else:
                libraryDeps.add(dep)
        elif dep.isProject():
            projectDeps.add(dep)
    for dep in sorted(containerDeps):
        out.element('classpathentry', {'exported' : 'true', 'kind' : 'con', 'path' : dep})
    for dep in sorted(libraryDeps):
        path = dep.path
        # NOTE(review): the result of this call is discarded - it appears to be
        # invoked only for its side effect of resolving/downloading the library,
        # while 'path' keeps the raw dep.path value; confirm this is intended
        dep.get_path(resolve=True)
        # Relative paths for "lib" class path entries have various semantics depending on the Eclipse
        # version being used (e.g. see https://bugs.eclipse.org/bugs/show_bug.cgi?id=274737) so it's
        # safest to simply use absolute paths.
        path = _make_absolute(path, p.suite.dir)
        attributes = {'exported' : 'true', 'kind' : 'lib', 'path' : path}
        sourcePath = dep.get_source_path(resolve=True)
        if sourcePath is not None:
            attributes['sourcepath'] = sourcePath
        out.element('classpathentry', attributes)
        if libFiles:
            libFiles.append(path)
    for dep in sorted(projectDeps):
        out.element('classpathentry', {'combineaccessrules' : 'false', 'exported' : 'true', 'kind' : 'src', 'path' : '/' + dep.name})
    out.element('classpathentry', {'kind' : 'output', 'path' : getattr(p, 'eclipse.output', 'bin')})
    out.close('classpath')
    classpathFile = join(p.dir, '.classpath')
    update_file(classpathFile, out.xml(indent='\t', newl='\n'))
    if files:
        files.append(classpathFile)
    # --- .checkstyle ---
    csConfig = join(project(p.checkstyleProj).dir, '.checkstyle_checks.xml')
    if exists(csConfig):
        out = XMLDoc()
        dotCheckstyle = join(p.dir, ".checkstyle")
        checkstyleConfigPath = '/' + p.checkstyleProj + '/.checkstyle_checks.xml'
        out.open('fileset-config', {'file-format-version' : '1.2.0', 'simple-config' : 'true'})
        out.open('local-check-config', {'name' : 'Checks', 'location' : checkstyleConfigPath, 'type' : 'project', 'description' : ''})
        out.element('additional-data', {'name' : 'protect-config-file', 'value' : 'false'})
        out.close('local-check-config')
        out.open('fileset', {'name' : 'all', 'enabled' : 'true', 'check-config-name' : 'Checks', 'local' : 'true'})
        out.element('file-match-pattern', {'match-pattern' : '.', 'include-pattern' : 'true'})
        out.close('fileset')
        out.open('filter', {'name' : 'all', 'enabled' : 'true', 'check-config-name' : 'Checks', 'local' : 'true'})
        out.element('filter-data', {'value' : 'java'})
        out.close('filter')
        # translate the project's exclusion list, if any, into a Checkstyle filter
        exclude = join(p.dir, '.checkstyle.exclude')
        if exists(exclude):
            out.open('filter', {'name' : 'FilesFromPackage', 'enabled' : 'true'})
            with open(exclude) as f:
                for line in f:
                    if not line.startswith('#'):
                        line = line.strip()
                        exclDir = join(p.dir, line)
                        assert isdir(exclDir), 'excluded source directory listed in ' + exclude + ' does not exist or is not a directory: ' + exclDir
                        out.element('filter-data', {'value' : line})
            out.close('filter')
        out.close('fileset-config')
        update_file(dotCheckstyle, out.xml(indent=' ', newl='\n'))
        if files:
            files.append(dotCheckstyle)
    else:
        # clean up existing .checkstyle file
        dotCheckstyle = join(p.dir, ".checkstyle")
        if exists(dotCheckstyle):
            os.unlink(dotCheckstyle)
    # --- .project ---
    out = XMLDoc()
    out.open('projectDescription')
    out.element('name', data=p.name)
    out.element('comment', data='')
    out.element('projects', data='')
    out.open('buildSpec')
    out.open('buildCommand')
    out.element('name', data='org.eclipse.jdt.core.javabuilder')
    out.element('arguments', data='')
    out.close('buildCommand')
    if exists(csConfig):
        out.open('buildCommand')
        out.element('name', data='net.sf.eclipsecs.core.CheckstyleBuilder')
        out.element('arguments', data='')
        out.close('buildCommand')
    if exists(join(p.dir, 'plugin.xml')): # eclipse plugin project
        for buildCommand in ['org.eclipse.pde.ManifestBuilder', 'org.eclipse.pde.SchemaBuilder']:
            out.open('buildCommand')
            out.element('name', data=buildCommand)
            out.element('arguments', data='')
            out.close('buildCommand')
    if p.definedAnnotationProcessorsDist:
        # Create a launcher that will (re)build the annotation processor
        # jar any time one of its sources is modified.
        dist = p.definedAnnotationProcessorsDist
        distProjects = [d for d in dist.sorted_deps(transitive=True) if d.isProject()]
        relevantResources = []
        # NOTE(review): this loop rebinds the parameter 'p' - after it, 'p'
        # refers to the last project in distProjects, and the statements below
        # (p.dir, p.definedAnnotationProcessorsDist, _genEclipseBuilder) use
        # that rebound value; verify this is intended before relying on it
        for p in distProjects:
            for srcDir in p.source_dirs():
                relevantResources.append(join(p.name, os.path.relpath(srcDir, p.dir)))
            relevantResources.append(join(p.name, os.path.relpath(p.output_dir(), p.dir)))
        # The path should always be p.name/dir independent of where the workspace actually is.
        # So we use the parent folder of the project, whatever that is, to generate such a relative path.
        logicalWorkspaceRoot = os.path.dirname(p.dir)
        refreshFile = os.path.relpath(p.definedAnnotationProcessorsDist.path, logicalWorkspaceRoot)
        _genEclipseBuilder(out, p, 'CreateAnnotationProcessorJar', 'archive @' + dist.name, refresh=True, refreshFile=refreshFile, relevantResources=relevantResources, async=True, xmlIndent='', xmlStandalone='no')
    out.close('buildSpec')
    out.open('natures')
    out.element('nature', data='org.eclipse.jdt.core.javanature')
    if exists(csConfig):
        out.element('nature', data='net.sf.eclipsecs.core.CheckstyleNature')
    if exists(join(p.dir, 'plugin.xml')): # eclipse plugin project
        out.element('nature', data='org.eclipse.pde.PluginNature')
    out.close('natures')
    out.close('projectDescription')
    projectFile = join(p.dir, '.project')
    update_file(projectFile, out.xml(indent='\t', newl='\n'))
    if files:
        files.append(projectFile)
    # --- .settings/* ---
    settingsDir = join(p.dir, ".settings")
    if not exists(settingsDir):
        os.mkdir(settingsDir)
    # collect the defaults from mxtool
    defaultEclipseSettingsDir = join(dirname(__file__), 'eclipse-settings')
    esdict = {}
    if exists(defaultEclipseSettingsDir):
        for name in os.listdir(defaultEclipseSettingsDir):
            if isfile(join(defaultEclipseSettingsDir, name)):
                esdict[name] = os.path.abspath(join(defaultEclipseSettingsDir, name))
    # check for suite overrides
    eclipseSettingsDir = join(p.suite.mxDir, 'eclipse-settings')
    if exists(eclipseSettingsDir):
        for name in os.listdir(eclipseSettingsDir):
            if isfile(join(eclipseSettingsDir, name)):
                esdict[name] = os.path.abspath(join(eclipseSettingsDir, name))
    # check for project overrides
    projectSettingsDir = join(p.dir, 'eclipse-settings')
    if exists(projectSettingsDir):
        for name in os.listdir(projectSettingsDir):
            if isfile(join(projectSettingsDir, name)):
                esdict[name] = os.path.abspath(join(projectSettingsDir, name))
    # copy a possibly modified file to the project's .settings directory
    for name, path in esdict.iteritems():
        # ignore this file altogether if this project has no annotation processors
        if name == "org.eclipse.jdt.apt.core.prefs" and not processorPath:
            continue
        with open(path) as f:
            content = f.read()
        content = content.replace('${javaCompliance}', str(p.javaCompliance))
        if processorPath:
            content = content.replace('org.eclipse.jdt.core.compiler.processAnnotations=disabled', 'org.eclipse.jdt.core.compiler.processAnnotations=enabled')
        update_file(join(settingsDir, name), content)
        if files:
            files.append(join(settingsDir, name))
    # --- .factorypath (annotation processor locations) ---
    if processorPath:
        out = XMLDoc()
        out.open('factorypath')
        out.element('factorypathentry', {'kind' : 'PLUGIN', 'id' : 'org.eclipse.jst.ws.annotations.core', 'enabled' : 'true', 'runInBatchMode' : 'false'})
        for e in processorPath.split(os.pathsep):
            out.element('factorypathentry', {'kind' : 'EXTJAR', 'id' : e, 'enabled' : 'true', 'runInBatchMode' : 'false'})
        out.close('factorypath')
        update_file(join(p.dir, '.factorypath'), out.xml(indent='\t', newl='\n'))
        if files:
            files.append(join(p.dir, '.factorypath'))
def _eclipseinit_suite(args, suite, buildProcessorJars=True, refreshOnly=False):
    # (Re)generate the Eclipse configurations for every project in `suite` and
    # archive the generated files into <suite.mxDir>/eclipse-config.zip so that
    # subsequent runs can do a single timestamp-based up-to-date check.
    configZip = TimeStampFile(join(suite.mxDir, 'eclipse-config.zip'))
    configLibsZip = join(suite.mxDir, 'eclipse-config-libs.zip')
    if refreshOnly and not configZip.exists():
        # refresh-only mode: nothing to refresh if no config was ever generated
        return
    if _check_ide_timestamp(suite, configZip, 'eclipse'):
        logv('[Eclipse configurations are up to date - skipping]')
        return

    files = []     # all generated config files (zipped into configZip)
    libFiles = []  # library files referenced by the configs (zipped separately)
    if buildProcessorJars:
        files += _processorjars_suite(suite)

    for p in suite.projects:
        if p.native:
            # native projects have no Eclipse Java project configuration
            continue
        _eclipseinit_project(p, files, libFiles)

    # generate a shared remote-debug (attach) launch configuration for the suite
    _, launchFile = make_eclipse_attach(suite, 'localhost', '8000', deps=sorted_deps(projectNames=None, includeLibs=True))
    files.append(launchFile)

    # Create an Eclipse project for each distribution that will create/update the archive
    # for the distribution whenever any (transitively) dependent project of the
    # distribution is updated.
    for dist in suite.dists:
        projectDir = dist.get_ide_project_dir()
        if not projectDir:
            continue
        if not exists(projectDir):
            os.makedirs(projectDir)
        distProjects = [d for d in dist.sorted_deps(transitive=True) if d.isProject()]
        # resources whose modification should re-trigger archiving: all source
        # dirs and the output dir of each project in the distribution
        relevantResources = []
        for p in distProjects:
            for srcDir in p.source_dirs():
                relevantResources.append(join(p.name, os.path.relpath(srcDir, p.dir)))
            relevantResources.append(join(p.name, os.path.relpath(p.output_dir(), p.dir)))
        out = XMLDoc()
        out.open('projectDescription')
        out.element('name', data=dist.name)
        out.element('comment', data='Updates ' + dist.path + ' if a project dependency of ' + dist.name + ' is updated')
        out.open('projects')
        for p in distProjects:
            out.element('project', data=p.name)
        for d in dist.distDependencies:
            out.element('project', data=d)
        out.close('projects')
        out.open('buildSpec')
        dist.dir = projectDir
        dist.javaCompliance = max([p.javaCompliance for p in distProjects])
        # external tool builder runs 'mx archive @<dist>' in the background
        # whenever one of the relevant resources changes
        _genEclipseBuilder(out, dist, 'Create' + dist.name + 'Dist', 'archive @' + dist.name, relevantResources=relevantResources, logToFile=True, refresh=False, async=True)
        out.close('buildSpec')
        out.open('natures')
        out.element('nature', data='org.eclipse.jdt.core.javanature')
        out.close('natures')
        out.close('projectDescription')
        projectFile = join(projectDir, '.project')
        update_file(projectFile, out.xml(indent='\t', newl='\n'))
        files.append(projectFile)

    _zip_files(files, suite.dir, configZip.path)
    _zip_files(libFiles, suite.dir, configLibsZip)
def _zip_files(files, baseDir, zipPath):
    """
    Atomically (re)create the zip file at `zipPath` containing `files`, with
    archive entries named relative to `baseDir` (using '/' separators).

    The zip is first written to a temporary file in `baseDir` and then moved
    into place so readers never observe a partially written archive.
    """
    fd, tmp = tempfile.mkstemp(suffix='', prefix=basename(zipPath), dir=baseDir)
    # ZipFile re-opens the temp file by path below; close the low-level
    # descriptor immediately so it cannot leak if zipping fails.
    os.close(fd)
    try:
        zf = zipfile.ZipFile(tmp, 'w')
        try:
            # sorted(set(...)) deduplicates and makes the archive deterministic
            for f in sorted(set(files)):
                relpath = os.path.relpath(f, baseDir)
                arcname = relpath.replace(os.sep, '/')
                zf.write(f, arcname)
        finally:
            zf.close()
        # Atomic on Unix
        shutil.move(tmp, zipPath)
        # Correct the permissions on the temporary file which is created with restrictive permissions
        os.chmod(zipPath, 0o666 & ~currentUmask)
    finally:
        if exists(tmp):
            os.remove(tmp)
def _genEclipseBuilder(dotProjectDoc, p, name, mxCommand, refresh=True, refreshFile=None, relevantResources=None, async=False, logToConsole=False, logToFile=False, appendToLogFile=True, xmlIndent='\t', xmlStandalone=None):
    # Generate an Eclipse "external tool builder" that runs `mxCommand` via the
    # mx launcher script. Writes <p.dir>/.externalToolBuilders/<name>.launch
    # and appends a matching <buildCommand> entry to `dotProjectDoc`, the
    # in-progress .project XML document.
    externalToolDir = join(p.dir, '.externalToolBuilders')
    launchOut = XMLDoc()
    consoleOn = 'true' if logToConsole else 'false'
    launchOut.open('launchConfiguration', {'type' : 'org.eclipse.ui.externaltools.ProgramBuilderLaunchConfigurationType'})
    launchOut.element('booleanAttribute', {'key' : 'org.eclipse.debug.core.capture_output', 'value': consoleOn})
    launchOut.open('mapAttribute', {'key' : 'org.eclipse.debug.core.environmentVariables'})
    # propagate the JDK selection of the current mx invocation to the builder
    launchOut.element('mapEntry', {'key' : 'JAVA_HOME', 'value' : _opts.java_home})
    launchOut.element('mapEntry', {'key' : 'EXTRA_JAVA_HOMES', 'value' : _opts.extra_java_homes})
    launchOut.close('mapAttribute')

    if refresh:
        # after the build, refresh either the whole project or just refreshFile
        if refreshFile is None:
            refreshScope = '${project}'
        else:
            refreshScope = '${working_set:<?xml version="1.0" encoding="UTF-8"?><resources><item path="' + refreshFile + '" type="1"/></resources>}'

        launchOut.element('booleanAttribute', {'key' : 'org.eclipse.debug.core.ATTR_REFRESH_RECURSIVE', 'value': 'false'})
        launchOut.element('stringAttribute', {'key' : 'org.eclipse.debug.core.ATTR_REFRESH_SCOPE', 'value': refreshScope})

    if relevantResources is not None:
        # limit the build scope: the builder only triggers when one of these
        # resources changes (type="2" marks folder resources)
        resources = '${working_set:<?xml version="1.0" encoding="UTF-8"?><resources>'
        for relevantResource in relevantResources:
            resources += '<item path="' + relevantResource + '" type="2" />'
        resources += '</resources>}'
        launchOut.element('stringAttribute', {'key' : 'org.eclipse.ui.externaltools.ATTR_BUILD_SCOPE', 'value': resources})

    launchOut.element('booleanAttribute', {'key' : 'org.eclipse.debug.ui.ATTR_CONSOLE_OUTPUT_ON', 'value': consoleOn})
    launchOut.element('booleanAttribute', {'key' : 'org.eclipse.debug.ui.ATTR_LAUNCH_IN_BACKGROUND', 'value': 'true' if async else 'false'})
    if logToFile:
        logFile = join(externalToolDir, name + '.log')
        launchOut.element('stringAttribute', {'key' : 'org.eclipse.debug.ui.ATTR_CAPTURE_IN_FILE', 'value': logFile})
        launchOut.element('booleanAttribute', {'key' : 'org.eclipse.debug.ui.ATTR_APPEND_TO_FILE', 'value': 'true' if appendToLogFile else 'false'})

    # expect to find the OS command to invoke mx in the same directory
    baseDir = dirname(os.path.abspath(__file__))

    cmd = 'mx.sh'
    if get_os() == 'windows':
        cmd = 'mx.cmd'
    cmdPath = join(baseDir, cmd)
    if not os.path.exists(cmdPath):
        # backwards compatibility for when the commands lived in parent of mxtool
        cmdPath = join(dirname(baseDir), cmd)
        if not os.path.exists(cmdPath):
            abort('cannot locate ' + cmd)

    launchOut.element('stringAttribute', {'key' : 'org.eclipse.ui.externaltools.ATTR_LOCATION', 'value': cmdPath})
    launchOut.element('stringAttribute', {'key' : 'org.eclipse.ui.externaltools.ATTR_RUN_BUILD_KINDS', 'value': 'full,incremental,auto,'})
    launchOut.element('stringAttribute', {'key' : 'org.eclipse.ui.externaltools.ATTR_TOOL_ARGUMENTS', 'value': mxCommand})
    launchOut.element('booleanAttribute', {'key' : 'org.eclipse.ui.externaltools.ATTR_TRIGGERS_CONFIGURED', 'value': 'true'})
    launchOut.element('stringAttribute', {'key' : 'org.eclipse.ui.externaltools.ATTR_WORKING_DIRECTORY', 'value': p.suite.dir})

    launchOut.close('launchConfiguration')

    if not exists(externalToolDir):
        os.makedirs(externalToolDir)
    update_file(join(externalToolDir, name + '.launch'), launchOut.xml(indent=xmlIndent, standalone=xmlStandalone, newl='\n'))

    # register the builder in the .project file being generated
    dotProjectDoc.open('buildCommand')
    dotProjectDoc.element('name', data='org.eclipse.ui.externaltools.ExternalToolBuilder')
    dotProjectDoc.element('triggers', data='auto,full,incremental,')
    dotProjectDoc.open('arguments')
    dotProjectDoc.open('dictionary')
    dotProjectDoc.element('key', data='LaunchConfigHandle')
    dotProjectDoc.element('value', data='<project>/.externalToolBuilders/' + name + '.launch')
    dotProjectDoc.close('dictionary')
    dotProjectDoc.open('dictionary')
    dotProjectDoc.element('key', data='incclean')
    dotProjectDoc.element('value', data='true')
    dotProjectDoc.close('dictionary')
    dotProjectDoc.close('arguments')
    dotProjectDoc.close('buildCommand')
def generate_eclipse_workingsets():
    """
    Populate the workspace's working set configuration with working sets generated from project data for the primary suite
    If the workspace already contains working set definitions, the existing ones will be retained and extended.
    In case mx/env does not contain a WORKSPACE definition pointing to the workspace root directory, a parent search from the primary suite directory is performed.
    If no workspace root directory can be identified, the primary suite directory is used and the user has to place the workingsets.xml file by hand.
    """
    # identify the location where to look for workingsets.xml
    wsfilename = 'workingsets.xml'
    wsloc = '.metadata/.plugins/org.eclipse.ui.workbench'
    if 'WORKSPACE' in os.environ:
        expected_wsroot = os.environ['WORKSPACE']
    else:
        expected_wsroot = _primary_suite.dir

    wsroot = _find_eclipse_wsroot(expected_wsroot)
    if wsroot is None:
        # failed to find it; fall back to the expected location
        wsroot = expected_wsroot

    wsdir = join(wsroot, wsloc)
    if not exists(wsdir):
        wsdir = wsroot
        logv('Could not find Eclipse metadata directory. Please place ' + wsfilename + ' in ' + wsloc + ' manually.')
    wspath = join(wsdir, wsfilename)

    # build a working-set-name -> project-names mapping from the project data
    workingSets = {}
    for p in projects():
        if p.workingSets is None:
            continue
        for w in p.workingSets.split(","):
            workingSets.setdefault(w, []).append(p.name)

    # merge into an existing file if present, otherwise generate from scratch
    wsdoc = _copy_workingset_xml(wspath, workingSets) if exists(wspath) else _make_workingset_xml(workingSets)

    update_file(wspath, wsdoc.xml(newl='\n'))
def _find_eclipse_wsroot(wsdir):
md = join(wsdir, '.metadata')
if exists(md):
return wsdir
split = os.path.split(wsdir)
if split[0] == wsdir: # root directory
return None
else:
return _find_eclipse_wsroot(split[0])
def _make_workingset_xml(workingSets):
    """
    Build a complete Eclipse working set XML document from scratch out of
    `workingSets`, a mapping of working set name -> list of project names.
    Returns the resulting XMLDoc.
    """
    doc = XMLDoc()
    doc.open('workingSetManager')

    # emit working sets in a deterministic (sorted-by-name) order
    for wsName, projectNames in sorted(workingSets.items()):
        _workingset_open(doc, wsName)
        for projectName in projectNames:
            _workingset_element(doc, projectName)
        doc.close('workingSet')

    doc.close('workingSetManager')
    return doc
def _copy_workingset_xml(wspath, workingSets):
    # Merge the working sets described by `workingSets` (name -> list of
    # project names) into the existing workingsets.xml file at `wspath`,
    # retaining all entries already present in the file. Returns the merged
    # XMLDoc. The merge is done by streaming the old file through an expat
    # parser and echoing/augmenting elements into `target`.
    target = XMLDoc()
    target.open('workingSetManager')

    parser = xml.parsers.expat.ParserCreate()

    class ParserState(object):
        # Mutable state shared by the expat handler closures below.
        def __init__(self):
            self.current_ws_name = 'none yet'   # name of the working set currently being parsed
            self.current_ws = None              # mx's project list for that working set (None if not mx-managed)
            self.seen_ws = list()               # names of working sets found in the existing file
            self.seen_projects = list()         # projects already emitted for current_ws
            self.aggregate_ws = False           # inside an 'aggregate' working set (copied verbatim)
            self.nested_ws = False              # inside a workingSet element nested in an aggregate

    ps = ParserState()

    # parsing logic
    def _ws_start(name, attributes):
        # Top-level handler: on a <workingSet>, classify it and switch to the
        # item-level handler for its children.
        if name == 'workingSet':
            if attributes.has_key('name'):
                ps.current_ws_name = attributes['name']
                if attributes.has_key('aggregate') and attributes['aggregate'] == 'true':
                    ps.aggregate_ws = True
                    ps.current_ws = None
                elif workingSets.has_key(ps.current_ws_name):
                    ps.current_ws = workingSets[ps.current_ws_name]
                    ps.seen_ws.append(ps.current_ws_name)
                    ps.seen_projects = list()
                else:
                    ps.current_ws = None
            target.open(name, attributes)
            parser.StartElementHandler = _ws_item

    def _ws_end(name):
        closeAndResetHandler = False
        if name == 'workingSet':
            if ps.aggregate_ws:
                # aggregates may contain nested workingSet elements; only the
                # outermost close ends the aggregate
                if ps.nested_ws:
                    ps.nested_ws = False
                else:
                    ps.aggregate_ws = False
                    closeAndResetHandler = True
            else:
                # append the projects mx knows about that were not already
                # listed in the file's version of this working set
                if not ps.current_ws is None:
                    for p in ps.current_ws:
                        if not p in ps.seen_projects:
                            _workingset_element(target, p)
                closeAndResetHandler = True
            if closeAndResetHandler:
                target.close('workingSet')
                parser.StartElementHandler = _ws_start
        elif name == 'workingSetManager':
            # process all working sets that are new to the file
            for w in sorted(workingSets.keys()):
                if not w in ps.seen_ws:
                    _workingset_open(target, w)
                    for p in workingSets[w]:
                        _workingset_element(target, p)
                    target.close('workingSet')

    def _ws_item(name, attributes):
        # Handler for elements inside a <workingSet>.
        if name == 'item':
            if ps.current_ws is None:
                # not an mx-managed working set: copy the item verbatim
                target.element(name, attributes)
            elif not attributes.has_key('elementID') and attributes.has_key('factoryID') and attributes.has_key('path') and attributes.has_key('type'):
                # resource-style item: keep as-is and record the project it names
                target.element(name, attributes)
                p_name = attributes['path'][1:]  # strip off the leading '/'
                ps.seen_projects.append(p_name)
            else:
                # Java-element-style item: re-emit in mx's canonical form
                p_name = attributes['elementID'][1:]  # strip off the leading '='
                _workingset_element(target, p_name)
                ps.seen_projects.append(p_name)
        elif name == 'workingSet':
            ps.nested_ws = True
            target.element(name, attributes)

    # process document
    parser.StartElementHandler = _ws_start
    parser.EndElementHandler = _ws_end
    with open(wspath, 'r') as wsfile:
        parser.ParseFile(wsfile)

    target.close('workingSetManager')
    return target
def _workingset_open(wsdoc, ws):
    """Emit the opening tag of an Eclipse JDT working set named `ws` into `wsdoc`."""
    attrs = {
        'editPageID': 'org.eclipse.jdt.ui.JavaWorkingSetPage',
        'factoryID': 'org.eclipse.ui.internal.WorkingSetFactory',
        'id': 'wsid_' + ws,
        'label': ws,
        'name': ws,
    }
    wsdoc.open('workingSet', attrs)
def _workingset_element(wsdoc, p):
    """Emit an <item> entry referencing project `p` inside a working set in `wsdoc`."""
    attrs = {
        'elementID': '=' + p,
        'factoryID': 'org.eclipse.jdt.ui.PersistableJavaElementFactory',
    }
    wsdoc.element('item', attrs)
def netbeansinit(args, refreshOnly=False, buildProcessorJars=True):
    """(re)generate NetBeans project configurations"""
    # delegate to the per-suite generator for every known suite
    for s in suites(True):
        _netbeansinit_suite(args, s, refreshOnly, buildProcessorJars)
def _netbeansinit_project(p, jdks=None, files=None, libFiles=None):
    """
    Generate the NetBeans (Ant-based j2seproject) configuration for project `p`:
    build.xml, nbproject/project.xml and nbproject/project.properties.

    `jdks` (a set, if given) collects the JDK objects used, `files` collects the
    generated config file paths and `libFiles` the referenced library paths.
    """
    if not exists(join(p.dir, 'nbproject')):
        os.makedirs(join(p.dir, 'nbproject'))

    jdk = java(p.javaCompliance)
    assert jdk

    if jdks:
        jdks.add(jdk)

    # --- build.xml: delegates to the standard NetBeans build-impl.xml and
    # re-archives the GRAAL distribution after every compile ---
    out = XMLDoc()
    out.open('project', {'name' : p.name, 'default' : 'default', 'basedir' : '.'})
    out.element('description', data='Builds, tests, and runs the project ' + p.name + '.')
    out.element('import', {'file' : 'nbproject/build-impl.xml'})
    out.open('target', {'name' : '-post-compile'})
    out.open('exec', {'executable' : sys.executable})
    out.element('env', {'key' : 'JAVA_HOME', 'value' : jdk.jdk})
    out.element('arg', {'value' : os.path.abspath(__file__)})
    out.element('arg', {'value' : 'archive'})
    out.element('arg', {'value' : '@GRAAL'})
    out.close('exec')
    out.close('target')
    out.close('project')
    update_file(join(p.dir, 'build.xml'), out.xml(indent='\t', newl='\n'))
    if files:
        files.append(join(p.dir, 'build.xml'))

    # --- nbproject/project.xml: project metadata, source roots and
    # inter-project (Ant reference) dependencies ---
    out = XMLDoc()
    out.open('project', {'xmlns' : 'http://www.netbeans.org/ns/project/1'})
    out.element('type', data='org.netbeans.modules.java.j2seproject')
    out.open('configuration')
    out.open('data', {'xmlns' : 'http://www.netbeans.org/ns/j2se-project/3'})
    out.element('name', data=p.name)
    out.element('explicit-platform', {'explicit-source-supported' : 'true'})
    out.open('source-roots')
    out.element('root', {'id' : 'src.dir'})
    if len(p.annotation_processors()) > 0:
        # annotation processor output is exposed as an extra source root
        out.element('root', {'id' : 'src.ap-source-output.dir', 'name' : 'Generated Packages'})
    out.close('source-roots')
    out.open('test-roots')
    out.close('test-roots')
    out.close('data')
    firstDep = True
    for dep in p.all_deps([], includeLibs=False, includeAnnotationProcessors=True):
        if dep == p:
            continue
        if dep.isProject():
            n = dep.name.replace('.', '_')
            if firstDep:
                # the <references> element is only opened if there is at least one dependency
                out.open('references', {'xmlns' : 'http://www.netbeans.org/ns/ant-project-references/1'})
                firstDep = False
            out.open('reference')
            out.element('foreign-project', data=n)
            out.element('artifact-type', data='jar')
            out.element('script', data='build.xml')
            out.element('target', data='jar')
            out.element('clean-target', data='clean')
            out.element('id', data='jar')
            out.close('reference')
    if not firstDep:
        out.close('references')
    out.close('configuration')
    out.close('project')
    update_file(join(p.dir, 'nbproject', 'project.xml'), out.xml(indent='    ', newl='\n'))
    if files:
        files.append(join(p.dir, 'nbproject', 'project.xml'))

    # --- nbproject/project.properties: fixed template plus per-project
    # source/classpath/platform entries ---
    out = StringIO.StringIO()
    jdkPlatform = 'JDK_' + str(jdk.version)

    annotationProcessorEnabled = "false"
    annotationProcessorSrcFolder = ""
    if len(p.annotation_processors()) > 0:
        annotationProcessorEnabled = "true"
        genSrcDir = p.source_gen_dir()
        if not exists(genSrcDir):
            os.makedirs(genSrcDir)
        annotationProcessorSrcFolder = "src.ap-source-output.dir=" + genSrcDir

    # NOTE: the trailing .replace(...) calls convert the ':' classpath
    # separators and '/' path separators in the template to the host OS forms
    content = """
annotation.processing.enabled=""" + annotationProcessorEnabled + """
annotation.processing.enabled.in.editor=""" + annotationProcessorEnabled + """
annotation.processing.processors.list=
annotation.processing.run.all.processors=true
application.title=""" + p.name + """
application.vendor=mx
build.classes.dir=${build.dir}
build.classes.excludes=**/*.java,**/*.form
# This directory is removed when the project is cleaned:
build.dir=bin
build.generated.sources.dir=${build.dir}/generated-sources
# Only compile against the classpath explicitly listed here:
build.sysclasspath=ignore
build.test.classes.dir=${build.dir}/test/classes
build.test.results.dir=${build.dir}/test/results
# Uncomment to specify the preferred debugger connection transport:
#debug.transport=dt_socket
debug.classpath=\\
    ${run.classpath}
debug.test.classpath=\\
    ${run.test.classpath}
# This directory is removed when the project is cleaned:
dist.dir=dist
dist.jar=${dist.dir}/""" + p.name + """.jar
dist.javadoc.dir=${dist.dir}/javadoc
endorsed.classpath=
excludes=
includes=**
jar.compress=false
# Space-separated list of extra javac options
javac.compilerargs=-XDignore.symbol.file
javac.deprecation=false
javac.source=""" + str(p.javaCompliance) + """
javac.target=""" + str(p.javaCompliance) + """
javac.test.classpath=\\
    ${javac.classpath}:\\
    ${build.classes.dir}
javadoc.additionalparam=
javadoc.author=false
javadoc.encoding=${source.encoding}
javadoc.noindex=false
javadoc.nonavbar=false
javadoc.notree=false
javadoc.private=false
javadoc.splitindex=true
javadoc.use=true
javadoc.version=false
javadoc.windowtitle=
main.class=
manifest.file=manifest.mf
meta.inf.dir=${src.dir}/META-INF
mkdist.disabled=false
platforms.""" + jdkPlatform + """.home=""" + jdk.jdk + """
platform.active=""" + jdkPlatform + """
run.classpath=\\
    ${javac.classpath}:\\
    ${build.classes.dir}
# Space-separated list of JVM arguments used when running the project
# (you may also define separate properties like run-sys-prop.name=value instead of -Dname=value
# or test-sys-prop.name=value to set system properties for unit tests):
run.jvmargs=
run.test.classpath=\\
    ${javac.test.classpath}:\\
    ${build.test.classes.dir}
test.src.dir=./test
""" + annotationProcessorSrcFolder + """
source.encoding=UTF-8""".replace(':', os.pathsep).replace('/', os.sep)
    print >> out, content

    # one file.reference + src.*.dir property per source directory; the first
    # source dir becomes the main 'src.dir'
    mainSrc = True
    for src in p.srcDirs:
        srcDir = join(p.dir, src)
        if not exists(srcDir):
            os.mkdir(srcDir)
        ref = 'file.reference.' + p.name + '-' + src
        print >> out, ref + '=' + src
        if mainSrc:
            print >> out, 'src.dir=${' + ref + '}'
            mainSrc = False
        else:
            print >> out, 'src.' + src + '.dir=${' + ref + '}'

    javacClasspath = []

    deps = p.all_deps([], True)
    annotationProcessorOnlyDeps = []
    if len(p.annotation_processors()) > 0:
        # annotation processors that are not regular dependencies go on the
        # processor path only, not on the javac classpath
        for ap in p.annotation_processors():
            apDep = dependency(ap)
            if not apDep in deps:
                deps.append(apDep)
                annotationProcessorOnlyDeps.append(apDep)

    annotationProcessorReferences = []

    for dep in deps:
        if dep == p:
            continue

        if dep.isLibrary():
            path = dep.get_path(resolve=True)
            if path:
                if os.sep == '\\':
                    # escape backslashes for the Java properties format
                    path = path.replace('\\', '\\\\')
                ref = 'file.reference.' + dep.name + '-bin'
                print >> out, ref + '=' + path
                if libFiles:
                    libFiles.append(path)
        elif dep.isProject():
            n = dep.name.replace('.', '_')
            relDepPath = os.path.relpath(dep.dir, p.dir).replace(os.sep, '/')
            ref = 'reference.' + n + '.jar'
            print >> out, 'project.' + n + '=' + relDepPath
            print >> out, ref + '=${project.' + n + '}/dist/' + dep.name + '.jar'

        if not dep in annotationProcessorOnlyDeps:
            javacClasspath.append('${' + ref + '}')
        else:
            annotationProcessorReferences.append('${' + ref + '}')

    print >> out, 'javac.classpath=\\\n    ' + (os.pathsep + '\\\n    ').join(javacClasspath)
    print >> out, 'javac.processorpath=' + (os.pathsep + '\\\n    ').join(['${javac.classpath}'] + annotationProcessorReferences)
    print >> out, 'javac.test.processorpath=' + (os.pathsep + '\\\n    ').join(['${javac.test.classpath}'] + annotationProcessorReferences)

    update_file(join(p.dir, 'nbproject', 'project.properties'), out.getvalue())
    out.close()
    if files:
        files.append(join(p.dir, 'nbproject', 'project.properties'))
def _netbeansinit_suite(args, suite, refreshOnly=False, buildProcessorJars=True):
    """
    (Re)generate the NetBeans configurations for all Java projects in `suite`
    and archive them into <suite.mxDir>/netbeans-config.zip so later runs can
    do a single timestamp-based up-to-date check.
    """
    configZip = TimeStampFile(join(suite.mxDir, 'netbeans-config.zip'))
    # Fix: this previously pointed at 'eclipse-config-libs.zip' (copy-paste
    # from _eclipseinit_suite), so the NetBeans library archive clobbered the
    # Eclipse one. Use a NetBeans-specific file name.
    configLibsZip = join(suite.mxDir, 'netbeans-config-libs.zip')
    if refreshOnly and not configZip.exists():
        # refresh-only mode: nothing to refresh if no config was ever generated
        return
    if _check_ide_timestamp(suite, configZip, 'netbeans'):
        logv('[NetBeans configurations are up to date - skipping]')
        return

    files = []     # generated config files (zipped into configZip)
    libFiles = []  # library files referenced by the configs
    jdks = set()   # JDKs used by the projects, reported to the user below
    for p in suite.projects:
        if p.native:
            continue

        if exists(join(p.dir, 'plugin.xml')):  # eclipse plugin project
            continue

        _netbeansinit_project(p, jdks, files, libFiles)

    log('If using NetBeans:')
    # http://stackoverflow.com/questions/24720665/cant-resolve-jdk-internal-package
    log('  1. Edit etc/netbeans.conf in your NetBeans installation and modify netbeans_default_options variable to include "-J-DCachingArchiveProvider.disableCtSym=true"')
    log('  2. Ensure that the following platform(s) are defined (Tools -> Java Platforms):')
    for jdk in jdks:
        log('    JDK_' + str(jdk.version))
    log('  3. Open/create a Project Group for the directory containing the projects (File -> Project Group -> New Group... -> Folder of Projects)')

    _zip_files(files, suite.dir, configZip.path)
    _zip_files(libFiles, suite.dir, configLibsZip)
def intellijinit(args, refreshOnly=False):
    """(re)generate Intellij project configurations"""
    # delegate to the per-suite generator for every known suite
    for s in suites(True):
        _intellij_suite(args, s, refreshOnly)
def _intellij_suite(args, suite, refreshOnly=False):
    # (Re)generate the IntelliJ IDEA configuration for `suite`:
    # one .iml module per Java project, plus .idea/{modules.xml, libraries/*,
    # compiler.xml, misc.xml} at the suite root.
    libraries = set()  # library dependencies encountered; library XML files are generated below

    ideaProjectDirectory = join(suite.dir, '.idea')

    if not exists(ideaProjectDirectory):
        os.mkdir(ideaProjectDirectory)
    nameFile = join(ideaProjectDirectory, '.name')
    update_file(nameFile, "Graal")
    modulesXml = XMLDoc()
    modulesXml.open('project', attributes={'version': '4'})
    modulesXml.open('component', attributes={'name': 'ProjectModuleManager'})
    modulesXml.open('modules')

    def _intellij_exclude_if_exists(xml, p, name):
        # exclude a directory from the module content root, but only if it exists
        path = join(p.dir, name)
        if exists(path):
            xml.element('excludeFolder', attributes={'url':'file://$MODULE_DIR$/' + name})

    # maps a tuple of annotation processor names -> projects using that exact set
    annotationProcessorProfiles = {}

    def _complianceToIntellijLanguageLevel(compliance):
        return 'JDK_1_' + str(compliance.value)

    # create the modules (1 module = 1 Intellij project)
    for p in suite.projects:
        if p.native:
            continue
        assert java(p.javaCompliance)

        if not exists(p.dir):
            os.makedirs(p.dir)

        # group projects by their annotation processor set for compiler.xml below
        annotationProcessorProfileKey = tuple(p.annotation_processors())

        if not annotationProcessorProfileKey in annotationProcessorProfiles:
            annotationProcessorProfiles[annotationProcessorProfileKey] = [p]
        else:
            annotationProcessorProfiles[annotationProcessorProfileKey].append(p)

        intellijLanguageLevel = _complianceToIntellijLanguageLevel(p.javaCompliance)

        moduleXml = XMLDoc()
        moduleXml.open('module', attributes={'type': 'JAVA_MODULE', 'version': '4'})

        moduleXml.open('component', attributes={'name': 'NewModuleRootManager', 'LANGUAGE_LEVEL': intellijLanguageLevel, 'inherit-compiler-output': 'false'})
        moduleXml.element('output', attributes={'url': 'file://$MODULE_DIR$/bin'})
        moduleXml.element('exclude-output')

        moduleXml.open('content', attributes={'url': 'file://$MODULE_DIR$'})
        for src in p.srcDirs:
            # source dirs are created on demand so the module is usable immediately
            srcDir = join(p.dir, src)
            if not exists(srcDir):
                os.mkdir(srcDir)
            moduleXml.element('sourceFolder', attributes={'url':'file://$MODULE_DIR$/' + src, 'isTestSource': 'false'})
        if len(p.annotation_processors()) > 0:
            # annotation processor output is registered as an extra source folder
            genDir = p.source_gen_dir()
            if not exists(genDir):
                os.mkdir(genDir)
            moduleXml.element('sourceFolder', attributes={'url':'file://$MODULE_DIR$/' + os.path.relpath(genDir, p.dir), 'isTestSource': 'false'})

        # keep other IDEs' metadata out of IntelliJ's view
        for name in ['.externalToolBuilders', '.settings', 'nbproject']:
            _intellij_exclude_if_exists(moduleXml, p, name)
        moduleXml.close('content')

        moduleXml.element('orderEntry', attributes={'type': 'jdk', 'jdkType': 'JavaSDK', 'jdkName': str(p.javaCompliance)})
        moduleXml.element('orderEntry', attributes={'type': 'sourceFolder', 'forTests': 'false'})

        deps = p.all_deps([], True, includeAnnotationProcessors=True)
        for dep in deps:
            if dep == p:
                continue

            if dep.isLibrary():
                libraries.add(dep)
                moduleXml.element('orderEntry', attributes={'type': 'library', 'name': dep.name, 'level': 'project'})
            elif dep.isProject():
                moduleXml.element('orderEntry', attributes={'type': 'module', 'module-name': dep.name})

        moduleXml.close('component')
        moduleXml.close('module')

        moduleFile = join(p.dir, p.name + '.iml')
        update_file(moduleFile, moduleXml.xml(indent='  ', newl='\n'))

        # register the module in .idea/modules.xml (paths relative to the project root)
        moduleFilePath = "$PROJECT_DIR$/" + os.path.relpath(moduleFile, suite.dir)
        modulesXml.element('module', attributes={'fileurl': 'file://' + moduleFilePath, 'filepath': moduleFilePath})

    modulesXml.close('modules')
    modulesXml.close('component')
    modulesXml.close('project')
    moduleXmlFile = join(ideaProjectDirectory, 'modules.xml')
    update_file(moduleXmlFile, modulesXml.xml(indent='  ', newl='\n'))

    # TODO What about cross-suite dependencies?

    librariesDirectory = join(ideaProjectDirectory, 'libraries')

    if not exists(librariesDirectory):
        os.mkdir(librariesDirectory)

    # Setup the libraries that were used above
    # TODO: setup all the libraries from the suite regardless of usage?
    for library in libraries:
        libraryXml = XMLDoc()

        libraryXml.open('component', attributes={'name': 'libraryTable'})
        libraryXml.open('library', attributes={'name': library.name})
        libraryXml.open('CLASSES')
        libraryXml.element('root', attributes={'url': 'jar://$PROJECT_DIR$/' + os.path.relpath(library.get_path(True), suite.dir) + '!/'})
        libraryXml.close('CLASSES')
        libraryXml.element('JAVADOC')
        if library.sourcePath:
            libraryXml.open('SOURCES')
            libraryXml.element('root', attributes={'url': 'jar://$PROJECT_DIR$/' + os.path.relpath(library.get_source_path(True), suite.dir) + '!/'})
            libraryXml.close('SOURCES')
        else:
            libraryXml.element('SOURCES')
        libraryXml.close('library')
        libraryXml.close('component')

        libraryFile = join(librariesDirectory, library.name + '.xml')
        update_file(libraryFile, libraryXml.xml(indent='  ', newl='\n'))

    # Set annotation processor profiles up, and link them to modules in compiler.xml
    compilerXml = XMLDoc()
    compilerXml.open('project', attributes={'version': '4'})
    compilerXml.open('component', attributes={'name': 'CompilerConfiguration'})

    compilerXml.element('option', attributes={'name': "DEFAULT_COMPILER", 'value': 'Javac'})
    compilerXml.element('resourceExtensions')
    compilerXml.open('wildcardResourcePatterns')
    compilerXml.element('entry', attributes={'name': '!?*.java'})
    compilerXml.close('wildcardResourcePatterns')

    if annotationProcessorProfiles:
        compilerXml.open('annotationProcessing')
        # one profile per distinct annotation processor set, shared by the
        # modules that use exactly that set
        for processors, modules in sorted(annotationProcessorProfiles.iteritems()):
            compilerXml.open('profile', attributes={'default': 'false', 'name': '-'.join(processors), 'enabled': 'true'})
            compilerXml.element('sourceOutputDir', attributes={'name': 'src_gen'})  # TODO use p.source_gen_dir() ?
            compilerXml.element('outputRelativeToContentRoot', attributes={'value': 'true'})
            compilerXml.open('processorPath', attributes={'useClasspath': 'false'})
            for apName in processors:
                pDep = dependency(apName)
                for entry in pDep.all_deps([], True):
                    if entry.isLibrary():
                        compilerXml.element('entry', attributes={'name': '$PROJECT_DIR$/' + os.path.relpath(entry.path, suite.dir)})
                    elif entry.isProject():
                        assert entry.isProject()
                        compilerXml.element('entry', attributes={'name': '$PROJECT_DIR$/' + os.path.relpath(entry.output_dir(), suite.dir)})
            compilerXml.close('processorPath')

            for module in modules:
                compilerXml.element('module', attributes={'name': module.name})

            compilerXml.close('profile')
        compilerXml.close('annotationProcessing')

    compilerXml.close('component')
    compilerXml.close('project')
    compilerFile = join(ideaProjectDirectory, 'compiler.xml')
    update_file(compilerFile, compilerXml.xml(indent='  ', newl='\n'))

    # Write misc.xml for global JDK config
    miscXml = XMLDoc()
    miscXml.open('project', attributes={'version': '4'})
    miscXml.element('component', attributes={'name': 'ProjectRootManager', 'version': '2', 'languageLevel': _complianceToIntellijLanguageLevel(java().javaCompliance), 'project-jdk-name': str(java().javaCompliance), 'project-jdk-type': 'JavaSDK'})
    miscXml.close('project')

    miscFile = join(ideaProjectDirectory, 'misc.xml')
    update_file(miscFile, miscXml.xml(indent='  ', newl='\n'))

    # TODO look into copyright settings
    # TODO should add vcs.xml support
def ideclean(args):
    """remove all Eclipse, NetBeans and IntelliJ project configurations"""
    def rm(path):
        # remove a single file if it exists; raises OSError on failure
        if exists(path):
            os.remove(path)

    for s in suites():
        rm(join(s.mxDir, 'eclipse-config.zip'))
        rm(join(s.mxDir, 'netbeans-config.zip'))
        shutil.rmtree(join(s.dir, '.idea'), ignore_errors=True)

    for p in projects():
        if p.native:
            continue

        shutil.rmtree(join(p.dir, '.settings'), ignore_errors=True)
        shutil.rmtree(join(p.dir, '.externalToolBuilders'), ignore_errors=True)
        shutil.rmtree(join(p.dir, 'nbproject'), ignore_errors=True)
        rm(join(p.dir, '.classpath'))
        rm(join(p.dir, '.checkstyle'))
        rm(join(p.dir, '.project'))
        rm(join(p.dir, '.factorypath'))
        rm(join(p.dir, p.name + '.iml'))
        rm(join(p.dir, 'build.xml'))
        rm(join(p.dir, 'eclipse-build.xml'))
        try:
            rm(join(p.dir, p.name + '.jar'))
        except OSError:
            # fix: was a bare 'except:' which also swallowed KeyboardInterrupt/SystemExit;
            # only removal failures should be reported and skipped
            log("Error removing {0}".format(p.name + '.jar'))

    for d in _dists.itervalues():
        if d.get_ide_project_dir():
            shutil.rmtree(d.get_ide_project_dir(), ignore_errors=True)
def ideinit(args, refreshOnly=False, buildProcessorJars=True):
    """(re)generate Eclipse, NetBeans and Intellij project configurations"""
    # regenerate each IDE's configuration in turn
    eclipseinit(args, refreshOnly=refreshOnly, buildProcessorJars=buildProcessorJars)
    netbeansinit(args, refreshOnly=refreshOnly, buildProcessorJars=buildProcessorJars)
    intellijinit(args, refreshOnly=refreshOnly)
    if refreshOnly:
        return
    # a full (non-refresh) init also prunes directories of deleted projects
    fsckprojects([])
def fsckprojects(args):
    """find directories corresponding to deleted Java projects and delete them"""
    for s in suites(True):
        knownProjectDirs = set(p.dir for p in s.projects)
        for dirpath, dirnames, filenames in os.walk(s.dir):
            if dirpath == s.dir:
                # at the suite root: no point in traversing VCS metadata or lib/
                dirnames[:] = [d for d in dirnames if d not in ('.hg', 'lib')]
            elif dirpath in knownProjectDirs:
                # a live project: don't traverse subdirs of an existing project
                del dirnames[:]
            else:
                # a directory holding IDE project config files but not belonging
                # to any known project was probably left behind by a deleted one
                hasProjectConfig = bool({'.classpath', 'nbproject'} & set(filenames))
                if hasProjectConfig:
                    # only prompt when attached to a terminal; otherwise delete
                    if not sys.stdout.isatty() or ask_yes_no(dirpath + ' looks like a removed project -- delete it', 'n'):
                        shutil.rmtree(dirpath)
                        log('Deleted ' + dirpath)
def javadoc(args, parser=None, docDir='javadoc', includeDeps=True, stdDoclet=True):
"""generate javadoc for some/all Java projects"""
parser = ArgumentParser(prog='mx javadoc') if parser is None else parser
parser.add_argument('-d', '--base', action='store', help='base directory for output')
parser.add_argument('--unified', action='store_true', help='put javadoc in a single directory instead of one per project')
parser.add_argument('--force', action='store_true', help='(re)generate javadoc even if package-list file exists')
parser.add_argument('--projects', action='store', help='comma separated projects to process (omit to process all projects)')
parser.add_argument('--Wapi', action='store_true', dest='warnAPI', help='show warnings about using internal APIs')
parser.add_argument('--argfile', action='store', help='name of file containing extra javadoc options')
parser.add_argument('--arg', action='append', dest='extra_args', help='extra Javadoc arguments (e.g. --arg @-use)', metavar='@<arg>', default=[])
parser.add_argument('-m', '--memory', action='store', help='-Xmx value to pass to underlying JVM')
parser.add_argument('--packages', action='store', help='comma separated packages to process (omit to process all packages)')
parser.add_argument('--exclude-packages', action='store', help='comma separated packages to exclude')
args = parser.parse_args(args)
# build list of projects to be processed
if args.projects is not None:
candidates = [project(name) for name in args.projects.split(',')]
else:
candidates = projects_opt_limit_to_suites()
# optionally restrict packages within a project
packages = []
if args.packages is not None:
packages = [name for name in args.packages.split(',')]
exclude_packages = []
if args.exclude_packages is not None:
exclude_packages = [name for name in args.exclude_packages.split(',')]
def outDir(p):
if args.base is None:
return join(p.dir, docDir)
return join(args.base, p.name, docDir)
def check_package_list(p):
return not exists(join(outDir(p), 'package-list'))
def assess_candidate(p, projects):
if p in projects:
return False
if args.force or args.unified or check_package_list(p):
projects.append(p)
return True
return False
projects = []
for p in candidates:
if not p.native:
if includeDeps:
deps = p.all_deps([], includeLibs=False, includeSelf=False)
for d in deps:
assess_candidate(d, projects)
if not assess_candidate(p, projects):
logv('[package-list file exists - skipping {0}]'.format(p.name))
def find_packages(sourceDirs, pkgs=None):
if pkgs is None:
pkgs = set()
for sourceDir in sourceDirs:
for root, _, files in os.walk(sourceDir):
if len([name for name in files if name.endswith('.java')]) != 0:
pkg = root[len(sourceDir) + 1:].replace(os.sep, '.')
if len(packages) == 0 or pkg in packages:
if len(exclude_packages) == 0 or not pkg in exclude_packages:
pkgs.add(pkg)
return pkgs
extraArgs = [a.lstrip('@') for a in args.extra_args]
if args.argfile is not None:
extraArgs += ['@' + args.argfile]
memory = '2g'
if args.memory is not None:
memory = args.memory
memory = '-J-Xmx' + memory
if not args.unified:
for p in projects:
# The project must be built to ensure javadoc can find class files for all referenced classes
build(['--no-native', '--projects', p.name])
pkgs = find_packages(p.source_dirs(), set())
deps = p.all_deps([], includeLibs=False, includeSelf=False)
links = ['-link', 'http://docs.oracle.com/javase/' + str(p.javaCompliance.value) + '/docs/api/']
out = outDir(p)
for d in deps:
depOut = outDir(d)
links.append('-link')
links.append(os.path.relpath(depOut, out))
cp = classpath(p.name, includeSelf=True)
sp = os.pathsep.join(p.source_dirs())
overviewFile = join(p.dir, 'overview.html')
delOverviewFile = False
if not exists(overviewFile):
with open(overviewFile, 'w') as fp:
print >> fp, '<html><body>Documentation for the <code>' + p.name + '</code> project.</body></html>'
delOverviewFile = True
nowarnAPI = []
if not args.warnAPI:
nowarnAPI.append('-XDignore.symbol.file')
# windowTitle onloy applies to the standard doclet processor
windowTitle = []
if stdDoclet:
windowTitle = ['-windowtitle', p.name + ' javadoc']
try:
log('Generating {2} for {0} in {1}'.format(p.name, out, docDir))
projectJava = java(p.javaCompliance)
# Once https://bugs.openjdk.java.net/browse/JDK-8041628 is fixed,
# this should be reverted to:
# javadocExe = java().javadoc
javadocExe = projectJava.javadoc
run([javadocExe, memory,
'-XDignore.symbol.file',
'-classpath', cp,
'-quiet',
'-d', out,
'-overview', overviewFile,
'-sourcepath', sp,
'-source', str(projectJava.javaCompliance),
'-bootclasspath', projectJava.bootclasspath(),
'-extdirs', projectJava.extdirs()] +
([] if projectJava.javaCompliance < JavaCompliance('1.8') else ['-Xdoclint:none']) +
links +
extraArgs +
nowarnAPI +
windowTitle +
list(pkgs))
log('Generated {2} for {0} in {1}'.format(p.name, out, docDir))
finally:
if delOverviewFile:
os.remove(overviewFile)
else:
# The projects must be built to ensure javadoc can find class files for all referenced classes
build(['--no-native'])
pkgs = set()
sp = []
names = []
for p in projects:
find_packages(p.source_dirs(), pkgs)
sp += p.source_dirs()
names.append(p.name)
links = ['-link', 'http://docs.oracle.com/javase/' + str(java().javaCompliance.value) + '/docs/api/']
out = join(_primary_suite.dir, docDir)
if args.base is not None:
out = join(args.base, docDir)
cp = classpath()
sp = os.pathsep.join(sp)
nowarnAPI = []
if not args.warnAPI:
nowarnAPI.append('-XDignore.symbol.file')
log('Generating {2} for {0} in {1}'.format(', '.join(names), out, docDir))
run([java().javadoc, memory,
'-classpath', cp,
'-quiet',
'-d', out,
'-sourcepath', sp] +
([] if java().javaCompliance < JavaCompliance('1.8') else ['-Xdoclint:none']) +
links +
extraArgs +
nowarnAPI +
list(pkgs))
log('Generated {2} for {0} in {1}'.format(', '.join(names), out, docDir))
def site(args):
    """creates a website containing javadoc and the project dependency graph

    All content is first generated into a temporary directory; only on success
    is it moved (or copied, when --tmp was given) to the --base directory, so a
    failed run never leaves a half-built site behind.
    """
    parser = ArgumentParser(prog='site')
    parser.add_argument('-d', '--base', action='store', help='directory for generated site', required=True, metavar='<dir>')
    parser.add_argument('--tmp', action='store', help='directory to use for intermediate results', metavar='<dir>')
    parser.add_argument('--name', action='store', help='name of overall documentation', required=True, metavar='<name>')
    parser.add_argument('--overview', action='store', help='path to the overview content for overall documentation', required=True, metavar='<path>')
    parser.add_argument('--projects', action='store', help='comma separated projects to process (omit to process all projects)')
    parser.add_argument('--jd', action='append', help='extra Javadoc arguments (e.g. --jd @-use)', metavar='@<arg>', default=[])
    parser.add_argument('--exclude-packages', action='store', help='comma separated packages to exclude', metavar='<pkgs>')
    parser.add_argument('--dot-output-base', action='store', help='base file name (relative to <dir>/all) for project dependency graph .svg and .jpg files generated by dot (omit to disable dot generation)', metavar='<path>')
    parser.add_argument('--title', action='store', help='value used for -windowtitle and -doctitle javadoc args for overall documentation (default: "<name>")', metavar='<title>')
    args = parser.parse_args(args)
    args.base = os.path.abspath(args.base)
    # Work in a temp dir next to the final destination unless --tmp was given.
    tmpbase = args.tmp if args.tmp else tempfile.mkdtemp(prefix=basename(args.base) + '.', dir=dirname(args.base))
    unified = join(tmpbase, 'all')
    exclude_packages_arg = []
    if args.exclude_packages is not None:
        exclude_packages_arg = ['--exclude-packages', args.exclude_packages]
    projects = sorted_deps()
    projects_arg = []
    if args.projects is not None:
        projects_arg = ['--projects', args.projects]
        projects = [project(name) for name in args.projects.split(',')]
    # Each --jd value becomes a '--arg @<value>' pair for the javadoc command.
    extra_javadoc_args = []
    for a in args.jd:
        extra_javadoc_args.append('--arg')
        extra_javadoc_args.append('@' + a)
    try:
        # Create javadoc for each project
        javadoc(['--base', tmpbase] + exclude_packages_arg + projects_arg + extra_javadoc_args)
        # Create unified javadoc for all projects
        with open(args.overview) as fp:
            content = fp.read()
            idx = content.rfind('</body>')
            if idx != -1:
                # Inject a table linking to each project's javadoc just before
                # the closing </body> tag of the user-supplied overview page.
                args.overview = join(tmpbase, 'overview_with_projects.html')
                with open(args.overview, 'w') as fp2:
                    print >> fp2, content[0:idx]
                    print >> fp2, """<div class="contentContainer">
<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Projects table">
<caption><span>Projects</span><span class="tabEnd"> </span></caption>
<tr><th class="colFirst" scope="col">Project</th><th class="colLast" scope="col"> </th></tr>
<tbody>"""
                    color = 'row'
                    for p in projects:
                        print >> fp2, '<tr class="{1}Color"><td class="colFirst"><a href="../{0}/javadoc/index.html",target = "_top">{0}</a></td><td class="colLast"> </td></tr>'.format(p.name, color)
                        color = 'row' if color == 'alt' else 'alt'
                    print >> fp2, '</tbody></table></div>'
                    print >> fp2, content[idx:]
        title = args.title if args.title is not None else args.name
        javadoc(['--base', tmpbase,
                 '--unified',
                 '--arg', '@-windowtitle', '--arg', '@' + title,
                 '--arg', '@-doctitle', '--arg', '@' + title,
                 '--arg', '@-overview', '--arg', '@' + args.overview] + exclude_packages_arg + projects_arg + extra_javadoc_args)
        if exists(unified):
            shutil.rmtree(unified)
        os.rename(join(tmpbase, 'javadoc'), unified)
        # Generate dependency graph with Graphviz
        if args.dot_output_base is not None:
            # Sanity-check that a usable 'dot' executable is on PATH.
            dotErr = None
            try:
                if not 'version' in subprocess.check_output(['dot', '-V'], stderr=subprocess.STDOUT):
                    dotErr = 'dot -V does not print a string containing "version"'
            except subprocess.CalledProcessError as e:
                dotErr = 'error calling "dot -V": {0}'.format(e)
            except OSError as e:
                dotErr = 'error calling "dot -V": {0}'.format(e)
            if dotErr != None:
                abort('cannot generate dependency graph: ' + dotErr)
            dot = join(tmpbase, 'all', str(args.dot_output_base) + '.dot')
            svg = join(tmpbase, 'all', str(args.dot_output_base) + '.svg')
            jpg = join(tmpbase, 'all', str(args.dot_output_base) + '.jpg')
            html = join(tmpbase, 'all', str(args.dot_output_base) + '.html')
            # Emit a digraph where each node links to its project's javadoc.
            with open(dot, 'w') as fp:
                dim = len(projects)
                print >> fp, 'digraph projects {'
                print >> fp, 'rankdir=BT;'
                print >> fp, 'size = "' + str(dim) + ',' + str(dim) + '";'
                print >> fp, 'node [shape=rect, fontcolor="blue"];'
                # print >> fp, 'edge [color="green"];'
                for p in projects:
                    print >> fp, '"' + p.name + '" [URL = "../' + p.name + '/javadoc/index.html", target = "_top"]'
                    for dep in p.canonical_deps():
                        if dep in [proj.name for proj in projects]:
                            print >> fp, '"' + p.name + '" -> "' + dep + '"'
                # NOTE(review): 'depths' is computed but never used — confirm
                # whether rank constraints were intended here.
                depths = dict()
                for p in projects:
                    d = p.max_depth()
                    depths.setdefault(d, list()).append(p.name)
                print >> fp, '}'
            run(['dot', '-Tsvg', '-o' + svg, '-Tjpg', '-o' + jpg, dot])
            # Post-process generated SVG to remove title elements which most browsers
            # render as redundant (and annoying) tooltips.
            with open(svg, 'r') as fp:
                content = fp.read()
            content = re.sub('<title>.*</title>', '', content)
            content = re.sub('xlink:title="[^"]*"', '', content)
            with open(svg, 'w') as fp:
                fp.write(content)
            # Create HTML that embeds the svg file in an <object> frame
            with open(html, 'w') as fp:
                print >> fp, '<html><body><object data="{0}.svg" type="image/svg+xml"></object></body></html>'.format(args.dot_output_base)
        # Publish: replace any existing site with the freshly built one.
        if exists(args.base):
            shutil.rmtree(args.base)
        if args.tmp:
            shutil.copytree(tmpbase, args.base)
        else:
            shutil.move(tmpbase, args.base)
        print 'Created website - root is ' + join(args.base, 'all', 'index.html')
    finally:
        # Only auto-created temp dirs are cleaned up; user-supplied --tmp is kept.
        if not args.tmp and exists(tmpbase):
            shutil.rmtree(tmpbase)
def _kwArg(kwargs):
if len(kwargs) > 0:
return kwargs.pop(0)
return None
def findclass(args, logToConsole=True, matcher=lambda string, classname: string in classname):
    """find all classes matching a given substring"""
    found = []
    # Walk every classpath entry (including the boot classpath) and test each
    # .class file name against every given pattern.
    for entry, filename in classpath_walk(includeBootClasspath=True):
        if not filename.endswith('.class'):
            continue
        # Zip entries always use '/' separators; directory walks use os.sep.
        separator = '/' if isinstance(entry, zipfile.ZipFile) else os.sep
        classname = filename.replace(separator, '.')[:-len('.class')]
        for pattern in args:
            if matcher(pattern, classname):
                found.append(classname)
                if logToConsole:
                    log(classname)
    return found
def select_items(items, descriptions=None, allowMultiple=True):
    """
    Presents a command line interface for selecting one or more (if allowMultiple is true) items.

    With zero or one item there is nothing to choose, so *items* is returned
    unchanged (always a list, even when allowMultiple is False).
    Selections are entered as 1-based numbers; 0 selects all items when
    allowMultiple is True.
    """
    if len(items) <= 1:
        return items
    else:
        if allowMultiple:
            log('[0] <all>')
        for i in range(0, len(items)):
            if descriptions is None:
                log('[{0}] {1}'.format(i + 1, items[i]))
            else:
                assert len(items) == len(descriptions)
                wrapper = textwrap.TextWrapper(subsequent_indent=' ')
                log('\n'.join(wrapper.wrap('[{0}] {1} - {2}'.format(i + 1, items[i], descriptions[i]))))
        while True:
            if allowMultiple:
                s = raw_input('Enter number(s) of selection (separate multiple choices with spaces): ').split()
            else:
                s = [raw_input('Enter number of selection: ')]
            try:
                s = [int(x) for x in s]
            except:
                # Non-numeric input: report and re-prompt.
                log('Selection contains non-numeric characters: "' + ' '.join(s) + '"')
                continue
            if allowMultiple and 0 in s:
                return items
            indexes = []
            for n in s:
                if n not in range(1, len(items) + 1):
                    # NOTE(review): this 'continue' only skips the inner loop
                    # iteration, so out-of-range numbers are dropped rather
                    # than causing a re-prompt — confirm this is intended.
                    log('Invalid selection: ' + str(n))
                    continue
                else:
                    indexes.append(n - 1)
            if allowMultiple:
                return [items[i] for i in indexes]
            if len(indexes) == 1:
                return items[indexes[0]]
            return None
def exportlibs(args):
    """export libraries to an archive file

    Collects either all defined libraries (--include-all) or only those
    actually used by the projects (plus their transitive library
    dependencies), packs them — together with any .sha1 companions, source
    jars and extra files — into a zip or tar archive, and optionally writes
    SHA1/MD5 digests of the archive.
    """
    parser = ArgumentParser(prog='exportlibs')
    parser.add_argument('-b', '--base', action='store', help='base name of archive (default: libs)', default='libs', metavar='<path>')
    parser.add_argument('-a', '--include-all', action='store_true', help="include all defined libaries")
    parser.add_argument('--arc', action='store', choices=['tgz', 'tbz2', 'tar', 'zip'], default='tgz', help='the type of the archive to create')
    parser.add_argument('--no-sha1', action='store_false', dest='sha1', help='do not create SHA1 signature of archive')
    parser.add_argument('--no-md5', action='store_false', dest='md5', help='do not create MD5 signature of archive')
    parser.add_argument('--include-system-libs', action='store_true', help='include system libraries (i.e., those not downloaded from URLs)')
    parser.add_argument('extras', nargs=REMAINDER, help='extra files and directories to add to archive', metavar='files...')
    args = parser.parse_args(args)
    def createArchive(addMethod):
        # Populate the archive via addMethod, deduplicating by archive name
        # and warning when two different files would collide on one name.
        entries = {}
        def add(path, arcname):
            apath = os.path.abspath(path)
            if not entries.has_key(arcname):
                entries[arcname] = apath
                logv('[adding ' + path + ']')
                addMethod(path, arcname=arcname)
            elif entries[arcname] != apath:
                logv('[warning: ' + apath + ' collides with ' + entries[arcname] + ' as ' + arcname + ']')
            else:
                logv('[already added ' + path + ']')
        libsToExport = set()
        if args.include_all:
            for lib in _libs.itervalues():
                libsToExport.add(lib)
        else:
            def isValidLibrary(dep):
                # A library qualifies if it is downloaded from a URL, or if
                # system libraries were explicitly requested.
                if dep in _libs.iterkeys():
                    lib = _libs[dep]
                    if len(lib.urls) != 0 or args.include_system_libs:
                        return lib
                return None
            # iterate over all project dependencies and find used libraries
            for p in _projects.itervalues():
                for dep in p.deps:
                    r = isValidLibrary(dep)
                    if r:
                        libsToExport.add(r)
            # a library can have other libraries as dependency
            # (fixed-point iteration until no new libraries are added)
            size = 0
            while size != len(libsToExport):
                size = len(libsToExport)
                for lib in libsToExport.copy():
                    for dep in lib.deps:
                        r = isValidLibrary(dep)
                        if r:
                            libsToExport.add(r)
        for lib in libsToExport:
            add(lib.get_path(resolve=True), lib.path)
            if lib.sha1:
                add(lib.get_path(resolve=True) + ".sha1", lib.path + ".sha1")
            if lib.sourcePath:
                add(lib.get_source_path(resolve=True), lib.sourcePath)
            if lib.sourceSha1:
                add(lib.get_source_path(resolve=True) + ".sha1", lib.sourcePath + ".sha1")
        # Extra files/directories given on the command line are added verbatim.
        if args.extras:
            for e in args.extras:
                if os.path.isdir(e):
                    for root, _, filenames in os.walk(e):
                        for name in filenames:
                            f = join(root, name)
                            add(f, f)
                else:
                    add(e, e)
    if args.arc == 'zip':
        path = args.base + '.zip'
        with zipfile.ZipFile(path, 'w') as zf:
            createArchive(zf.write)
    else:
        # tar-based archives: 'tgz' -> .tar.gz, 'tbz2' -> .tar.bz2, 'tar' plain.
        path = args.base + '.tar'
        mode = 'w'
        if args.arc != 'tar':
            sfx = args.arc[1:]
            mode = mode + ':' + sfx
            path = path + '.' + sfx
        with tarfile.open(path, mode) as tar:
            createArchive(tar.add)
    log('created ' + path)
    def digest(enabled, path, factory, suffix):
        # Write the hex digest of the archive to '<path>.<suffix>' when enabled.
        if enabled:
            d = factory()
            with open(path, 'rb') as f:
                while True:
                    buf = f.read(4096)
                    if not buf:
                        break
                    d.update(buf)
            with open(path + '.' + suffix, 'w') as fp:
                print >> fp, d.hexdigest()
            log('created ' + path + '.' + suffix)
    digest(args.sha1, path, hashlib.sha1, 'sha1')
    digest(args.md5, path, hashlib.md5, 'md5')
def javap(args):
    """disassemble classes matching given pattern with javap

    Finds classes on the classpath whose names match any of the given
    patterns, lets the user pick from multiple matches, then runs javap on
    the selection.
    """
    javapExe = java().javap
    if not exists(javapExe):
        # Fixed message grammar ('does not exist').
        abort('The javap executable does not exist: ' + javapExe)
    candidates = findclass(args, logToConsole=False)
    if len(candidates) == 0:
        # Nothing matched: report and avoid invoking javap with no classes.
        log('no matches')
        return
    selection = select_items(candidates)
    run([javapExe, '-private', '-verbose', '-classpath', classpath()] + selection)
def show_projects(args):
    """show all projects"""
    # For each suite that defines projects, print the suite file pattern
    # followed by an indented list of its project names.
    for current_suite in suites():
        if not current_suite.projects:
            continue
        log(join(current_suite.mxDir, 'suite*.py'))
        for proj in current_suite.projects:
            log('\t' + proj.name)
def show_suites(args):
    """show all suites"""
    def _print_section(title, entries):
        # Empty sections produce no output, matching the original behavior.
        if entries:
            log('  ' + title + ':')
            for entry in entries:
                log('    ' + entry.name)
    for current_suite in suites():
        log(join(current_suite.mxDir, 'suite*.py'))
        _print_section('libraries', current_suite.libs)
        _print_section('jrelibraries', current_suite.jreLibs)
        _print_section('projects', current_suite.projects)
        _print_section('distributions', current_suite.dists)
def ask_yes_no(question, default=None):
    """Ask a yes/no question on the console and return True for 'yes'.

    question -- prompt text, without a trailing question mark
    default -- optional 'y' or 'n'; used when the user just presses enter,
               and (when stdout is not a tty) answered automatically.
               The default choice is shown upper-cased in the prompt.

    Aborts if stdout is not a tty and no default was supplied.
    """
    assert not default or default == 'y' or default == 'n'
    if not sys.stdout.isatty():
        if default:
            # Fixed: previously returned the default *string*, so a default
            # of 'n' was truthy; now returns a proper boolean.
            return default == 'y'
        else:
            abort("Can not answer '" + question + "?' if stdout is not a tty")
    questionMark = '? [yn]: '
    if default:
        questionMark = questionMark.replace(default, default.upper())
    answer = raw_input(question + questionMark) or default
    while not answer:
        answer = raw_input(question + questionMark)
    return answer.lower().startswith('y')
def add_argument(*args, **kwargs):
    """
    Defines a single command-line argument by forwarding to the global
    argument parser (same signature as ArgumentParser.add_argument).
    """
    assert _argParser is not None
    _argParser.add_argument(*args, **kwargs)
def update_commands(suite, new_commands):
    """Merge *new_commands* into the global command table for *suite*,
    warning whenever an existing command is redefined."""
    for name, entry in new_commands.iteritems():
        if name in _commands:
            warn("redefining command '" + name + "' in suite " + suite.name)
        _commands[name] = entry
def warn(msg):
    # Print a warning to stdout unless warnings are suppressed via the
    # global _warn flag.
    if _warn:
        print 'WARNING: ' + msg
# Table of commands in alphabetical order.
# Keys are command names, value are lists: [<function>, <usage msg>, <format args to doc string of function>...]
# If any of the format args are instances of Callable, then they are called with an 'env' are before being
# used in the call to str.format().
# Suite extensions should not update this table directly, but use update_commands
_commands = {
    'about': [about, ''],
    'build': [build, '[options]'],
    'checkstyle': [checkstyle, ''],
    'canonicalizeprojects': [canonicalizeprojects, ''],
    'clean': [clean, ''],
    'eclipseinit': [eclipseinit, ''],
    'eclipseformat': [eclipseformat, ''],
    'exportlibs': [exportlibs, ''],
    'findclass': [findclass, ''],
    'fsckprojects': [fsckprojects, ''],
    'help': [help_, '[command]'],
    'ideclean': [ideclean, ''],
    'ideinit': [ideinit, ''],
    'intellijinit': [intellijinit, ''],
    'archive': [_archive, '[options]'],
    'projectgraph': [projectgraph, ''],
    'pylint': [pylint, ''],
    'javap': [javap, '<class name patterns>'],
    'javadoc': [javadoc, '[options]'],
    'site': [site, '[options]'],
    'netbeansinit': [netbeansinit, ''],
    'suites': [show_suites, ''],
    'projects': [show_projects, ''],
    }
# The global command-line parser; suites add their own options via add_argument().
_argParser = ArgParser()
def _suitename(mxDir):
base = os.path.basename(mxDir)
parts = base.split('.')
# temporary workaround until mx.graal exists
if len(parts) == 1:
return 'graal'
else:
return parts[1]
def _is_suite_dir(d, mxDirName=None):
"""
Checks if d contains a suite.
If mxDirName is None, matches any suite name, otherwise checks for exactly that suite.
"""
if os.path.isdir(d):
for f in os.listdir(d):
if (mxDirName == None and (f == 'mx' or fnmatch.fnmatch(f, 'mx.*'))) or f == mxDirName:
mxDir = join(d, f)
if exists(mxDir) and isdir(mxDir) and (exists(join(mxDir, 'suite.py'))):
return mxDir
def _check_primary_suite():
    """Return the primary suite, aborting if none has been discovered."""
    if _primary_suite is None:
        abort('no primary suite found')
    return _primary_suite
def _findPrimarySuiteMxDirFrom(d):
    """ search for a suite directory upwards from 'd' """
    current = d
    while current:
        mxDir = _is_suite_dir(current)
        if mxDir is not None:
            return mxDir
        parent = dirname(current)
        if current == parent:
            # Reached the filesystem root without finding a suite.
            return None
        current = parent
    return None
def _findPrimarySuiteMxDir():
    """Locate the primary suite's mx directory.

    Resolution order: an explicitly configured path, then an upward search
    from the current working directory, finally (for backwards
    compatibility) an upward search from this file's location.
    """
    if _primary_suite_path is not None:
        # check for explicit setting
        mxDir = _is_suite_dir(_primary_suite_path)
        if mxDir is None:
            abort(_primary_suite_path + ' does not contain an mx suite')
        return mxDir
    # try current working directory first
    mxDir = _findPrimarySuiteMxDirFrom(os.getcwd())
    if mxDir is not None:
        return mxDir
    # backwards compatibility: search from path of this file
    return _findPrimarySuiteMxDirFrom(dirname(__file__))
def main():
    """Entry point: load suites, configure JDKs, then dispatch the command.

    Command names may be abbreviated to any unambiguous prefix. A non-zero
    return code from a command, a timeout (--timeout) or Ctrl-C all abort.
    """
    primarySuiteMxDir = _findPrimarySuiteMxDir()
    if primarySuiteMxDir:
        global _primary_suite
        _primary_suite = _loadSuite(primarySuiteMxDir, True)
    else:
        abort('no primary suite found')
    opts, commandAndArgs = _argParser._parse_cmd_line()
    assert _opts == opts
    # The default JDK must have the highest compliance level of all configured JDKs.
    global _java_homes
    defaultJdk = JavaConfig(opts.java_home, opts.java_dbg_port)
    _java_homes = [defaultJdk]
    if opts.extra_java_homes:
        for java_home in opts.extra_java_homes.split(os.pathsep):
            extraJdk = JavaConfig(java_home, opts.java_dbg_port)
            if extraJdk > defaultJdk:
                abort('Secondary JDK ' + extraJdk.jdk + ' has higher compliance level than default JDK ' + defaultJdk.jdk)
            _java_homes.append(extraJdk)
    for s in suites():
        s._post_init(opts)
    if len(commandAndArgs) == 0:
        _argParser.print_help()
        return
    command = commandAndArgs[0]
    command_args = commandAndArgs[1:]
    # Resolve unique command-name prefixes (e.g. 'bu' -> 'build').
    if not _commands.has_key(command):
        hits = [c for c in _commands.iterkeys() if c.startswith(command)]
        if len(hits) == 1:
            command = hits[0]
        elif len(hits) == 0:
            abort('mx: unknown command \'{0}\'\n{1}use "mx help" for more options'.format(command, _format_commands()))
        else:
            abort('mx: command \'{0}\' is ambiguous\n {1}'.format(command, ' '.join(hits)))
    c, _ = _commands[command][:2]
    def term_handler(signum, frame):
        abort(1)
    if not is_jython():
        signal.signal(signal.SIGTERM, term_handler)
    # SIGQUIT is forwarded to subprocesses (e.g. to get JVM thread dumps);
    # not available on Windows or under Jython.
    def quit_handler(signum, frame):
        _send_sigquit()
    if not is_jython() and get_os() != 'windows':
        signal.signal(signal.SIGQUIT, quit_handler)
    try:
        if opts.timeout != 0:
            def alarm_handler(signum, frame):
                abort('Command timed out after ' + str(opts.timeout) + ' seconds: ' + ' '.join(commandAndArgs))
            signal.signal(signal.SIGALRM, alarm_handler)
            signal.alarm(opts.timeout)
        retcode = c(command_args)
        if retcode is not None and retcode != 0:
            abort(retcode)
    except KeyboardInterrupt:
        # no need to show the stack trace when the user presses CTRL-C
        abort(1)
# Version of the mx tool itself.
version = VersionSpec("1.0")
# umask captured at startup (see below); None until __main__ runs.
currentUmask = None
if __name__ == '__main__':
    # rename this module as 'mx' so it is not imported twice by the commands.py modules
    sys.modules['mx'] = sys.modules.pop('__main__')
    # Capture the current umask since there's no way to query it without mutating it.
    currentUmask = os.umask(0)
    os.umask(currentUmask)
    main()
|
BunnyWei/truffle-llvmir
|
mxtool/mx.py
|
Python
|
gpl-2.0
| 222,574
|
[
"VisIt"
] |
e3b506a219b60fb99ff244d98ad3c4f2463d03882de0b4429497a58e6cc6ced2
|
# -*- coding: utf-8 -*-
#
# lin_rate_ipn_network.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Network of linear rate neurons
-----------------------------------
This script simulates an excitatory and an inhibitory population
of lin_rate_ipn neurons with delayed excitatory and instantaneous
inhibitory connections. The rate of all neurons is recorded using
a multimeter. The resulting rate for one excitatory and one
inhibitory neuron is plotted.
References
~~~~~~~~~~~
See Also
~~~~~~~~~~
:Authors:
KEYWORDS:
"""
import nest
import pylab
import numpy
###############################################################################
# Assigning the simulation parameters to variables.
dt = 0.1  # simulation resolution in ms
T = 100.0  # total simulation time in ms
###############################################################################
# Definition of the number of neurons
order = 50
NE = int(4 * order)  # number of excitatory neurons
NI = int(1 * order)  # number of inhibitory neurons
N = int(NE+NI)  # total number of neurons
###############################################################################
# Definition of the connections
d_e = 5.  # delay of excitatory connections in ms
g = 5.0  # ratio inhibitory weight/excitatory weight
epsilon = 0.1  # connection probability
w = 0.1/numpy.sqrt(N)  # excitatory connection strength (scaled with 1/sqrt(N))
KE = int(epsilon * NE)  # number of excitatory synapses per neuron (outdegree)
KI = int(epsilon * NI)  # number of inhibitory synapses per neuron (outdegree)
K_tot = int(KI + KE)  # total number of synapses per neuron
connection_rule = 'fixed_outdegree'  # connection rule
###############################################################################
# Definition of the neuron model and its neuron parameters
neuron_model = 'lin_rate_ipn'  # neuron model
neuron_params = {'linear_summation': True,
                 # type of non-linearity (not affecting linear rate models)
                 'tau': 10.0,
                 # time constant of neuronal dynamics in ms
                 'mu': 2.0,
                 # mean input
                 'sigma': 5.
                 # noise parameter
                 }
###############################################################################
# Configuration of the simulation kernel by the previously defined time
# resolution used in the simulation. Setting "print_time" to True prints
# the already processed simulation time as well as its percentage of the
# total simulation time. Waveform relaxation is disabled since the network
# uses delayed/instantaneous rate connections as configured below.
nest.ResetKernel()
nest.SetKernelStatus({"resolution": dt, "use_wfr": False,
                      "print_time": True,
                      "overwrite_files": True})
print("Building network")
###############################################################################
# Configuration of the neuron model using SetDefaults().
nest.SetDefaults(neuron_model, neuron_params)
###############################################################################
# Creation of the nodes using `Create`.
n_e = nest.Create(neuron_model, NE)
n_i = nest.Create(neuron_model, NI)
###############################################################################
# To record from the rate neurons a multimeter is created and the parameter
# `record_from` is set to `'rate'` as well as the recording interval to `dt`
mm = nest.Create('multimeter', params={'record_from': ['rate'],
                                       'interval': dt})
###############################################################################
# Specify synapse and connection dictionaries:
# Connections originating from excitatory neurons are associatated
# with a delay d (rate_connection_delayed).
# Connections originating from inhibitory neurons are not associatated
# with a delay (rate_connection_instantaneous).
syn_e = {'weight': w, 'delay': d_e, 'model': 'rate_connection_delayed'}
syn_i = {'weight': -g*w, 'model': 'rate_connection_instantaneous'}
conn_e = {'rule': connection_rule, 'outdegree': KE}
conn_i = {'rule': connection_rule, 'outdegree': KI}
###############################################################################
# Connect rate units.
# NOTE(review): the cross-population calls pair conn_i with syn_e (E->I) and
# conn_e with syn_i (I->E), i.e. the outdegree is taken from the opposite
# population's dict — confirm this pairing is intended.
nest.Connect(n_e, n_e, conn_e, syn_e)
nest.Connect(n_i, n_i, conn_i, syn_i)
nest.Connect(n_e, n_i, conn_i, syn_e)
nest.Connect(n_i, n_e, conn_e, syn_i)
###############################################################################
# Connect recording device to rate units
nest.Connect(mm, n_e+n_i)
###############################################################################
# Simulate the network
nest.Simulate(T)
###############################################################################
# Plot rates of one excitatory and one inhibitory neuron
data = nest.GetStatus(mm)[0]['events']
rate_ex = data['rate'][numpy.where(data['senders'] == n_e[0])]
rate_in = data['rate'][numpy.where(data['senders'] == n_i[0])]
times = data['times'][numpy.where(data['senders'] == n_e[0])]
pylab.figure()
pylab.plot(times, rate_ex, label='excitatory')
pylab.plot(times, rate_in, label='inhibitory')
pylab.xlabel('time (ms)')
pylab.ylabel('rate (a.u.)')
pylab.show()
|
terhorstd/nest-simulator
|
pynest/examples/lin_rate_ipn_network.py
|
Python
|
gpl-2.0
| 5,743
|
[
"NEURON"
] |
0943a3ef0d190d28589850a124cc4f97647b6a95685bc93cbf2fb15f14018c7f
|
# import to process args
import sys
import os
import math
import json
try:
import argparse
except ImportError:
# since Python 2.6 and earlier don't have argparse, we simply provide
# the source for the same as _argparse and we use it instead.
import _argparse as argparse
# import annotations
from autobahn.wamp import exportRpc
# import paraview modules.
from paraview import simple, servermanager
from paraview.web import protocols, wamp
from vtk.web import server
# Setup global variables
timesteps = []         # dataset timesteps (not used in the code visible here — confirm)
currentTimeIndex = 0   # index into timesteps
view = None            # the shared render view, created by initView()
dataPath = None        # root directory of the data to serve (set from CLI args)
authKey = None         # WAMP authentication secret (set from CLI args)
def initView(width, height):
    """Create the global render view with the given pixel size, a white
    background and black orientation-axes labels."""
    global view
    view = simple.GetRenderView()
    simple.Render()
    view.ViewSize = [width, height]
    view.Background = [1, 1, 1]
    view.OrientationAxesLabelColor = [0, 0, 0]
    print 'View created successfully (%dx%d)' % (width, height)
# This class defines the exposed RPC methods for the midas application
class MidasApp(wamp.PVServerProtocol):
    # Camera distance from the data set, as a multiple of its largest dimension.
    DISTANCE_FACTOR = 2.0
    # Line/point width used when rendering surface contour slices.
    CONTOUR_LINE_WIDTH = 2.0
    # Per-visualization state, populated once the volume data is loaded.
    colorArrayName = None  # name of the scalar array used for coloring
    sof = None
    lookupTable = None
    srcObj = None          # the main volume data source
    bounds = None          # world-space bounds of the volume
    extent = None          # index-space extent of the volume
    center = None          # world-space center of the volume
    centerExtent = None    # index-space center of the volume
    rep = None             # display representation of the main source
    scalarRange = None     # (min, max) of the color array
    imageData = None       # point-data array information of the source
    subgrid = None
    sliceMode = None       # 'XY Plane', 'XZ Plane' or 'YZ Plane'
    meshSlice = None       # current slice filter through the surfaces, if any
    sphere = None          # marker sphere, if shown
    # NOTE(review): mutable class attribute shared by all instances — this
    # assumes a single MidasApp instance per process; confirm.
    surfaces = []
    def initialize(self):
        """Register the standard ParaViewWeb protocols used by the app and
        apply the authentication secret from the global authKey."""
        global authKey
        # Bring used components
        self.registerVtkWebProtocol(protocols.ParaViewWebMouseHandler())
        self.registerVtkWebProtocol(protocols.ParaViewWebViewPort())
        self.registerVtkWebProtocol(protocols.ParaViewWebViewPortImageDelivery())
        self.registerVtkWebProtocol(protocols.ParaViewWebViewPortGeometryDelivery())
        # Update authentication key to use
        self.updateSecret(authKey)
    def _extractVolumeImageData(self):
        """Cache metadata of the main volume source: first point-data array
        (used for coloring), its scalar range, the data bounds/extent and
        their midpoints. Raises if the source has no data arrays."""
        if(self.srcObj.GetPointDataInformation().GetNumberOfArrays() == 0):
            print 'Error: no data information arrays'
            raise Exception('No data information arrays')
        # Color by the first point-data array.
        self.imageData = self.srcObj.GetPointDataInformation().GetArray(0)
        self.colorArrayName = self.imageData.Name
        self.scalarRange = self.imageData.GetRange()
        self.bounds = self.srcObj.GetDataInformation().DataInformation.GetBounds()
        self.extent = self.srcObj.GetDataInformation().DataInformation.GetExtent()
        # Midpoint in world coordinates ...
        self.center = [(self.bounds[1] + self.bounds[0]) / 2.0,
                       (self.bounds[3] + self.bounds[2]) / 2.0,
                       (self.bounds[5] + self.bounds[4]) / 2.0]
        # ... and in index (extent) coordinates.
        self.centerExtent = [(self.extent[1] + self.extent[0]) / 2.0,
                             (self.extent[3] + self.extent[2]) / 2.0,
                             (self.extent[5] + self.extent[4]) / 2.0]
def _loadSurfaceWithProperties(self, fullpath):
if not fullpath.endswith('.properties'):
surfaceObj = simple.OpenDataFile(fullpath)
self.surfaces.append(surfaceObj)
rep = simple.Show()
rep.Representation = 'Surface'
# If a corresponding properties file exists, load it in
# and apply the properties to the surface
if os.path.isfile(fullpath+'.properties'):
with open(fullpath+'.properties') as f:
lines = f.readlines()
for line in lines:
(property, value) = line.split(' ', 1)
if hasattr(rep, property):
value = json.loads(value.strip())
setattr(rep, property, value)
else:
print 'Skipping invalid property %s' % property
print 'Loaded surface %s into scene' % fullpath
    def _sliceSurfaces(self, slice):
        """Replace the rendered surfaces with contour slices at the given
        slice position, using the plane implied by self.sliceMode.

        NOTE(review): self.meshSlice is reassigned inside the loop, so only
        the slice filter of the *last* surface is retained (and deleted on the
        next call); earlier filters are never explicitly deleted — confirm
        whether this is intended when multiple surfaces are loaded.
        """
        if self.meshSlice is not None:
            simple.Delete(self.meshSlice)
            self.meshSlice = None
        for surface in self.surfaces:
            rep = simple.Show(surface)
            # The slice origin is scaled by the cosine of the surface's
            # rotation about the axis orthogonal to the slicing plane.
            if self.sliceMode == 'XY Plane':
                origin = [0.0, 0.0, math.cos(math.radians(rep.Orientation[2]))*slice]
                normal = [0.0, 0.0, 1.0]
            elif self.sliceMode == 'XZ Plane':
                origin = [0.0, math.cos(math.radians(rep.Orientation[1]))*slice, 0.0]
                normal = [0.0, 1.0, 0.0]
            else:
                origin = [math.cos(math.radians(rep.Orientation[0]))*slice, 0.0, 0.0]
                normal = [1.0, 0.0, 0.0]
            # Hide the full surface and show only its cut through the plane.
            simple.Hide(surface)
            self.meshSlice = simple.Slice(Input=surface, SliceType='Plane')
            simple.SetActiveSource(self.srcObj)
            self.meshSlice.SliceOffsetValues = [0.0]
            self.meshSlice.SliceType = 'Plane'
            self.meshSlice.SliceType.Origin = origin
            self.meshSlice.SliceType.Normal = normal
            meshDataRep = simple.Show(self.meshSlice)
            meshDataRep.Representation = 'Points'
            meshDataRep.LineWidth = self.CONTOUR_LINE_WIDTH
            meshDataRep.PointSize = self.CONTOUR_LINE_WIDTH
            # Match the slice's color and orientation to the source surface.
            meshDataRep.AmbientColor = rep.DiffuseColor
            meshDataRep.Orientation = rep.Orientation
        simple.SetActiveSource(self.srcObj)
    @exportRpc("loadData")
    def loadData(self):
        """RPC: load the main volume from '<dataPath>/main' (first file found)
        and all surfaces from '<dataPath>/surfaces', then reset the camera.

        Raises if the 'main' directory does not exist. The main volume is
        hidden initially; surfaces are shown by _loadSurfaceWithProperties.
        """
        global dataPath
        mainpath = os.path.join(dataPath, "main")
        if os.path.isdir(mainpath):
            files = os.listdir(mainpath)
            for file in files:
                fullpath = os.path.join(mainpath, file)
                if os.path.isfile(fullpath):
                    self.srcObj = simple.OpenDataFile(fullpath)
                    simple.SetActiveSource(self.srcObj)
                    self.rep = simple.GetDisplayProperties()
                    simple.Hide()
                    print 'Loaded %s into scene' % fullpath
        else:
            print 'Error: '+mainpath+' does not exist\n'
            raise Exception("The main directory does not exist")
        surfacespath = os.path.join(dataPath, "surfaces")
        files = os.listdir(surfacespath)
        for file in files:
            fullpath = os.path.join(surfacespath, file)
            if os.path.isfile(fullpath):
                self._loadSurfaceWithProperties(fullpath)
        simple.SetActiveSource(self.srcObj)
        simple.ResetCamera()
        simple.Render()
    @exportRpc("showSphere")
    def showSphere(self, params):
        """RPC: place a small marker sphere at params['point'] with color
        params['color'], replacing any previously shown sphere."""
        if self.sphere is not None:
            simple.Delete(self.sphere)
        # Size the marker relative to the largest dimension of the volume.
        maxDim = max(self.bounds[1] - self.bounds[0],
                     self.bounds[3] - self.bounds[2],
                     self.bounds[5] - self.bounds[4])
        self.sphere = simple.Sphere()
        self.sphere.Radius = maxDim / 100.0
        self.sphere.Center = params['point']
        rep = simple.Show()
        rep.Representation = 'Surface'
        rep.DiffuseColor = params['color']
        simple.SetActiveSource(self.srcObj)
    @exportRpc("cameraPreset")
    def cameraPreset(self, direction):
        """RPC: move the camera to an axis-aligned preset view.

        direction -- one of '+x', '-x', '+y', '-y', '+z', '-z'; the camera is
        placed DISTANCE_FACTOR * (largest volume dimension) away from the
        volume center along that axis, looking at the center.
        """
        global view
        (midx, midy, midz) = (self.center[0], self.center[1], self.center[2])
        (lenx, leny, lenz) = (self.bounds[1] - self.bounds[0],
                              self.bounds[3] - self.bounds[2],
                              self.bounds[5] - self.bounds[4])
        maxDim = max(lenx, leny, lenz)
        view.CameraFocalPoint = self.center
        view.CenterOfRotation = self.center
        view.CameraViewUp = [0, 0, 1]
        if(direction == '+x'):
            view.CameraPosition = [midx - self.DISTANCE_FACTOR * maxDim, midy, midz]
        elif(direction == '-x'):
            view.CameraPosition = [midx + self.DISTANCE_FACTOR * maxDim, midy, midz]
        elif(direction == '+y'):
            view.CameraPosition = [midx, midy - self.DISTANCE_FACTOR * maxDim, midz]
        elif(direction == '-y'):
            view.CameraPosition = [midx, midy + self.DISTANCE_FACTOR * maxDim, midz]
        elif(direction == '+z'):
            view.CameraPosition = [midx, midy, midz - self.DISTANCE_FACTOR * maxDim]
            # Looking along z: use +y as the up vector instead of +z.
            view.CameraViewUp = [0, 1, 0]
        elif(direction == '-z'):
            view.CameraPosition = [midx, midy, midz + self.DISTANCE_FACTOR * maxDim]
            view.CameraViewUp = [0, 1, 0]
        else:
            print "Invalid preset direction: %s" % direction
        simple.Render()
    @exportRpc("setSliceMode")
    def setSliceMode(self, sliceMode):
        """Switch slicing to 'XY Plane', 'XZ Plane' or 'YZ Plane'.

        Sets up an orthographic camera looking down the slice normal, moves
        the representation to the center slice of that axis, and re-slices
        the loaded surfaces.  Returns {'slice', 'maxSlices',
        'cameraParallelScale'} for the client UI.  Raises on an unknown mode.
        """
        global view
        # RPC strings arrive as unicode under Python 2; the ParaView
        # properties expect plain byte strings.
        if type(sliceMode) is unicode:
            sliceMode = sliceMode.encode('ascii', 'ignore')
        if(sliceMode == 'XY Plane'):
            sliceNum = int(math.floor(self.centerExtent[2]))
            cameraParallelScale = max(self.bounds[1] - self.bounds[0],
                                      self.bounds[3] - self.bounds[2]) / 2.0
            # Camera sits just outside the data on the slicing axis.
            cameraPosition = [self.center[0], self.center[1], self.bounds[4] - 10]
            maxSlices = self.extent[5] - self.extent[4]
            cameraViewUp = [0, -1, 0]
        elif(sliceMode == 'XZ Plane'):
            sliceNum = int(math.floor(self.centerExtent[1]))
            cameraParallelScale = max(self.bounds[1] - self.bounds[0],
                                      self.bounds[5] - self.bounds[4]) / 2.0
            maxSlices = self.extent[3] - self.extent[2]
            cameraPosition = [self.center[0], self.bounds[3] + 10, self.center[2]]
            cameraViewUp = [0, 0, 1]
        elif(sliceMode == 'YZ Plane'):
            sliceNum = int(math.floor(self.centerExtent[0]))
            cameraParallelScale = max(self.bounds[3] - self.bounds[2],
                                      self.bounds[5] - self.bounds[4]) / 2.0
            maxSlices = self.extent[1] - self.extent[0]
            cameraPosition = [self.bounds[1] + 10, self.center[1], self.center[2]]
            cameraViewUp = [0, 0, 1]
        else:
            print 'Error: invalid slice mode %s' % sliceMode
            raise Exception('Error: invalid slice mode %s' % sliceMode)
        view.CameraParallelScale = cameraParallelScale
        view.CameraViewUp = cameraViewUp
        view.CameraPosition = cameraPosition
        self.rep.Slice = sliceNum
        self.rep.SliceMode = sliceMode
        self.sliceMode = sliceMode
        # TODO calculate slice plane origin for surfaces!!!
        self._sliceSurfaces(sliceNum)
        simple.Render()
        return {'slice': sliceNum,
                'maxSlices': maxSlices,
                'cameraParallelScale': cameraParallelScale}
    @exportRpc("changeSlice")
    def changeSlice(self, sliceNum):
        """Move the current slice plane to index sliceNum and re-render.

        Also re-cuts the loaded surface meshes at the new slice position.
        """
        self.rep.Slice = sliceNum
        self._sliceSurfaces(sliceNum)
        simple.Render()
    @exportRpc("changeWindow")
    def changeWindow(self, points):
        """Replace the color lookup table's RGB points (window/level) and re-render."""
        self.lookupTable.RGBPoints = points
        simple.Render()
    @exportRpc("changeBgColor")
    def changeBgColor(self, rgb):
        """Set the view background color and keep axis labels readable.

        Labels flip to black on light backgrounds (mean channel > 0.5) and
        white on dark ones.
        """
        global view
        view.Background = rgb
        if (sum(rgb) / 3.0) > 0.5:
            view.OrientationAxesLabelColor = [0, 0, 0]
        else:
            view.OrientationAxesLabelColor = [1, 1, 1]
        simple.Render()
    @exportRpc("surfaceRender")
    def surfaceRender(self):
        """Show the main dataset as a surface and frame it from the +x preset.

        Returns {'bounds', 'nbPoints', 'nbCells'} so the client can display
        dataset statistics.
        """
        self.bounds = self.srcObj.GetDataInformation().DataInformation.GetBounds()
        self.center = [(self.bounds[1] + self.bounds[0]) / 2.0,
                       (self.bounds[3] + self.bounds[2]) / 2.0,
                       (self.bounds[5] + self.bounds[4]) / 2.0]
        self.cameraPreset('+x')
        self.rep.Representation = 'Surface'
        nbPoints = self.srcObj.GetDataInformation().GetNumberOfPoints()
        nbCells = self.srcObj.GetDataInformation().GetNumberOfCells()
        simple.Show()
        simple.Render()
        return {'bounds': self.bounds,
                'nbPoints': nbPoints,
                'nbCells': nbCells}
    @exportRpc("toggleEdges")
    def toggleEdges(self):
        """Toggle between 'Surface' and 'Surface With Edges' and re-render."""
        if self.rep.Representation == 'Surface':
            self.rep.Representation = 'Surface With Edges'
        else:
            self.rep.Representation = 'Surface'
        simple.Render()
    @exportRpc("sliceRender")
    def sliceRender(self, sliceMode):
        """Enter 2D slice mode using a grayscale lookup table.

        Extracts the volume image data, configures an orthographic camera via
        setSliceMode, and returns the scalar range, bounds, extent, center
        and the slice info dict for the client.
        """
        global view
        self._extractVolumeImageData()
        (midx, midy, midz) = (self.center[0], self.center[1], self.center[2])
        (lenx, leny, lenz) = (self.bounds[1] - self.bounds[0],
                              self.bounds[3] - self.bounds[2],
                              self.bounds[5] - self.bounds[4])
        maxDim = max(lenx, leny, lenz)
        # Adjust camera properties appropriately
        view.Background = [0, 0, 0]
        view.CameraFocalPoint = self.center
        view.CenterOfRotation = self.center
        view.CenterAxesVisibility = False
        view.OrientationAxesVisibility = False
        view.CameraParallelProjection = True
        # Configure data representation: linear black-to-white ramp over the
        # full scalar range.
        rgbPoints = [self.scalarRange[0], 0, 0, 0, self.scalarRange[1], 1, 1, 1]
        self.lookupTable = simple.GetLookupTableForArray(self.colorArrayName, 1)
        self.lookupTable.RGBPoints = rgbPoints
        self.lookupTable.ScalarRangeInitialized = 1.0
        self.lookupTable.ColorSpace = 0 # 0 corresponds to RGB
        self.rep.ColorArrayName = self.colorArrayName
        self.rep.Representation = 'Slice'
        self.rep.LookupTable = self.lookupTable
        sliceInfo = self.setSliceMode(sliceMode)
        simple.Show()
        simple.Render()
        return {'scalarRange': self.scalarRange,
                'bounds': self.bounds,
                'extent': self.extent,
                'center': self.center,
                'sliceInfo': sliceInfo}
    @exportRpc("volumeRender")
    def volumeRender(self):
        """Enter volume-rendering mode with default transfer functions.

        Builds a grayscale color map and a linear opacity ramp over the
        scalar range, positions the camera on the -x side of the data, and
        returns the transfer-function points so the client can edit them.
        """
        global view
        self._extractVolumeImageData()
        (lenx, leny, lenz) = (self.bounds[1] - self.bounds[0],
                              self.bounds[3] - self.bounds[2],
                              self.bounds[5] - self.bounds[4])
        (midx, midy, midz) = (self.center[0], self.center[1], self.center[2])
        maxDim = max(lenx, leny, lenz)
        # Adjust camera properties appropriately
        view.CameraFocalPoint = self.center
        view.CenterOfRotation = self.center
        view.CameraPosition = [midx - self.DISTANCE_FACTOR * maxDim, midy, midz]
        view.CameraViewUp = [0, 0, 1]
        # Create RGB transfer function
        rgbPoints = [self.scalarRange[0], 0, 0, 0, self.scalarRange[1], 1, 1, 1]
        self.lookupTable = simple.GetLookupTableForArray(self.colorArrayName, 1)
        self.lookupTable.RGBPoints = rgbPoints
        self.lookupTable.ScalarRangeInitialized = 1.0
        self.lookupTable.ColorSpace = 0 # 0 corresponds to RGB
        # Create opacity transfer function (each point is value, opacity,
        # midpoint, sharpness)
        sofPoints = [self.scalarRange[0], 0, 0.5, 0,
                     self.scalarRange[1], 1, 0.5, 0]
        self.sof = simple.CreatePiecewiseFunction()
        self.sof.Points = sofPoints
        self.rep.ColorArrayName = self.colorArrayName
        self.rep.Representation = 'Volume'
        self.rep.ScalarOpacityFunction = self.sof
        self.rep.LookupTable = self.lookupTable
        simple.Show()
        simple.Render()
        return {'scalarRange': self.scalarRange,
                'bounds': self.bounds,
                'extent': self.extent,
                'sofPoints': sofPoints,
                'rgbPoints': rgbPoints}
    @exportRpc("updateSof")
    def updateSof(self, sofPoints):
        """Replace the scalar opacity function with new points and re-render."""
        self.sof = simple.CreatePiecewiseFunction()
        self.sof.Points = sofPoints
        self.rep.ScalarOpacityFunction = self.sof
        simple.Render()
    @exportRpc("updateColorMap")
    def updateColorMap(self, rgbPoints):
        """Replace the color lookup table's RGB points.

        NOTE(review): unlike the sibling RPCs this does not call
        simple.Render(); presumably the client triggers the render — confirm.
        """
        self.lookupTable = simple.GetLookupTableForArray(self.colorArrayName, 1)
        self.lookupTable.RGBPoints = rgbPoints
        self.rep.LookupTable = self.lookupTable
    @exportRpc("extractSubgrid")
    def extractSubgrid(self, bounds):
        """Volume-render only the VOI given by bounds (extent-index sextuple).

        Replaces any previous subgrid filter, hides the full dataset, and
        reuses the current opacity function and lookup table.
        """
        if(self.subgrid is not None):
            simple.Delete(self.subgrid)
        simple.SetActiveSource(self.srcObj)
        self.subgrid = simple.ExtractSubset()
        self.subgrid.VOI = bounds
        simple.SetActiveSource(self.subgrid)
        self.rep = simple.Show()
        self.rep.ScalarOpacityFunction = self.sof
        self.rep.ColorArrayName = self.colorArrayName
        self.rep.Representation = 'Volume'
        self.rep.SelectionPointFieldDataArrayName = self.colorArrayName
        self.rep.LookupTable = self.lookupTable
        simple.Hide(self.srcObj)
        simple.SetActiveSource(self.subgrid)
        simple.Render()
if __name__ == "__main__":
    # Command-line entry point: parse server + application options, create
    # the render view at the requested size, then serve the MidasApp
    # protocol over ParaViewWeb.
    parser = argparse.ArgumentParser(
        description="Midas+ParaViewWeb application")
    server.add_arguments(parser)
    parser.add_argument("--data-dir", default=os.getcwd(),
                        help="path to data directory", dest="path")
    parser.add_argument("--width", default=575,
                        help="width of the render window", dest="width")
    parser.add_argument("--height", default=575,
                        help="height of the render window", dest="height")
    args = parser.parse_args()
    dataPath = args.path
    authKey = args.authKey
    width = args.width
    height = args.height
    initView(width, height)
    server.start_webserver(options=args, protocol=MidasApp)
|
jcfr/Midas
|
modules/pvw/apps/midas.py
|
Python
|
apache-2.0
| 15,740
|
[
"ParaView",
"VTK"
] |
4050684293dce10e65c53eefdab626a9fc022d96ccd2aff8b673f24f63d01e9c
|
from owmeta_core.command import OWM
def save_schema():
    """Persist the class registry for every owmeta schema module.

    Opens an OWM project handle in the current directory and asks it to
    save each module that declares owmeta data types, including the
    ``data_trans`` translators.
    """
    schema_modules = (
        'owmeta.neuron',
        'owmeta.worm',
        'owmeta.biology',
        'owmeta.cell',
        'owmeta.channel',
        'owmeta.channelworm',
        'owmeta.connection',
        'owmeta.document',
        'owmeta.evidence',
        'owmeta.experiment',
        'owmeta.muscle',
        'owmeta.network',
        'owmeta.plot',
        'owmeta.website',
        'owmeta.data_trans.bibtex',
        'owmeta.data_trans.connections',
        'owmeta.data_trans.context_merge',
        'owmeta.data_trans.data_with_evidence_ds',
        'owmeta.data_trans.neuron_data',
        'owmeta.data_trans.wormatlas',
        'owmeta.data_trans.wormbase',
        'owmeta.sources',
        'owmeta.translators',
    )
    owm = OWM()
    for module_name in schema_modules:
        owm.save(module_name)
# Script entry point: build and persist the schema when run directly.
if __name__ == '__main__':
    save_schema()
|
openworm/PyOpenWorm
|
save_schema.py
|
Python
|
mit
| 1,131
|
[
"NEURON"
] |
efb5b60dd8da26578df55919e0bec8c9708af7ce942f8292a3384ec8751018e5
|
"""Test input validation functionality."""
import pytest
from qmflows import cp2k, run
from qmflows.type_hints import PathLike
from scm import plams
from nanoqm.common import read_cell_parameters_as_array
from nanoqm.workflows.input_validation import process_input
from .utilsTest import PATH_TEST, cp2k_available, remove_files
def test_input_validation() -> None:
    """Test the templates and keywords completion."""
    path_input = PATH_TEST / "input_test_pbe0.yml"
    dict_input = process_input(path_input, "derivative_couplings")
    sett = dict_input['cp2k_general_settings']['cp2k_settings_guess']
    scale_x = sett.specific.cp2k.force_eval.dft.xc.xc_functional.pbe.scale_x
    # PBE0 mixes in 25% exact exchange, so the template must scale the PBE
    # exchange contribution to exactly 0.75.
    assert abs(scale_x - 0.75) < 1e-16
@pytest.mark.skipif(
    not cp2k_available(), reason="CP2K is not install or not loaded")
def test_call_cp2k_pbe() -> None:
    """Check if the input for a PBE cp2k job is valid."""
    try:
        results = run_plams(PATH_TEST / "input_test_pbe.yml")
        # run_plams returns the job energy; None would mean the job failed.
        assert (results is not None)
    finally:
        # Always clean up scratch files, even when the CP2K job fails.
        remove_files()
def run_plams(path_input: PathLike) -> float:
    """Call Plams to run a CP2K job.

    Builds the CP2K guess settings from the validated workflow input,
    optionally injects cell parameters read from an auxiliary file, and
    returns the computed total energy.
    """
    # create settings
    dict_input = process_input(path_input, "derivative_couplings")
    sett = dict_input['cp2k_general_settings']['cp2k_settings_guess']
    # adjust the cell parameters
    file_cell_parameters = dict_input['cp2k_general_settings'].get("file_cell_parameters")
    if file_cell_parameters is not None:
        array_cell_parameters = read_cell_parameters_as_array(file_cell_parameters)[1]
        # Columns 2:11 of the first row hold the 9 lattice-vector components;
        # reshape them into the 3x3 cell matrix CP2K expects.
        sett.cell_parameters = array_cell_parameters[0, 2:11].reshape(3, 3).tolist()
    # Run the job
    job = cp2k(sett, plams.Molecule(PATH_TEST / "C.xyz"))
    # run() blocks until the job finishes and resolves the energy property.
    return run(job.energy)
|
SCM-NV/qmworks-namd
|
test/test_input_validation.py
|
Python
|
mit
| 1,752
|
[
"CP2K"
] |
3844f1a33f5547f92da5f46852b04e0b41932dcc12a7af08c703bd4ae88ca420
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Measure the excess chemical potential of a charged WCA fluid via Widom's
insertion method.
"""
import numpy as np
import argparse
import espressomd
from espressomd import reaction_ensemble
from espressomd import electrostatics
required_features = ["WCA", "P3M"]
espressomd.assert_features(required_features)
parser = argparse.ArgumentParser(epilog=__doc__)
parser.add_argument('cs_bulk', type=float,
help="bulk salt concentration [1/sigma^3]")
args = parser.parse_args()
# System parameters
#############################################################
cs_bulk = args.cs_bulk
# N0 ion pairs at the requested bulk concentration fixes the cubic box size.
N0 = 70
box_l = (N0 / cs_bulk)**(1.0 / 3.0)
# Integration parameters
#############################################################
system = espressomd.System(box_l=[box_l, box_l, box_l])
system.set_random_state_PRNG()
#system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
np.random.seed(seed=system.seed)
system.time_step = 0.01
system.cell_system.skin = 0.4
temperature = 1.0
#############################################################
#  Setup System                                             #
#############################################################
# Particle setup
#############################################################
# type 0 = HA
# type 1 = A-
# type 2 = H+
# Place N0 anions (type 1, q=-1) and N0 cations (type 2, q=+1) at random
# positions; ids are contiguous so both species are easy to address later.
for i in range(N0):
    system.part.add(id=i, pos=np.random.random(3) * system.box_l, type=1, q=-1)
for i in range(N0, 2 * N0):
    system.part.add(id=i, pos=np.random.random(3) * system.box_l, type=2, q=1)
wca_eps = 1.0
wca_sig = 1.0
# Purely repulsive WCA interaction between every pair of species.
types = [0, 1, 2]
for type_1 in types:
    for type_2 in types:
        system.non_bonded_inter[type_1, type_2].wca.set_params(
            epsilon=wca_eps, sigma=wca_sig)
# Long-range electrostatics via P3M; tuned automatically to the accuracy goal.
p3m = electrostatics.P3M(prefactor=2.0, accuracy=1e-3)
system.actors.add(p3m)
p3m_params = p3m.get_params()
for key, value in p3m_params.items():
    print("{} = {}".format(key, value))
# Warmup
#############################################################
# warmup integration (steepest descent)
warm_steps = 20
warm_n_times = 20
min_dist = 0.9 * wca_sig
# minimize energy using min_dist as the convergence criterion
system.integrator.set_steepest_descent(f_max=0, gamma=1e-3,
                                       max_displacement=0.01)
i = 0
while system.analysis.min_dist() < min_dist and i < warm_n_times:
    print("minimization: {:+.2e}".format(system.analysis.energy()["total"]))
    system.integrator.run(warm_steps)
    i += 1
print("minimization: {:+.2e}".format(system.analysis.energy()["total"]))
print()
# Switch back to velocity-Verlet MD for production sampling.
system.integrator.set_vv()
# activate thermostat
system.thermostat.set_langevin(kT=temperature, gamma=1.0, seed=42)
widom = reaction_ensemble.WidomInsertion(
    temperature=temperature, seed=77)
# add insertion reaction: trial insertion of one neutral A-/H+ ion pair.
insertion_reaction_id = 0
widom.add_reaction(reactant_types=[],
                   reactant_coefficients=[], product_types=[1, 2],
                   product_coefficients=[1, 1], default_charges={1: -1, 2: +1})
print(widom.get_status())
system.setup_type_map([0, 1, 2])
# Alternate blocks of Widom trial insertions with MD integration so the
# sampled configurations are decorrelated.
n_iterations = 100
for i in range(n_iterations):
    for j in range(50):
        widom.measure_excess_chemical_potential(insertion_reaction_id)
    system.integrator.run(steps=500)
    if i % 20 == 0:
        print("mu_ex_pair ({:.4f}, +/- {:.4f})".format(
            *widom.measure_excess_chemical_potential(insertion_reaction_id)))
        print("HA", system.number_of_particles(type=0), "A-",
              system.number_of_particles(type=1), "H+",
              system.number_of_particles(type=2))
print("excess chemical potential for an ion pair ",
      widom.measure_excess_chemical_potential(insertion_reaction_id))
|
psci2195/espresso-ffans
|
samples/widom_insertion.py
|
Python
|
gpl-3.0
| 4,371
|
[
"ESPResSo"
] |
0a9320ab19594a01cd0b42b43df2b9364b412c74b59bb292fbb1cd7bfebb2909
|
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'tylin'
__version__ = '2.0'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# annToMask - Convert segmentation in an annotation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>annToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
import copy
import itertools
from pycocotools import mask as maskUtils
import os
from collections import defaultdict
import sys
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
from urllib import urlretrieve
elif PYTHON_VERSION == 3:
from urllib.request import urlretrieve
def _isArrayLike(obj):
return hasattr(obj, '__iter__') and hasattr(obj, '__len__')
class COCO:
    def __init__(self, annotation_file=None):
        """
        Constructor of Microsoft COCO helper class for reading and visualizing annotations.
        :param annotation_file (str): location of annotation file
        :param image_folder (str): location to the folder that hosts images.
        :return:
        """
        # load dataset; all lookup tables start empty so an argument-less
        # COCO() can be used as a shell (see loadRes).
        self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
        self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
        if not annotation_file == None:
            print('loading annotations into memory...')
            tic = time.time()
            dataset = json.load(open(annotation_file, 'r'))
            assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))
            print('Done (t={:0.2f}s)'.format(time.time()- tic))
            self.dataset = dataset
            # Build the id-based indices from the raw annotation lists.
            self.createIndex()
def createIndex(self):
# create index
print('creating index...')
anns, cats, imgs = {}, {}, {}
imgToAnns,catToImgs = defaultdict(list),defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print('{}: {}'.format(key, value))
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if _isArrayLike(catNms) else [catNms]
supNms = supNms if _isArrayLike(supNms) else [supNms]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if _isArrayLike(ids):
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if _isArrayLike(ids):
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if _isArrayLike(ids):
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
    def showAnns(self, anns):
        """
        Display the specified annotations on the current matplotlib axes.
        :param anns (array of object): annotations to display
        :return: None (0 when anns is empty)
        """
        if len(anns) == 0:
            return 0
        # Decide the annotation flavor from the first record.
        if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
            datasetType = 'instances'
        elif 'caption' in anns[0]:
            datasetType = 'captions'
        else:
            raise Exception('datasetType not supported')
        if datasetType == 'instances':
            ax = plt.gca()
            ax.set_autoscale_on(False)
            polygons = []
            color = []
            for ann in anns:
                # Random bright color per annotation (channels in [0.4, 1.0]).
                c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
                if 'segmentation' in ann:
                    if type(ann['segmentation']) == list:
                        # polygon
                        for seg in ann['segmentation']:
                            poly = np.array(seg).reshape((int(len(seg)/2), 2))
                            polygons.append(Polygon(poly))
                            color.append(c)
                    else:
                        # mask
                        t = self.imgs[ann['image_id']]
                        if type(ann['segmentation']['counts']) == list:
                            rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
                        else:
                            rle = [ann['segmentation']]
                        m = maskUtils.decode(rle)
                        img = np.ones( (m.shape[0], m.shape[1], 3) )
                        if ann['iscrowd'] == 1:
                            color_mask = np.array([2.0,166.0,101.0])/255
                        if ann['iscrowd'] == 0:
                            color_mask = np.random.random((1, 3)).tolist()[0]
                        for i in range(3):
                            img[:,:,i] = color_mask[i]
                        # Overlay the mask at 50% opacity.
                        ax.imshow(np.dstack( (img, m*0.5) ))
                if 'keypoints' in ann and type(ann['keypoints']) == list:
                    # turn skeleton into zero-based index
                    sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
                    kp = np.array(ann['keypoints'])
                    # Keypoints are stored as flat (x, y, visibility) triples.
                    x = kp[0::3]
                    y = kp[1::3]
                    v = kp[2::3]
                    for sk in sks:
                        if np.all(v[sk]>0):
                            plt.plot(x[sk],y[sk], linewidth=3, color=c)
                    plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
                    plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
            # Semi-transparent fill followed by a solid outline per polygon.
            p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
            ax.add_collection(p)
            p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
            ax.add_collection(p)
        elif datasetType == 'captions':
            for ann in anns:
                print(ann['caption'])
    def loadRes(self, resFile):
        """
        Load result file and return a result api object.
        :param resFile (str) : file name of result file (also accepts an
                               Nx7 ndarray or an already-parsed list)
        :return: res (obj)   : result api object sharing this COCO's images
        """
        res = COCO()
        res.dataset['images'] = [img for img in self.dataset['images']]
        print('Loading and preparing results...')
        tic = time.time()
        if type(resFile) == str: #or type(resFile) == unicode:
            anns = json.load(open(resFile))
        elif type(resFile) == np.ndarray:
            anns = self.loadNumpyAnnotations(resFile)
        else:
            anns = resFile
        assert type(anns) == list, 'results in not an array of objects'
        annsImgIds = [ann['image_id'] for ann in anns]
        # Every result must reference an image known to this COCO instance.
        assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
            'Results do not correspond to current coco set'
        if 'caption' in anns[0]:
            # Captions: keep only images that actually have results.
            imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
            res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
            for id, ann in enumerate(anns):
                ann['id'] = id+1
        elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
            # Detections: synthesize a rectangular segmentation and area.
            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
            for id, ann in enumerate(anns):
                bb = ann['bbox']
                x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
                if not 'segmentation' in ann:
                    ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
                ann['area'] = bb[2]*bb[3]
                ann['id'] = id+1
                ann['iscrowd'] = 0
        elif 'segmentation' in anns[0]:
            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
            for id, ann in enumerate(anns):
                # now only support compressed RLE format as segmentation results
                ann['area'] = maskUtils.area(ann['segmentation'])
                if not 'bbox' in ann:
                    ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
                ann['id'] = id+1
                ann['iscrowd'] = 0
        elif 'keypoints' in anns[0]:
            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
            for id, ann in enumerate(anns):
                s = ann['keypoints']
                # Flat (x, y, visibility) triples; area/bbox come from the
                # keypoint bounding rectangle.
                x = s[0::3]
                y = s[1::3]
                x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
                ann['area'] = (x1-x0)*(y1-y0)
                ann['id'] = id + 1
                ann['bbox'] = [x0,y0,x1-x0,y1-y0]
        print('DONE (t={:0.2f}s)'.format(time.time()- tic))
        res.dataset['annotations'] = anns
        res.createIndex()
        return res
    def download(self, tarDir = None, imgIds = [] ):
        '''
        Download COCO images from mscoco.org server.
        :param tarDir (str): target directory for the downloaded images
               imgIds (list): images to be downloaded (empty = all images)
        :return: None on success, -1 when no target directory was given
        '''
        if tarDir is None:
            print('Please specify target directory')
            return -1
        if len(imgIds) == 0:
            imgs = self.imgs.values()
        else:
            imgs = self.loadImgs(imgIds)
        N = len(imgs)
        if not os.path.exists(tarDir):
            os.makedirs(tarDir)
        for i, img in enumerate(imgs):
            tic = time.time()
            fname = os.path.join(tarDir, img['file_name'])
            # Skip files that already exist so the download is resumable.
            if not os.path.exists(fname):
                urlretrieve(img['coco_url'], fname)
            print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
"""
print('Converting ndarray to lists...')
assert(type(data) == np.ndarray)
print(data.shape)
assert(data.shape[1] == 7)
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print('{}/{}'.format(i,N))
ann += [{
'image_id' : int(data[i, 0]),
'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
'score' : data[i, 5],
'category_id': int(data[i, 6]),
}]
return ann
    def annToRLE(self, ann):
        """
        Convert annotation which can be polygons, uncompressed RLE to RLE.
        :return: rle (dict) : run-length encoding of the annotation's mask
        """
        # Image height/width are needed to rasterize polygon segmentations.
        t = self.imgs[ann['image_id']]
        h, w = t['height'], t['width']
        segm = ann['segmentation']
        if type(segm) == list:
            # polygon -- a single object might consist of multiple parts
            # we merge all parts into one mask rle code
            rles = maskUtils.frPyObjects(segm, h, w)
            rle = maskUtils.merge(rles)
        elif type(segm['counts']) == list:
            # uncompressed RLE
            rle = maskUtils.frPyObjects(segm, h, w)
        else:
            # rle
            rle = ann['segmentation']
        return rle
    def annToMask(self, ann):
        """
        Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
        :return: binary mask (numpy 2D array)
        """
        # Normalize to RLE first, then decode to a dense 0/1 mask.
        rle = self.annToRLE(ann)
        m = maskUtils.decode(rle)
        return m
|
mlperf/training_results_v0.5
|
v0.5.0/nvidia/submission/code/single_stage_detector/pytorch/coco.py
|
Python
|
apache-2.0
| 19,010
|
[
"VisIt"
] |
744f8d3a3e4ffabaac7029b22c0aff68e70f8f64a5e308a7d97173b98c9ef8b5
|
#GLM2 bench
import os, sys, time, csv, re, requests, string
sys.path.append('../py/')
sys.path.extend(['.','..'])
import h2o_cmd, h2o, h2o_hosts, h2o_browse as h2b, h2o_import as h2i, h2o_rf, h2o_jobs
from pprint import pprint
# Column headers for the benchmark CSV written by doGLM2.
csv_header = ('h2o_build','nMachines','nJVMs','Xmx/JVM','dataset','nTrainRows','nTestRows','nCols','nPredictors','trainParseWallTime','nfolds','family','glm2BuildTime','testParseWallTime','nIterations','AUC','AIC','AverageError')
# Dataset catalog: per-dataset training sets at 1x/10x/100x replication plus
# a fixed test set.
files = {'Airlines' : {'train': ('AirlinesTrain1x', 'AirlinesTrain10x', 'AirlinesTrain100x'), 'test' : 'AirlinesTest'},
         'AllBedrooms' : {'train': ('AllBedroomsTrain1x', 'AllBedroomsTrain10x', 'AllBedroomsTrain100x'), 'test' : 'AllBedroomsTest'},
        }
build = ""      # h2o build identifier, used in the benchmark output path
debug = False   # when True, results go under bench/debug instead of bench
json = ""       # logger-config token passed to startloggers.sh
                # NOTE(review): this name would shadow the stdlib json module
                # if it were ever imported here — confirm intent.
def doGLM2(f, folderPath, family, lambda_, alpha, nfolds, y, x, testFilehex, row, case_mode, case_val):
    """Run one GLM2 benchmark on an H2O cloud and append the metrics to CSV.

    Parses the training file ``f`` from the 'home-0xdiag-datasets' bucket,
    builds a GLM2 model against it, scrapes timing/quality metrics, and
    writes one row to benchmarks/<build>/glm2bench.csv.

    Args:
        f: training dataset name (e.g. 'AirlinesTrain10x').
        folderPath: dataset folder ('Airlines' or 'AllBedrooms').
        family: GLM family ('binomial' or 'gaussian').
        lambda_, alpha: GLM regularization parameters.
        nfolds: number of cross-validation folds.
        y: response column name.
        x: comma-separated columns to ignore (or None).
        testFilehex: hex key of the already-parsed test frame.
        row: dict of metrics accumulated so far (mutated and written out).
        case_mode, case_val: currently unused (kept for call compatibility;
            the corresponding params are commented out below).
    """
    debug = False
    bench = "bench"
    if debug:
        print "DOING GLM2 DEBUG"
        bench = "bench/debug"
    date = '-'.join([str(z) for z in list(time.localtime())][0:3])
    overallWallStart = time.time()
    pre = ""
    if debug: pre = "DEBUG"
    glm2benchcsv = 'benchmarks/'+build+'/'+pre+'glm2bench.csv'
    # Write the header row only on first creation; otherwise append.
    if not os.path.exists(glm2benchcsv):
        output = open(glm2benchcsv,'w')
        output.write(','.join(csv_header)+'\n')
    else:
        output = open(glm2benchcsv,'a')
    csvWrt = csv.DictWriter(output, fieldnames=csv_header, restval=None,
                            dialect='excel', extrasaction='ignore',delimiter=',')
    try:
        java_heap_GB = h2o.nodes[0].java_heap_GB
        importFolderPath = bench+"/" + folderPath
        # 1x datasets are single CSV files; 10x/100x live in a directory and
        # are imported via a glob pattern.
        if (f in ['AirlinesTrain1x','AllBedroomsTrain1x', 'AllBedroomsTrain10x', 'AllBedroomsTrain100x']):
            csvPathname = importFolderPath + "/" + f + '.csv'
        else:
            #print "Not doing Airlines10x and 100x for Parse2, regex seems to be broken..."
            #continue
            csvPathname = importFolderPath + "/" + f + "/*"
        hex_key = f + '.hex'
        hK = folderPath + "Header.csv"
        headerPathname = importFolderPath + "/" + hK
        # Import the header file first so its key can be referenced by the parse.
        h2i.import_only(bucket='home-0xdiag-datasets', path=headerPathname)
        headerKey = h2i.find_key(hK)
        trainParseWallStart = time.time()
        if f in (['AirlinesTrain10x', 'AirlinesTrain100x']): h2o.beta_features = False #regex parsing acting weird when not using browser, use VA -> FVEC converter
        parseResult = h2i.import_parse(bucket = 'home-0xdiag-datasets',
                                       path = csvPathname,
                                       schema = 'local',
                                       hex_key = hex_key,
                                       header = 1,
                                       header_from_file = headerKey,
                                       separator = 44,
                                       timeoutSecs = 7200,
                                       retryDelaySecs = 5,
                                       pollTimeoutSecs = 7200,
                                       noPoll = True,
                                       doSummary = False
                                       )
        # Parse was started with noPoll=True; block here until it completes.
        h2o_jobs.pollWaitJobs(timeoutSecs=7200, pollTimeoutSecs=7200, retryDelaySecs=5)
        parseResult = {'destination_key':hex_key}
        parseWallTime = time.time() - trainParseWallStart
        print "Parsing training file took ", parseWallTime ," seconds."
        # Switch (back) to FVec/beta APIs for inspect and GLM2.
        h2o.beta_features = True
        inspect_train = h2o.nodes[0].inspect(hex_key, timeoutSecs=7200)
        inspect_test = h2o.nodes[0].inspect(testFilehex, timeoutSecs=7200)
        nMachines = 1 if len(h2o_hosts.hosts) is 0 else len(h2o_hosts.hosts)
        row.update( {'h2o_build' : build,
                     'nMachines' : nMachines,
                     'Xmx/JVM' : java_heap_GB,
                     'nJVMs' : len(h2o.nodes),
                     'dataset' : f,
                     'nTrainRows' : inspect_train['numRows'],
                     'nTestRows' : inspect_test['numRows'],
                     'nCols' : inspect_train['numCols'],
                     'trainParseWallTime' : parseWallTime,
                     'nfolds' : nfolds,
                     'family' : family,
                    })
        params = {'vresponse' : y,
                  'ignored_cols' : x,
                  'family' : family,
                  'lambda' : lambda_,
                  'alpha' : alpha,
                  'n_folds' : nfolds,
                  #'case_mode' : case_mode,
                  #'case_val' : case_val,
                  'destination_key' : "GLM("+f+")",
                 }
        h2o.beta_features = True
        kwargs = params.copy()
        glmStart = time.time()
        glm = h2o_cmd.runGLM(parseResult = parseResult, timeoutSecs=1800, noPoll=True, **kwargs)
        h2o_jobs.pollWaitJobs(timeoutSecs=7200, pollTimeoutSecs=7200, retryDelaySecs=5)
        glmTime = time.time() - glmStart
        # Stop the external resource loggers started by the benchmark harness.
        cmd = 'bash startloggers.sh ' + json + ' stop_'
        os.system(cmd)
        #glm = h2o.nodes[0].inspect("GLM("+f+")")
        row.update( {'glm2BuildTime' : glmTime,
                     #'AverageErrorOver10Folds' : glm['glm_model']['validations'][0]['err'],
                    })
        #if "Bedrooms" in f:
        #print "Sleeping 30"
        #time.sleep(30)
        glmView = h2o_cmd.runGLMView(modelKey = "GLM("+f+")", timeoutSecs=380)
        #glmScoreStart = time.time()
        #glmScore = h2o_cmd.runGLMScore(key=testFilehex,model_key=params['destination_key'])
        #scoreTime = time.time() - glmScoreStart
        row.update( {'AIC' : glmView['glm_model']['validation']['aic'],
                     'nIterations' : glmView['glm_model']['iteration'],
                     'nPredictors' : len(glmView['glm_model']['beta']),
                     #'AverageError' : glmView['glm_model']['validation']['avg_err'],
                    })
        if family == "binomial":
            #Scrape html of 2/glmmodelview to get best threshold,
            #then, multiply by 100 and cast to int...
            #then ask for the coresponding CM from _cms inside glmView
            url = 'http://%s:%d/2/GLMModelView.html?_modelKey=%s' % (h2o.nodes[0].http_addr, 55555, 'GLM('+f+')')
            r = requests.get(url).text
            p1 = re.compile('threshold[:<>/a-z]*[0-9]\.[0-9]*')
            p2 = re.compile('[0-9]\.[0-9]*')
            best = int(float(p2.search(p1.search(r).group()).group()) * 100)
            best_cm = glmView['glm_model']['validation']['_cms'][best]['_arr']
            # Average error = (false positives + false negatives) / total count.
            avg_err = 1.0*(best_cm[0][1] + best_cm[1][0] + 0.0) / (sum([i for sublist in best_cm for i in sublist]))
            row.update( {#'scoreTime' : scoreTime,
                         'AUC' : glmView['glm_model']['validation']['auc'],
                         'AverageError' : avg_err,
                        })
        else:
            row.update( {#'scoreTime' : scoreTime,
                         'AUC' : 'NA',
                         'AverageError' : glmView['glm_model']['validation']['avg_err'],
                        })
        csvWrt.writerow(row)
    finally:
        output.close()
if __name__ == '__main__':
    # Command line (consumed right-to-left): ... <json config> <build> <debug> <dataset>
    # where <dataset> is one of Air1x/Air10x/Air100x/AllB1x/AllB10x/AllB100x.
    dat = sys.argv.pop(-1)
    debug = sys.argv.pop(-1)
    build = sys.argv.pop(-1)
    json = sys.argv[-1].split('/')[-1]
    h2o.parse_our_args()
    h2o_hosts.build_cloud_with_hosts()
    fp = 'Airlines' if 'Air' in dat else 'AllBedrooms'
    h2o.beta_features = True
    # Map the dataset tag to the actual training file name.
    if dat == 'Air1x' : fs = files['Airlines']['train'][0]
    if dat == 'Air10x' : fs = files['Airlines']['train'][1]
    if dat == 'Air100x' : fs = files['Airlines']['train'][2]
    if dat == 'AllB1x' : fs = files['AllBedrooms']['train'][0]
    if dat == 'AllB10x' : fs = files['AllBedrooms']['train'][1]
    if dat == 'AllB100x' : fs = files['AllBedrooms']['train'][2]
    debug = False
    bench = "bench"
    if debug:
        bench = "bench/debug"
    if fp == "Airlines":
        #AIRLINES
        # Parse the fixed Airlines test file, then run a binomial GLM2 benchmark.
        airlinesTestParseStart = time.time()
        hK = "AirlinesHeader.csv"
        headerPathname = bench+"/Airlines" + "/" + hK
        h2i.import_only(bucket = 'home-0xdiag-datasets', path=headerPathname)
        headerKey = h2i.find_key(hK)
        testFile = h2i.import_parse(bucket='home-0xdiag-datasets', path=bench+'/Airlines/AirlinesTest.csv', schema='local', hex_key="atest.hex", header=1, header_from_file=headerKey, separator=44, noPoll = True, doSummary = False)
        h2o_jobs.pollWaitJobs(timeoutSecs=7200, pollTimeoutSecs=7200, retryDelaySecs=5)
        elapsedAirlinesTestParse = time.time() - airlinesTestParseStart
        row = {'testParseWallTime' : elapsedAirlinesTestParse}
        x = None #"DepTime,ArrTime,FlightNum,TailNum,ActualElapsedTime,AirTime,ArrDelay,DepDelay,TaxiIn,TaxiOut,Cancelled,CancellationCode,Diverted,CarrierDelay,WeatherDelay,NASDelay,SecurityDelay,LateAircraftDelay,IsArrDelayed" #columns to be ignored
        doGLM2(fs, fp,
               family = 'binomial',
               lambda_ = 1E-5,
               alpha = 0.5,
               nfolds = 10,
               y = 'IsDepDelayed',
               x = x,
               testFilehex = 'atest.hex',
               row = row,
               case_mode = "%3D",
               case_val = 1.0
              )
    if fp == "AllBedrooms":
        #ALLBEDROOMS
        # Parse the fixed AllBedrooms test file, then run a gaussian GLM2 benchmark.
        allBedroomsTestParseStart = time.time()
        hK = "AllBedroomsHeader.csv"
        headerPathname = bench+"/AllBedrooms" + "/" + hK
        h2i.import_only(bucket='home-0xdiag-datasets', path=headerPathname)
        headerKey = h2i.find_key(hK)
        testFile = h2i.import_parse(bucket='home-0xdiag-datasets', path=bench+'/AllBedrooms/AllBedroomsTest.csv', schema='local', hex_key="allBtest.hex", header=1, header_from_file=headerKey, separator=44, noPoll = True, doSummary = False)
        h2o_jobs.pollWaitJobs(timeoutSecs=7200, pollTimeoutSecs=7200, retryDelaySecs=5)
        elapsedAllBedroomsTestParse = time.time() - allBedroomsTestParseStart
        row = {'testParseWallTime' : elapsedAllBedroomsTestParse}
        x = "county,place,Rent_Type,mcd" #columns to be ignored
        doGLM2(fs, fp,
               family = 'gaussian',
               lambda_ = 1E-4,
               alpha = 0.75,
               nfolds = 10,
               y = 'medrent',
               x = x,
               testFilehex = 'allBtest.hex',
               row = row,
               case_mode = "n/a",
               case_val = 0.0
              )
    h2o.tear_down_cloud()
|
janezhango/BigDataMachineLearning
|
bench/BMscripts/glm2Bench.py
|
Python
|
apache-2.0
| 11,359
|
[
"Gaussian"
] |
86b0d3f1934bf2d38f7b868fd6c0517587f53e179fb57f6bc992bc4b5110eb70
|
#pylint: disable=missing-docstring
####################################################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
####################################################################################################
#pylint: enable=missing-docstring
import os
import logging
import pandas
import jinja2
from markdown.util import etree
from markdown.inlinepatterns import Pattern
import MooseDocs
from MooseDocs.common import nodes
from MooseMarkdownExtension import MooseMarkdownExtension
from MooseMarkdownCommon import MooseMarkdownCommon
LOG = logging.getLogger(__name__)
class GoogleChartExtension(MooseMarkdownExtension):
    """
    Markdown extension that adds Google chart support.
    """
    @staticmethod
    def defaultConfig():
        """GoogleChartExtension configuration (nothing beyond the base class)."""
        return MooseMarkdownExtension.defaultConfig()

    def extendMarkdown(self, md, md_globals):
        """
        Registers the chart inline patterns with the Markdown instance.
        """
        md.registerExtension(self)
        config = self.getConfigs()

        # Register each chart pattern at the '_begin' priority, preserving
        # the registration order (line, scatter, diff-scatter).
        chart_patterns = (('moose-line-chart', LineChart),
                          ('moose-scatter-chart', ScatterChart),
                          ('moose-diff-scatter-chart', ScatterDiffChart))
        for name, pattern_class in chart_patterns:
            md.inlinePatterns.add(name,
                                  pattern_class(markdown_instance=md, **config),
                                  '_begin')
def makeExtension(*args, **kwargs):  # pylint: disable=invalid-name
    """Factory hook used by the markdown package to build a GoogleChartExtension."""
    extension = GoogleChartExtension(*args, **kwargs)
    return extension
class GoogleChartBase(MooseMarkdownCommon, Pattern):
    """
    Base class for !chart command.
    """
    TEMPLATE = None  # subclasses set this to the jinja2 template basename (without '.js')

    @staticmethod
    def defaultSettings():
        """GoogleChartBase settings."""
        settings = MooseMarkdownCommon.defaultSettings()
        settings['caption'] = (None, "The caption to place after the float heading and number.")
        settings['counter'] = ('figure', "The name of global counter to utilized for numbering.")
        settings['csv'] = (None, "The name of the CSV file to load.")
        return settings

    def __init__(self, markdown_instance=None, **kwargs):
        MooseMarkdownCommon.__init__(self, **kwargs)
        # The pattern matches "!chart <TEMPLATE> key=value ...".
        regex = r'^!chart\s+(?P<template>{})(?:$|\s+)(?P<settings>.*)'.format(self.TEMPLATE)
        Pattern.__init__(self, regex, markdown_instance)
        self._csv = dict() # CSV DataFrame cache
        self._count = 0  # counter used to build unique chart <div> ids
        self._status = None  # pending error message; None means "no error"

    def setStatus(self, message, *args):
        """
        Set the error status message, this should be used in the arguments() and globals() methods.
        """
        self._status = message.format(*args)

    def clearStatus(self):
        """
        Remove any existing error status messages.
        """
        self._status = None

    def arguments(self, settings):
        """
        Method for modifying the template arguments to be applied to the jinja2 templates engine.
        By default all the "settings" from the class are returned as template arguments.

        Args:
            settings[dict]: The class object settings.
        """
        if settings['csv'] is None:
            # A missing 'csv' setting is reported via the status mechanism so
            # handleMatch() can render an error element instead of a chart.
            if isinstance(self.markdown.current, nodes.FileNodeBase):
                self.setStatus("The 'csv' setting is required in {}.",
                               self.markdown.current.filename)
            else:
                self.setStatus("The 'csv' setting is required.")
            settings['data_frame'] = pandas.DataFrame()
        else:
            settings['data_frame'] = self._readCSV(os.path.join(MooseDocs.ROOT_DIR,
                                                                settings['csv']))
        return settings

    def globals(self, env):
        """
        Defines global template functions. (virtual)

        Args:
            env[jinja2.Environment]: Template object for adding global functions.
        """
        pass

    def handleMatch(self, match):
        """
        Creates chart from a chart template.
        """
        # Extract settings and template
        template = match.group('template') + '.js'
        settings = self.getSettings(match.group('settings'), legacy_style=False)

        # Create a float element
        div = self.createFloatElement(settings)

        # Create 'chart_id' for linking JS with <div>
        settings['chart_id'] = 'moose-google-{}-chart-{}'.format(self.TEMPLATE, int(self._count))
        self._count += 1

        # Paths to Google Chart template (repository templates first, then cwd)
        paths = [os.path.join(MooseDocs.MOOSE_DIR, 'docs', 'templates', 'gchart'),
                 os.path.join(os.getcwd(), 'templates', 'gchart')]

        # Apply the arguments to the template
        self.clearStatus()
        env = jinja2.Environment(loader=jinja2.FileSystemLoader(paths))
        self.globals(env)
        template = env.get_template(template)
        complete = template.render(**self.arguments(settings))
        # If arguments()/globals() reported a problem, render an error box
        # in place of the chart.
        if self._status is not None:
            return self.createErrorElement(self._status, title="Google Chart Creation Error",
                                           error=False)

        # Create the <script> tag; the rendered JS is stashed so markdown
        # post-processing leaves it untouched.
        script = etree.SubElement(div, 'script')
        script.set('type', 'text/javascript')
        script.text = self.markdown.htmlStash.store(complete, safe=True)

        # Add the <div> to be replaced with the chart
        el = etree.Element('div')
        el.set('id', settings['chart_id'])
        div.insert(0, el)
        return div

    def _readCSV(self, filename):
        """
        Read the CSV data into a pandas DataFrame.
        """
        # Cache per-filename; a failed read reports status and returns an
        # empty frame without poisoning the cache.
        if self._csv.get(filename, None) is None:
            try:
                self._csv[filename] = pandas.read_csv(filename)
            except IOError:
                if isinstance(self.markdown.current, nodes.FileNodeBase):
                    self.setStatus("Failed to read CSV file '{}' in chart command of {}.",
                                   filename, self.markdown.current.filename)
                else:
                    self.setStatus("Failed to read CSV file '{}' in chart command.", filename)
                return pandas.DataFrame()
        return self._csv[filename]
class ColumnChartBase(GoogleChartBase):
    """
    Base class for column based chart types (e.g., 'line', 'scatter').
    """
    @staticmethod
    def defaultSettings():
        """Settings shared by all column-based charts."""
        settings = GoogleChartBase.defaultSettings()
        for key, value in (
                ('columns', ('', "A comma separated list of names defining the columns from the "
                                 "the CSV to extract for plotting in the chart.")),
                ('column_names', ('', "A comma separated list of names to associate with each "
                                      "column, the number of names must match the number of "
                                      "columns.")),
                ('title', ('', "The chart title.")),
                ('subtitle', ('', "The chart sub-title.")),
                ('chart_width', (900, "The Google chart width.")),
                ('chart_height', (400, "The Google chart height."))):
            settings[key] = value
        return settings

    def arguments(self, settings):
        """
        Convert the comma-separated 'columns'/'column_names' settings into lists.
        """
        settings = super(ColumnChartBase, self).arguments(settings)

        columns = [entry.strip() for entry in settings['columns'].split(',')]
        settings['columns'] = columns

        # Default the display names to the column names themselves; a
        # mismatched length is an error and also falls back to the columns.
        names = settings['column_names']
        if names:
            names = [entry.strip() for entry in names.split(',')]
            if len(names) != len(columns):
                LOG.error("The 'column_names' list must be the same length as 'columns'.")
                names = columns
        else:
            names = columns
        settings['column_names'] = names
        return settings
class LineChart(ColumnChartBase):
    """
    Creates a Google line chart from CSV data.
    """
    TEMPLATE = 'line'  # rendered with the 'line.js' jinja2 template
class ScatterChart(ColumnChartBase):
    """
    Creates a Google scatter chart from CSV data.
    """
    TEMPLATE = 'scatter'  # rendered with the 'scatter.js' jinja2 template

    @staticmethod
    def defaultSettings():
        """Adds axis title/tick settings to the column-chart defaults."""
        settings = ColumnChartBase.defaultSettings()
        for key, value in (
                ('vaxis_title', ('y', "The vertical y-axis title.")),
                ('haxis_title', ('x', "The horizontal x-axis title.")),
                ('vaxis_ticks', (None, "The vertical x-axis tick marks (default: auto)")),
                ('haxis_ticks', (None, "The vertical x-axis tick marks (default: auto)"))):
            settings[key] = value
        return settings
class ScatterDiffChart(ScatterChart):
    """
    Creates a Google scatter diff chart comparing CSV data against a gold file.
    """
    TEMPLATE = 'diffscatter'  # rendered with the 'diffscatter.js' jinja2 template

    @staticmethod
    def defaultSettings():
        """Adds the 'gold' comparison-file setting to the scatter defaults."""
        settings = ScatterChart.defaultSettings()
        settings['gold'] = ('', "The gold file to use for comparison, by default the file provided "
                                "in the 'csv' setting is used but with a gold directory prefix.")
        return settings

    def arguments(self, settings):
        """
        Define template arguments for the diff scatter chart.
        """
        settings = super(ScatterDiffChart, self).arguments(settings)

        # Default gold file: the csv path with a 'gold' directory inserted.
        if not settings['gold']:
            directory, filename = os.path.split(settings['csv'])
            settings['gold'] = os.path.join(directory, 'gold', filename)

        gold_path = os.path.join(MooseDocs.ROOT_DIR, settings['gold'])
        settings['gold_data_frame'] = self._readCSV(gold_path)
        if settings['gold_data_frame'].empty:
            self.setStatus("The gold file ({}) does not exist or does not contain data.",
                           settings['gold'])
        return settings
|
yipenggao/moose
|
python/MooseDocs/extensions/gchart.py
|
Python
|
lgpl-2.1
| 11,198
|
[
"MOOSE"
] |
59b1e3408a89b34708b80e5675a8077b1f61c97f1b349b5eb7c6065bfa0292d0
|
#!/usr/bin/env python2.6
# Zeckviz IRC bot
# Copyright (C) 2011 Bruno Rahle
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from irc.ircbot import IrcBot
from spoilbot.models import SpoilerBot
from django.core.exceptions import MiddlewareNotUsed
import random
class IrcMiddleware(object):
    """This sucks big time! If you want to start the bot, after you start the
    server, you need to visit the website.
    """
    def __init__(self):
        """Creates the IrcBot and reports that the bot is not used.
        """
        # NOTE(review): `self.bots` is probed as an *instance* attribute, so on
        # a fresh instance the AttributeError branch always runs and the bots
        # are rebuilt on every construction. If one-time construction across
        # instances was intended, a class-level attribute would be needed --
        # TODO confirm the intent.
        try:
            self.bots
        except AttributeError:
            self.bots = [
                SpoilerBot(
                    host='vilma.hsin.hr',
                    port=6667,
                    channels=['#zadaci'],
                    nick='SpoilerBot',
                    identity='SpoilerBot',
                    real_name='I like to spoil things!',
                    owner='brahle'),
            ]
        # Signal Django that this "middleware" should not stay in the request
        # pipeline; it exists only for its construction side effect.
        raise MiddlewareNotUsed()
def main():
    """Entry point when run as a script: construct the IRC bots.

    Bug fix: the original body called ``StartMiddleware()``, a name that is
    not defined anywhere in this module, so running the script always died
    with a NameError. The only middleware defined here is ``IrcMiddleware``,
    whose constructor builds the bots and then deliberately raises
    ``MiddlewareNotUsed`` as a signal to Django -- so instantiate it and
    swallow that specific, expected exception.
    """
    try:
        IrcMiddleware()
    except MiddlewareNotUsed:
        # Expected: raised unconditionally after the bots are constructed.
        pass


if __name__ == '__main__':
    main()
|
brahle/I-Rcbot
|
irc/ircstart.py
|
Python
|
agpl-3.0
| 1,673
|
[
"VisIt"
] |
7f5d53819af50643eb2de114282156f5cb79d8ad900a18107f31096a41235b5c
|
"""
Conformer generation.
"""
import numpy as np
from typing import Any, List, Optional
from deepchem.utils.typing import RDKitMol
class ConformerGenerator(object):
  """
  Generate molecule conformers.

  Notes
  -----
  Procedure
  1. Generate a pool of conformers.
  2. Minimize conformers.
  3. Prune conformers using an RMSD threshold.

  Note that pruning is done _after_ minimization, which differs from the
  protocol described in the references [1]_ [2]_.

  References
  ----------
  .. [1] http://rdkit.org/docs/GettingStartedInPython.html#working-with-3d-molecules
  .. [2] http://pubs.acs.org/doi/full/10.1021/ci2004658

  Notes
  -----
  This class requires RDKit to be installed.
  """

  def __init__(self,
               max_conformers: int = 1,
               rmsd_threshold: float = 0.5,
               force_field: str = 'uff',
               pool_multiplier: int = 10):
    """
    Parameters
    ----------
    max_conformers: int, optional (default 1)
      Maximum number of conformers to generate (after pruning).
    rmsd_threshold: float, optional (default 0.5)
      RMSD threshold for pruning conformers. If None or negative, no
      pruning is performed.
    force_field: str, optional (default 'uff')
      Force field to use for conformer energy calculation and
      minimization. Options are 'uff', 'mmff94', and 'mmff94s'.
    pool_multiplier: int, optional (default 10)
      Factor to multiply by max_conformers to generate the initial
      conformer pool. Since conformers are pruned after energy
      minimization, increasing the size of the pool increases the chance
      of identifying max_conformers unique conformers.
    """
    self.max_conformers = max_conformers
    # Normalize "no pruning" (None or negative) to the sentinel -1.
    if rmsd_threshold is None or rmsd_threshold < 0:
      rmsd_threshold = -1.
    self.rmsd_threshold = rmsd_threshold
    self.force_field = force_field
    self.pool_multiplier = pool_multiplier

  def __call__(self, mol: RDKitMol) -> RDKitMol:
    """
    Generate conformers for a molecule.

    Parameters
    ----------
    mol: rdkit.Chem.rdchem.Mol
      RDKit Mol object

    Returns
    -------
    mol: rdkit.Chem.rdchem.Mol
      A new RDKit Mol object containing the chosen conformers, sorted by
      increasing energy.
    """
    return self.generate_conformers(mol)

  def generate_conformers(self, mol: RDKitMol) -> RDKitMol:
    """
    Generate conformers for a molecule.

    This function returns a copy of the original molecule with embedded
    conformers.

    Parameters
    ----------
    mol: rdkit.Chem.rdchem.Mol
      RDKit Mol object

    Returns
    -------
    mol: rdkit.Chem.rdchem.Mol
      A new RDKit Mol object containing the chosen conformers, sorted by
      increasing energy.
    """
    # initial embedding
    mol = self.embed_molecule(mol)
    if not mol.GetNumConformers():
      msg = 'No conformers generated for molecule'
      if mol.HasProp('_Name'):
        name = mol.GetProp('_Name')
        msg += ' "{}".'.format(name)
      else:
        msg += '.'
      raise RuntimeError(msg)

    # minimization and pruning
    self.minimize_conformers(mol)
    mol = self.prune_conformers(mol)
    return mol

  def embed_molecule(self, mol: RDKitMol) -> RDKitMol:
    """
    Generate conformers, possibly with pruning.

    Parameters
    ----------
    mol: rdkit.Chem.rdchem.Mol
      RDKit Mol object

    Returns
    -------
    mol: rdkit.Chem.rdchem.Mol
      RDKit Mol object with embedded multiple conformers.
    """
    try:
      from rdkit import Chem
      from rdkit.Chem import AllChem
    except ModuleNotFoundError:
      raise ImportError("This function requires RDKit to be installed.")

    mol = Chem.AddHs(mol)  # add hydrogens
    # Oversample the pool; pruning happens later (pruneRmsThresh=-1 disables
    # RDKit's own embedding-time pruning).
    n_confs = self.max_conformers * self.pool_multiplier
    AllChem.EmbedMultipleConfs(mol, numConfs=n_confs, pruneRmsThresh=-1.)
    return mol

  def get_molecule_force_field(self,
                               mol: RDKitMol,
                               conf_id: Optional[int] = None,
                               **kwargs) -> Any:
    """
    Get a force field for a molecule.

    Parameters
    ----------
    mol: rdkit.Chem.rdchem.Mol
      RDKit Mol object with embedded conformers.
    conf_id: int, optional
      ID of the conformer to associate with the force field.
    kwargs: dict, optional
      Keyword arguments for force field constructor.

    Returns
    -------
    ff: rdkit.ForceField.rdForceField.ForceField
      RDKit force field instance for a molecule.
    """
    try:
      from rdkit.Chem import AllChem
    except ModuleNotFoundError:
      raise ImportError("This function requires RDKit to be installed.")

    if self.force_field == 'uff':
      ff = AllChem.UFFGetMoleculeForceField(mol, confId=conf_id, **kwargs)
    elif self.force_field.startswith('mmff'):
      # 'mmff' prefix covers both 'mmff94' and 'mmff94s' variants.
      AllChem.MMFFSanitizeMolecule(mol)
      mmff_props = AllChem.MMFFGetMoleculeProperties(
          mol, mmffVariant=self.force_field)
      ff = AllChem.MMFFGetMoleculeForceField(
          mol, mmff_props, confId=conf_id, **kwargs)
    else:
      raise ValueError("Invalid force_field " +
                       "'{}'.".format(self.force_field))
    return ff

  def minimize_conformers(self, mol: RDKitMol) -> None:
    """
    Minimize molecule conformers.

    Parameters
    ----------
    mol: rdkit.Chem.rdchem.Mol
      RDKit Mol object with embedded conformers.
    """
    for conf in mol.GetConformers():
      ff = self.get_molecule_force_field(mol, conf_id=conf.GetId())
      ff.Minimize()

  def get_conformer_energies(self, mol: RDKitMol) -> np.ndarray:
    """
    Calculate conformer energies.

    Parameters
    ----------
    mol: rdkit.Chem.rdchem.Mol
      RDKit Mol object with embedded conformers.

    Returns
    -------
    energies : np.ndarray
      Minimized conformer energies.
    """
    energies = []
    for conf in mol.GetConformers():
      ff = self.get_molecule_force_field(mol, conf_id=conf.GetId())
      energy = ff.CalcEnergy()
      energies.append(energy)
    energies = np.asarray(energies, dtype=float)
    return energies

  def prune_conformers(self, mol: RDKitMol) -> RDKitMol:
    """
    Prune conformers from a molecule using an RMSD threshold, starting
    with the lowest energy conformer.

    Parameters
    ----------
    mol: rdkit.Chem.rdchem.Mol
      RDKit Mol object

    Returns
    -------
    new_mol: rdkit.Chem.rdchem.Mol
      A new rdkit.Chem.rdchem.Mol containing the chosen conformers, sorted by
      increasing energy.
    """
    try:
      from rdkit import Chem
    except ModuleNotFoundError:
      raise ImportError("This function requires RDKit to be installed.")

    # Nothing to prune when pruning is disabled or there is <= 1 conformer.
    if self.rmsd_threshold < 0 or mol.GetNumConformers() <= 1:
      return mol
    energies = self.get_conformer_energies(mol)
    rmsd = self.get_conformer_rmsd(mol)

    sort = np.argsort(energies)  # sort by increasing energy
    keep: List[int] = []  # indices (into sort order) of conformers to keep
    discard = []
    for i in sort:
      # always keep lowest-energy conformer
      if len(keep) == 0:
        keep.append(i)
        continue

      # discard conformers after max_conformers is reached
      if len(keep) >= self.max_conformers:
        discard.append(i)
        continue

      # get RMSD to selected conformers
      this_rmsd = rmsd[i][np.asarray(keep, dtype=int)]

      # keep the conformer only if it is at least rmsd_threshold away from
      # every conformer already kept (i.e. it is "new enough")
      if np.all(this_rmsd >= self.rmsd_threshold):
        keep.append(i)
      else:
        discard.append(i)

    # create a new molecule to hold the chosen conformers
    # this ensures proper conformer IDs and energy-based ordering
    new_mol = Chem.Mol(mol)
    new_mol.RemoveAllConformers()
    conf_ids = [conf.GetId() for conf in mol.GetConformers()]
    for i in keep:
      conf = mol.GetConformer(conf_ids[i])
      new_mol.AddConformer(conf, assignId=True)
    return new_mol

  @staticmethod
  def get_conformer_rmsd(mol: RDKitMol) -> np.ndarray:
    """
    Calculate conformer-conformer RMSD.

    Parameters
    ----------
    mol: rdkit.Chem.rdchem.Mol
      RDKit Mol object

    Returns
    -------
    rmsd: np.ndarray
      A conformer-conformer RMSD value. The shape is `(NumConformers, NumConformers)`
    """
    rmsd = np.zeros(
        (mol.GetNumConformers(), mol.GetNumConformers()), dtype=float)
    # Fill the upper triangle and mirror it; the diagonal stays 0.
    for i, ref_conf in enumerate(mol.GetConformers()):
      for j, fit_conf in enumerate(mol.GetConformers()):
        if i >= j:
          continue
        rmsd[i, j] = AllChem.GetBestRMS(mol, mol, ref_conf.GetId(),
                                        fit_conf.GetId())
        rmsd[j, i] = rmsd[i, j]
    return rmsd
|
lilleswing/deepchem
|
deepchem/utils/conformers.py
|
Python
|
mit
| 8,816
|
[
"RDKit"
] |
3e7344e97c86b2fa348c1e0313d65aeb7bd10cd917a66a644050bd2e77b7e3d2
|
######################################################################
##
## Copyright 2011 Christian Iversen <ci@sikkerhed.org>
##
## Permission is hereby granted, free of charge, to any person
## obtaining a copy of this software and associated documentation
## files (the "Software"), to deal in the Software without
## restriction, including without limitation the rights to use,
## copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the
## Software is furnished to do so, subject to the following
## conditions:
##
## The above copyright notice and this permission notice shall be
## included in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
## OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
## NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
## HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
## WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
## OTHER DEALINGS IN THE SOFTWARE.
##
######################################################################
import pyjaco.compiler.python
import pyjaco.compiler.javascript
import pyjaco.compiler.multiplexer
import re
import StringIO
import ast
import inspect
def compile_string(script, jsvars = None):
    """Compile a python expression into javascript"""
    compiler = Compiler(jsvars)
    compiler.append_string(script)
    return str(compiler)
class Compiler(object):
    """
    pyjaco. A python-to-javascript compiler

    Usage:

      c = Compiler()

      # Compile a str of python source code, with the section header "Shared code"
      c.append_string("function Q(id)\n{\n    return $(js(id))\n};", "Shared code")

      # Append a raw string to the output (no compilation performed)
      c.append_raw('{"foo": "bar"}')

      # Append an entire class, including all methods, with the section header "Class: My class"
      c.append_class(MyClass, "Class: MyClass")

      # Print the resulting code
      print str(c)
    """

    # Matches lines that contain only (indented) Python comments, so they
    # can be dropped before decorator scanning in split().
    re_comment = re.compile("^[ ]*#")

    def __init__(self, jsvars = None, opts = dict()):
        defaults = dict(check_params = True)
        compiler_opts = dict()
        compiler_opts.update(defaults)
        compiler_opts.update(opts)
        self.compiler = pyjaco.compiler.multiplexer.Compiler(jsvars, compiler_opts)
        self.buffer = None
        self.reset()

    def reset(self):
        """Discard all accumulated output, starting from an empty buffer."""
        self.buffer = StringIO.StringIO()

    def __str__(self):
        """Return the generated javascript accumulated so far."""
        return self.buffer.getvalue()

    def dedent(self, code, body):
        """Strip the common leading indentation from *code* (a list of lines).

        When *body* is true and the first line is a 'def', that line is
        dropped so only the function body remains.
        """
        if body:
            if code[0].lstrip().startswith('def'):
                code.pop(0)
        # Use the first remaining line to measure the indentation to remove.
        dedent = len(code[0]) - len(code[0].lstrip())
        res = []
        for c in code:
            res.append(c[dedent:])
        return "\n".join(res)

    def find_js(self, names):
        """Extract variable paths declared via @JSVar("a.b", ...) decorators.

        Returns a list of name paths, each split on '.'.
        """
        js = []
        for x in names:
            l = x.lstrip()
            if l.startswith("@JSVar"):
                # Strip '@JSVar(' and the trailing ')', then the quotes
                # around each comma-separated name.
                names = l[7:-1].split(",")
                for n in [n.strip()[1:-1] for n in names]:
                    js.append(n.split("."))
        return js

    def split(self, code):
        """Split source into (jsvars, code-lines), consuming leading decorators.

        NOTE(review): if every non-comment line starts with '@' the loop
        finishes without returning and the method yields None -- presumably
        that input never occurs in practice; confirm before relying on it.
        """
        code = [x for x in code.split("\n") if x != "" and not re.match(self.re_comment, x)]
        decos, lines = [], []
        for i, x in enumerate(code):
            if not x.lstrip().startswith("@"):
                return self.find_js(code[:i]), code[i:]

    @staticmethod
    def format_name(name):
        """Return a centered banner comment for *name*, or '' if name is falsy."""
        if name:
            return "/*%s*/\n" % ("| %s |" % name).center(80, "*")
        else:
            return ""

    def comment_section(self, name):
        """Write a banner comment for *name* to the output buffer (if any)."""
        if name:
            self.buffer.write(self.format_name(name))

    def append_raw(self, code, name = None):
        """Append *code* verbatim (no compilation), under an optional banner."""
        self.comment_section(name)
        self.buffer.write(code)
        self.buffer.write("\n\n")

    def append_string(self, code, name = None, jsvars = None):
        """Compile a python source string and append the result."""
        self.comment_section(name)
        if jsvars:
            self.compiler.jsvars = jsvars
        self.buffer.write("\n".join(self.compiler.visit(ast.parse(code))))
        self.buffer.write("\n\n")
        # jsvars only apply to this one compilation unit.
        self.compiler.jsvars = []

    def append_method(self, code, name = None, body = False):
        """Compile a live python function/method object and append the result."""
        jsvars, code = self.split(inspect.getsource(code))
        self.append_string(self.dedent(code, body), name, jsvars)

    def append_class(self, code, name = None):
        """Compile a live python class object and append the result."""
        self.append_string(inspect.getsource(code), name)

    def append_module(self, module, classes, name = None):
        """Compile *classes* into a javascript object named *module* and append it."""
        self.append_raw(self.compile_module(module, classes, name))

    def append_data(self, key, value, name = None):
        """Append a javascript variable assignment compiled from *value*."""
        self.append_raw(self.compile_data(key, value))

    def compile_string(self, code, name = None, jsvars = None):
        """Compile a python source string and return the javascript (no buffering)."""
        if jsvars:
            self.compiler.jsvars = jsvars
        res = self.format_name(name) + "\n".join(self.compiler.visit(ast.parse(code)))
        self.compiler.jsvars = []
        return res

    def compile_method(self, code, name = None, body = False):
        """Compile a live python function/method object and return the javascript."""
        jsvars, code = self.split(inspect.getsource(code))
        return self.compile_string(self.dedent(code, body), name, jsvars)

    def compile_class(self, code, name = None):
        """Compile a live python class object and return the javascript."""
        return self.compile_string(inspect.getsource(code), name)

    def compile_module(self, module, classes, name = None):
        """Return javascript declaring *module* and attaching each compiled class."""
        res = [self.format_name(name), "var %s = object();" % module]
        for cls in classes:
            res.append(self.format_name("Class %s.%s" % (module, cls.__name__)))
            res.append("%s.PY$__setattr__('%s', function() {" % (module, cls.__name__))
            res.append(self.compile_class(cls))
            res.append("return %s}());" % (cls.__name__))
        res.append("")
        return "\n".join(res)

    def compile_data(self, key, value):
        """Return a javascript 'var key = ...' assignment compiled from repr(value)."""
        return "var %s = %s" % (key, "\n".join(self.compiler.visit(ast.parse(repr(value)))))

    def compile_expr(self, value):
        """Return the javascript expression compiled from repr(value)."""
        return "\n".join(self.compiler.visit(ast.parse(repr(value))))
|
buchuki/pyjaco
|
pyjaco/__init__.py
|
Python
|
mit
| 6,141
|
[
"VisIt"
] |
3f0f9cbff05fa9b119b1f26f9a86e1413ae34784a4844df650bb54eae2ce8306
|
import ast
import datetime
import re
import secrets
import time
from datetime import timedelta
from typing import (
AbstractSet,
Any,
Callable,
Dict,
List,
Optional,
Pattern,
Sequence,
Set,
Tuple,
TypeVar,
Union,
)
import django.contrib.auth
from bitfield import BitField
from bitfield.types import BitHandler
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager
from django.core.exceptions import ValidationError
from django.core.validators import MinLengthValidator, RegexValidator, URLValidator, validate_email
from django.db import models, transaction
from django.db.models import CASCADE, Manager, Q, Sum
from django.db.models.query import QuerySet
from django.db.models.signals import post_delete, post_save
from django.utils.functional import Promise
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy
from confirmation import settings as confirmation_settings
from zerver.lib import cache
from zerver.lib.cache import (
active_non_guest_user_ids_cache_key,
active_user_ids_cache_key,
bot_dict_fields,
bot_dicts_in_realm_cache_key,
bot_profile_cache_key,
bulk_cached_fetch,
cache_delete,
cache_set,
cache_with_key,
flush_message,
flush_muting_users_cache,
flush_realm,
flush_stream,
flush_submessage,
flush_used_upload_space_cache,
flush_user_profile,
get_realm_used_upload_space_cache_key,
get_stream_cache_key,
realm_alert_words_automaton_cache_key,
realm_alert_words_cache_key,
realm_user_dict_fields,
realm_user_dicts_cache_key,
user_profile_by_api_key_cache_key,
user_profile_by_id_cache_key,
user_profile_cache_key,
)
from zerver.lib.exceptions import JsonableError
from zerver.lib.pysa import mark_sanitized
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.types import (
DisplayRecipientT,
ExtendedFieldElement,
ExtendedValidator,
FieldElement,
LinkifierDict,
ProfileData,
ProfileDataElementBase,
RealmUserValidator,
UserFieldElement,
Validator,
)
from zerver.lib.utils import make_safe_digest
from zerver.lib.validator import (
check_date,
check_int,
check_list,
check_long_string,
check_short_string,
check_url,
validate_select_field,
)
MAX_TOPIC_NAME_LENGTH = 60
MAX_LANGUAGE_ID_LENGTH: int = 50
STREAM_NAMES = TypeVar("STREAM_NAMES", Sequence[str], AbstractSet[str])
def query_for_ids(query: QuerySet, user_ids: List[int], field: str) -> QuerySet:
    """Restrict *query* to rows whose *field* is one of *user_ids*.

    Builds the `field IN (...)` clause by hand via `QuerySet.extra`;
    profiling shows significant speedups over the normal Django ORM
    approach for `user_profile_id in (1, 2, 3, 4)`-style searches.

    Use this very carefully!  The caller must guard against an empty
    user_ids list (asserted below).
    """
    assert user_ids
    return query.extra(
        where=[f"{field} IN %s"],
        params=(tuple(user_ids),),
    )
# Doing 1000 remote cache requests to get_display_recipient is quite slow,
# so add a local cache as well as the remote cache cache.
#
# This local cache has a lifetime of just a single request; it is
# cleared inside `flush_per_request_caches` in our middleware. It
# could be replaced with smarter bulk-fetching logic that deduplicates
# queries for the same recipient; this is just a convenient way to
# write that code.
#
# Maps recipient_id -> display recipient (a stream name, or a list of
# user dicts for private-message recipients).
per_request_display_recipient_cache: Dict[int, DisplayRecipientT] = {}
def get_display_recipient_by_id(
    recipient_id: int, recipient_type: int, recipient_type_id: Optional[int]
) -> DisplayRecipientT:
    """
    returns: an object describing the recipient (using a cache).
    If the type is a stream, the type_id must be an int; a string is returned.
    Otherwise, type_id may be None; an array of recipient dicts is returned.
    """
    # Have to import here, to avoid circular dependency.
    from zerver.lib.display_recipient import get_display_recipient_remote_cache

    try:
        return per_request_display_recipient_cache[recipient_id]
    except KeyError:
        pass
    result = get_display_recipient_remote_cache(recipient_id, recipient_type, recipient_type_id)
    per_request_display_recipient_cache[recipient_id] = result
    return result
def get_display_recipient(recipient: "Recipient") -> DisplayRecipientT:
    """Convenience wrapper: resolve a Recipient row via the per-request cache."""
    return get_display_recipient_by_id(recipient.id, recipient.type, recipient.type_id)
def get_realm_emoji_cache_key(realm: "Realm") -> str:
    """Remote-cache key for the realm's full emoji dict (including deactivated)."""
    return "realm_emoji:" + str(realm.id)
def get_active_realm_emoji_cache_key(realm: "Realm") -> str:
    """Remote-cache key for the realm's active-only emoji dict."""
    return "active_realm_emoji:" + str(realm.id)
# This simple call-once caching saves ~500us in auth_enabled_helper,
# which is a significant optimization for common_context.  Note that
# these values cannot change in a running production system, but do
# regularly change within unit tests; we address the latter by calling
# clear_supported_auth_backends_cache in our standard tearDown code.
supported_backends: Optional[Set[type]] = None


def supported_auth_backends() -> Set[type]:
    """Return the set of authentication backends enabled on this server.

    The result is computed once and cached for the life of the process
    (see the comment above); tests reset the cache via
    clear_supported_auth_backends_cache.
    """
    global supported_backends
    # Compute on first use only; backends cannot change in a running
    # production server, and tests explicitly clear this cache.
    if supported_backends is None:
        supported_backends = django.contrib.auth.get_backends()
    assert supported_backends is not None
    return supported_backends
def clear_supported_auth_backends_cache() -> None:
    """Reset the supported_auth_backends cache.

    Called from standard test tearDown code, since unit tests regularly
    change the set of enabled authentication backends.
    """
    global supported_backends
    supported_backends = None
class Realm(models.Model):
    """A Zulip organization (Zulip's multi-tenancy unit).

    Stores organization-level configuration: display metadata, permission
    policies, feature toggles, plan/billing tier, and pointers to special
    streams.  Most other tables have a foreign key to Realm.
    """

    MAX_REALM_NAME_LENGTH = 40
    MAX_REALM_DESCRIPTION_LENGTH = 1000
    MAX_REALM_SUBDOMAIN_LENGTH = 40
    MAX_REALM_REDIRECT_URL_LENGTH = 128
    INVITES_STANDARD_REALM_DAILY_MAX = 3000
    MESSAGE_VISIBILITY_LIMITED = 10000
    # Names for the flags of the authentication_methods BitField, in bit order.
    AUTHENTICATION_FLAGS = [
        "Google",
        "Email",
        "GitHub",
        "LDAP",
        "Dev",
        "RemoteUser",
        "AzureAD",
        "SAML",
        "GitLab",
        "Apple",
        "OpenID Connect",
    ]
    SUBDOMAIN_FOR_ROOT_DOMAIN = ""
    WILDCARD_MENTION_THRESHOLD = 15
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    # User-visible display name and description used on e.g. the organization homepage
    name: Optional[str] = models.CharField(max_length=MAX_REALM_NAME_LENGTH, null=True)
    description: str = models.TextField(default="")
    # A short, identifier-like name for the organization. Used in subdomains;
    # e.g. on a server at example.com, an org with string_id `foo` is reached
    # at `foo.example.com`.
    string_id: str = models.CharField(max_length=MAX_REALM_SUBDOMAIN_LENGTH, unique=True)
    date_created: datetime.datetime = models.DateTimeField(default=timezone_now)
    deactivated: bool = models.BooleanField(default=False)
    # Redirect URL if the Realm has moved to another server
    deactivated_redirect: Optional[str] = models.URLField(max_length=MAX_REALM_REDIRECT_URL_LENGTH, null=True)
    # See RealmDomain for the domains that apply for a given organization.
    emails_restricted_to_domains: bool = models.BooleanField(default=False)
    invite_required: bool = models.BooleanField(default=True)
    # Raw column behind the max_invites property; None means "use server default".
    _max_invites: Optional[int] = models.IntegerField(null=True, db_column="max_invites")
    disallow_disposable_email_addresses: bool = models.BooleanField(default=True)
    # Bitmask of enabled authentication backends; all flags on by default.
    authentication_methods: BitHandler = BitField(
        flags=AUTHENTICATION_FLAGS,
        default=2 ** 31 - 1,
    )
    # Whether the organization has enabled inline image and URL previews.
    inline_image_preview: bool = models.BooleanField(default=True)
    inline_url_embed_preview: bool = models.BooleanField(default=False)
    # Whether digest emails are enabled for the organization.
    digest_emails_enabled: bool = models.BooleanField(default=False)
    # Day of the week on which the digest is sent (default: Tuesday).
    digest_weekday: int = models.SmallIntegerField(default=1)
    send_welcome_emails: bool = models.BooleanField(default=True)
    message_content_allowed_in_email_notifications: bool = models.BooleanField(default=True)
    mandatory_topics: bool = models.BooleanField(default=False)
    add_emoji_by_admins_only: bool = models.BooleanField(default=False)
    name_changes_disabled: bool = models.BooleanField(default=False)
    email_changes_disabled: bool = models.BooleanField(default=False)
    avatar_changes_disabled: bool = models.BooleanField(default=False)
    # Shared constants for the various *_policy fields below.
    POLICY_MEMBERS_ONLY = 1
    POLICY_ADMINS_ONLY = 2
    POLICY_FULL_MEMBERS_ONLY = 3
    POLICY_MODERATORS_ONLY = 4
    POLICY_EVERYONE = 5
    COMMON_POLICY_TYPES = [
        POLICY_MEMBERS_ONLY,
        POLICY_ADMINS_ONLY,
        POLICY_FULL_MEMBERS_ONLY,
        POLICY_MODERATORS_ONLY,
    ]
    COMMON_MESSAGE_POLICY_TYPES = [
        POLICY_MEMBERS_ONLY,
        POLICY_ADMINS_ONLY,
        POLICY_FULL_MEMBERS_ONLY,
        POLICY_MODERATORS_ONLY,
        POLICY_EVERYONE,
    ]
    DEFAULT_COMMUNITY_TOPIC_EDITING_LIMIT_SECONDS = 259200
    # Who in the organization is allowed to create streams.
    create_stream_policy: int = models.PositiveSmallIntegerField(default=POLICY_MEMBERS_ONLY)
    # Who in the organization is allowed to edit topics of any message.
    edit_topic_policy: int = models.PositiveSmallIntegerField(default=POLICY_EVERYONE)
    # Who in the organization is allowed to invite other users to organization.
    invite_to_realm_policy: int = models.PositiveSmallIntegerField(default=POLICY_MEMBERS_ONLY)
    # Who in the organization is allowed to invite other users to streams.
    invite_to_stream_policy: int = models.PositiveSmallIntegerField(default=POLICY_MEMBERS_ONLY)
    # Who in the organization is allowed to move messages between streams.
    move_messages_between_streams_policy: int = models.PositiveSmallIntegerField(
        default=POLICY_ADMINS_ONLY
    )
    user_group_edit_policy: int = models.PositiveSmallIntegerField(default=POLICY_MEMBERS_ONLY)
    PRIVATE_MESSAGE_POLICY_UNLIMITED = 1
    PRIVATE_MESSAGE_POLICY_DISABLED = 2
    private_message_policy: int = models.PositiveSmallIntegerField(
        default=PRIVATE_MESSAGE_POLICY_UNLIMITED
    )
    PRIVATE_MESSAGE_POLICY_TYPES = [
        PRIVATE_MESSAGE_POLICY_UNLIMITED,
        PRIVATE_MESSAGE_POLICY_DISABLED,
    ]
    # Global policy for who is allowed to use wildcard mentions in
    # streams with a large number of subscribers.  Anyone can use
    # wildcard mentions in small streams regardless of this setting.
    WILDCARD_MENTION_POLICY_EVERYONE = 1
    WILDCARD_MENTION_POLICY_MEMBERS = 2
    WILDCARD_MENTION_POLICY_FULL_MEMBERS = 3
    WILDCARD_MENTION_POLICY_STREAM_ADMINS = 4
    WILDCARD_MENTION_POLICY_ADMINS = 5
    WILDCARD_MENTION_POLICY_NOBODY = 6
    WILDCARD_MENTION_POLICY_MODERATORS = 7
    wildcard_mention_policy: int = models.PositiveSmallIntegerField(
        default=WILDCARD_MENTION_POLICY_STREAM_ADMINS,
    )
    WILDCARD_MENTION_POLICY_TYPES = [
        WILDCARD_MENTION_POLICY_EVERYONE,
        WILDCARD_MENTION_POLICY_MEMBERS,
        WILDCARD_MENTION_POLICY_FULL_MEMBERS,
        WILDCARD_MENTION_POLICY_STREAM_ADMINS,
        WILDCARD_MENTION_POLICY_ADMINS,
        WILDCARD_MENTION_POLICY_NOBODY,
        WILDCARD_MENTION_POLICY_MODERATORS,
    ]
    # Who in the organization has access to users' actual email
    # addresses.  Controls whether the UserProfile.email field is the
    # same as UserProfile.delivery_email, or is instead garbage.
    EMAIL_ADDRESS_VISIBILITY_EVERYONE = 1
    EMAIL_ADDRESS_VISIBILITY_MEMBERS = 2
    EMAIL_ADDRESS_VISIBILITY_ADMINS = 3
    EMAIL_ADDRESS_VISIBILITY_NOBODY = 4
    EMAIL_ADDRESS_VISIBILITY_MODERATORS = 5
    email_address_visibility: int = models.PositiveSmallIntegerField(
        default=EMAIL_ADDRESS_VISIBILITY_EVERYONE,
    )
    EMAIL_ADDRESS_VISIBILITY_TYPES = [
        EMAIL_ADDRESS_VISIBILITY_EVERYONE,
        # The MEMBERS level is not yet implemented on the backend.
        ## EMAIL_ADDRESS_VISIBILITY_MEMBERS,
        EMAIL_ADDRESS_VISIBILITY_ADMINS,
        EMAIL_ADDRESS_VISIBILITY_NOBODY,
        EMAIL_ADDRESS_VISIBILITY_MODERATORS,
    ]
    # Threshold in days for new users to create streams, and potentially take
    # some other actions.
    waiting_period_threshold: int = models.PositiveIntegerField(default=0)
    allow_message_deleting: bool = models.BooleanField(default=False)
    DEFAULT_MESSAGE_CONTENT_DELETE_LIMIT_SECONDS = (
        600  # if changed, also change in admin.js, setting_org.js
    )
    message_content_delete_limit_seconds: int = models.IntegerField(
        default=DEFAULT_MESSAGE_CONTENT_DELETE_LIMIT_SECONDS,
    )
    allow_message_editing: bool = models.BooleanField(default=True)
    DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS = (
        600  # if changed, also change in admin.js, setting_org.js
    )
    message_content_edit_limit_seconds: int = models.IntegerField(
        default=DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS,
    )
    # Whether users have access to message edit history
    allow_edit_history: bool = models.BooleanField(default=True)
    # Defaults for new users
    default_twenty_four_hour_time: bool = models.BooleanField(default=False)
    default_language: str = models.CharField(default="en", max_length=MAX_LANGUAGE_ID_LENGTH)
    DEFAULT_NOTIFICATION_STREAM_NAME = "general"
    INITIAL_PRIVATE_STREAM_NAME = "core team"
    STREAM_EVENTS_NOTIFICATION_TOPIC = gettext_lazy("stream events")
    # Stream used for new-stream and similar notifications; may be unset.
    notifications_stream: Optional["Stream"] = models.ForeignKey(
        "Stream",
        related_name="+",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
    )
    # Stream used for new-user signup notifications; may be unset.
    signup_notifications_stream: Optional["Stream"] = models.ForeignKey(
        "Stream",
        related_name="+",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
    )
    MESSAGE_RETENTION_SPECIAL_VALUES_MAP = {
        "forever": -1,
    }
    # For old messages being automatically deleted
    message_retention_days: int = models.IntegerField(null=False, default=-1)
    # When non-null, all but the latest this many messages in the organization
    # are inaccessible to users (but not deleted).
    message_visibility_limit: Optional[int] = models.IntegerField(null=True)
    # Messages older than this message ID in the organization are inaccessible.
    first_visible_message_id: int = models.IntegerField(default=0)
    # Valid org_types are {CORPORATE, COMMUNITY}
    CORPORATE = 1
    COMMUNITY = 2
    org_type: int = models.PositiveSmallIntegerField(default=CORPORATE)
    UPGRADE_TEXT_STANDARD = gettext_lazy("Available on Zulip Standard. Upgrade to access.")
    # plan_type controls various features around resource/feature
    # limitations for a Zulip organization on multi-tenant installations
    # like Zulip Cloud.
    SELF_HOSTED = 1
    LIMITED = 2
    STANDARD = 3
    STANDARD_FREE = 4
    plan_type: int = models.PositiveSmallIntegerField(default=SELF_HOSTED)
    # This value is also being used in static/js/settings_bots.bot_creation_policy_values.
    # On updating it here, update it there as well.
    BOT_CREATION_EVERYONE = 1
    BOT_CREATION_LIMIT_GENERIC_BOTS = 2
    BOT_CREATION_ADMINS_ONLY = 3
    bot_creation_policy: int = models.PositiveSmallIntegerField(default=BOT_CREATION_EVERYONE)
    BOT_CREATION_POLICY_TYPES = [
        BOT_CREATION_EVERYONE,
        BOT_CREATION_LIMIT_GENERIC_BOTS,
        BOT_CREATION_ADMINS_ONLY,
    ]
    # See upload_quota_bytes; don't interpret upload_quota_gb directly.
    UPLOAD_QUOTA_LIMITED = 5
    UPLOAD_QUOTA_STANDARD = 50
    upload_quota_gb: Optional[int] = models.IntegerField(null=True)
    VIDEO_CHAT_PROVIDERS = {
        "disabled": {
            "name": "None",
            "id": 0,
        },
        "jitsi_meet": {
            "name": "Jitsi Meet",
            "id": 1,
        },
        # ID 2 was used for the now-deleted Google Hangouts.
        # ID 3 reserved for optional Zoom, see below.
        # ID 4 reserved for optional BigBlueButton, see below.
    }
    # Optional providers are added only when the server has credentials
    # configured for them (evaluated once at class-definition time).
    if settings.VIDEO_ZOOM_CLIENT_ID is not None and settings.VIDEO_ZOOM_CLIENT_SECRET is not None:
        VIDEO_CHAT_PROVIDERS["zoom"] = {
            "name": "Zoom",
            "id": 3,
        }
    if settings.BIG_BLUE_BUTTON_SECRET is not None and settings.BIG_BLUE_BUTTON_URL is not None:
        VIDEO_CHAT_PROVIDERS["big_blue_button"] = {"name": "BigBlueButton", "id": 4}
    video_chat_provider: int = models.PositiveSmallIntegerField(
        default=VIDEO_CHAT_PROVIDERS["jitsi_meet"]["id"]
    )
    GIPHY_RATING_OPTIONS = {
        "disabled": {
            "name": "GIPHY integration disabled",
            "id": 0,
        },
        # Source: https://github.com/Giphy/giphy-js/blob/master/packages/fetch-api/README.md#shared-options
        "y": {
            "name": "Allow GIFs rated Y (Very young audience)",
            "id": 1,
        },
        "g": {
            "name": "Allow GIFs rated G (General audience)",
            "id": 2,
        },
        "pg": {
            "name": "Allow GIFs rated PG (Parental guidance)",
            "id": 3,
        },
        "pg-13": {
            "name": "Allow GIFs rated PG13 (Parental guidance - under 13)",
            "id": 4,
        },
        "r": {
            "name": "Allow GIFs rated R (Restricted)",
            "id": 5,
        },
    }
    # maximum rating of the GIFs that will be retrieved from GIPHY
    giphy_rating: int = models.PositiveSmallIntegerField(default=GIPHY_RATING_OPTIONS["g"]["id"])
    default_code_block_language: Optional[str] = models.TextField(null=True, default=None)
    # Define the types of the various automatically managed properties
    property_types: Dict[str, Union[type, Tuple[type, ...]]] = dict(
        add_emoji_by_admins_only=bool,
        allow_edit_history=bool,
        allow_message_deleting=bool,
        bot_creation_policy=int,
        create_stream_policy=int,
        invite_to_stream_policy=int,
        move_messages_between_streams_policy=int,
        default_language=str,
        default_twenty_four_hour_time=bool,
        description=str,
        digest_emails_enabled=bool,
        disallow_disposable_email_addresses=bool,
        email_address_visibility=int,
        email_changes_disabled=bool,
        giphy_rating=int,
        invite_required=bool,
        invite_to_realm_policy=int,
        inline_image_preview=bool,
        inline_url_embed_preview=bool,
        mandatory_topics=bool,
        message_retention_days=(int, type(None)),
        name=str,
        name_changes_disabled=bool,
        avatar_changes_disabled=bool,
        emails_restricted_to_domains=bool,
        send_welcome_emails=bool,
        message_content_allowed_in_email_notifications=bool,
        video_chat_provider=int,
        waiting_period_threshold=int,
        digest_weekday=int,
        private_message_policy=int,
        user_group_edit_policy=int,
        default_code_block_language=(str, type(None)),
        message_content_delete_limit_seconds=int,
        wildcard_mention_policy=int,
    )
    DIGEST_WEEKDAY_VALUES = [0, 1, 2, 3, 4, 5, 6]
    # Icon is the square mobile icon.
    ICON_FROM_GRAVATAR = "G"
    ICON_UPLOADED = "U"
    ICON_SOURCES = (
        (ICON_FROM_GRAVATAR, "Hosted by Gravatar"),
        (ICON_UPLOADED, "Uploaded by administrator"),
    )
    icon_source: str = models.CharField(
        default=ICON_FROM_GRAVATAR,
        choices=ICON_SOURCES,
        max_length=1,
    )
    icon_version: int = models.PositiveSmallIntegerField(default=1)
    # Logo is the horizontal logo we show in top-left of web app navbar UI.
    LOGO_DEFAULT = "D"
    LOGO_UPLOADED = "U"
    LOGO_SOURCES = (
        (LOGO_DEFAULT, "Default to Zulip"),
        (LOGO_UPLOADED, "Uploaded by administrator"),
    )
    logo_source: str = models.CharField(
        default=LOGO_DEFAULT,
        choices=LOGO_SOURCES,
        max_length=1,
    )
    logo_version: int = models.PositiveSmallIntegerField(default=1)
    # Separate logo used when the user has night mode enabled.
    night_logo_source: str = models.CharField(
        default=LOGO_DEFAULT,
        choices=LOGO_SOURCES,
        max_length=1,
    )
    night_logo_version: int = models.PositiveSmallIntegerField(default=1)
    def authentication_methods_dict(self) -> Dict[str, bool]:
        """Returns a mapping from authentication flags to their status,
        showing only those authentication flags that are supported on
        the current server (i.e. if EmailAuthBackend is not configured
        on the server, this will not return an entry for "Email")."""
        # This mapping needs to be imported from here due to the cyclic
        # dependency.
        from zproject.backends import AUTH_BACKEND_NAME_MAP
        ret: Dict[str, bool] = {}
        supported_backends = [backend.__class__ for backend in supported_auth_backends()]
        # `authentication_methods` is a bitfield.types.BitHandler, not
        # a true dict; since it is still python2- and python3-compat,
        # `iteritems` is its method to iterate over its contents.
        for k, v in self.authentication_methods.iteritems():
            backend = AUTH_BACKEND_NAME_MAP[k]
            if backend in supported_backends:
                ret[k] = v
        return ret
    def __str__(self) -> str:
        return f"<Realm: {self.string_id} {self.id}>"
    @cache_with_key(get_realm_emoji_cache_key, timeout=3600 * 24 * 7)
    def get_emoji(self) -> Dict[str, Dict[str, Any]]:
        # All realm emoji (active and deactivated), cached for a week.
        return get_realm_emoji_uncached(self)
    @cache_with_key(get_active_realm_emoji_cache_key, timeout=3600 * 24 * 7)
    def get_active_emoji(self) -> Dict[str, Dict[str, Any]]:
        # Only non-deactivated emoji, keyed by name; cached for a week.
        return get_active_realm_emoji_uncached(self)
    def get_admin_users_and_bots(
        self, include_realm_owners: bool = True
    ) -> Sequence["UserProfile"]:
        """Use this in contexts where we want administrative users as well as
        bots with administrator privileges, like send_event calls for
        notifications to all administrator users.
        """
        if include_realm_owners:
            roles = [UserProfile.ROLE_REALM_ADMINISTRATOR, UserProfile.ROLE_REALM_OWNER]
        else:
            roles = [UserProfile.ROLE_REALM_ADMINISTRATOR]
        # TODO: Change return type to QuerySet[UserProfile]
        return UserProfile.objects.filter(
            realm=self,
            is_active=True,
            role__in=roles,
        )
    def get_human_admin_users(self, include_realm_owners: bool = True) -> QuerySet:
        """Use this in contexts where we want only human users with
        administrative privileges, like sending an email to all of a
        realm's administrators (bots don't have real email addresses).
        """
        if include_realm_owners:
            roles = [UserProfile.ROLE_REALM_ADMINISTRATOR, UserProfile.ROLE_REALM_OWNER]
        else:
            roles = [UserProfile.ROLE_REALM_ADMINISTRATOR]
        # TODO: Change return type to QuerySet[UserProfile]
        return UserProfile.objects.filter(
            realm=self,
            is_bot=False,
            is_active=True,
            role__in=roles,
        )
    def get_human_billing_admin_and_realm_owner_users(self) -> QuerySet:
        # Active human users who are either realm owners or billing admins.
        return UserProfile.objects.filter(
            Q(role=UserProfile.ROLE_REALM_OWNER) | Q(is_billing_admin=True),
            realm=self,
            is_bot=False,
            is_active=True,
        )
    def get_active_users(self) -> Sequence["UserProfile"]:
        # TODO: Change return type to QuerySet[UserProfile]
        return UserProfile.objects.filter(realm=self, is_active=True).select_related()
    def get_first_human_user(self) -> Optional["UserProfile"]:
        """A useful value for communications with newly created realms.
        Has a few fundamental limitations:
        * Its value will be effectively random for realms imported from Slack or
          other third-party tools.
        * The user may be deactivated, etc., so it's not something that's useful
          for features, permissions, etc.
        """
        return UserProfile.objects.filter(realm=self, is_bot=False).order_by("id").first()
    def get_human_owner_users(self) -> QuerySet:
        # Active human users with the realm-owner role.
        return UserProfile.objects.filter(
            realm=self, is_bot=False, role=UserProfile.ROLE_REALM_OWNER, is_active=True
        )
    def get_bot_domain(self) -> str:
        # Domain used for synthesizing bot email addresses in this realm.
        return get_fake_email_domain(self)
    def get_notifications_stream(self) -> Optional["Stream"]:
        # Returns None if the configured stream has been deactivated.
        if self.notifications_stream is not None and not self.notifications_stream.deactivated:
            return self.notifications_stream
        return None
    def get_signup_notifications_stream(self) -> Optional["Stream"]:
        # Returns None if the configured stream has been deactivated.
        if (
            self.signup_notifications_stream is not None
            and not self.signup_notifications_stream.deactivated
        ):
            return self.signup_notifications_stream
        return None
    @property
    def max_invites(self) -> int:
        # Falls back to the server-wide default when no per-realm
        # override (_max_invites) has been set.
        if self._max_invites is None:
            return settings.INVITES_DEFAULT_REALM_DAILY_MAX
        return self._max_invites
    @max_invites.setter
    def max_invites(self, value: Optional[int]) -> None:
        self._max_invites = value
    def upload_quota_bytes(self) -> Optional[int]:
        """Upload quota in bytes, or None for unlimited."""
        if self.upload_quota_gb is None:
            return None
        # We describe the quota to users in "GB" or "gigabytes", but actually apply
        # it as gibibytes (GiB) to be a bit more generous in case of confusion.
        return self.upload_quota_gb << 30
    @cache_with_key(get_realm_used_upload_space_cache_key, timeout=3600 * 24 * 7)
    def currently_used_upload_space_bytes(self) -> int:
        """Total size in bytes of the realm's attachments (cached for a week)."""
        used_space = Attachment.objects.filter(realm=self).aggregate(Sum("size"))["size__sum"]
        if used_space is None:
            return 0
        return used_space
    def ensure_not_on_limited_plan(self) -> None:
        """Raise JsonableError if this realm is on the LIMITED plan."""
        if self.plan_type == Realm.LIMITED:
            raise JsonableError(self.UPGRADE_TEXT_STANDARD)
    @property
    def subdomain(self) -> str:
        return self.string_id
    @property
    def display_subdomain(self) -> str:
        """Likely to be temporary function to avoid signup messages being sent
        to an empty topic"""
        if self.string_id == "":
            return "."
        return self.string_id
    @property
    def uri(self) -> str:
        # Full external URL for the realm, e.g. "https://foo.example.com".
        return settings.EXTERNAL_URI_SCHEME + self.host
    @property
    def host(self) -> str:
        # Use mark sanitized to prevent false positives from Pysa thinking that
        # the host is user controlled.
        return mark_sanitized(self.host_for_subdomain(self.subdomain))
    @staticmethod
    def host_for_subdomain(subdomain: str) -> str:
        # Root-domain realms use EXTERNAL_HOST directly; REALM_HOSTS
        # allows per-subdomain host overrides.
        if subdomain == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
            return settings.EXTERNAL_HOST
        default_host = f"{subdomain}.{settings.EXTERNAL_HOST}"
        return settings.REALM_HOSTS.get(subdomain, default_host)
    @property
    def is_zephyr_mirror_realm(self) -> bool:
        return self.string_id == "zephyr"
    @property
    def webathena_enabled(self) -> bool:
        return self.is_zephyr_mirror_realm
    @property
    def presence_disabled(self) -> bool:
        return self.is_zephyr_mirror_realm
def realm_post_delete_handler(sender: Any, **kwargs: Any) -> None:
    """post_delete receiver: flush cache entries for a deleted Realm."""
    # This would be better as a functools.partial, but for some reason
    # Django doesn't call it even when it's registered as a post_delete handler.
    flush_realm(sender, from_deletion=True, **kwargs)
# Keep Realm cache entries coherent with database saves and deletes.
post_save.connect(flush_realm, sender=Realm)
post_delete.connect(realm_post_delete_handler, sender=Realm)
def get_realm(string_id: str) -> Realm:
    """Fetch the Realm with the given subdomain (string_id)."""
    return Realm.objects.get(string_id=string_id)
def get_realm_by_id(realm_id: int) -> Realm:
    """Fetch a Realm by its primary key."""
    return Realm.objects.get(id=realm_id)
def name_changes_disabled(realm: Optional[Realm]) -> bool:
    """Whether users are blocked from changing their own names, either by the
    server-wide setting or by the realm's policy (realm may be None when
    there is no realm context)."""
    if realm is not None:
        return settings.NAME_CHANGES_DISABLED or realm.name_changes_disabled
    return settings.NAME_CHANGES_DISABLED
def avatar_changes_disabled(realm: Realm) -> bool:
    """Whether avatar changes are blocked server-wide or by realm policy."""
    return settings.AVATAR_CHANGES_DISABLED or realm.avatar_changes_disabled
class RealmDomain(models.Model):
    """For an organization with emails_restricted_to_domains enabled, the list of
    allowed domains"""
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    # should always be stored lowercase
    domain: str = models.CharField(max_length=80, db_index=True)
    # Whether addresses under subdomains of `domain` are accepted as well.
    allow_subdomains: bool = models.BooleanField(default=False)
    class Meta:
        unique_together = ("realm", "domain")
# These functions should only be used on email addresses that have
# been validated via django.core.validators.validate_email
#
# Note that we need to use some care, since can you have multiple @-signs; e.g.
# "tabbott@test"@zulip.com
# is valid email address
def email_to_username(email: str) -> str:
    """Return the local part of *email* (everything before the last "@"),
    lowercased."""
    local_part, _, _ = email.rpartition("@")
    return local_part.lower()
def email_to_domain(email: str) -> str:
    """Return the raw domain portion of *email* (everything after the last
    "@"), lowercased."""
    return email.rpartition("@")[2].lower()
class DomainNotAllowedForRealmError(Exception):
    """The email address's domain is not in the realm's allowed-domains list."""
    pass
class DisposableEmailError(Exception):
    """The email address belongs to a known disposable-email provider."""
    pass
class EmailContainsPlusError(Exception):
    """The email address contains a "+", which is not permitted here."""
    pass
def get_realm_domains(realm: Realm) -> List[Dict[str, str]]:
    """List of {"domain", "allow_subdomains"} dicts for the realm's
    allowed email domains."""
    domain_rows = realm.realmdomain_set.values("domain", "allow_subdomains")
    return list(domain_rows)
class RealmEmoji(models.Model):
    """A custom emoji registered for use within a single realm."""
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    # The user who uploaded the emoji, if known (nullable).
    author: Optional["UserProfile"] = models.ForeignKey(
        "UserProfile",
        blank=True,
        null=True,
        on_delete=CASCADE,
    )
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    name: str = models.TextField(
        validators=[
            MinLengthValidator(1),
            # The second part of the regex (negative lookbehind) disallows names
            # ending with one of the punctuation characters.
            RegexValidator(
                regex=r"^[0-9a-z.\-_]+(?<![.\-_])$",
                message=gettext_lazy("Invalid characters in emoji name"),
            ),
        ]
    )
    # The basename of the custom emoji's filename; see PATH_ID_TEMPLATE for the full path.
    file_name: Optional[str] = models.TextField(db_index=True, null=True, blank=True)
    deactivated: bool = models.BooleanField(default=False)
    PATH_ID_TEMPLATE = "{realm_id}/emoji/images/{emoji_file_name}"
    def __str__(self) -> str:
        return f"<RealmEmoji({self.realm.string_id}): {self.id} {self.name} {self.deactivated} {self.file_name}>"
def get_realm_emoji_dicts(
    realm: Realm, only_active_emojis: bool = False
) -> Dict[str, Dict[str, Any]]:
    """Build a mapping from emoji ID (as a string) to its API-facing dict.

    With only_active_emojis=True, deactivated emoji are excluded.
    """
    from zerver.lib.emoji import get_emoji_url

    emoji_query = RealmEmoji.objects.filter(realm=realm).select_related("author")
    if only_active_emojis:
        emoji_query = emoji_query.filter(deactivated=False)
    emoji_dicts = {}
    for emoji in emoji_query.all():
        emoji_dicts[str(emoji.id)] = dict(
            id=str(emoji.id),
            name=emoji.name,
            source_url=get_emoji_url(emoji.file_name, emoji.realm_id),
            deactivated=emoji.deactivated,
            # author is prefetched via select_related, so this is no extra query.
            author_id=emoji.author_id if emoji.author else None,
        )
    return emoji_dicts
def get_realm_emoji_uncached(realm: Realm) -> Dict[str, Dict[str, Any]]:
    """Uncached variant backing Realm.get_emoji (includes deactivated emoji)."""
    return get_realm_emoji_dicts(realm)
def get_active_realm_emoji_uncached(realm: Realm) -> Dict[str, Dict[str, Any]]:
    """Uncached variant backing Realm.get_active_emoji: only active emoji,
    keyed by emoji name instead of ID."""
    active_emoji = get_realm_emoji_dicts(realm, only_active_emojis=True)
    return {emoji_dict["name"]: emoji_dict for emoji_dict in active_emoji.values()}
def flush_realm_emoji(sender: Any, **kwargs: Any) -> None:
    """Signal handler: recompute both realm-emoji cache entries whenever a
    RealmEmoji row is saved or deleted."""
    realm = kwargs["instance"].realm
    cache_set(
        get_realm_emoji_cache_key(realm), get_realm_emoji_uncached(realm), timeout=3600 * 24 * 7
    )
    cache_set(
        get_active_realm_emoji_cache_key(realm),
        get_active_realm_emoji_uncached(realm),
        timeout=3600 * 24 * 7,
    )
post_save.connect(flush_realm_emoji, sender=RealmEmoji)
post_delete.connect(flush_realm_emoji, sender=RealmEmoji)
def filter_pattern_validator(value: str) -> Pattern[str]:
    """Validate a linkifier pattern and return it compiled.

    Raises ValidationError if the pattern uses characters outside the
    allowed set or is not a valid regular expression.
    """
    regex = re.compile(r"^(?:(?:[\w\-#_= /:]*|[+]|[!])(\(\?P<\w+>.+\)))+$")
    error_msg = _("Invalid linkifier pattern. Valid characters are {}.").format(
        "[ a-zA-Z_#=/:+!-]",
    )
    if not regex.match(str(value)):
        raise ValidationError(error_msg)
    try:
        pattern = re.compile(value)
    except re.error:
        # Regex is invalid
        raise ValidationError(error_msg)
    return pattern
def filter_format_validator(value: str) -> None:
    """Validate a linkifier URL format string (must consist of allowed URL
    characters plus %(group)s placeholders); raises ValidationError otherwise."""
    regex = re.compile(r"^([\.\/:a-zA-Z0-9#_?=&;~-]+%\(([a-zA-Z0-9_-]+)\)s)+[/a-zA-Z0-9#_?=&;~-]*$")
    if not regex.match(value):
        raise ValidationError(_("Invalid URL format string."))
class RealmFilter(models.Model):
    """Realm-specific regular expressions to automatically linkify certain
    strings inside the Markdown processor. See "Custom filters" in the settings UI.
    """
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    # Regex matched against message content; its named groups are substituted
    # into url_format_string.
    pattern: str = models.TextField()
    url_format_string: str = models.TextField(validators=[URLValidator(), filter_format_validator])
    class Meta:
        unique_together = ("realm", "pattern")
    def clean(self) -> None:
        """Validate whether the set of parameters in the URL Format string
        match the set of parameters in the regular expression.
        Django's `full_clean` calls `clean_fields` followed by `clean` method
        and stores all ValidationErrors from all stages to return as JSON.
        """
        # Extract variables present in the pattern
        pattern = filter_pattern_validator(self.pattern)
        group_set = set(pattern.groupindex.keys())
        # Extract variables used in the URL format string. Note that
        # this regex will incorrectly reject patterns that attempt to
        # escape % using %%.
        found_group_set: Set[str] = set()
        group_match_regex = r"(?<!%)%\((?P<group_name>[^()]+)\)s"
        for m in re.finditer(group_match_regex, self.url_format_string):
            group_name = m.group("group_name")
            found_group_set.add(group_name)
        # Report patterns missing in linkifier pattern.
        missing_in_pattern_set = found_group_set - group_set
        if len(missing_in_pattern_set) > 0:
            name = list(sorted(missing_in_pattern_set))[0]
            raise ValidationError(
                _("Group %(name)r in URL format string is not present in linkifier pattern."),
                params={"name": name},
            )
        missing_in_url_set = group_set - found_group_set
        # Report patterns missing in URL format string.
        if len(missing_in_url_set) > 0:
            # We just report the first missing pattern here. Users can
            # incrementally resolve errors if there are multiple
            # missing patterns.
            name = list(sorted(missing_in_url_set))[0]
            raise ValidationError(
                _("Group %(name)r in linkifier pattern is not present in URL format string."),
                params={"name": name},
            )
    def __str__(self) -> str:
        return f"<RealmFilter({self.realm.string_id}): {self.pattern} {self.url_format_string}>"
def get_linkifiers_cache_key(realm_id: int) -> str:
    """Remote-cache key for a realm's full linkifier list."""
    return f"{cache.KEY_PREFIX}:all_linkifiers_for_realm:{realm_id}"
# We have a per-process cache to avoid doing 1000 remote cache queries during page load
# Maps realm_id -> that realm's linkifier dicts for the current request.
per_request_linkifiers_cache: Dict[int, List[LinkifierDict]] = {}
def realm_in_local_linkifiers_cache(realm_id: int) -> bool:
    """Whether the realm's linkifiers are already in the per-request cache."""
    return realm_id in per_request_linkifiers_cache
def linkifiers_for_realm(realm_id: int) -> List[LinkifierDict]:
    """Return the realm's linkifiers, memoized in the per-request cache."""
    try:
        return per_request_linkifiers_cache[realm_id]
    except KeyError:
        result = linkifiers_for_realm_remote_cache(realm_id)
        per_request_linkifiers_cache[realm_id] = result
        return result
def realm_filters_for_realm(realm_id: int) -> List[Tuple[str, str, int]]:
    """
    Processes data from `linkifiers_for_realm` to return to older clients,
    which use the `realm_filters` events (tuples rather than dicts).
    """
    return [
        (linkifier["pattern"], linkifier["url_format"], linkifier["id"])
        for linkifier in linkifiers_for_realm(realm_id)
    ]
@cache_with_key(get_linkifiers_cache_key, timeout=3600 * 24 * 7)
def linkifiers_for_realm_remote_cache(realm_id: int) -> List[LinkifierDict]:
    """Load the realm's linkifiers from the database; the result is stored in
    the remote cache for a week (see the decorator)."""
    return [
        LinkifierDict(
            pattern=row.pattern,
            url_format=row.url_format_string,
            id=row.id,
        )
        for row in RealmFilter.objects.filter(realm_id=realm_id)
    ]
def flush_linkifiers(sender: Any, **kwargs: Any) -> None:
    """Signal handler: drop both the remote and per-request linkifier cache
    entries for the realm whose RealmFilter row was saved or deleted."""
    realm_id = kwargs["instance"].realm_id
    cache_delete(get_linkifiers_cache_key(realm_id))
    # Discard the per-request entry if present; no-op otherwise.
    per_request_linkifiers_cache.pop(realm_id, None)


post_save.connect(flush_linkifiers, sender=RealmFilter)
post_delete.connect(flush_linkifiers, sender=RealmFilter)
def flush_per_request_caches() -> None:
    """Reset the per-process, per-request caches; called from middleware at
    the end of each request."""
    global per_request_display_recipient_cache, per_request_linkifiers_cache
    # Rebind fresh dicts (matching how the caches are populated elsewhere).
    per_request_display_recipient_cache = {}
    per_request_linkifiers_cache = {}
class RealmPlayground(models.Model):
    """Server side storage model to store playground information needed by our
    'view code in playground' feature in code blocks.
    """
    MAX_PYGMENTS_LANGUAGE_LENGTH = 40
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    # Base URL to which the code content is appended when opening a playground.
    url_prefix: str = models.TextField(validators=[URLValidator()])
    # User-visible display name used when configuring playgrounds in the settings page and
    # when displaying them in the playground links popover.
    name: str = models.TextField(db_index=True)
    # This stores the pygments lexer subclass names and not the aliases themselves.
    pygments_language: str = models.CharField(
        db_index=True,
        max_length=MAX_PYGMENTS_LANGUAGE_LENGTH,
        # We validate to see if this conforms to the character set allowed for a
        # language in the code block.
        validators=[
            RegexValidator(
                regex=r"^[ a-zA-Z0-9_+-./#]*$", message=_("Invalid characters in pygments language")
            )
        ],
    )
    class Meta:
        unique_together = (("realm", "pygments_language", "name"),)
    def __str__(self) -> str:
        return f"<RealmPlayground({self.realm.string_id}): {self.pygments_language} {self.name}>"
def get_realm_playgrounds(realm: Realm) -> List[Dict[str, Union[int, str]]]:
    """API-facing list of the realm's configured code playgrounds."""
    return [
        dict(
            id=playground.id,
            name=playground.name,
            pygments_language=playground.pygments_language,
            url_prefix=playground.url_prefix,
        )
        for playground in RealmPlayground.objects.filter(realm=realm).all()
    ]
# The Recipient table is used to map Messages to the set of users who
# received the message. It is implemented as a set of triples (id,
# type_id, type). We have 3 types of recipients: Huddles (for group
# private messages), UserProfiles (for 1:1 private messages), and
# Streams. The recipient table maps a globally unique recipient id
# (used by the Message table) to the type-specific unique id (the
# stream id, user_profile id, or huddle id).
class Recipient(models.Model):
    """The audience of a message.

    Maps a globally unique recipient id to a (type, type_id) pair, where
    type is one of PERSONAL, STREAM, or HUDDLE and type_id is the id of
    the corresponding UserProfile, Stream, or Huddle row.  See the
    comment above this class for details.
    """
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    type_id: int = models.IntegerField(db_index=True)
    type: int = models.PositiveSmallIntegerField(db_index=True)
    # Valid types are {personal, stream, huddle}
    PERSONAL = 1
    STREAM = 2
    HUDDLE = 3
    class Meta:
        unique_together = ("type", "type_id")
    # N.B. If we used Django's choice=... we would get this for free (kinda)
    _type_names = {PERSONAL: "personal", STREAM: "stream", HUDDLE: "huddle"}
    def type_name(self) -> str:
        # Raises KeyError if invalid
        return self._type_names[self.type]
    def __str__(self) -> str:
        display_recipient = get_display_recipient(self)
        return f"<Recipient: {display_recipient} ({self.type_id}, {self.type})>"
class UserBaseSettings(models.Model):
    """This abstract class is the container for all preferences/personal
    settings for users that control the behavior of the application.
    It was extracted from UserProfile to support the RealmUserDefault
    model (i.e. allow individual realms to configure the default
    values of these preferences for new users in their organization).
    Changing the default value for a field declared here likely
    requires a migration to update all RealmUserDefault rows that had
    the old default value to have the new default value. Otherwise,
    the default change will only affect new users joining Realms
    created after the change.
    """
    # UI settings
    enter_sends: Optional[bool] = models.BooleanField(null=True, default=False)
    # display settings
    left_side_userlist: bool = models.BooleanField(default=False)
    default_language: str = models.CharField(default="en", max_length=MAX_LANGUAGE_ID_LENGTH)
    # This setting controls which view is rendered first when Zulip loads.
    # Values for it are URL suffix after `#`.
    default_view: str = models.TextField(default="recent_topics")
    dense_mode: bool = models.BooleanField(default=True)
    fluid_layout_width: bool = models.BooleanField(default=False)
    high_contrast_mode: bool = models.BooleanField(default=False)
    translate_emoticons: bool = models.BooleanField(default=False)
    twenty_four_hour_time: bool = models.BooleanField(default=False)
    starred_message_counts: bool = models.BooleanField(default=True)
    COLOR_SCHEME_AUTOMATIC = 1
    COLOR_SCHEME_NIGHT = 2
    COLOR_SCHEME_LIGHT = 3
    COLOR_SCHEME_CHOICES = [COLOR_SCHEME_AUTOMATIC, COLOR_SCHEME_NIGHT, COLOR_SCHEME_LIGHT]
    color_scheme: int = models.PositiveSmallIntegerField(default=COLOR_SCHEME_AUTOMATIC)
    # UI setting controlling Zulip's behavior of demoting in the sort
    # order and graying out streams with no recent traffic. The
    # default behavior, automatic, enables this behavior once a user
    # is subscribed to 30+ streams in the web app.
    DEMOTE_STREAMS_AUTOMATIC = 1
    DEMOTE_STREAMS_ALWAYS = 2
    DEMOTE_STREAMS_NEVER = 3
    DEMOTE_STREAMS_CHOICES = [
        DEMOTE_STREAMS_AUTOMATIC,
        DEMOTE_STREAMS_ALWAYS,
        DEMOTE_STREAMS_NEVER,
    ]
    demote_inactive_streams: int = models.PositiveSmallIntegerField(
        default=DEMOTE_STREAMS_AUTOMATIC
    )
    # Emojisets
    GOOGLE_EMOJISET = "google"
    GOOGLE_BLOB_EMOJISET = "google-blob"
    TEXT_EMOJISET = "text"
    TWITTER_EMOJISET = "twitter"
    EMOJISET_CHOICES = (
        (GOOGLE_EMOJISET, "Google modern"),
        (GOOGLE_BLOB_EMOJISET, "Google classic"),
        (TWITTER_EMOJISET, "Twitter"),
        (TEXT_EMOJISET, "Plain text"),
    )
    emojiset: str = models.CharField(
        default=GOOGLE_BLOB_EMOJISET, choices=EMOJISET_CHOICES, max_length=20
    )
    ### Notifications settings. ###
    # Stream notifications.
    enable_stream_desktop_notifications: bool = models.BooleanField(default=False)
    enable_stream_email_notifications: bool = models.BooleanField(default=False)
    enable_stream_push_notifications: bool = models.BooleanField(default=False)
    enable_stream_audible_notifications: bool = models.BooleanField(default=False)
    notification_sound: str = models.CharField(max_length=20, default="zulip")
    wildcard_mentions_notify: bool = models.BooleanField(default=True)
    # PM + @-mention notifications.
    enable_desktop_notifications: bool = models.BooleanField(default=True)
    pm_content_in_desktop_notifications: bool = models.BooleanField(default=True)
    enable_sounds: bool = models.BooleanField(default=True)
    enable_offline_email_notifications: bool = models.BooleanField(default=True)
    message_content_in_email_notifications: bool = models.BooleanField(default=True)
    enable_offline_push_notifications: bool = models.BooleanField(default=True)
    enable_online_push_notifications: bool = models.BooleanField(default=True)
    DESKTOP_ICON_COUNT_DISPLAY_MESSAGES = 1
    DESKTOP_ICON_COUNT_DISPLAY_NOTIFIABLE = 2
    DESKTOP_ICON_COUNT_DISPLAY_NONE = 3
    desktop_icon_count_display: int = models.PositiveSmallIntegerField(
        default=DESKTOP_ICON_COUNT_DISPLAY_MESSAGES
    )
    enable_digest_emails: bool = models.BooleanField(default=True)
    enable_login_emails: bool = models.BooleanField(default=True)
    enable_marketing_emails: bool = models.BooleanField(default=True)
    realm_name_in_notifications: bool = models.BooleanField(default=False)
    presence_enabled: bool = models.BooleanField(default=True)
    # Define the types of the various automatically managed properties
    property_types = dict(
        color_scheme=int,
        default_language=str,
        default_view=str,
        demote_inactive_streams=int,
        dense_mode=bool,
        emojiset=str,
        fluid_layout_width=bool,
        high_contrast_mode=bool,
        left_side_userlist=bool,
        starred_message_counts=bool,
        translate_emoticons=bool,
        twenty_four_hour_time=bool,
    )
    # Define the types of the notification settings, analogous to
    # property_types above for the display settings.
    notification_setting_types = dict(
        enable_desktop_notifications=bool,
        enable_digest_emails=bool,
        enable_login_emails=bool,
        enable_marketing_emails=bool,
        enable_offline_email_notifications=bool,
        enable_offline_push_notifications=bool,
        enable_online_push_notifications=bool,
        enable_sounds=bool,
        enable_stream_desktop_notifications=bool,
        enable_stream_email_notifications=bool,
        enable_stream_push_notifications=bool,
        enable_stream_audible_notifications=bool,
        wildcard_mentions_notify=bool,
        message_content_in_email_notifications=bool,
        notification_sound=str,
        pm_content_in_desktop_notifications=bool,
        desktop_icon_count_display=int,
        realm_name_in_notifications=bool,
        presence_enabled=bool,
    )
    class Meta:
        abstract = True
class RealmUserDefault(UserBaseSettings):
    """This table stores realm-level default values for user preferences
    like notification settings, used when creating a new user account.
    """
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    # NOTE(review): appears intended to be one row per realm, but the FK is
    # not declared unique/OneToOne here -- confirm callers enforce that.
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
class UserProfile(AbstractBaseUser, PermissionsMixin, UserBaseSettings):
    """A Zulip user account (human or bot) within a single Realm,
    including all of the per-user settings inherited from
    UserBaseSettings.
    """
    USERNAME_FIELD = "email"
    MAX_NAME_LENGTH = 100
    MIN_NAME_LENGTH = 2
    API_KEY_LENGTH = 32
    NAME_INVALID_CHARS = ["*", "`", "\\", ">", '"', "@"]
    DEFAULT_BOT = 1
    """
    Incoming webhook bots are limited to only sending messages via webhooks.
    Thus, it is less of a security risk to expose their API keys to third-party services,
    since they can't be used to read messages.
    """
    INCOMING_WEBHOOK_BOT = 2
    # This value is also being used in static/js/settings_bots.js.
    # On updating it here, update it there as well.
    OUTGOING_WEBHOOK_BOT = 3
    """
    Embedded bots run within the Zulip server itself; events are added to the
    embedded_bots queue and then handled by a QueueProcessingWorker.
    """
    EMBEDDED_BOT = 4
    BOT_TYPES = {
        DEFAULT_BOT: "Generic bot",
        INCOMING_WEBHOOK_BOT: "Incoming webhook",
        OUTGOING_WEBHOOK_BOT: "Outgoing webhook",
        EMBEDDED_BOT: "Embedded bot",
    }
    # Bot types implemented as "services" (handled outside the normal
    # message-sending codepath); see SERVICE_BOT_TYPES usage elsewhere.
    SERVICE_BOT_TYPES = [
        OUTGOING_WEBHOOK_BOT,
        EMBEDDED_BOT,
    ]
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    # For historical reasons, Zulip has two email fields. The
    # `delivery_email` field is the user's email address, where all
    # email notifications will be sent, and is used for all
    # authentication use cases.
    #
    # The `email` field is the same as delivery_email in organizations
    # with EMAIL_ADDRESS_VISIBILITY_EVERYONE. For other
    # organizations, it will be a unique value of the form
    # user1234@example.com. This field exists for backwards
    # compatibility in Zulip APIs where users are referred to by their
    # email address, not their ID; it should be used in all API use cases.
    #
    # Both fields are unique within a realm (in a case-insensitive fashion).
    delivery_email: str = models.EmailField(blank=False, db_index=True)
    email: str = models.EmailField(blank=False, db_index=True)
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    # Foreign key to the Recipient object for PERSONAL type messages to this user.
    recipient = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)
    # The user's name. We prefer the model of a full_name
    # over first+last because cultures vary on how many
    # names one has, whether the family name is first or last, etc.
    # It also allows organizations to encode a bit of non-name data in
    # the "name" attribute if desired, like gender pronouns,
    # graduation year, etc.
    full_name: str = models.CharField(max_length=MAX_NAME_LENGTH)
    date_joined: datetime.datetime = models.DateTimeField(default=timezone_now)
    # Version of the terms of service this user has agreed to, as a
    # dotted string like "1.0"; None if never recorded.
    tos_version: Optional[str] = models.CharField(null=True, max_length=10)
    api_key: str = models.CharField(max_length=API_KEY_LENGTH)
    # Whether the user has access to server-level administrator pages, like /activity
    is_staff: bool = models.BooleanField(default=False)
    # For a normal user, this is True unless the user or an admin has
    # deactivated their account. The name comes from Django; this field
    # isn't related to presence or to whether the user has recently used Zulip.
    #
    # See also `long_term_idle`.
    is_active: bool = models.BooleanField(default=True, db_index=True)
    is_billing_admin: bool = models.BooleanField(default=False, db_index=True)
    is_bot: bool = models.BooleanField(default=False, db_index=True)
    bot_type: Optional[int] = models.PositiveSmallIntegerField(null=True, db_index=True)
    bot_owner: Optional["UserProfile"] = models.ForeignKey(
        "self", null=True, on_delete=models.SET_NULL
    )
    # Each role has a superset of the permissions of the next higher
    # numbered role. When adding new roles, leave enough space for
    # future roles to be inserted between currently adjacent
    # roles. These constants appear in RealmAuditLog.extra_data, so
    # changes to them will require a migration of RealmAuditLog.
    ROLE_REALM_OWNER = 100
    ROLE_REALM_ADMINISTRATOR = 200
    ROLE_MODERATOR = 300
    ROLE_MEMBER = 400
    ROLE_GUEST = 600
    role: int = models.PositiveSmallIntegerField(default=ROLE_MEMBER, db_index=True)
    ROLE_TYPES = [
        ROLE_REALM_OWNER,
        ROLE_REALM_ADMINISTRATOR,
        ROLE_MODERATOR,
        ROLE_MEMBER,
        ROLE_GUEST,
    ]
    # Whether the user has been "soft-deactivated" due to weeks of inactivity.
    # For these users we avoid doing UserMessage table work, as an optimization
    # for large Zulip organizations with lots of single-visit users.
    long_term_idle: bool = models.BooleanField(default=False, db_index=True)
    # When we last added basic UserMessage rows for a long_term_idle user.
    last_active_message_id: Optional[int] = models.IntegerField(null=True)
    # Mirror dummies are fake (!is_active) users used to provide
    # message senders in our cross-protocol Zephyr<->Zulip content
    # mirroring integration, so that we can display mirrored content
    # like native Zulip messages (with a name + avatar, etc.).
    is_mirror_dummy: bool = models.BooleanField(default=False)
    # Users with this flag set are allowed to forge messages as sent by another
    # user and to send to private streams; also used for Zephyr/Jabber mirroring.
    can_forge_sender: bool = models.BooleanField(default=False, db_index=True)
    # Users with this flag set can create other users via API.
    can_create_users: bool = models.BooleanField(default=False, db_index=True)
    # Used for rate-limiting certain automated messages generated by bots
    last_reminder: Optional[datetime.datetime] = models.DateTimeField(default=None, null=True)
    # Minutes to wait before warning a bot owner that their bot sent a message
    # to a nonexistent stream
    BOT_OWNER_STREAM_ALERT_WAITPERIOD = 1
    # API rate limits, formatted as a comma-separated list of range:max pairs
    rate_limits: str = models.CharField(default="", max_length=100)
    # Hours to wait before sending another email to a user
    EMAIL_REMINDER_WAITPERIOD = 24
    # Default streams for some deprecated/legacy classes of bot users.
    default_sending_stream: Optional["Stream"] = models.ForeignKey(
        "zerver.Stream",
        null=True,
        related_name="+",
        on_delete=models.SET_NULL,
    )
    default_events_register_stream: Optional["Stream"] = models.ForeignKey(
        "zerver.Stream",
        null=True,
        related_name="+",
        on_delete=models.SET_NULL,
    )
    default_all_public_streams: bool = models.BooleanField(default=False)
    # A timezone name from the `tzdata` database, as found in pytz.all_timezones.
    #
    # The longest existing name is 32 characters long, so max_length=40 seems
    # like a safe choice.
    #
    # In Django, the convention is to use an empty string instead of NULL/None
    # for text-based fields. For more information, see
    # https://docs.djangoproject.com/en/1.10/ref/models/fields/#django.db.models.Field.null.
    timezone: str = models.CharField(max_length=40, default="")
    AVATAR_FROM_GRAVATAR = "G"
    AVATAR_FROM_USER = "U"
    AVATAR_SOURCES = (
        (AVATAR_FROM_GRAVATAR, "Hosted by Gravatar"),
        (AVATAR_FROM_USER, "Uploaded by user"),
    )
    avatar_source: str = models.CharField(
        default=AVATAR_FROM_GRAVATAR, choices=AVATAR_SOURCES, max_length=1
    )
    avatar_version: int = models.PositiveSmallIntegerField(default=1)
    avatar_hash: Optional[str] = models.CharField(null=True, max_length=64)
    TUTORIAL_WAITING = "W"
    TUTORIAL_STARTED = "S"
    TUTORIAL_FINISHED = "F"
    TUTORIAL_STATES = (
        (TUTORIAL_WAITING, "Waiting"),
        (TUTORIAL_STARTED, "Started"),
        (TUTORIAL_FINISHED, "Finished"),
    )
    tutorial_status: str = models.CharField(
        default=TUTORIAL_WAITING, choices=TUTORIAL_STATES, max_length=1
    )
    # Contains serialized JSON of the form:
    #    [("step 1", true), ("step 2", false)]
    # where the second element of each tuple is if the step has been
    # completed.
    onboarding_steps: str = models.TextField(default="[]")
    zoom_token: Optional[object] = models.JSONField(default=None, null=True)
    objects: UserManager = UserManager()
    ROLE_ID_TO_NAME_MAP = {
        ROLE_REALM_OWNER: gettext_lazy("Organization owner"),
        ROLE_REALM_ADMINISTRATOR: gettext_lazy("Organization administrator"),
        ROLE_MODERATOR: gettext_lazy("Moderator"),
        ROLE_MEMBER: gettext_lazy("Member"),
        ROLE_GUEST: gettext_lazy("Guest"),
    }
    def get_role_name(self) -> str:
        """Return the translated human-readable name of this user's role."""
        return self.ROLE_ID_TO_NAME_MAP[self.role]
    @property
    def profile_data(self) -> ProfileData:
        """Return the user's custom profile field values, merged with the
        realm's custom profile field definitions, as a list of dicts."""
        values = CustomProfileFieldValue.objects.filter(user_profile=self)
        user_data = {
            v.field_id: {"value": v.value, "rendered_value": v.rendered_value} for v in values
        }
        data: ProfileData = []
        for field in custom_profile_fields_for_realm(self.realm_id):
            field_values = user_data.get(field.id, None)
            if field_values:
                value, rendered_value = field_values.get("value"), field_values.get(
                    "rendered_value"
                )
            else:
                value, rendered_value = None, None
            field_type = field.field_type
            if value is not None:
                # Convert the stored string value to the field's native type.
                converter = field.FIELD_CONVERTERS[field_type]
                value = converter(value)
            field_data = field.as_dict()
            data.append(
                {
                    "id": field_data["id"],
                    "name": field_data["name"],
                    "type": field_data["type"],
                    "hint": field_data["hint"],
                    "field_data": field_data["field_data"],
                    "order": field_data["order"],
                    "value": value,
                    "rendered_value": rendered_value,
                }
            )
        return data
    def can_admin_user(self, target_user: "UserProfile") -> bool:
        """Returns whether this user has permission to modify target_user"""
        if target_user.bot_owner == self:
            return True
        elif self.is_realm_admin and self.realm == target_user.realm:
            return True
        else:
            return False
    def __str__(self) -> str:
        return f"<UserProfile: {self.email} {self.realm}>"
    @property
    def is_provisional_member(self) -> bool:
        """Whether the user is still within the realm's waiting-period
        threshold (moderators and above are exempt)."""
        if self.is_moderator:
            return False
        diff = (timezone_now() - self.date_joined).days
        if diff < self.realm.waiting_period_threshold:
            return True
        return False
    @property
    def is_realm_admin(self) -> bool:
        # Owners are a superset of administrators for permission checks.
        return (
            self.role == UserProfile.ROLE_REALM_ADMINISTRATOR
            or self.role == UserProfile.ROLE_REALM_OWNER
        )
    @is_realm_admin.setter
    def is_realm_admin(self, value: bool) -> None:
        if value:
            self.role = UserProfile.ROLE_REALM_ADMINISTRATOR
        elif self.role == UserProfile.ROLE_REALM_ADMINISTRATOR:
            # We need to be careful to not accidentally change
            # ROLE_GUEST to ROLE_MEMBER here.
            self.role = UserProfile.ROLE_MEMBER
    @property
    def has_billing_access(self) -> bool:
        return self.is_realm_owner or self.is_billing_admin
    @property
    def is_realm_owner(self) -> bool:
        return self.role == UserProfile.ROLE_REALM_OWNER
    @property
    def is_guest(self) -> bool:
        return self.role == UserProfile.ROLE_GUEST
    @is_guest.setter
    def is_guest(self, value: bool) -> None:
        if value:
            self.role = UserProfile.ROLE_GUEST
        elif self.role == UserProfile.ROLE_GUEST:
            # We need to be careful to not accidentally change
            # ROLE_REALM_ADMINISTRATOR to ROLE_MEMBER here.
            self.role = UserProfile.ROLE_MEMBER
    @property
    def is_moderator(self) -> bool:
        return self.role == UserProfile.ROLE_MODERATOR
    @property
    def is_incoming_webhook(self) -> bool:
        return self.bot_type == UserProfile.INCOMING_WEBHOOK_BOT
    @property
    def allowed_bot_types(self) -> List[int]:
        """Bot types this user is permitted to create, given their role
        and the realm's bot_creation_policy."""
        allowed_bot_types = []
        if (
            self.is_realm_admin
            or not self.realm.bot_creation_policy == Realm.BOT_CREATION_LIMIT_GENERIC_BOTS
        ):
            allowed_bot_types.append(UserProfile.DEFAULT_BOT)
        allowed_bot_types += [
            UserProfile.INCOMING_WEBHOOK_BOT,
            UserProfile.OUTGOING_WEBHOOK_BOT,
        ]
        if settings.EMBEDDED_BOTS_ENABLED:
            allowed_bot_types.append(UserProfile.EMBEDDED_BOT)
        return allowed_bot_types
    @staticmethod
    def emojiset_choices() -> List[Dict[str, str]]:
        return [
            dict(key=emojiset[0], text=emojiset[1]) for emojiset in UserProfile.EMOJISET_CHOICES
        ]
    @staticmethod
    def emails_from_ids(user_ids: Sequence[int]) -> Dict[int, str]:
        """Map each of the given user ids to that user's `email` field."""
        rows = UserProfile.objects.filter(id__in=user_ids).values("id", "email")
        return {row["id"]: row["email"] for row in rows}
    def email_address_is_realm_public(self) -> bool:
        if self.realm.email_address_visibility == Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE:
            return True
        if self.is_bot:
            return True
        return False
    def has_permission(self, policy_name: str) -> bool:
        """Whether this user satisfies the realm policy named policy_name.

        Admins always pass; guests fail any policy stricter than
        "everyone"; otherwise the user's role is checked against the
        realm's configured POLICY_* value.  Raises AssertionError for an
        unknown policy name.
        """
        if policy_name not in [
            "create_stream_policy",
            "edit_topic_policy",
            "invite_to_stream_policy",
            "invite_to_realm_policy",
            "move_messages_between_streams_policy",
            "user_group_edit_policy",
        ]:
            raise AssertionError("Invalid policy")
        if self.is_realm_admin:
            return True
        policy_value = getattr(self.realm, policy_name)
        if policy_value == Realm.POLICY_ADMINS_ONLY:
            return False
        if self.is_moderator:
            return True
        if policy_value == Realm.POLICY_MODERATORS_ONLY:
            return False
        if self.is_guest:
            return False
        if policy_value == Realm.POLICY_MEMBERS_ONLY:
            return True
        assert policy_value == Realm.POLICY_FULL_MEMBERS_ONLY
        return not self.is_provisional_member
    def can_create_streams(self) -> bool:
        return self.has_permission("create_stream_policy")
    def can_subscribe_other_users(self) -> bool:
        return self.has_permission("invite_to_stream_policy")
    def can_invite_others_to_realm(self) -> bool:
        return self.has_permission("invite_to_realm_policy")
    def can_move_messages_between_streams(self) -> bool:
        return self.has_permission("move_messages_between_streams_policy")
    def can_edit_user_groups(self) -> bool:
        return self.has_permission("user_group_edit_policy")
    def can_edit_topic_of_any_message(self) -> bool:
        if self.realm.edit_topic_policy == Realm.POLICY_EVERYONE:
            return True
        return self.has_permission("edit_topic_policy")
    def can_access_public_streams(self) -> bool:
        return not (self.is_guest or self.realm.is_zephyr_mirror_realm)
    def major_tos_version(self) -> int:
        """Major component of the agreed ToS version; -1 if never recorded."""
        if self.tos_version is not None:
            return int(self.tos_version.split(".")[0])
        else:
            return -1
    def format_requestor_for_logs(self) -> str:
        return "{}@{}".format(self.id, self.realm.string_id or "root")
    def set_password(self, password: Optional[str]) -> None:
        """Set the user's password, enforcing the configured strength check.

        Passing None marks the password unusable.  Raises
        PasswordTooWeakError when the password fails the strength check.
        """
        if password is None:
            self.set_unusable_password()
            return
        from zproject.backends import check_password_strength
        if not check_password_strength(password):
            raise PasswordTooWeakError
        super().set_password(password)
class PasswordTooWeakError(Exception):
    """Raised by UserProfile.set_password when the password fails the
    configured strength check."""
    pass
class UserGroup(models.Model):
    """A named group of users within a realm; membership is stored via
    UserGroupMembership."""
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    name: str = models.CharField(max_length=100)
    members: Manager = models.ManyToManyField(UserProfile, through="UserGroupMembership")
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    description: str = models.TextField(default="")
    class Meta:
        unique_together = (("realm", "name"),)
class UserGroupMembership(models.Model):
    """Join table recording that a user belongs to a UserGroup."""
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    user_group: UserGroup = models.ForeignKey(UserGroup, on_delete=CASCADE)
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    class Meta:
        unique_together = (("user_group", "user_profile"),)
def receives_offline_push_notifications(user_profile: UserProfile) -> bool:
    """Whether this user should get push notifications while offline.

    Bots never receive these.
    """
    if user_profile.is_bot:
        return False
    return user_profile.enable_offline_push_notifications
def receives_offline_email_notifications(user_profile: UserProfile) -> bool:
    """Whether this user should get email notifications while offline.

    Bots never receive these.
    """
    if user_profile.is_bot:
        return False
    return user_profile.enable_offline_email_notifications
def receives_online_push_notifications(user_profile: UserProfile) -> bool:
    """Whether this user should get push notifications while online.

    Bots never receive these.
    """
    if user_profile.is_bot:
        return False
    return user_profile.enable_online_push_notifications
def remote_user_to_email(remote_user: str) -> str:
    """Map a remote SSO username to an email address, appending
    settings.SSO_APPEND_DOMAIN when one is configured."""
    domain = settings.SSO_APPEND_DOMAIN
    if domain is None:
        return remote_user
    return f"{remote_user}@{domain}"
# Make sure we flush the UserProfile object from our remote cache
# whenever we save it.  (flush_user_profile is defined elsewhere in this
# module's imports; presumably it invalidates the relevant cache keys --
# confirm there.)
post_save.connect(flush_user_profile, sender=UserProfile)
class PreregistrationUser(models.Model):
    # Data on a partially created user, before the completion of
    # registration.  This is used in at least three major code paths:
    # * Realm creation, in which case realm is None.
    #
    # * Invitations, in which case referred_by will always be set.
    #
    # * Social authentication signup, where it's used to store data
    #   from the authentication step and pass it to the registration
    #   form.
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    email: str = models.EmailField()
    # If the pre-registration process provides a suggested full name for this user,
    # store it here to use it to prepopulate the full name field in the registration form:
    full_name: Optional[str] = models.CharField(max_length=UserProfile.MAX_NAME_LENGTH, null=True)
    full_name_validated: bool = models.BooleanField(default=False)
    referred_by: Optional[UserProfile] = models.ForeignKey(
        UserProfile, null=True, on_delete=CASCADE
    )
    streams: Manager = models.ManyToManyField("Stream")
    # NOTE(review): auto_now updates this timestamp on *every* save; if this
    # is meant to record when the invitation was created, auto_now_add would
    # be the usual choice -- confirm the intended semantics.
    invited_at: datetime.datetime = models.DateTimeField(auto_now=True)
    realm_creation: bool = models.BooleanField(default=False)
    # Indicates whether the user needs a password.  Users who were
    # created via SSO style auth (e.g. GitHub/Google) generally do not.
    password_required: bool = models.BooleanField(default=True)
    # status: whether an object has been confirmed.
    #   if confirmed, set to confirmation.settings.STATUS_ACTIVE
    status: int = models.IntegerField(default=0)
    # The realm should only ever be None for PreregistrationUser
    # objects created as part of realm creation.
    realm: Optional[Realm] = models.ForeignKey(Realm, null=True, on_delete=CASCADE)
    # These values should be consistent with the values
    # in settings_config.user_role_values.
    INVITE_AS = dict(
        REALM_OWNER=100,
        REALM_ADMIN=200,
        MODERATOR=300,
        MEMBER=400,
        GUEST_USER=600,
    )
    invited_as: int = models.PositiveSmallIntegerField(default=INVITE_AS["MEMBER"])
def filter_to_valid_prereg_users(query: QuerySet) -> QuerySet:
    """Narrow a PreregistrationUser queryset to still-pending invitations:
    not yet used, not revoked, and not past the configured validity window."""
    cutoff = timezone_now() - datetime.timedelta(
        days=settings.INVITATION_LINK_VALIDITY_DAYS
    )
    used_or_revoked = [
        confirmation_settings.STATUS_ACTIVE,
        confirmation_settings.STATUS_REVOKED,
    ]
    return query.filter(invited_at__gte=cutoff).exclude(status__in=used_or_revoked)
class MultiuseInvite(models.Model):
    """An invitation link that can be used by multiple users to join the
    realm, with a preconfigured role and initial stream subscriptions."""
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    referred_by: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    streams: Manager = models.ManyToManyField("Stream")
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    invited_as: int = models.PositiveSmallIntegerField(
        default=PreregistrationUser.INVITE_AS["MEMBER"]
    )
class EmailChangeStatus(models.Model):
    """Tracks a pending change of a user's email address, confirmed via
    the confirmation-link flow (see the status field below)."""
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    new_email: str = models.EmailField()
    old_email: str = models.EmailField()
    updated_at: datetime.datetime = models.DateTimeField(auto_now=True)
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    # status: whether an object has been confirmed.
    #   if confirmed, set to confirmation.settings.STATUS_ACTIVE
    status: int = models.IntegerField(default=0)
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
class AbstractPushDeviceToken(models.Model):
    """Common fields for push notification device tokens (APNs and GCM)."""
    APNS = 1
    GCM = 2
    KINDS = (
        (APNS, "apns"),
        (GCM, "gcm"),
    )
    kind: int = models.PositiveSmallIntegerField(choices=KINDS)
    # The token is a unique device-specific token that is
    # sent to us from each device:
    #   - APNS token if kind == APNS
    #   - GCM registration id if kind == GCM
    token: str = models.CharField(max_length=4096, db_index=True)
    # TODO: last_updated should be renamed date_created, since it is
    # no longer maintained as a last_updated value.
    last_updated: datetime.datetime = models.DateTimeField(auto_now=True)
    # [optional] Contains the app id of the device if it is an iOS device
    ios_app_id: Optional[str] = models.TextField(null=True)
    class Meta:
        abstract = True
class PushDeviceToken(AbstractPushDeviceToken):
    """A push device token registered by a user of this server."""
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    # The user whose device this is
    user: UserProfile = models.ForeignKey(UserProfile, db_index=True, on_delete=CASCADE)
    class Meta:
        unique_together = ("user", "kind", "token")
def generate_email_token_for_stream() -> str:
    """Generate a fresh 32-character hex token for a stream's email address."""
    return secrets.token_bytes(16).hex()
class Stream(models.Model):
    """A stream: a named channel of messages within a realm."""
    MAX_NAME_LENGTH = 60
    MAX_DESCRIPTION_LENGTH = 1024
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    name: str = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True)
    realm: Realm = models.ForeignKey(Realm, db_index=True, on_delete=CASCADE)
    date_created: datetime.datetime = models.DateTimeField(default=timezone_now)
    deactivated: bool = models.BooleanField(default=False)
    description: str = models.CharField(max_length=MAX_DESCRIPTION_LENGTH, default="")
    rendered_description: str = models.TextField(default="")
    # Foreign key to the Recipient object for STREAM type messages to this stream.
    recipient = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)
    invite_only: Optional[bool] = models.BooleanField(null=True, default=False)
    history_public_to_subscribers: bool = models.BooleanField(default=False)
    # Whether this stream's content should be published by the web-public archive features
    is_web_public: bool = models.BooleanField(default=False)
    STREAM_POST_POLICY_EVERYONE = 1
    STREAM_POST_POLICY_ADMINS = 2
    STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS = 3
    STREAM_POST_POLICY_MODERATORS = 4
    # TODO: Implement policy to restrict posting to a user group or admins.
    # Who in the organization has permission to send messages to this stream.
    stream_post_policy: int = models.PositiveSmallIntegerField(default=STREAM_POST_POLICY_EVERYONE)
    STREAM_POST_POLICY_TYPES = [
        STREAM_POST_POLICY_EVERYONE,
        STREAM_POST_POLICY_ADMINS,
        STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS,
        STREAM_POST_POLICY_MODERATORS,
    ]
    # The unique thing about Zephyr public streams is that we never list their
    # users.  We may try to generalize this concept later, but for now
    # we just use a concrete field.  (Zephyr public streams aren't exactly like
    # invite-only streams--while both are private in terms of listing users,
    # for Zephyr we don't even list users to stream members, yet membership
    # is more public in the sense that you don't need a Zulip invite to join.)
    # This field is populated directly from UserProfile.is_zephyr_mirror_realm,
    # and the reason for denormalizing field is performance.
    is_in_zephyr_realm: bool = models.BooleanField(default=False)
    # Used by the e-mail forwarder.  The e-mail RFC specifies a maximum
    # e-mail length of 254, and our max stream length is 30, so we
    # have plenty of room for the token.
    email_token: str = models.CharField(
        max_length=32,
        default=generate_email_token_for_stream,
        unique=True,
    )
    # For old messages being automatically deleted.
    # Value NULL means "use retention policy of the realm".
    # Value -1 means "disable retention policy for this stream unconditionally".
    # Non-negative values have the natural meaning of "archive messages older than <value> days".
    MESSAGE_RETENTION_SPECIAL_VALUES_MAP = {
        "forever": -1,
        "realm_default": None,
    }
    message_retention_days: Optional[int] = models.IntegerField(null=True, default=None)
    # The very first message ID in the stream.  Used to help clients
    # determine whether they might need to display "more topics" for a
    # stream based on what messages they have cached.
    first_message_id: Optional[int] = models.IntegerField(null=True, db_index=True)
    def __str__(self) -> str:
        return f"<Stream: {self.name}>"
    def is_public(self) -> bool:
        # All streams are private in Zephyr mirroring realms.
        return not self.invite_only and not self.is_in_zephyr_realm
    def is_history_realm_public(self) -> bool:
        return self.is_public()
    def is_history_public_to_subscribers(self) -> bool:
        return self.history_public_to_subscribers
    # Stream fields included whenever a Stream object is provided to
    # Zulip clients via the API.  A few details worth noting:
    # * "id" is represented as "stream_id" in most API interfaces.
    # * "email_token" is not realm-public and thus is not included here.
    # * is_in_zephyr_realm is a backend-only optimization.
    # * "deactivated" streams are filtered from the API entirely.
    # * "realm" and "recipient" are not exposed to clients via the API.
    API_FIELDS = [
        "name",
        "id",
        "description",
        "rendered_description",
        "invite_only",
        "is_web_public",
        "stream_post_policy",
        "history_public_to_subscribers",
        "first_message_id",
        "message_retention_days",
        "date_created",
    ]
    @staticmethod
    def get_client_data(query: QuerySet) -> List[Dict[str, Any]]:
        """Convert a Stream queryset into API dicts, fetching only API_FIELDS."""
        query = query.only(*Stream.API_FIELDS)
        return [row.to_dict() for row in query]
    def to_dict(self) -> Dict[str, Any]:
        """Return this stream's API representation (see API_FIELDS above)."""
        result = {}
        for field_name in self.API_FIELDS:
            if field_name == "id":
                result["stream_id"] = self.id
                continue
            elif field_name == "date_created":
                result["date_created"] = datetime_to_timestamp(self.date_created)
                continue
            result[field_name] = getattr(self, field_name)
        # Deprecated alias for clients predating stream_post_policy.
        result["is_announcement_only"] = self.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS
        return result
# Keep cached stream data coherent: flush whenever a Stream is saved or deleted.
post_save.connect(flush_stream, sender=Stream)
post_delete.connect(flush_stream, sender=Stream)
class MutedTopic(models.Model):
    """Records that a user has muted a (stream, topic) pair."""
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    stream: Stream = models.ForeignKey(Stream, on_delete=CASCADE)
    recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
    topic_name: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH)
    # The default value for date_muted is a few weeks before tracking
    # of when topics were muted was first introduced.  It's designed
    # to be obviously incorrect so that users can tell it's backfilled data.
    date_muted: datetime.datetime = models.DateTimeField(
        default=datetime.datetime(2020, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)
    )
    class Meta:
        unique_together = ("user_profile", "stream", "topic_name")
    def __str__(self) -> str:
        return f"<MutedTopic: ({self.user_profile.email}, {self.stream.name}, {self.topic_name}, {self.date_muted})>"
class MutedUser(models.Model):
    """Records that user_profile has muted muted_user."""
    user_profile = models.ForeignKey(UserProfile, related_name="+", on_delete=CASCADE)
    muted_user = models.ForeignKey(UserProfile, related_name="+", on_delete=CASCADE)
    date_muted: datetime.datetime = models.DateTimeField(default=timezone_now)
    class Meta:
        unique_together = ("user_profile", "muted_user")
    def __str__(self) -> str:
        return f"<MutedUser: {self.user_profile.email} -> {self.muted_user.email}>"
# Keep the muted-users cache in sync with MutedUser changes.
post_save.connect(flush_muting_users_cache, sender=MutedUser)
post_delete.connect(flush_muting_users_cache, sender=MutedUser)
class Client(models.Model):
    """The API client (e.g. "website") an event came from; rows are created
    on first use of a name (see get_client_remote_cache's get_or_create)."""
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    name: str = models.CharField(max_length=30, db_index=True, unique=True)
    def __str__(self) -> str:
        return f"<Client: {self.name}>"
# Process-local cache of Client objects, keyed on cache.KEY_PREFIX + name;
# see get_client below.
get_client_cache: Dict[str, Client] = {}
def clear_client_cache() -> None: # nocoverage
    """Reset the process-local Client cache (e.g. when cache keys change)."""
    # Rebinds (rather than mutates) the module global deliberately.
    global get_client_cache
    get_client_cache = {}
def get_client(name: str) -> Client:
    """Look up (or create) the Client with the given name, through a
    two-level cache: a process-local dict in front of the remote cache."""
    # Read KEY_PREFIX through the module so we always see its current value.
    cache_name = cache.KEY_PREFIX + name
    try:
        return get_client_cache[cache_name]
    except KeyError:
        client = get_client_remote_cache(name)
        get_client_cache[cache_name] = client
        return client
def get_client_cache_key(name: str) -> str:
    """Remote-cache key used by get_client_remote_cache."""
    digest = make_safe_digest(name)
    return f"get_client:{digest}"
@cache_with_key(get_client_cache_key, timeout=3600 * 24 * 7)
def get_client_remote_cache(name: str) -> Client:
    """Fetch the Client row with this name, creating it if missing; the
    result is kept in the remote cache for a week."""
    client, _created = Client.objects.get_or_create(name=name)
    return client
@cache_with_key(get_stream_cache_key, timeout=3600 * 24 * 7)
def get_realm_stream(stream_name: str, realm_id: int) -> Stream:
    """Case-insensitive stream lookup by name within a realm; cached for a week."""
    return Stream.objects.select_related().get(name__iexact=stream_name.strip(), realm_id=realm_id)
def get_active_streams(realm: Optional[Realm]) -> QuerySet:
    # TODO: Change return type to QuerySet[Stream]
    # NOTE: Return value is used as a QuerySet, so cannot currently be Sequence[QuerySet]
    """
    Return all streams (including invite-only streams) that have not been deactivated.
    """
    # NOTE(review): realm=None would match no rows (NULL FK filter) --
    # presumably callers always pass a real Realm; confirm.
    return Stream.objects.filter(realm=realm, deactivated=False)
def get_stream(stream_name: str, realm: Realm) -> Stream:
    """
    Callers that don't have a Realm object already available should use
    get_realm_stream directly, to avoid unnecessarily fetching the
    Realm object.
    """
    # Delegates to the cached, case-insensitive lookup keyed on realm id.
    return get_realm_stream(stream_name, realm.id)
def get_stream_by_id_in_realm(stream_id: int, realm: Realm) -> Stream:
    """Fetch a stream by id, verifying it belongs to *realm*; raises
    Stream.DoesNotExist otherwise."""
    return Stream.objects.select_related().get(id=stream_id, realm=realm)
def bulk_get_streams(realm: Realm, stream_names: STREAM_NAMES) -> Dict[str, Any]:
    """Case-insensitively fetch the named active streams in *realm*,
    returning a dict mapping lowercased stream name -> Stream, backed by
    the per-stream cache (missing names are simply absent from the dict)."""
    # Cache-miss path: one query for all missing names.
    def fetch_streams_by_name(stream_names: List[str]) -> Sequence[Stream]:
        #
        # This should be just
        #
        # Stream.objects.select_related().filter(name__iexact__in=stream_names,
        # realm_id=realm_id)
        #
        # But chaining __in and __iexact doesn't work with Django's
        # ORM, so we have the following hack to construct the relevant where clause
        where_clause = (
            "upper(zerver_stream.name::text) IN (SELECT upper(name) FROM unnest(%s) AS name)"
        )
        return (
            get_active_streams(realm)
            .select_related()
            .extra(where=[where_clause], params=(list(stream_names),))
        )
    def stream_name_to_cache_key(stream_name: str) -> str:
        return get_stream_cache_key(stream_name, realm.id)
    def stream_to_lower_name(stream: Stream) -> str:
        return stream.name.lower()
    return bulk_cached_fetch(
        stream_name_to_cache_key,
        fetch_streams_by_name,
        [stream_name.lower() for stream_name in stream_names],
        id_fetcher=stream_to_lower_name,
    )
def get_huddle_recipient(user_profile_ids: Set[int]) -> Recipient:
    """Return the Recipient for the huddle of exactly these users (see get_huddle)."""
    # The caller should ensure that user_profile_ids includes
    # the sender. Note that get_huddle hits the cache, and then
    # we hit another cache to get the recipient. We may want to
    # unify our caching strategy here.
    huddle = get_huddle(list(user_profile_ids))
    return huddle.recipient
def get_huddle_user_ids(recipient: Recipient) -> List[int]:
    """User ids subscribed to this huddle recipient, ascending by id.

    NOTE(review): the value is actually a lazy values_list QuerySet; the
    List[int] annotation only holds behaviorally if callers just iterate
    it -- confirm.
    """
    assert recipient.type == Recipient.HUDDLE
    return (
        Subscription.objects.filter(
            recipient=recipient,
        )
        .order_by("user_profile_id")
        .values_list("user_profile_id", flat=True)
    )
def bulk_get_huddle_user_ids(recipients: List[Recipient]) -> Dict[int, List[int]]:
    """
    Takes a list of huddle-type recipients, returns a dict
    mapping recipient id to list of user ids in the huddle.

    User ids within each list are in ascending order.
    """
    assert all(recipient.type == Recipient.HUDDLE for recipient in recipients)
    if not recipients:
        return {}
    subscriptions = Subscription.objects.filter(
        recipient__in=recipients,
    ).order_by("user_profile_id")
    # Single pass over the subscriptions, grouping by recipient_id, instead
    # of rescanning the whole subscription list once per recipient (the
    # previous version was O(len(recipients) * len(subscriptions))).
    # Ordering within each list is preserved because `subscriptions` is
    # already sorted by user_profile_id.
    result_dict: Dict[int, List[int]] = {recipient.id: [] for recipient in recipients}
    for subscription in subscriptions:
        result_dict[subscription.recipient_id].append(subscription.user_profile_id)
    return result_dict
class AbstractMessage(models.Model):
    """Columns shared by Message and its archived twin, ArchivedMessage."""
    sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
    # The message's topic.
    #
    # Early versions of Zulip called this concept a "subject", as in an email
    # "subject line", before changing to "topic" in 2013 (commit dac5a46fa).
    # UI and user documentation now consistently say "topic". New APIs and
    # new code should generally also say "topic".
    #
    # See also the `topic_name` method on `Message`.
    subject: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH, db_index=True)
    # Raw content as entered by the sender; rendered_content caches the
    # rendered form (see Message.need_to_render_content).
    content: str = models.TextField()
    rendered_content: Optional[str] = models.TextField(null=True)
    # Version of the renderer that produced rendered_content; lets us
    # detect stale renderings after renderer upgrades.
    rendered_content_version: Optional[int] = models.IntegerField(null=True)
    date_sent: datetime.datetime = models.DateTimeField("date sent", db_index=True)
    sending_client: Client = models.ForeignKey(Client, on_delete=CASCADE)
    last_edit_time: Optional[datetime.datetime] = models.DateTimeField(null=True)
    # A JSON-encoded list of objects describing any past edits to this
    # message, oldest first.
    edit_history: Optional[str] = models.TextField(null=True)
    # Denormalized properties of the content, indexed for filtering.
    has_attachment: bool = models.BooleanField(default=False, db_index=True)
    has_image: bool = models.BooleanField(default=False, db_index=True)
    has_link: bool = models.BooleanField(default=False, db_index=True)
    class Meta:
        abstract = True
    def __str__(self) -> str:
        display_recipient = get_display_recipient(self.recipient)
        return f"<{self.__class__.__name__}: {display_recipient} / {self.subject} / {self.sender}>"
class ArchiveTransaction(models.Model):
    """One batch of message archiving; archived rows point at the
    transaction that moved them (see ArchivedMessage.archive_transaction)."""
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    timestamp: datetime.datetime = models.DateTimeField(default=timezone_now, db_index=True)
    # Marks if the data archived in this transaction has been restored:
    restored: bool = models.BooleanField(default=False, db_index=True)
    type: int = models.PositiveSmallIntegerField(db_index=True)
    # Valid types:
    RETENTION_POLICY_BASED = 1 # Archiving was executed due to automated retention policies
    MANUAL = 2 # Archiving was run manually, via move_messages_to_archive function
    # ForeignKey to the realm with which objects archived in this transaction are associated.
    # If type is set to MANUAL, this should be null.
    realm: Optional[Realm] = models.ForeignKey(Realm, null=True, on_delete=CASCADE)
    def __str__(self) -> str:
        return "ArchiveTransaction id: {id}, type: {type}, realm: {realm}, timestamp: {timestamp}".format(
            id=self.id,
            type="MANUAL" if self.type == self.MANUAL else "RETENTION_POLICY_BASED",
            realm=self.realm.string_id if self.realm else None,
            timestamp=self.timestamp,
        )
class ArchivedMessage(AbstractMessage):
    """Used as a temporary holding place for deleted messages before they
    are permanently deleted. This is an important part of a robust
    'message retention' feature.
    """
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    # The archiving batch this row belongs to; enables group restore.
    archive_transaction: ArchiveTransaction = models.ForeignKey(
        ArchiveTransaction, on_delete=CASCADE
    )
class Message(AbstractMessage):
    """A Zulip message; see AbstractMessage for the column definitions."""

    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")

    def topic_name(self) -> str:
        """
        Please start using this helper to facilitate an
        eventual switch over to a separate topic table.
        """
        return self.subject

    def set_topic_name(self, topic_name: str) -> None:
        """Counterpart of topic_name(); writes the (legacy) subject column."""
        self.subject = topic_name

    def is_stream_message(self) -> bool:
        """
        Find out whether a message is a stream message by
        looking up its recipient.type. TODO: Make this
        an easier operation by denormalizing the message
        type onto Message, either explicitly (message.type)
        or implicitly (message.stream_id is not None).
        """
        return self.recipient.type == Recipient.STREAM

    def get_realm(self) -> Realm:
        """The realm this message lives in (derived from its sender)."""
        return self.sender.realm

    def save_rendered_content(self) -> None:
        """Persist only the rendered-content columns of this row."""
        self.save(update_fields=["rendered_content", "rendered_content_version"])

    @staticmethod
    def need_to_render_content(
        rendered_content: Optional[str],
        rendered_content_version: Optional[int],
        markdown_version: int,
    ) -> bool:
        """Whether rendered_content is missing or was produced by an older
        Markdown processor than *markdown_version*."""
        return (
            rendered_content is None
            or rendered_content_version is None
            or rendered_content_version < markdown_version
        )

    def sent_by_human(self) -> bool:
        """Used to determine whether a message was sent by a full Zulip UI
        style client (and thus whether the message should be treated
        as sent by a human and automatically marked as read for the
        sender). The purpose of this distinction is to ensure that
        message sent to the user by e.g. a Google Calendar integration
        using the user's own API key don't get marked as read
        automatically.
        """
        sending_client = self.sending_client.name.lower()
        return (
            sending_client
            in (
                "zulipandroid",
                "zulipios",
                "zulipdesktop",
                "zulipmobile",
                "zulipelectron",
                "zulipterminal",
                "snipe",
                "website",
                "ios",
                "android",
            )
        ) or ("desktop app" in sending_client)

    @staticmethod
    def is_status_message(content: str, rendered_content: str) -> bool:
        """
        "status messages" start with /me and have special rendering:
            /me loves chocolate -> Full Name loves chocolate
        """
        # Simplified from an `if ...: return True / return False` chain.
        # The rendered_content parameter is unused but kept so existing
        # callers' positional calls keep working.
        return content.startswith("/me ")
def get_context_for_message(message: Message) -> Sequence[Message]:
    """Up to 10 messages immediately preceding *message* in the same
    recipient + topic, at most 15 minutes older than it, newest first."""
    # TODO: Change return type to QuerySet[Message]
    window_start = message.date_sent - timedelta(minutes=15)
    preceding = Message.objects.filter(
        recipient_id=message.recipient_id,
        subject=message.subject,
        id__lt=message.id,
        date_sent__gt=window_start,
    )
    return preceding.order_by("-id")[:10]
# Invalidate the per-message cache whenever a Message row is saved.
post_save.connect(flush_message, sender=Message)
class AbstractSubMessage(models.Model):
    """Columns shared by SubMessage and ArchivedSubMessage."""
    # We can send little text messages that are associated with a regular
    # Zulip message. These can be used for experimental widgets like embedded
    # games, surveys, mini threads, etc. These are designed to be pretty
    # generic in purpose.
    sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    msg_type: str = models.TextField()
    content: str = models.TextField()
    class Meta:
        abstract = True
class SubMessage(AbstractSubMessage):
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    message: Message = models.ForeignKey(Message, on_delete=CASCADE)
    @staticmethod
    def get_raw_db_rows(needed_ids: List[int]) -> List[Dict[str, Any]]:
        """Submessage rows for the given message ids as plain dicts,
        ordered by (message_id, id)."""
        fields = ["id", "message_id", "sender_id", "msg_type", "content"]
        query = SubMessage.objects.filter(message_id__in=needed_ids).values(*fields)
        query = query.order_by("message_id", "id")
        return list(query)
class ArchivedSubMessage(AbstractSubMessage):
    """Archived counterpart of SubMessage (see ArchivedMessage)."""
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    message: ArchivedMessage = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)
# Invalidate cached submessage data whenever a SubMessage row is saved.
post_save.connect(flush_submessage, sender=SubMessage)
class Draft(models.Model):
    """Server-side storage model for storing drafts so that drafts can be synced across
    multiple clients/devices.
    """

    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
    recipient: Optional[Recipient] = models.ForeignKey(
        Recipient, null=True, on_delete=models.SET_NULL
    )
    topic: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH, db_index=True)
    content: str = models.TextField()  # Length should not exceed MAX_MESSAGE_LENGTH
    last_edit_time: datetime.datetime = models.DateTimeField(db_index=True)

    def __str__(self) -> str:
        return f"<{self.__class__.__name__}: {self.user_profile.email} / {self.id} / {self.last_edit_time}>"

    def to_dict(self) -> Dict[str, Any]:
        """Serialize for the drafts API: "type" is "stream", "private", or
        "" (no recipient); "to" is [stream_id] or the other users' ids."""
        if self.recipient is None:
            _type = ""
            to = []
        elif self.recipient.type == Recipient.STREAM:
            _type = "stream"
            to = [self.recipient.type_id]
        else:
            _type = "private"
            if self.recipient.type == Recipient.PERSONAL:
                to = [self.recipient.type_id]
            else:
                to = []
                for r in get_display_recipient(self.recipient):
                    assert not isinstance(r, str)  # It will only be a string for streams
                    # Exclude the draft's owner from the huddle recipient
                    # list. (Was `if not r["id"] == ...`; != is clearer.)
                    if r["id"] != self.user_profile_id:
                        to.append(r["id"])
        return {
            "id": self.id,
            "type": _type,
            "to": to,
            "topic": self.topic,
            "content": self.content,
            "timestamp": int(self.last_edit_time.timestamp()),
        }
class AbstractReaction(models.Model):
    """For emoji reactions to messages (and potentially future reaction types).
    Emoji are surprisingly complicated to implement correctly. For details
    on how this subsystem works, see:
    https://zulip.readthedocs.io/en/latest/subsystems/emoji.html
    """
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    # The user-facing name for an emoji reaction. With emoji aliases,
    # there may be multiple accepted names for a given emoji; this
    # field encodes which one the user selected.
    emoji_name: str = models.TextField()
    UNICODE_EMOJI = "unicode_emoji"
    REALM_EMOJI = "realm_emoji"
    ZULIP_EXTRA_EMOJI = "zulip_extra_emoji"
    REACTION_TYPES = (
        (UNICODE_EMOJI, gettext_lazy("Unicode emoji")),
        (REALM_EMOJI, gettext_lazy("Custom emoji")),
        (ZULIP_EXTRA_EMOJI, gettext_lazy("Zulip extra emoji")),
    )
    reaction_type: str = models.CharField(
        default=UNICODE_EMOJI, choices=REACTION_TYPES, max_length=30
    )
    # A string that uniquely identifies a particular emoji. The format varies
    # by type:
    #
    # * For Unicode emoji, a dash-separated hex encoding of the sequence of
    #   Unicode codepoints that define this emoji in the Unicode
    #   specification. For examples, see "non_qualified" or "unified" in the
    #   following data, with "non_qualified" taking precedence when both present:
    #   https://raw.githubusercontent.com/iamcal/emoji-data/master/emoji_pretty.json
    #
    # * For realm emoji (aka user uploaded custom emoji), the ID
    #   (in ASCII decimal) of the RealmEmoji object.
    #
    # * For "Zulip extra emoji" (like :zulip:), the filename of the emoji.
    emoji_code: str = models.TextField()
    class Meta:
        abstract = True
        # "message" is declared on the concrete subclasses (Reaction,
        # ArchivedReaction); Django resolves unique_together against them.
        unique_together = (
            ("user_profile", "message", "emoji_name"),
            ("user_profile", "message", "reaction_type", "emoji_code"),
        )
class Reaction(AbstractReaction):
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    message: Message = models.ForeignKey(Message, on_delete=CASCADE)
    @staticmethod
    def get_raw_db_rows(needed_ids: List[int]) -> List[Dict[str, Any]]:
        """Reaction rows (as dicts) for the given message ids.

        NOTE(review): actually returns a lazy values() QuerySet despite
        the List annotation -- confirm callers only iterate it.
        """
        fields = [
            "message_id",
            "emoji_name",
            "emoji_code",
            "reaction_type",
            "user_profile__email",
            "user_profile_id",
            "user_profile__full_name",
        ]
        return Reaction.objects.filter(message_id__in=needed_ids).values(*fields)
    def __str__(self) -> str:
        return f"{self.user_profile.email} / {self.message.id} / {self.emoji_name}"
class ArchivedReaction(AbstractReaction):
    """Archived counterpart of Reaction (see ArchivedMessage)."""
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    message: ArchivedMessage = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)
# Whenever a message is sent, for each user subscribed to the
# corresponding Recipient object (that is not long-term idle), we add
# a row to the UserMessage table indicating that that user received
# that message. This table allows us to quickly query any user's last
# 1000 messages to generate the home view and search exactly the
# user's message history.
#
# The long-term idle optimization is extremely important for large,
# open organizations, and is described in detail here:
# https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html#soft-deactivation
#
# In particular, new messages to public streams will only generate
# UserMessage rows for Members who are long_term_idle if they would
# have nonzero flags for the message (E.g. a mention, alert word, or
# mobile push notification).
#
# The flags field stores metadata like whether the user has read the
# message, starred or collapsed the message, was mentioned in the
# message, etc. We use postgres partial indexes on flags to make
# queries for "User X's messages with flag Y" extremely fast without
# consuming much storage space.
#
# UserMessage is the largest table in many Zulip installations, even
# though each row is only 4 integers.
class AbstractUserMessage(models.Model):
    """Per-(user, message) state: the join row plus a bitfield of flags.
    See the long comment above this class for when rows are created."""
    id: int = models.BigAutoField(primary_key=True)
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    # The order here is important! It's the order of fields in the bitfield.
    ALL_FLAGS = [
        "read",
        "starred",
        "collapsed",
        "mentioned",
        "wildcard_mentioned",
        # These next 4 flags are from features that have since been removed.
        "summarize_in_home",
        "summarize_in_stream",
        "force_expand",
        "force_collapse",
        # Whether the message contains any of the user's alert words.
        "has_alert_word",
        # The historical flag is used to mark messages which the user
        # did not receive when they were sent, but later added to
        # their history via e.g. starring the message. This is
        # important accounting for the "Subscribed to stream" dividers.
        "historical",
        # Whether the message is a private message; this flag is a
        # denormalization of message.recipient.type to support an
        # efficient index on UserMessage for a user's private messages.
        "is_private",
        # Whether we've sent a push notification to the user's mobile
        # devices for this message that has not been revoked.
        "active_mobile_push_notification",
    ]
    # Certain flags are used only for internal accounting within the
    # Zulip backend, and don't make sense to expose to the API.
    NON_API_FLAGS = {"is_private", "active_mobile_push_notification"}
    # Certain additional flags are just set once when the UserMessage
    # row is created.
    NON_EDITABLE_FLAGS = {
        # These flags are bookkeeping and don't make sense to edit.
        "has_alert_word",
        "mentioned",
        "wildcard_mentioned",
        "historical",
        # Unused flags can't be edited.
        "force_expand",
        "force_collapse",
        "summarize_in_home",
        "summarize_in_stream",
    }
    flags: BitHandler = BitField(flags=ALL_FLAGS, default=0)
    class Meta:
        abstract = True
        unique_together = ("user_profile", "message")
    @staticmethod
    def where_unread() -> str:
        # Use this for Django ORM queries to access unread message.
        # This custom SQL plays nice with our partial indexes. Grep
        # the code for example usage.
        # 1 == 2**0, the position of "read" in ALL_FLAGS.
        return "flags & 1 = 0"
    @staticmethod
    def where_starred() -> str:
        # Use this for Django ORM queries to access starred messages.
        # This custom SQL plays nice with our partial indexes. Grep
        # the code for example usage.
        #
        # The key detail is that e.g.
        # UserMessage.objects.filter(user_profile=user_profile, flags=UserMessage.flags.starred)
        # will generate a query involving `flags & 2 = 2`, which doesn't match our index.
        # 2 == 2**1, the position of "starred" in ALL_FLAGS.
        return "flags & 2 <> 0"
    @staticmethod
    def where_active_push_notification() -> str:
        # See where_starred for documentation.
        # 4096 == 2**12, the position of "active_mobile_push_notification".
        return "flags & 4096 <> 0"
    def flags_list(self) -> List[str]:
        """The API-visible flag names currently set on this row."""
        flags = int(self.flags)
        return self.flags_list_for_flags(flags)
    @staticmethod
    def flags_list_for_flags(val: int) -> List[str]:
        """
        This function is highly optimized, because it actually slows down
        sending messages in a naive implementation.
        """
        # UserMessage.ALL_FLAGS is the same list object defined on this
        # abstract base class (inherited by the subclass).
        flags = []
        mask = 1
        for flag in UserMessage.ALL_FLAGS:
            if (val & mask) and flag not in AbstractUserMessage.NON_API_FLAGS:
                flags.append(flag)
            mask <<= 1
        return flags
    def __str__(self) -> str:
        display_recipient = get_display_recipient(self.message.recipient)
        return f"<{self.__class__.__name__}: {display_recipient} / {self.user_profile.email} ({self.flags_list()})>"
class UserMessage(AbstractUserMessage):
    # The concrete "message" FK that the abstract Meta.unique_together
    # and __str__ refer to.
    message: Message = models.ForeignKey(Message, on_delete=CASCADE)
def get_usermessage_by_message_id(
    user_profile: UserProfile, message_id: int
) -> Optional[UserMessage]:
    """The UserMessage row for (user_profile, message_id), or None when the
    user has no row for that message."""
    rows = UserMessage.objects.select_related()
    try:
        return rows.get(user_profile=user_profile, message_id=message_id)
    except UserMessage.DoesNotExist:
        return None
class ArchivedUserMessage(AbstractUserMessage):
    """Used as a temporary holding place for deleted UserMessages objects
    before they are permanently deleted. This is an important part of
    a robust 'message retention' feature.
    """
    # Annotation corrected from Message to ArchivedMessage to match the FK target.
    message: ArchivedMessage = models.ForeignKey(ArchivedMessage, on_delete=CASCADE)
class AbstractAttachment(models.Model):
    """Columns shared by Attachment and ArchivedAttachment."""
    file_name: str = models.TextField(db_index=True)
    # path_id is a storage location agnostic representation of the path of the file.
    # If the path of a file is http://localhost:9991/user_uploads/a/b/abc/temp_file.py
    # then its path_id will be a/b/abc/temp_file.py.
    path_id: str = models.TextField(db_index=True, unique=True)
    owner: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    realm: Optional[Realm] = models.ForeignKey(Realm, blank=True, null=True, on_delete=CASCADE)
    create_time: datetime.datetime = models.DateTimeField(
        default=timezone_now,
        db_index=True,
    )
    # Size of the uploaded file, in bytes
    size: int = models.IntegerField()
    # The two fields below lets us avoid looking up the corresponding
    # messages/streams to check permissions before serving these files.
    # Whether this attachment has been posted to a public stream, and
    # thus should be available to all non-guest users in the
    # organization (even if they weren't a recipient of a message
    # linking to it).
    is_realm_public: bool = models.BooleanField(default=False)
    # Whether this attachment has been posted to a web-public stream,
    # and thus should be available to everyone on the internet, even
    # if the person isn't logged in.
    is_web_public: bool = models.BooleanField(default=False)
    class Meta:
        abstract = True
    def __str__(self) -> str:
        return f"<{self.__class__.__name__}: {self.file_name}>"
class ArchivedAttachment(AbstractAttachment):
    """Used as a temporary holding place for deleted Attachment objects
    before they are permanently deleted. This is an important part of
    a robust 'message retention' feature.
    """
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    messages: Manager = models.ManyToManyField(ArchivedMessage)
class Attachment(AbstractAttachment):
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    messages: Manager = models.ManyToManyField(Message)
    def is_claimed(self) -> bool:
        """Whether at least one message links to this upload."""
        return self.messages.count() > 0
    def to_dict(self) -> Dict[str, Any]:
        """Serialize for the attachments API, with ms UNIX timestamps."""
        return {
            "id": self.id,
            "name": self.file_name,
            "path_id": self.path_id,
            "size": self.size,
            # convert to JavaScript-style UNIX timestamp so we can take
            # advantage of client timezones.
            # NOTE(review): time.mktime interprets the timetuple in the
            # server's local timezone -- confirm this is intended given
            # create_time defaults to timezone_now (aware datetimes).
            "create_time": int(time.mktime(self.create_time.timetuple()) * 1000),
            "messages": [
                {
                    "id": m.id,
                    "date_sent": int(time.mktime(m.date_sent.timetuple()) * 1000),
                }
                for m in self.messages.all()
            ],
        }
# Attachment changes affect the realm's used-upload-space accounting.
post_save.connect(flush_used_upload_space_cache, sender=Attachment)
post_delete.connect(flush_used_upload_space_cache, sender=Attachment)
def validate_attachment_request(user_profile: UserProfile, path_id: str) -> Optional[bool]:
    """Can *user_profile* access the upload at *path_id*?

    Returns None when no such attachment exists, otherwise True/False.
    """
    try:
        attachment = Attachment.objects.get(path_id=path_id)
    except Attachment.DoesNotExist:
        return None
    if user_profile == attachment.owner:
        # If you own the file, you can access it.
        return True
    if (
        attachment.is_realm_public
        and attachment.realm == user_profile.realm
        and user_profile.can_access_public_streams()
    ):
        # Any user in the realm can access realm-public files
        return True
    messages = attachment.messages.all()
    if UserMessage.objects.filter(user_profile=user_profile, message__in=messages).exists():
        # If it was sent in a private message or private stream
        # message, then anyone who received that message can access it.
        return True
    # The user didn't receive any of the messages that included this
    # attachment. But they might still have access to it, if it was
    # sent to a stream they are on where history is public to
    # subscribers.
    # These are subscriptions to a stream one of the messages was sent to
    relevant_stream_ids = Subscription.objects.filter(
        user_profile=user_profile,
        active=True,
        recipient__type=Recipient.STREAM,
        recipient__in=[m.recipient_id for m in messages],
    ).values_list("recipient__type_id", flat=True)
    if len(relevant_stream_ids) == 0:
        return False
    return Stream.objects.filter(
        id__in=relevant_stream_ids, history_public_to_subscribers=True
    ).exists()
def get_old_unclaimed_attachments(weeks_ago: int) -> Sequence[Attachment]:
    """Attachments referenced by no message and older than *weeks_ago* weeks."""
    # TODO: Change return type to QuerySet[Attachment]
    cutoff = timezone_now() - datetime.timedelta(weeks=weeks_ago)
    return Attachment.objects.filter(messages=None, create_time__lt=cutoff)
class Subscription(models.Model):
    """A user's subscription to one Recipient (stream or conversation),
    plus their per-subscription settings (color, notifications, ...)."""
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
    # Whether the user has since unsubscribed. We mark Subscription
    # objects as inactive, rather than deleting them, when a user
    # unsubscribes, so we can preserve user customizations like
    # notification settings, stream color, etc., if the user later
    # resubscribes.
    active: bool = models.BooleanField(default=True)
    # This is a denormalization designed to improve the performance of
    # bulk queries of Subscription objects, since whether the subscribed
    # user is active tends to be a key condition in those queries.
    # We intentionally don't specify a default value to promote thinking
    # about this explicitly, as in some special cases, such as data import,
    # we may be creating Subscription objects for a user that's deactivated.
    is_user_active: bool = models.BooleanField()
    ROLE_STREAM_ADMINISTRATOR = 20
    ROLE_MEMBER = 50
    ROLE_TYPES = [
        ROLE_STREAM_ADMINISTRATOR,
        ROLE_MEMBER,
    ]
    role: int = models.PositiveSmallIntegerField(default=ROLE_MEMBER, db_index=True)
    # Whether this user had muted this stream.
    is_muted: Optional[bool] = models.BooleanField(null=True, default=False)
    DEFAULT_STREAM_COLOR = "#c2c2c2"
    color: str = models.CharField(max_length=10, default=DEFAULT_STREAM_COLOR)
    pin_to_top: bool = models.BooleanField(default=False)
    # These fields are stream-level overrides for the user's default
    # configuration for notification, configured in UserProfile. The
    # default, None, means we just inherit the user-level default.
    desktop_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
    audible_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
    push_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
    email_notifications: Optional[bool] = models.BooleanField(null=True, default=None)
    wildcard_mentions_notify: Optional[bool] = models.BooleanField(null=True, default=None)
    class Meta:
        unique_together = ("user_profile", "recipient")
        # Partial index covering only active subscriptions of active users,
        # matching the common filter in bulk subscriber queries.
        indexes = [
            models.Index(
                fields=("recipient", "user_profile"),
                name="zerver_subscription_recipient_id_user_profile_id_idx",
                condition=Q(active=True, is_user_active=True),
            ),
        ]
    def __str__(self) -> str:
        return f"<Subscription: {self.user_profile} -> {self.recipient}>"
    @property
    def is_stream_admin(self) -> bool:
        return self.role == Subscription.ROLE_STREAM_ADMINISTRATOR
    # Subscription fields included whenever a Subscription object is provided to
    # Zulip clients via the API. A few details worth noting:
    # * These fields will generally be merged with Stream.API_FIELDS
    # data about the stream.
    # * "user_profile" is usually implied as full API access to Subscription
    # is primarily done for the current user; API access to other users'
    # subscriptions is generally limited to boolean yes/no.
    # * "id" and "recipient_id" are not included as they are not used
    # in the Zulip API; it's an internal implementation detail.
    # Subscription objects are always looked up in the API via
    # (user_profile, stream) pairs.
    # * "active" is often excluded in API use cases where it is implied.
    # * "is_muted" often needs to be copied to not "in_home_view" for
    # backwards-compatibility.
    API_FIELDS = [
        "color",
        "is_muted",
        "pin_to_top",
        "audible_notifications",
        "desktop_notifications",
        "email_notifications",
        "push_notifications",
        "wildcard_mentions_notify",
        "role",
    ]
@cache_with_key(user_profile_by_id_cache_key, timeout=3600 * 24 * 7)
def get_user_profile_by_id(uid: int) -> UserProfile:
    """Fetch a UserProfile by id; cached for a week."""
    return UserProfile.objects.select_related().get(id=uid)
def get_user_profile_by_email(email: str) -> UserProfile:
    """This function is intended to be used for
    manual manage.py shell work; robust code must use get_user or
    get_user_by_delivery_email instead, because Zulip supports
    multiple users with a given (delivery) email address existing on a
    single server (in different realms).
    """
    # Case-insensitive, with surrounding whitespace stripped.
    return UserProfile.objects.select_related().get(delivery_email__iexact=email.strip())
@cache_with_key(user_profile_by_api_key_cache_key, timeout=3600 * 24 * 7)
def maybe_get_user_profile_by_api_key(api_key: str) -> Optional[UserProfile]:
    """Look up a user by API key, returning (and caching) None on a miss."""
    try:
        return UserProfile.objects.select_related().get(api_key=api_key)
    except UserProfile.DoesNotExist:
        # We will cache failed lookups with None. The
        # use case here is that broken API clients may
        # continually ask for the same wrong API key, and
        # we want to handle that as quickly as possible.
        return None
def get_user_profile_by_api_key(api_key: str) -> UserProfile:
    """Like maybe_get_user_profile_by_api_key, but raises
    UserProfile.DoesNotExist instead of returning None."""
    user_profile = maybe_get_user_profile_by_api_key(api_key)
    if user_profile is not None:
        return user_profile
    raise UserProfile.DoesNotExist()
def get_user_by_delivery_email(email: str, realm: Realm) -> UserProfile:
    """Fetches a user given their delivery email. For use in
    authentication/registration contexts. Do not use for user-facing
    views (e.g. Zulip API endpoints) as doing so would violate the
    EMAIL_ADDRESS_VISIBILITY_ADMINS security model. Use get_user in
    those code paths.
    """
    # Case-insensitive, with surrounding whitespace stripped.
    return UserProfile.objects.select_related().get(
        delivery_email__iexact=email.strip(), realm=realm
    )
def get_users_by_delivery_email(emails: Set[str], realm: Realm) -> QuerySet:
    """This is similar to get_user_by_delivery_email, and
    it has the same security caveats.  It gets multiple
    users and returns a QuerySet, since most callers
    will only need two or three fields.

    If you are using this to get large UserProfile objects, you are
    probably making a mistake, but if you must,
    then use `select_related`.
    """
    # (The implementation note below used to be a second triple-quoted
    # string, which was a no-op expression statement, not documentation.)
    #
    # Django doesn't support delivery_email__iexact__in, so
    # we simply OR all the filters that we'd do for the
    # one-email case.
    email_filter = Q()
    for email in emails:
        email_filter |= Q(delivery_email__iexact=email.strip())
    return UserProfile.objects.filter(realm=realm).filter(email_filter)
@cache_with_key(user_profile_cache_key, timeout=3600 * 24 * 7)
def get_user(email: str, realm: Realm) -> UserProfile:
    """Fetches the user by its visible-to-other users username (in the
    `email` field). For use in API contexts; do not use in
    authentication/registration contexts as doing so will break
    authentication in organizations using
    EMAIL_ADDRESS_VISIBILITY_ADMINS. In those code paths, use
    get_user_by_delivery_email.
    """
    # Case-insensitive match on the visible email; cached for a week.
    return UserProfile.objects.select_related().get(email__iexact=email.strip(), realm=realm)
def get_active_user(email: str, realm: Realm) -> UserProfile:
    """Variant of get_user that raises DoesNotExist for deactivated users.
    See get_user docstring for important usage notes."""
    user_profile = get_user(email, realm)
    if user_profile.is_active:
        return user_profile
    raise UserProfile.DoesNotExist()
def get_user_profile_by_id_in_realm(uid: int, realm: Realm) -> UserProfile:
    """Fetch a user by id, verifying membership in *realm*; raises
    UserProfile.DoesNotExist otherwise."""
    return UserProfile.objects.select_related().get(id=uid, realm=realm)
def get_active_user_profile_by_id_in_realm(uid: int, realm: Realm) -> UserProfile:
    """Like get_user_profile_by_id_in_realm, but raises DoesNotExist for
    deactivated accounts too."""
    user_profile = get_user_profile_by_id_in_realm(uid, realm)
    if user_profile.is_active:
        return user_profile
    raise UserProfile.DoesNotExist()
def get_user_including_cross_realm(email: str, realm: Optional[Realm] = None) -> UserProfile:
    """Like get_user, but also resolves cross-realm system bot emails
    (for which *realm* may be None)."""
    if not is_cross_realm_bot_email(email):
        assert realm is not None
        return get_user(email, realm)
    return get_system_bot(email)
@cache_with_key(bot_profile_cache_key, timeout=3600 * 24 * 7)
def get_system_bot(email: str) -> UserProfile:
    """Fetch a cross-realm system bot by email; cached for a week."""
    return UserProfile.objects.select_related().get(email__iexact=email.strip())
def get_user_by_id_in_realm_including_cross_realm(
    uid: int,
    realm: Optional[Realm],
) -> UserProfile:
    """Fetch a user by id, accepting members of *realm* as well as
    server-wide cross-realm bots; raises UserProfile.DoesNotExist otherwise."""
    user_profile = get_user_profile_by_id(uid)
    in_realm = user_profile.realm == realm
    # Note: This doesn't validate whether the `realm` passed in is
    # None/invalid for the CROSS_REALM_BOT_EMAILS case.
    is_cross_realm_bot = user_profile.delivery_email in settings.CROSS_REALM_BOT_EMAILS
    if in_realm or is_cross_realm_bot:
        return user_profile
    raise UserProfile.DoesNotExist()
@cache_with_key(realm_user_dicts_cache_key, timeout=3600 * 24 * 7)
def get_realm_user_dicts(realm_id: int) -> List[Dict[str, Any]]:
    """Return one dict per user in the realm, restricted to
    realm_user_dict_fields; cached for a week.

    The QuerySet is materialized with list() so the return value
    actually matches the declared List[Dict] type and the cache stores
    plain dicts rather than a lazy `.values()` QuerySet.
    """
    return list(
        UserProfile.objects.filter(
            realm_id=realm_id,
        ).values(*realm_user_dict_fields)
    )
@cache_with_key(active_user_ids_cache_key, timeout=3600 * 24 * 7)
def active_user_ids(realm_id: int) -> List[int]:
    """Return the IDs of all active users in the realm; cached for a week."""
    active = UserProfile.objects.filter(realm_id=realm_id, is_active=True)
    return list(active.values_list("id", flat=True))
@cache_with_key(active_non_guest_user_ids_cache_key, timeout=3600 * 24 * 7)
def active_non_guest_user_ids(realm_id: int) -> List[int]:
    """Return IDs of active, non-guest users in the realm; cached for a week."""
    non_guests = UserProfile.objects.filter(
        realm_id=realm_id,
        is_active=True,
    ).exclude(role=UserProfile.ROLE_GUEST)
    return list(non_guests.values_list("id", flat=True))
def get_source_profile(email: str, realm_id: int) -> Optional[UserProfile]:
    """Look up a user by delivery email in the given realm; None if either
    the realm or the user does not exist."""
    try:
        realm = get_realm_by_id(realm_id)
        return get_user_by_delivery_email(email, realm)
    except (Realm.DoesNotExist, UserProfile.DoesNotExist):
        return None
@cache_with_key(bot_dicts_in_realm_cache_key, timeout=3600 * 24 * 7)
def get_bot_dicts_in_realm(realm: Realm) -> List[Dict[str, Any]]:
    """Return one dict per bot user in the realm, restricted to
    bot_dict_fields; cached for a week.

    Materialized with list() so the return value matches the declared
    List[Dict] type and the cache stores plain dicts rather than a lazy
    `.values()` QuerySet.
    """
    return list(UserProfile.objects.filter(realm=realm, is_bot=True).values(*bot_dict_fields))
def is_cross_realm_bot_email(email: str) -> bool:
    """True if `email` belongs to one of the cross-realm system bots."""
    lowered = email.lower()
    return lowered in settings.CROSS_REALM_BOT_EMAILS
# The Huddle class represents a group of individuals who have had a
# group private message conversation together. The actual membership
# of the Huddle is stored in the Subscription table just like with
# Streams, and a hash of that list is stored in the huddle_hash field
# below, to support efficiently mapping from a set of users to the
# corresponding Huddle object.
class Huddle(models.Model):
    """A group private message conversation; see the comment above."""

    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    # TODO: We should consider whether using
    # CommaSeparatedIntegerField would be better.
    # Hash of the sorted, de-duplicated member ID list; see get_huddle_hash.
    huddle_hash: str = models.CharField(max_length=40, db_index=True, unique=True)
    # Foreign key to the Recipient object for this Huddle.
    recipient = models.ForeignKey(Recipient, null=True, on_delete=models.SET_NULL)
def get_huddle_hash(id_list: List[int]) -> str:
    """Compute the canonical hash for a set of user IDs.

    IDs are de-duplicated and sorted first, so any ordering of the same
    member set produces the same hash.
    """
    unique_sorted_ids = sorted(set(id_list))
    return make_safe_digest(",".join(map(str, unique_sorted_ids)))
def huddle_hash_cache_key(huddle_hash: str) -> str:
    """Cache key under which the Huddle for this hash is stored."""
    return "huddle_by_hash:" + huddle_hash
def get_huddle(id_list: List[int]) -> Huddle:
    """Fetch (or create) the Huddle for this set of user IDs."""
    return get_huddle_backend(get_huddle_hash(id_list), id_list)
@cache_with_key(
    lambda huddle_hash, id_list: huddle_hash_cache_key(huddle_hash), timeout=3600 * 24 * 7
)
def get_huddle_backend(huddle_hash: str, id_list: List[int]) -> Huddle:
    """Get-or-create the Huddle row for `huddle_hash`.

    On first creation, the Huddle's Recipient and one Subscription per
    member are created in the same transaction, so a partially
    constructed huddle is never visible to other requests.  Results are
    cached by hash for a week.
    """
    with transaction.atomic():
        (huddle, created) = Huddle.objects.get_or_create(huddle_hash=huddle_hash)
        if created:
            recipient = Recipient.objects.create(type_id=huddle.id, type=Recipient.HUDDLE)
            huddle.recipient = recipient
            huddle.save(update_fields=["recipient"])
            # distinct("id") guards against duplicate IDs in id_list.
            subs_to_create = [
                Subscription(
                    recipient=recipient,
                    user_profile_id=user_profile_id,
                    is_user_active=is_active,
                )
                for user_profile_id, is_active in UserProfile.objects.filter(id__in=id_list)
                .distinct("id")
                .values_list("id", "is_active")
            ]
            Subscription.objects.bulk_create(subs_to_create)
    return huddle
class UserActivity(models.Model):
    """Data table recording the last time each user hit Zulip endpoints
    via which Clients; unlike UserPresence, these data are not exposed
    to users via the Zulip API.
    Useful for debugging as well as to answer analytics questions like
    "How many users have accessed the Zulip mobile app in the last
    month?" or "Which users/organizations have recently used API
    endpoint X that is about to be desupported" for communications
    and database migration purposes.
    """

    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    client: Client = models.ForeignKey(Client, on_delete=CASCADE)
    # Identifier for the endpoint hit (truncated to 50 characters).
    query: str = models.CharField(max_length=50, db_index=True)
    # Number of hits recorded for this (user, client, query) triple.
    count: int = models.IntegerField()
    last_visit: datetime.datetime = models.DateTimeField("last visit")

    class Meta:
        unique_together = ("user_profile", "client", "query")
class UserActivityInterval(models.Model):
    """A contiguous [start, end] interval during which a user was active."""

    # Minimum length of a recorded activity interval.
    MIN_INTERVAL_LENGTH = datetime.timedelta(minutes=15)

    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    start: datetime.datetime = models.DateTimeField("start time", db_index=True)
    end: datetime.datetime = models.DateTimeField("end time", db_index=True)
class UserPresence(models.Model):
    """A record from the last time we heard from a given user on a given client.
    NOTE: Users can disable updates to this table (see UserProfile.presence_enabled),
    so this cannot be used to determine if a user was recently active on Zulip.
    The UserActivity table is recommended for that purpose.
    This is a tricky subsystem, because it is highly optimized. See the docs:
    https://zulip.readthedocs.io/en/latest/subsystems/presence.html
    """

    class Meta:
        unique_together = ("user_profile", "client")
        # Supports efficient realm-wide presence queries bounded by time.
        index_together = [
            ("realm", "timestamp"),
        ]

    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    client: Client = models.ForeignKey(Client, on_delete=CASCADE)
    # The time we heard this update from the client.
    timestamp: datetime.datetime = models.DateTimeField("presence changed")
    # The user was actively using this Zulip client as of `timestamp` (i.e.,
    # they had interacted with the client recently). When the timestamp is
    # itself recent, this is the green "active" status in the web app.
    ACTIVE = 1
    # There had been no user activity (keyboard/mouse/etc.) on this client
    # recently. So the client was online at the specified time, but it
    # could be the user's desktop which they were away from. Displayed as
    # orange/idle if the timestamp is current.
    IDLE = 2
    # Information from the client about the user's recent interaction with
    # that client, as of `timestamp`. Possible values above.
    #
    # There is no "inactive" status, because that is encoded by the
    # timestamp being old.
    status: int = models.PositiveSmallIntegerField(default=ACTIVE)

    @staticmethod
    def status_to_string(status: int) -> str:
        """Map an ACTIVE/IDLE status constant to its API string form."""
        if status == UserPresence.ACTIVE:
            return "active"
        elif status == UserPresence.IDLE:
            return "idle"
        else:  # nocoverage # TODO: Add a presence test to cover this.
            raise ValueError(f"Unknown status: {status}")

    @staticmethod
    def to_presence_dict(
        client_name: str,
        status: int,
        dt: datetime.datetime,
        push_enabled: bool = False,
        has_push_devices: bool = False,
    ) -> Dict[str, Any]:
        """Build the API-facing presence dict for one (client, status, time)."""
        presence_val = UserPresence.status_to_string(status)
        timestamp = datetime_to_timestamp(dt)
        return dict(
            client=client_name,
            status=presence_val,
            timestamp=timestamp,
            pushable=(push_enabled and has_push_devices),
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this row via to_presence_dict (pushable defaults False)."""
        return UserPresence.to_presence_dict(
            self.client.name,
            self.status,
            self.timestamp,
        )

    @staticmethod
    def status_from_string(status: str) -> Optional[int]:
        """Inverse of status_to_string; None for unrecognized strings."""
        if status == "active":
            # See https://github.com/python/mypy/issues/2611
            status_val: Optional[int] = UserPresence.ACTIVE
        elif status == "idle":
            status_val = UserPresence.IDLE
        else:
            status_val = None
        return status_val
class UserStatus(models.Model):
    """A user's self-set status: away flag plus an optional status text."""

    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    user_profile: UserProfile = models.OneToOneField(UserProfile, on_delete=CASCADE)
    timestamp: datetime.datetime = models.DateTimeField()
    client: Client = models.ForeignKey(Client, on_delete=CASCADE)

    # Status constants for the `status` field.
    NORMAL = 0
    AWAY = 1
    status: int = models.PositiveSmallIntegerField(default=NORMAL)
    status_text: str = models.CharField(max_length=255, default="")
class DefaultStream(models.Model):
    """Marks a stream as a default stream for new users in its realm."""

    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    stream: Stream = models.ForeignKey(Stream, on_delete=CASCADE)

    class Meta:
        unique_together = ("realm", "stream")
class DefaultStreamGroup(models.Model):
    """A named group of streams offered together during registration."""

    MAX_NAME_LENGTH = 60

    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    name: str = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True)
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    streams: Manager = models.ManyToManyField("Stream")
    description: str = models.CharField(max_length=1024, default="")

    class Meta:
        unique_together = ("realm", "name")

    def to_dict(self) -> Dict[str, Any]:
        """API representation: name, id, description, and member streams
        (sorted by stream name)."""
        return dict(
            name=self.name,
            id=self.id,
            description=self.description,
            streams=[stream.to_dict() for stream in self.streams.all().order_by("name")],
        )
def get_default_stream_groups(realm: Realm) -> List[DefaultStreamGroup]:
    """Return all default stream groups configured for the realm.

    Materialized with list() so the return value matches the declared
    List return type instead of being a lazy QuerySet.
    """
    return list(DefaultStreamGroup.objects.filter(realm=realm))
class AbstractScheduledJob(models.Model):
    """Base class for jobs scheduled to run at a future time."""

    # When the job should be processed.
    scheduled_timestamp: datetime.datetime = models.DateTimeField(db_index=True)
    # JSON representation of arguments to consumer
    data: str = models.TextField()
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)

    class Meta:
        abstract = True
class ScheduledEmail(AbstractScheduledJob):
    """An email queued for future delivery (welcome, digest, reminders)."""

    # Exactly one of users or address should be set. These are
    # duplicate values, used to efficiently filter the set of
    # ScheduledEmails for use in clear_scheduled_emails; the
    # recipients used for actually sending messages are stored in the
    # data field of AbstractScheduledJob.
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    users: Manager = models.ManyToManyField(UserProfile)
    # Just the address part of a full "name <address>" email address
    address: Optional[str] = models.EmailField(null=True, db_index=True)

    # Valid types are below
    WELCOME = 1
    DIGEST = 2
    INVITATION_REMINDER = 3
    type: int = models.PositiveSmallIntegerField()

    def __str__(self) -> str:
        return f"<ScheduledEmail: {self.type} {self.address or list(self.users.all())} {self.scheduled_timestamp}>"
class MissedMessageEmailAddress(models.Model):
    """A single-use reply-by-email address tied to one message and user."""

    # Lifetime of an address, in seconds (5 days).
    EXPIRY_SECONDS = 60 * 60 * 24 * 5
    # Maximum number of times an address may be used.
    ALLOWED_USES = 1

    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    message: Message = models.ForeignKey(Message, on_delete=CASCADE)
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    email_token: str = models.CharField(max_length=34, unique=True, db_index=True)

    # Timestamp of when the missed message address was generated.
    # The address is valid until timestamp + EXPIRY_SECONDS.
    timestamp: datetime.datetime = models.DateTimeField(db_index=True, default=timezone_now)
    times_used: int = models.PositiveIntegerField(default=0, db_index=True)

    def __str__(self) -> str:
        # Render as the full email address via the gateway pattern.
        return settings.EMAIL_GATEWAY_PATTERN % (self.email_token,)

    def is_usable(self) -> bool:
        """True while the address is both unexpired and under its use limit."""
        not_expired = timezone_now() <= self.timestamp + timedelta(seconds=self.EXPIRY_SECONDS)
        has_uses_left = self.times_used < self.ALLOWED_USES
        return has_uses_left and not_expired

    def increment_times_used(self) -> None:
        """Record one additional use of this address."""
        self.times_used += 1
        self.save(update_fields=["times_used"])
class NotificationTriggers:
    """String constants naming the events that can trigger a notification."""

    # "private_message" is for 1:1 PMs as well as huddles
    PRIVATE_MESSAGE = "private_message"
    MENTION = "mentioned"
    WILDCARD_MENTION = "wildcard_mentioned"
    STREAM_PUSH = "stream_push_notify"
    STREAM_EMAIL = "stream_email_notify"
class ScheduledMessageNotificationEmail(models.Model):
    """Stores planned outgoing message notification emails. They may be
    processed earlier should Zulip choose to batch multiple messages
    in a single email, but typically will be processed just after
    scheduled_timestamp.
    """

    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    message: Message = models.ForeignKey(Message, on_delete=CASCADE)

    # Choices come from NotificationTriggers; STREAM_PUSH is intentionally
    # absent here, as this table only covers *email* notifications.
    EMAIL_NOTIFICATION_TRIGGER_CHOICES = [
        (NotificationTriggers.PRIVATE_MESSAGE, "Private message"),
        (NotificationTriggers.MENTION, "Mention"),
        (NotificationTriggers.WILDCARD_MENTION, "Wildcard mention"),
        (NotificationTriggers.STREAM_EMAIL, "Stream notifications enabled"),
    ]

    trigger: str = models.TextField(choices=EMAIL_NOTIFICATION_TRIGGER_CHOICES)
    mentioned_user_group: Optional[UserGroup] = models.ForeignKey(
        UserGroup, null=True, on_delete=CASCADE
    )

    # Timestamp for when the notification should be processed and sent.
    # Calculated from the time the event was received and the batching period.
    scheduled_timestamp: datetime.datetime = models.DateTimeField(db_index=True)
class ScheduledMessage(models.Model):
    """A message the user asked to be sent (or re-sent as a reminder) later."""

    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    sender: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    recipient: Recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
    # Topic name; accessed via topic_name()/set_topic_name().
    subject: str = models.CharField(max_length=MAX_TOPIC_NAME_LENGTH)
    content: str = models.TextField()
    sending_client: Client = models.ForeignKey(Client, on_delete=CASCADE)
    stream: Optional[Stream] = models.ForeignKey(Stream, null=True, on_delete=CASCADE)
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    scheduled_timestamp: datetime.datetime = models.DateTimeField(db_index=True)
    # Set once the message has actually been sent.
    delivered: bool = models.BooleanField(default=False)

    # Delivery type constants.
    SEND_LATER = 1
    REMIND = 2

    DELIVERY_TYPES = (
        (SEND_LATER, "send_later"),
        (REMIND, "remind"),
    )

    delivery_type: int = models.PositiveSmallIntegerField(
        choices=DELIVERY_TYPES,
        default=SEND_LATER,
    )

    def topic_name(self) -> str:
        """Accessor for the topic (stored in the legacy `subject` column)."""
        return self.subject

    def set_topic_name(self, topic_name: str) -> None:
        """Mutator for the topic (stored in the legacy `subject` column)."""
        self.subject = topic_name

    def __str__(self) -> str:
        display_recipient = get_display_recipient(self.recipient)
        return f"<ScheduledMessage: {display_recipient} {self.subject} {self.sender} {self.scheduled_timestamp}>"
# Map from email campaign name (as stored in ScheduledEmail's data) to
# the corresponding ScheduledEmail.type constant.
EMAIL_TYPES = {
    "followup_day1": ScheduledEmail.WELCOME,
    "followup_day2": ScheduledEmail.WELCOME,
    "digest": ScheduledEmail.DIGEST,
    "invitation_reminder": ScheduledEmail.INVITATION_REMINDER,
}
class AbstractRealmAuditLog(models.Model):
    """Defines fields common to RealmAuditLog and RemoteRealmAuditLog."""

    event_time: datetime.datetime = models.DateTimeField(db_index=True)
    # If True, event_time is an overestimate of the true time. Can be used
    # by migrations when introducing a new event_type.
    backfilled: bool = models.BooleanField(default=False)

    # Keys within extra_data, when extra_data is a json dict. Keys are strings because
    # json keys must always be strings.
    OLD_VALUE = "1"
    NEW_VALUE = "2"
    ROLE_COUNT = "10"
    ROLE_COUNT_HUMANS = "11"
    ROLE_COUNT_BOTS = "12"

    extra_data: Optional[str] = models.TextField(null=True)

    # Event types
    # User lifecycle and settings events (1xx).
    USER_CREATED = 101
    USER_ACTIVATED = 102
    USER_DEACTIVATED = 103
    USER_REACTIVATED = 104
    USER_ROLE_CHANGED = 105
    USER_SOFT_ACTIVATED = 120
    USER_SOFT_DEACTIVATED = 121
    USER_PASSWORD_CHANGED = 122
    USER_AVATAR_SOURCE_CHANGED = 123
    USER_FULL_NAME_CHANGED = 124
    USER_EMAIL_CHANGED = 125
    USER_TOS_VERSION_CHANGED = 126
    USER_API_KEY_CHANGED = 127
    USER_BOT_OWNER_CHANGED = 128
    USER_DEFAULT_SENDING_STREAM_CHANGED = 129
    USER_DEFAULT_REGISTER_STREAM_CHANGED = 130
    USER_DEFAULT_ALL_PUBLIC_STREAMS_CHANGED = 131
    USER_NOTIFICATION_SETTINGS_CHANGED = 132
    USER_DIGEST_EMAIL_CREATED = 133

    # Realm-level events (2xx).
    REALM_DEACTIVATED = 201
    REALM_REACTIVATED = 202
    REALM_SCRUBBED = 203
    REALM_PLAN_TYPE_CHANGED = 204
    REALM_LOGO_CHANGED = 205
    REALM_EXPORTED = 206
    REALM_PROPERTY_CHANGED = 207
    REALM_ICON_SOURCE_CHANGED = 208
    REALM_DISCOUNT_CHANGED = 209
    REALM_SPONSORSHIP_APPROVED = 210
    REALM_BILLING_METHOD_CHANGED = 211
    REALM_REACTIVATION_EMAIL_SENT = 212
    REALM_SPONSORSHIP_PENDING_STATUS_CHANGED = 213
    REALM_SUBDOMAIN_CHANGED = 214
    REALM_CREATED = 215

    # Subscription events (3xx).
    SUBSCRIPTION_CREATED = 301
    SUBSCRIPTION_ACTIVATED = 302
    SUBSCRIPTION_DEACTIVATED = 303
    SUBSCRIPTION_PROPERTY_CHANGED = 304

    USER_MUTED = 350
    USER_UNMUTED = 351

    # Billing events (4xx/5xx).
    STRIPE_CUSTOMER_CREATED = 401
    STRIPE_CARD_CHANGED = 402
    STRIPE_PLAN_CHANGED = 403
    STRIPE_PLAN_QUANTITY_RESET = 404

    CUSTOMER_CREATED = 501
    CUSTOMER_PLAN_CREATED = 502
    CUSTOMER_SWITCHED_FROM_MONTHLY_TO_ANNUAL_PLAN = 503

    # Stream events (6xx).
    STREAM_CREATED = 601
    STREAM_DEACTIVATED = 602
    STREAM_NAME_CHANGED = 603

    event_type: int = models.PositiveSmallIntegerField()

    # event_types synced from on-prem installations to Zulip Cloud when
    # billing for mobile push notifications is enabled. Every billing
    # event_type should have ROLE_COUNT populated in extra_data.
    SYNCED_BILLING_EVENTS = [
        USER_CREATED,
        USER_ACTIVATED,
        USER_DEACTIVATED,
        USER_REACTIVATED,
        USER_ROLE_CHANGED,
        REALM_DEACTIVATED,
        REALM_REACTIVATED,
    ]

    class Meta:
        abstract = True
class RealmAuditLog(AbstractRealmAuditLog):
    """
    RealmAuditLog tracks important changes to users, streams, and
    realms in Zulip. It is intended to support both
    debugging/introspection (e.g. determining when a user's left a
    given stream?) as well as help with some database migrations where
    we might be able to do a better data backfill with it. Here are a
    few key details about how this works:
    * acting_user is the user who initiated the state change
    * modified_user (if present) is the user being modified
    * modified_stream (if present) is the stream being modified
    For example:
    * When a user subscribes another user to a stream, modified_user,
      acting_user, and modified_stream will all be present and different.
    * When an administrator changes an organization's realm icon,
      acting_user is that administrator and both modified_user and
      modified_stream will be None.
    """

    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    acting_user: Optional[UserProfile] = models.ForeignKey(
        UserProfile,
        null=True,
        related_name="+",
        on_delete=CASCADE,
    )
    modified_user: Optional[UserProfile] = models.ForeignKey(
        UserProfile,
        null=True,
        related_name="+",
        on_delete=CASCADE,
    )
    modified_stream: Optional[Stream] = models.ForeignKey(
        Stream,
        null=True,
        on_delete=CASCADE,
    )
    # ID of the latest message as of this event, if recorded.
    event_last_message_id: Optional[int] = models.IntegerField(null=True)

    def __str__(self) -> str:
        # Prefer the most specific modified object for the repr.
        if self.modified_user is not None:
            return f"<RealmAuditLog: {self.modified_user} {self.event_type} {self.event_time} {self.id}>"
        if self.modified_stream is not None:
            return f"<RealmAuditLog: {self.modified_stream} {self.event_type} {self.event_time} {self.id}>"
        return f"<RealmAuditLog: {self.realm} {self.event_type} {self.event_time} {self.id}>"
class UserHotspot(models.Model):
    """Records that a user has seen a given onboarding hotspot."""

    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    user: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    hotspot: str = models.CharField(max_length=30)
    timestamp: datetime.datetime = models.DateTimeField(default=timezone_now)

    class Meta:
        unique_together = ("user", "hotspot")
def check_valid_user_ids(realm_id: int, val: object, allow_deactivated: bool = False) -> List[int]:
    """Validate that `val` is a list of IDs of human users in the realm.

    Returns the validated ID list; raises ValidationError if any ID is
    unknown in the realm, deactivated (unless allow_deactivated), or a bot.
    """
    user_ids = check_list(check_int)("User IDs", val)
    realm = Realm.objects.get(id=realm_id)

    # TODO: Structurally, we should be doing a bulk fetch query to
    # get the users here, not doing these in a loop. But because
    # this is a rarely used feature and likely to never have more
    # than a handful of users, it's probably mostly OK.
    for user_id in user_ids:
        try:
            target = get_user_profile_by_id_in_realm(user_id, realm)
        except UserProfile.DoesNotExist:
            raise ValidationError(_("Invalid user ID: {}").format(user_id))

        if not allow_deactivated and not target.is_active:
            raise ValidationError(_("User with ID {} is deactivated").format(user_id))
        if target.is_bot:
            raise ValidationError(_("User with ID {} is a bot").format(user_id))

    return user_ids
class CustomProfileField(models.Model):
    """Defines a form field for the per-realm custom profile fields feature.
    See CustomProfileFieldValue for an individual user's values for one of
    these fields.
    """

    HINT_MAX_LENGTH = 80
    NAME_MAX_LENGTH = 40

    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    realm: Realm = models.ForeignKey(Realm, on_delete=CASCADE)
    name: str = models.CharField(max_length=NAME_MAX_LENGTH)
    hint: Optional[str] = models.CharField(max_length=HINT_MAX_LENGTH, default="", null=True)
    # Display ordering of the fields within a realm.
    order: int = models.IntegerField(default=0)

    # Field type constants.
    SHORT_TEXT = 1
    LONG_TEXT = 2
    SELECT = 3
    DATE = 4
    URL = 5
    USER = 6
    EXTERNAL_ACCOUNT = 7

    # These are the fields whose validators require more than var_name
    # and value argument. i.e. SELECT require field_data, USER require
    # realm as argument.
    SELECT_FIELD_TYPE_DATA: List[ExtendedFieldElement] = [
        (SELECT, gettext_lazy("List of options"), validate_select_field, str, "SELECT"),
    ]
    USER_FIELD_TYPE_DATA: List[UserFieldElement] = [
        (USER, gettext_lazy("Person picker"), check_valid_user_ids, ast.literal_eval, "USER"),
    ]

    SELECT_FIELD_VALIDATORS: Dict[int, ExtendedValidator] = {
        item[0]: item[2] for item in SELECT_FIELD_TYPE_DATA
    }
    USER_FIELD_VALIDATORS: Dict[int, RealmUserValidator] = {
        item[0]: item[2] for item in USER_FIELD_TYPE_DATA
    }

    FIELD_TYPE_DATA: List[FieldElement] = [
        # Type, display name, validator, converter, keyword
        (SHORT_TEXT, gettext_lazy("Short text"), check_short_string, str, "SHORT_TEXT"),
        (LONG_TEXT, gettext_lazy("Long text"), check_long_string, str, "LONG_TEXT"),
        (DATE, gettext_lazy("Date picker"), check_date, str, "DATE"),
        (URL, gettext_lazy("Link"), check_url, str, "URL"),
        (
            EXTERNAL_ACCOUNT,
            gettext_lazy("External account"),
            check_short_string,
            str,
            "EXTERNAL_ACCOUNT",
        ),
    ]

    ALL_FIELD_TYPES = [*FIELD_TYPE_DATA, *SELECT_FIELD_TYPE_DATA, *USER_FIELD_TYPE_DATA]

    # Lookup tables keyed by field type, derived from the tuples above.
    FIELD_VALIDATORS: Dict[int, Validator[Union[int, str, List[int]]]] = {
        item[0]: item[2] for item in FIELD_TYPE_DATA
    }
    FIELD_CONVERTERS: Dict[int, Callable[[Any], Any]] = {
        item[0]: item[3] for item in ALL_FIELD_TYPES
    }
    FIELD_TYPE_CHOICES: List[Tuple[int, Promise]] = [(item[0], item[1]) for item in ALL_FIELD_TYPES]

    field_type: int = models.PositiveSmallIntegerField(
        choices=FIELD_TYPE_CHOICES,
        default=SHORT_TEXT,
    )

    # A JSON blob of any additional data needed to define the field beyond
    # type/name/hint.
    #
    # The format depends on the type. Field types SHORT_TEXT, LONG_TEXT,
    # DATE, URL, and USER leave this null. Fields of type SELECT store the
    # choices' descriptions.
    #
    # Note: There is no performance overhead of using TextField in PostgreSQL.
    # See https://www.postgresql.org/docs/9.0/static/datatype-character.html
    field_data: Optional[str] = models.TextField(default="", null=True)

    class Meta:
        unique_together = ("realm", "name")

    def as_dict(self) -> ProfileDataElementBase:
        """Return a dict describing this field's definition for the API."""
        return {
            "id": self.id,
            "name": self.name,
            "type": self.field_type,
            "hint": self.hint,
            "field_data": self.field_data,
            "order": self.order,
        }

    def is_renderable(self) -> bool:
        """True only for the SHORT_TEXT and LONG_TEXT field types."""
        if self.field_type in [CustomProfileField.SHORT_TEXT, CustomProfileField.LONG_TEXT]:
            return True
        return False

    def __str__(self) -> str:
        return f"<CustomProfileField: {self.realm} {self.name} {self.field_type} {self.order}>"
def custom_profile_fields_for_realm(realm_id: int) -> List[CustomProfileField]:
    """Return the realm's custom profile fields, sorted by display order.

    Materialized with list() so the return value matches the declared
    List return type instead of being a lazy QuerySet.
    """
    return list(CustomProfileField.objects.filter(realm=realm_id).order_by("order"))
class CustomProfileFieldValue(models.Model):
    """One user's value for one CustomProfileField."""

    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    field: CustomProfileField = models.ForeignKey(CustomProfileField, on_delete=CASCADE)
    value: str = models.TextField()
    # Pre-rendered form of `value`, when the field type is renderable.
    rendered_value: Optional[str] = models.TextField(null=True, default=None)

    class Meta:
        unique_together = ("user_profile", "field")

    def __str__(self) -> str:
        return f"<CustomProfileFieldValue: {self.user_profile} {self.field} {self.value}>"
# Interfaces for services
# They provide additional functionality like parsing message to obtain query URL, data to be sent to URL,
# and parsing the response.
# Names of the interface implementations; see Service._interfaces below.
GENERIC_INTERFACE = "GenericService"
SLACK_INTERFACE = "SlackOutgoingWebhookService"
# A Service corresponds to either an outgoing webhook bot or an embedded bot.
# The type of Service is determined by the bot_type field of the referenced
# UserProfile.
#
# If the Service is an outgoing webhook bot:
# - name is any human-readable identifier for the Service
# - base_url is the address of the third-party site
# - token is used for authentication with the third-party site
#
# If the Service is an embedded bot:
# - name is the canonical name for the type of bot (e.g. 'xkcd' for an instance
#   of the xkcd bot); multiple embedded bots can have the same name, but all
#   embedded bots with the same name will run the same code
# - base_url and token are currently unused
class Service(models.Model):
    """Configuration for an outgoing-webhook or embedded bot; see above."""

    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    name: str = models.CharField(max_length=UserProfile.MAX_NAME_LENGTH)
    # Bot user corresponding to the Service.  The bot_type of this user
    # determines the type of service.  If non-bot services are added later,
    # user_profile can also represent the owner of the Service.
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    base_url: str = models.TextField()
    token: str = models.TextField()
    # Interface / API version of the service.
    interface: int = models.PositiveSmallIntegerField(default=1)

    # Valid interfaces are {generic, zulip_bot_service, slack}
    GENERIC = 1
    SLACK = 2

    ALLOWED_INTERFACE_TYPES = [
        GENERIC,
        SLACK,
    ]
    # N.B. If we used Django's choice=... we would get this for free (kinda)
    _interfaces: Dict[int, str] = {
        GENERIC: GENERIC_INTERFACE,
        SLACK: SLACK_INTERFACE,
    }

    def interface_name(self) -> str:
        """Map the numeric interface constant to its implementation name."""
        # Raises KeyError if invalid
        return self._interfaces[self.interface]
def get_bot_services(user_profile_id: int) -> List[Service]:
    """Return all Service rows attached to the given bot user."""
    services = Service.objects.filter(user_profile_id=user_profile_id)
    return list(services)
def get_service_profile(user_profile_id: int, service_name: str) -> Service:
    """Fetch the Service with the given name for the given bot user.

    Raises Service.DoesNotExist if no such service is configured.
    """
    return Service.objects.get(name=service_name, user_profile_id=user_profile_id)
class BotStorageData(models.Model):
    """Key/value storage for embedded bots, scoped per bot profile."""

    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    bot_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    key: str = models.TextField(db_index=True)
    value: str = models.TextField()

    class Meta:
        unique_together = ("bot_profile", "key")
class BotConfigData(models.Model):
    """Key/value configuration entries for embedded bots, per bot profile."""

    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    bot_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    key: str = models.TextField(db_index=True)
    value: str = models.TextField()

    class Meta:
        unique_together = ("bot_profile", "key")
class InvalidFakeEmailDomain(Exception):
    """Raised when FAKE_EMAIL_DOMAIN cannot form valid email addresses."""

    pass
def get_fake_email_domain(realm: Realm) -> str:
    """Pick a domain for synthesizing fake email addresses.

    Prefers realm.host; falls back to settings.FAKE_EMAIL_DOMAIN.
    Raises InvalidFakeEmailDomain if neither yields valid addresses.
    """
    try:
        # Check that realm.host can be used to form valid email addresses.
        validate_email(f"bot@{realm.host}")
    except ValidationError:
        pass
    else:
        return realm.host

    try:
        # Check that the fake email domain can be used to form valid email addresses.
        validate_email("bot@" + settings.FAKE_EMAIL_DOMAIN)
    except ValidationError:
        raise InvalidFakeEmailDomain(
            settings.FAKE_EMAIL_DOMAIN + " is not a valid domain. "
            "Consider setting the FAKE_EMAIL_DOMAIN setting."
        )
    return settings.FAKE_EMAIL_DOMAIN
class AlertWord(models.Model):
    """A word/phrase that triggers alerts for a user when it appears."""

    # Realm isn't necessary, but it's a nice denormalization.  Users
    # never move to another realm, so it's static, and having Realm
    # here optimizes the main query on this table, which is fetching
    # all the alert words in a realm.
    id: int = models.AutoField(auto_created=True, primary_key=True, verbose_name="ID")
    realm: Realm = models.ForeignKey(Realm, db_index=True, on_delete=CASCADE)
    user_profile: UserProfile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    # Case-insensitive name for the alert word.
    word: str = models.TextField()

    class Meta:
        unique_together = ("user_profile", "word")
def flush_realm_alert_words(realm: Realm) -> None:
    """Invalidate both cached representations of the realm's alert words."""
    for cache_key in (
        realm_alert_words_cache_key(realm),
        realm_alert_words_automaton_cache_key(realm),
    ):
        cache_delete(cache_key)
def flush_alert_word(sender: Any, **kwargs: Any) -> None:
    """Signal receiver: flush the realm's alert-word caches whenever an
    AlertWord row changes."""
    changed_word = kwargs["instance"]
    flush_realm_alert_words(changed_word.realm)
# Keep the alert-word caches in sync whenever an AlertWord row is
# created, updated, or deleted.
post_save.connect(flush_alert_word, sender=AlertWord)
post_delete.connect(flush_alert_word, sender=AlertWord)
|
punchagan/zulip
|
zerver/models.py
|
Python
|
apache-2.0
| 144,942
|
[
"VisIt"
] |
a8967e4c5455b162391ebfa4bc3c8e4547b2b87e4d245c71ed7ee9cf398f3555
|
"""Dirac notation for states."""
from __future__ import print_function, division
from sympy import (cacheit, conjugate, Expr, Function, integrate, oo, sqrt,
Tuple)
from sympy.core.compatibility import range
from sympy.printing.pretty.stringpict import stringPict
from sympy.physics.quantum.qexpr import QExpr, dispatch_method
# Public names exported by this module.
__all__ = [
    'KetBase',
    'BraBase',
    'StateBase',
    'State',
    'Ket',
    'Bra',
    'TimeDepState',
    'TimeDepBra',
    'TimeDepKet',
    'Wavefunction'
]

#-----------------------------------------------------------------------------
# States, bras and kets.
#-----------------------------------------------------------------------------

# ASCII brackets
_lbracket = "<"
_rbracket = ">"
_straight_bracket = "|"

# Unicode brackets
# MATHEMATICAL ANGLE BRACKETS
_lbracket_ucode = u"\N{MATHEMATICAL LEFT ANGLE BRACKET}"
_rbracket_ucode = u"\N{MATHEMATICAL RIGHT ANGLE BRACKET}"
# LIGHT VERTICAL BAR
_straight_bracket_ucode = u"\N{LIGHT VERTICAL BAR}"

# Other options for unicode printing of <, > and | for Dirac notation.
# LEFT-POINTING ANGLE BRACKET
# _lbracket = u"\u2329"
# _rbracket = u"\u232A"
# LEFT ANGLE BRACKET
# _lbracket = u"\u3008"
# _rbracket = u"\u3009"
# VERTICAL LINE
# _straight_bracket = u"\u007C"
class StateBase(QExpr):
"""Abstract base class for general abstract states in quantum mechanics.
All other state classes defined will need to inherit from this class. It
carries the basic structure for all other states such as dual, _eval_adjoint
and label.
This is an abstract base class and you should not instantiate it directly,
instead use State.
"""
    @classmethod
    def _operators_to_state(self, ops, **options):
        """ Returns the eigenstate instance for the passed operators.
        This method should be overridden in subclasses. It will handle being
        passed either an Operator instance or set of Operator instances. It
        should return the corresponding state INSTANCE or simply raise a
        NotImplementedError. See cartesian.py for an example.

        Note: despite being a classmethod, the first parameter is named
        ``self`` here, following the existing convention in this file.
        """
        raise NotImplementedError("Cannot map operators to states in this class. Method not implemented!")
    def _state_to_operators(self, op_classes, **options):
        """ Returns the operators which this state instance is an eigenstate
        of.
        This method should be overridden in subclasses. It will be called on
        state instances and be passed the operator classes that we wish to make
        into instances. The state instance will then transform the classes
        appropriately, or raise a NotImplementedError if it cannot return
        operator instances. See cartesian.py for examples.
        """
        raise NotImplementedError(
            "Cannot map this state to operators. Method not implemented!")
    @property
    def operators(self):
        """Return the operator(s) that this state is an eigenstate of.

        Delegates to operatorset.state_to_operators, which dispatches on
        the state's class.
        """
        from .operatorset import state_to_operators  # import internally to avoid circular import errors
        return state_to_operators(self)
    def _enumerate_state(self, num_states, **options):
        """Subclass hook for enumerating states; not implemented here."""
        raise NotImplementedError("Cannot enumerate this state!")
def _represent_default_basis(self, **options):
return self._represent(basis=self.operators)
#-------------------------------------------------------------------------
# Dagger/dual
#-------------------------------------------------------------------------
@property
def dual(self):
"""Return the dual state of this one."""
return self.dual_class()._new_rawargs(self.hilbert_space, *self.args)
    @classmethod
    def dual_class(self):
        """Return the class used to construct the dual."""
        raise NotImplementedError(
            'dual_class must be implemented in a subclass'
        )
    def _eval_adjoint(self):
        """Compute the dagger of this state using the dual."""
        return self.dual
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _pretty_brackets(self, height, use_unicode=True):
    # Return pretty printed brackets for the state, sized to ``height`` rows.
    # Ideally, this could be done by pform.parens but it does not support
    # the angled < and >
    # Setup for unicode vs ascii drawing characters
    if use_unicode:
        lbracket, rbracket = self.lbracket_ucode, self.rbracket_ucode
        slash, bslash, vert = u'\N{BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT}', \
            u'\N{BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT}', \
            u'\N{BOX DRAWINGS LIGHT VERTICAL}'
    else:
        lbracket, rbracket = self.lbracket, self.rbracket
        slash, bslash, vert = '/', '\\', '|'
    # If height is 1, just return single-character brackets
    if height == 1:
        return stringPict(lbracket), stringPict(rbracket)
    # Make height even so the two bracket halves mirror each other
    height += (height % 2)
    brackets = []
    for bracket in lbracket, rbracket:
        # Create left bracket: '/' rows narrowing in, then '\' rows widening
        if bracket in {_lbracket, _lbracket_ucode}:
            bracket_args = [' ' * (height//2 - i - 1) +
                            slash for i in range(height // 2)]
            bracket_args.extend(
                [' ' * i + bslash for i in range(height // 2)])
        # Create right bracket: mirror image of the left one
        elif bracket in {_rbracket, _rbracket_ucode}:
            bracket_args = [' ' * i + bslash for i in range(height // 2)]
            bracket_args.extend([' ' * (
                height//2 - i - 1) + slash for i in range(height // 2)])
        # Create straight (vertical bar) bracket
        elif bracket in {_straight_bracket, _straight_bracket_ucode}:
            bracket_args = [vert for i in range(height)]
        else:
            raise ValueError(bracket)
        brackets.append(
            stringPict('\n'.join(bracket_args), baseline=height//2))
    return brackets
def _sympystr(self, printer, *args):
contents = self._print_contents(printer, *args)
return '%s%s%s' % (self.lbracket, contents, self.rbracket)
def _pretty(self, printer, *args):
    from sympy.printing.pretty.stringpict import prettyForm
    # Render the contents first, then wrap them in height-matched brackets.
    contents = self._print_contents_pretty(printer, *args)
    lbracket, rbracket = self._pretty_brackets(
        contents.height(), printer._use_unicode)
    contents = prettyForm(*contents.left(lbracket))
    contents = prettyForm(*contents.right(rbracket))
    return contents
def _latex(self, printer, *args):
contents = self._print_contents_latex(printer, *args)
# The extra {} brackets are needed to get matplotlib's latex
# rendered to render this properly.
return '{%s%s%s}' % (self.lbracket_latex, contents, self.rbracket_latex)
class KetBase(StateBase):
    """Base class for Kets.

    Defines the ket's dual property and its printing brackets. This is an
    abstract base class; do not instantiate it directly -- use Ket instead.
    """

    lbracket = _straight_bracket
    rbracket = _rbracket
    lbracket_ucode = _straight_bracket_ucode
    rbracket_ucode = _rbracket_ucode
    lbracket_latex = r'\left|'
    rbracket_latex = r'\right\rangle '

    @classmethod
    def default_args(cls):
        return ("psi",)

    @classmethod
    def dual_class(cls):
        return BraBase

    def __mul__(self, other):
        """KetBase*other"""
        from sympy.physics.quantum.operator import OuterProduct
        if not isinstance(other, BraBase):
            return Expr.__mul__(self, other)
        # |ket><bra| is an outer product.
        return OuterProduct(self, other)

    def __rmul__(self, other):
        """other*KetBase"""
        from sympy.physics.quantum.innerproduct import InnerProduct
        if not isinstance(other, BraBase):
            return Expr.__rmul__(self, other)
        # <bra|ket> is an inner product.
        return InnerProduct(other, self)

    #-------------------------------------------------------------------------
    # _eval_* methods
    #-------------------------------------------------------------------------

    def _eval_innerproduct(self, bra, **hints):
        """Evaluate the inner product between this ket and a bra.

        Called to compute <bra|ket>, where the ket is ``self``. Dispatches
        to sub-methods of the form ``_eval_innerproduct_BraClass(self,
        **hints)``; subclasses define one per BraClass to teach the ket how
        to take inner products with bras.
        """
        return dispatch_method(self, '_eval_innerproduct', bra, **hints)

    def _apply_operator(self, op, **options):
        """Apply an Operator to this Ket.

        Dispatches to sub-methods of the form
        ``_apply_operator_OperatorName(op, **options)``; subclasses define
        one per OperatorName to teach the Ket how operators act on it.

        Parameters
        ==========

        op : Operator
            The Operator that is acting on the Ket.
        options : dict
            A dict of key/value pairs that control how the operator is
            applied to the Ket.
        """
        return dispatch_method(self, '_apply_operator', op, **options)
class BraBase(StateBase):
    """Base class for Bras.

    Defines the bra's dual property and its printing brackets. This is an
    abstract base class; do not instantiate it directly -- use Bra instead.
    """

    lbracket = _lbracket
    rbracket = _straight_bracket
    lbracket_ucode = _lbracket_ucode
    rbracket_ucode = _straight_bracket_ucode
    lbracket_latex = r'\left\langle '
    rbracket_latex = r'\right|'

    @classmethod
    def _operators_to_state(cls, ops, **options):
        # Build the corresponding ket state, then take its dual.
        ket = cls.dual_class().operators_to_state(ops, **options)
        return ket.dual

    def _state_to_operators(self, op_classes, **options):
        # Delegate to the dual ket's implementation.
        return self.dual._state_to_operators(op_classes, **options)

    def _enumerate_state(self, num_states, **options):
        # Enumerate the dual kets, then convert each back to a bra.
        kets = self.dual._enumerate_state(num_states, **options)
        return [ket.dual for ket in kets]

    @classmethod
    def default_args(cls):
        return cls.dual_class().default_args()

    @classmethod
    def dual_class(cls):
        return KetBase

    def __mul__(self, other):
        """BraBase*other"""
        from sympy.physics.quantum.innerproduct import InnerProduct
        if not isinstance(other, KetBase):
            return Expr.__mul__(self, other)
        # <bra|ket> is an inner product.
        return InnerProduct(self, other)

    def __rmul__(self, other):
        """other*BraBase"""
        from sympy.physics.quantum.operator import OuterProduct
        if not isinstance(other, KetBase):
            return Expr.__rmul__(self, other)
        # |ket><bra| is an outer product.
        return OuterProduct(other, self)

    def _represent(self, **options):
        """A default represent that uses the dual Ket's version."""
        from sympy.physics.quantum.dagger import Dagger
        return Dagger(self.dual._represent(**options))
class State(StateBase):
    """General abstract quantum state used as a base class for Ket and Bra."""
class Ket(State, KetBase):
    """A general time-independent Ket in quantum mechanics.

    Inherits from State and KetBase. This class should be used as the base
    class for all physical, time-independent Kets in a system. This class
    and its subclasses will be the main classes that users will use for
    expressing Kets in Dirac notation [1]_.

    Parameters
    ==========

    args : tuple
        The list of numbers or parameters that uniquely specify the
        ket. This will usually be its symbol or its quantum numbers. For
        time-dependent state, this will include the time.

    Examples
    ========

    Create a simple Ket and looking at its properties::

        >>> from sympy.physics.quantum import Ket, Bra
        >>> from sympy import symbols, I
        >>> k = Ket('psi')
        >>> k
        |psi>
        >>> k.hilbert_space
        H
        >>> k.is_commutative
        False
        >>> k.label
        (psi,)

    Ket's know about their associated bra::

        >>> k.dual
        <psi|
        >>> k.dual_class()
        <class 'sympy.physics.quantum.state.Bra'>

    Take a linear combination of two kets::

        >>> k0 = Ket(0)
        >>> k1 = Ket(1)
        >>> 2*I*k0 - 4*k1
        2*I*|0> - 4*|1>

    Compound labels are passed as tuples::

        >>> n, m = symbols('n,m')
        >>> k = Ket(n,m)
        >>> k
        |nm>

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
    """

    @classmethod
    def dual_class(self):
        # The dual of a Ket is the corresponding Bra.
        return Bra
class Bra(State, BraBase):
    """A general time-independent Bra in quantum mechanics.

    Inherits from State and BraBase. A Bra is the dual of a Ket [1]_. This
    class and its subclasses will be the main classes that users will use for
    expressing Bras in Dirac notation.

    Parameters
    ==========

    args : tuple
        The list of numbers or parameters that uniquely specify the
        ket. This will usually be its symbol or its quantum numbers. For
        time-dependent state, this will include the time.

    Examples
    ========

    Create a simple Bra and look at its properties::

        >>> from sympy.physics.quantum import Ket, Bra
        >>> from sympy import symbols, I
        >>> b = Bra('psi')
        >>> b
        <psi|
        >>> b.hilbert_space
        H
        >>> b.is_commutative
        False

    Bra's know about their dual Ket's::

        >>> b.dual
        |psi>
        >>> b.dual_class()
        <class 'sympy.physics.quantum.state.Ket'>

    Like Kets, Bras can have compound labels and be manipulated in a similar
    manner::

        >>> n, m = symbols('n,m')
        >>> b = Bra(n,m) - I*Bra(m,n)
        >>> b
        -I*<mn| + <nm|

    Symbols in a Bra can be substituted using ``.subs``::

        >>> b.subs(n,m)
        <mm| - I*<mm|

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
    """

    @classmethod
    def dual_class(self):
        # The dual of a Bra is the corresponding Ket.
        return Ket
#-----------------------------------------------------------------------------
# Time dependent states, bras and kets.
#-----------------------------------------------------------------------------
class TimeDepState(StateBase):
    """Base class for a general time-dependent quantum state.

    Serves as the base for any time-dependent state. Unlike the
    time-independent states, subclasses take the time as an extra, final
    argument in addition to the usual label arguments.

    Parameters
    ==========

    args : tuple
        The numbers or parameters that uniquely specify the state (usually
        its symbol or quantum numbers), with the time as the final argument.
    """

    #-------------------------------------------------------------------------
    # Initialization
    #-------------------------------------------------------------------------

    @classmethod
    def default_args(cls):
        return ("psi", "t")

    #-------------------------------------------------------------------------
    # Properties
    #-------------------------------------------------------------------------

    @property
    def label(self):
        """The label of the state (all args except the final time)."""
        return self.args[:-1]

    @property
    def time(self):
        """The time of the state (the final argument)."""
        return self.args[-1]

    #-------------------------------------------------------------------------
    # Printing
    #-------------------------------------------------------------------------

    def _print_time(self, printer, *args):
        return printer._print(self.time, *args)

    _print_time_repr = _print_time
    _print_time_latex = _print_time

    def _print_time_pretty(self, printer, *args):
        return printer._print(self.time, *args)

    def _print_contents(self, printer, *args):
        # Contents render as "label;time".
        return '%s;%s' % (self._print_label(printer, *args),
                          self._print_time(printer, *args))

    def _print_label_repr(self, printer, *args):
        # repr uses a comma between the label sequence and the time.
        return '%s,%s' % (self._print_sequence(self.label, ',', printer, *args),
                          self._print_time_repr(printer, *args))

    def _print_contents_pretty(self, printer, *args):
        label = self._print_label_pretty(printer, *args)
        time = self._print_time_pretty(printer, *args)
        return printer._print_seq((label, time), delimiter=';')

    def _print_contents_latex(self, printer, *args):
        label = self._print_sequence(
            self.label, self._label_separator, printer, *args)
        return '%s;%s' % (label, self._print_time_latex(printer, *args))
class TimeDepKet(TimeDepState, KetBase):
    """General time-dependent Ket in quantum mechanics.

    This inherits from ``TimeDepState`` and ``KetBase`` and is the main class
    that should be used for Kets that vary with time. Its dual is a
    ``TimeDepBra``.

    Parameters
    ==========

    args : tuple
        The list of numbers or parameters that uniquely specify the ket. This
        will usually be its symbol or its quantum numbers. For time-dependent
        state, this will include the time as the final argument.

    Examples
    ========

    Create a TimeDepKet and look at its attributes::

        >>> from sympy.physics.quantum import TimeDepKet
        >>> k = TimeDepKet('psi', 't')
        >>> k
        |psi;t>
        >>> k.time
        t
        >>> k.label
        (psi,)
        >>> k.hilbert_space
        H

    TimeDepKets know about their dual bra::

        >>> k.dual
        <psi;t|
        >>> k.dual_class()
        <class 'sympy.physics.quantum.state.TimeDepBra'>
    """

    @classmethod
    def dual_class(self):
        # The dual of a time-dependent Ket is the time-dependent Bra.
        return TimeDepBra
class TimeDepBra(TimeDepState, BraBase):
    """General time-dependent Bra in quantum mechanics.

    This inherits from TimeDepState and BraBase and is the main class that
    should be used for Bras that vary with time. Its dual is a TimeDepKet.

    Parameters
    ==========

    args : tuple
        The list of numbers or parameters that uniquely specify the ket. This
        will usually be its symbol or its quantum numbers. For time-dependent
        state, this will include the time as the final argument.

    Examples
    ========

        >>> from sympy.physics.quantum import TimeDepBra
        >>> from sympy import symbols, I
        >>> b = TimeDepBra('psi', 't')
        >>> b
        <psi;t|
        >>> b.time
        t
        >>> b.label
        (psi,)
        >>> b.hilbert_space
        H
        >>> b.dual
        |psi;t>
    """

    @classmethod
    def dual_class(self):
        # The dual of a time-dependent Bra is the time-dependent Ket.
        return TimeDepKet
class Wavefunction(Function):
    """Class for representations in continuous bases

    This class takes an expression and coordinates in its constructor. It can
    be used to easily calculate normalizations and probabilities.

    Parameters
    ==========

    expr : Expr
        The expression representing the functional form of the w.f.

    coords : Symbol or tuple
        The coordinates to be integrated over, and their bounds

    Examples
    ========

    Particle in a box, specifying bounds in the more primitive way of using
    Piecewise:

        >>> from sympy import Symbol, Piecewise, pi, N
        >>> from sympy.functions import sqrt, sin
        >>> from sympy.physics.quantum.state import Wavefunction
        >>> x = Symbol('x', real=True)
        >>> n = 1
        >>> L = 1
        >>> g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True))
        >>> f = Wavefunction(g, x)
        >>> f.norm
        1
        >>> f.is_normalized
        True
        >>> p = f.prob()
        >>> p(0)
        0
        >>> p(L)
        0
        >>> p(0.5)
        2
        >>> p(0.85*L)
        2*sin(0.85*pi)**2
        >>> N(p(0.85*L))
        0.412214747707527

    Additionally, you can specify the bounds of the function and the indices in
    a more compact way:

        >>> from sympy import symbols, pi, diff
        >>> from sympy.functions import sqrt, sin
        >>> from sympy.physics.quantum.state import Wavefunction
        >>> x, L = symbols('x,L', positive=True)
        >>> n = symbols('n', integer=True, positive=True)
        >>> g = sqrt(2/L)*sin(n*pi*x/L)
        >>> f = Wavefunction(g, (x, 0, L))
        >>> f.norm
        1
        >>> f(L+1)
        0
        >>> f(L-1)
        sqrt(2)*sin(pi*n*(L - 1)/L)/sqrt(L)
        >>> f(-1)
        0
        >>> f(0.85)
        sqrt(2)*sin(0.85*pi*n/L)/sqrt(L)
        >>> f(0.85, n=1, L=1)
        sqrt(2)*sin(0.85*pi)
        >>> f.is_commutative
        False

    All arguments are automatically sympified, so you can define the variables
    as strings rather than symbols:

        >>> expr = x**2
        >>> f = Wavefunction(expr, 'x')
        >>> type(f.variables[0])
        <class 'sympy.core.symbol.Symbol'>

    Derivatives of Wavefunctions will return Wavefunctions:

        >>> diff(f, x)
        Wavefunction(2*x, x)
    """

    # Any passed tuples for coordinates and their bounds need to be
    # converted to Tuples before Function's constructor is called, to
    # avoid errors from calling is_Float in the constructor
    def __new__(cls, *args, **options):
        new_args = [Tuple(*arg) if isinstance(arg, tuple) else arg
                    for arg in args]
        # Deliberately skip Function.__new__ (hence super(Function, cls)):
        # Function's constructor would try to evaluate the arguments.
        return super(Function, cls).__new__(cls, *new_args, **options)

    def __call__(self, *args, **options):
        """Evaluate the wavefunction at the given coordinate values.

        Returns 0 for values outside a coordinate's declared bounds.
        Remaining free symbols may be substituted by keyword, e.g.
        ``f(0.85, n=1, L=1)``.
        """
        var = self.variables

        if len(args) != len(var):
            raise NotImplementedError(
                "Incorrect number of arguments to function!")

        # If the passed value is outside the specified bounds, return 0.
        # BUGFIX: the previous manual counter (ct) was not incremented when
        # the symbolic-argument branch hit ``continue``, so later variables
        # were bounds-checked against the wrong argument; enumerate keeps
        # the argument index in step with the variable.
        for ct, v in enumerate(var):
            lower, upper = self.limits[v]

            # Do the comparison to limits only if the passed symbol is
            # actually a symbol present in the limits;
            # Had problems with a comparison of x > L
            if isinstance(args[ct], Expr) and \
                    not (lower in args[ct].free_symbols
                         or upper in args[ct].free_symbols):
                continue

            if (args[ct] < lower) == True or (args[ct] > upper) == True:
                return 0

        expr = self.expr

        # Allows user to make a call like f(2, 4, m=1, n=1)
        for symbol in list(expr.free_symbols):
            if str(symbol) in options:
                expr = expr.subs(symbol, options[str(symbol)])

        return expr.subs(zip(var, args))

    def _eval_derivative(self, symbol):
        # Differentiate the functional form; the coordinates are unchanged.
        deriv = self.expr._eval_derivative(symbol)
        return Wavefunction(deriv, *self.args[1:])

    def _eval_conjugate(self):
        return Wavefunction(conjugate(self.expr), *self.args[1:])

    def _eval_transpose(self):
        return self

    @property
    def free_symbols(self):
        return self.expr.free_symbols

    @property
    def is_commutative(self):
        """
        Override Function's is_commutative so that order is preserved in
        represented expressions
        """
        return False

    @classmethod
    def eval(self, *args):
        # Never auto-evaluate; the functional form stays symbolic.
        return None

    @property
    def variables(self):
        """
        Return the coordinates which the wavefunction depends on

        Examples
        ========

            >>> from sympy.physics.quantum.state import Wavefunction
            >>> from sympy import symbols
            >>> x,y = symbols('x,y')
            >>> f = Wavefunction(x*y, x, y)
            >>> f.variables
            (x, y)
            >>> g = Wavefunction(x*y, x)
            >>> g.variables
            (x,)
        """
        # A Tuple arg is (symbol, lower, upper); a bare arg is the symbol.
        var = [g[0] if isinstance(g, Tuple) else g for g in self._args[1:]]
        return tuple(var)

    @property
    def limits(self):
        """
        Return the limits of the coordinates which the w.f. depends on. If no
        limits are specified, defaults to ``(-oo, oo)``.

        Examples
        ========

            >>> from sympy.physics.quantum.state import Wavefunction
            >>> from sympy import symbols
            >>> x, y = symbols('x, y')
            >>> f = Wavefunction(x**2, (x, 0, 1))
            >>> f.limits
            {x: (0, 1)}
            >>> f = Wavefunction(x**2, x)
            >>> f.limits
            {x: (-oo, oo)}
            >>> f = Wavefunction(x**2 + y**2, x, (y, -1, 2))
            >>> f.limits
            {x: (-oo, oo), y: (-1, 2)}
        """
        limits = [(g[1], g[2]) if isinstance(g, Tuple) else (-oo, oo)
                  for g in self._args[1:]]
        return dict(zip(self.variables, tuple(limits)))

    @property
    def expr(self):
        """
        Return the expression which is the functional form of the Wavefunction

        Examples
        ========

            >>> from sympy.physics.quantum.state import Wavefunction
            >>> from sympy import symbols
            >>> x, y = symbols('x, y')
            >>> f = Wavefunction(x**2, x)
            >>> f.expr
            x**2
        """
        return self._args[0]

    @property
    def is_normalized(self):
        """
        Returns true if the Wavefunction is properly normalized

        Examples
        ========

            >>> from sympy import symbols, pi
            >>> from sympy.functions import sqrt, sin
            >>> from sympy.physics.quantum.state import Wavefunction
            >>> x, L = symbols('x,L', positive=True)
            >>> n = symbols('n', integer=True, positive=True)
            >>> g = sqrt(2/L)*sin(n*pi*x/L)
            >>> f = Wavefunction(g, (x, 0, L))
            >>> f.is_normalized
            True
        """
        return (self.norm == 1.0)

    @property
    @cacheit
    def norm(self):
        """
        Return the normalization of the specified functional form.

        This function integrates over the coordinates of the Wavefunction,
        with the bounds specified.

        Examples
        ========

            >>> from sympy import symbols, pi
            >>> from sympy.functions import sqrt, sin
            >>> from sympy.physics.quantum.state import Wavefunction
            >>> x, L = symbols('x,L', positive=True)
            >>> n = symbols('n', integer=True, positive=True)
            >>> g = sqrt(2/L)*sin(n*pi*x/L)
            >>> f = Wavefunction(g, (x, 0, L))
            >>> f.norm
            1
            >>> g = sin(n*pi*x/L)
            >>> f = Wavefunction(g, (x, 0, L))
            >>> f.norm
            sqrt(2)*sqrt(L)/2
        """
        exp = self.expr*conjugate(self.expr)
        var = self.variables
        limits = self.limits

        # Integrate |psi|^2 over every coordinate within its bounds.
        for v in var:
            curr_limits = limits[v]
            exp = integrate(exp, (v, curr_limits[0], curr_limits[1]))

        return sqrt(exp)

    def normalize(self):
        """
        Return a normalized version of the Wavefunction

        Examples
        ========

            >>> from sympy import symbols, pi
            >>> from sympy.functions import sqrt, sin
            >>> from sympy.physics.quantum.state import Wavefunction
            >>> x = symbols('x', real=True)
            >>> L = symbols('L', positive=True)
            >>> n = symbols('n', integer=True, positive=True)
            >>> g = sin(n*pi*x/L)
            >>> f = Wavefunction(g, (x, 0, L))
            >>> f.normalize()
            Wavefunction(sqrt(2)*sin(pi*n*x/L)/sqrt(L), (x, 0, L))
        """
        const = self.norm
        if const == oo:
            raise NotImplementedError("The function is not normalizable!")
        return Wavefunction((const)**(-1)*self.expr, *self.args[1:])

    def prob(self):
        r"""
        Return the absolute magnitude of the w.f., `|\psi(x)|^2`

        Examples
        ========

            >>> from sympy import symbols, pi
            >>> from sympy.functions import sqrt, sin
            >>> from sympy.physics.quantum.state import Wavefunction
            >>> x, L = symbols('x,L', real=True)
            >>> n = symbols('n', integer=True)
            >>> g = sin(n*pi*x/L)
            >>> f = Wavefunction(g, (x, 0, L))
            >>> f.prob()
            Wavefunction(sin(pi*n*x/L)**2, x)
        """
        return Wavefunction(self.expr*conjugate(self.expr), *self.variables)
|
NikNitro/Python-iBeacon-Scan
|
sympy/physics/quantum/state.py
|
Python
|
gpl-3.0
| 29,156
|
[
"DIRAC"
] |
61b6e9de04276cbcabea9480343c5ac94823ebda932b9121572223bc6c48051f
|
'''
Created on Oct 21, 2014

@author: sergio
'''
import numpy as np
import ctypes
import numpy.ctypeslib as npct
import matplotlib.pyplot as plt

#cfsfd = ctypes.cdll.LoadLibrary('/home/sergio/iibm/sandbox/t.so')
#cfsfd.get_dc.restype = ctypes.c_float
#dc = cfsfd.get_dc("dbname=demo host=192.168.2.2 user=postgres password=postgres", "54")
#print dc

# ctypes array descriptors used to pass contiguous NumPy buffers to the
# native library.
array_1d_double = npct.ndpointer(dtype=np.double, ndim=1, flags='CONTIGUOUS')
array_1d_int = npct.ndpointer(dtype=np.int64, ndim=1, flags='CONTIGUOUS')

# Load the native "cfsfdp" shared library (presumably "clustering by fast
# search and find of density peaks" -- verify) and declare its signatures.
libcd = npct.load_library("cfsfdp", "/home/sergio/iibm/workspace/NeuroDB/NeuroDB/cfunctions/cfsfdp")
libcd.get_local_density.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_float, array_1d_double, ctypes.c_char_p]
libcd.get_local_density.restype = ctypes.c_int
libcd.get_distance_to_higher_density.argtypes = [ctypes.c_char_p, ctypes.c_char_p, array_1d_double, array_1d_double, ctypes.c_int]
libcd.get_distance_to_higher_density.restype = ctypes.c_int
libcd.get_dc.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_float]
libcd.get_dc.restype = ctypes.c_float
libcd.get_cluster_dp.argtypes = [ctypes.c_char_p, ctypes.c_char_p, array_1d_double]
# NOTE(review): using an ndpointer as a *restype* is dubious -- ctypes does
# not build a NumPy array from a returned pointer this way; confirm before
# enabling the get_cluster_dp call below.
libcd.get_cluster_dp.restype = array_1d_double

# Cut-off distance computed by the native library from the database data.
dc = libcd.get_dc("dbname=demo host=192.168.2.2 user=postgres password=postgres", "54", 2.0)
# Output buffers; 1026 appears hard-coded to this channel's spike count --
# TODO confirm against the database.
local_density = np.empty(1026)
distance_to_higher_density = np.empty(1026)
# NOTE(review): Python 2 print statement.
print "dc: ", dc, type(dc)
libcd.get_local_density("dbname=demo host=192.168.2.2 user=postgres password=postgres", "54", dc, local_density, "gaussian")
libcd.get_distance_to_higher_density("dbname=demo host=192.168.2.2 user=postgres password=postgres", "54", local_density, distance_to_higher_density, len(local_density))

# Decision-graph quantities: gamma = rho * delta; dp is delta ordered by rho.
gamma = local_density*distance_to_higher_density
dp = distance_to_higher_density[local_density.argsort()]
dp2 = np.empty(1026)
for i in range(len(dp)):
    dp2[i] = i * dp[i]

#gamma2 = libcd.get_cluster_dp("dbname=demo host=192.168.2.2 user=postgres password=postgres", "54")

# plt.subplot(4,1,1)
# plt.plot(local_density, 'o')
# plt.subplot(4,1,2)
# plt.plot(distance_to_higher_density, 'o')

# Plot the decision graph (delta vs rho) and the weighted sorted deltas.
plt.subplot(2,1,1)
plt.plot(local_density, distance_to_higher_density, 'o')
plt.subplot(2,1,2)
plt.plot(dp2, 'o')
plt.show()

#print dc("dbname=demo user=postgres password=postgres hostaddr=192.168.2.2 port=5432")
pass
|
sergio2pi/NeuroDB
|
test/test5.py
|
Python
|
gpl-2.0
| 2,324
|
[
"Gaussian"
] |
bc4487b44497305eba850ed2756feb9ea78b1088b2860d9fc4c62e4fad8fbda5
|
# Copyright (C) 2009 by Eric Talevich (eric.talevich@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""PhyloXML reader/parser, writer, and associated functions.
Instantiates tree elements from a parsed PhyloXML file, and constructs an XML
file from a `Bio.Phylo.PhyloXML` object.
About capitalization:
- phyloXML means the file format specification
- PhyloXML means the Biopython module `Bio.Phylo.PhyloXML` and its classes
- Phyloxml means the top-level class used by `PhyloXMLIO.read` (but not
`Bio.Phylo.read`!), containing a list of Phylogenies (objects derived from
`BaseTree.Tree`)
"""
__docformat__ = "restructuredtext en"
import sys
import warnings
from Bio.Phylo import PhyloXML as PX
if (3, 0, 0) <= sys.version_info[:3] <= (3, 1, 3):
    # Workaround for cElementTree regression in python 3.0--3.1.3
    # See http://bugs.python.org/issue9257
    from xml.etree import ElementTree
else:
    try:
        # Prefer the C-accelerated implementation when available.
        from xml.etree import cElementTree as ElementTree
    except ImportError:
        # Alternative Python implementation, perhaps?
        from xml.etree import ElementTree as ElementTree
# Recognize the phyloXML namespace when parsing
# See http://effbot.org/zone/element-namespaces.htm
NAMESPACES = {
    'phy': 'http://www.phyloxml.org',
}

try:
    register_namespace = ElementTree.register_namespace
except AttributeError:
    # Older ElementTree versions lack register_namespace; fall back to
    # poking the private prefix map directly.
    if not hasattr(ElementTree, '_namespace_map'):
        # cElementTree needs the pure-Python xml.etree.ElementTree
        from xml.etree import ElementTree as ET_py
        ElementTree._namespace_map = ET_py._namespace_map

    def register_namespace(prefix, uri):
        ElementTree._namespace_map[uri] = prefix

# NOTE(review): dict.iteritems is Python 2-only; under Python 3 this would
# need to be .items().
for prefix, uri in NAMESPACES.iteritems():
    register_namespace(prefix, uri)
class PhyloXMLError(Exception):
    """Raised when PhyloXML object construction cannot continue.

    Low-level XML syntax errors are found and raised by the underlying
    ElementTree module; this exception covers well-formed XML that breaks
    the phyloXML specification.
    """
# ---------------------------------------------------------
# Public API
def read(file):
    """Parse a phyloXML file or stream into a tree of Biopython objects.

    The children of the root node are phylogenies and possibly other
    arbitrary (non-phyloXML) objects.

    :returns: a single `Bio.Phylo.PhyloXML.Phyloxml` object.
    """
    parser = Parser(file)
    return parser.read()
def parse(file):
    """Iterate over the phylogenetic trees in a phyloXML file.

    Ignores any additional data stored at the top level, but may be more
    memory-efficient than the `read` function.

    :returns: a generator of `Bio.Phylo.PhyloXML.Phylogeny` objects.
    """
    parser = Parser(file)
    return parser.parse()
def write(obj, file, encoding='utf-8', indent=True):
    """Write a phyloXML file.

    :Parameters:
        obj
            an instance of `Phyloxml`, `Phylogeny` or `BaseTree.Tree`, or an
            iterable of either of the latter two. The object will be
            converted to a Phyloxml object before serialization.
        file
            either an open handle or a file name.
    """
    def as_phylogeny(tree):
        # Coerce a single tree-like object into a Phylogeny.
        if isinstance(tree, PX.Phylogeny):
            return tree
        if isinstance(tree, PX.Clade):
            return tree.to_phylogeny()
        if isinstance(tree, PX.BaseTree.Tree):
            return PX.Phylogeny.from_tree(tree)
        if isinstance(tree, PX.BaseTree.Clade):
            return PX.Phylogeny.from_tree(PX.BaseTree.Tree(root=tree))
        raise ValueError("iterable must contain Tree or Clade types")

    if isinstance(obj, PX.Phyloxml):
        pass
    elif isinstance(obj, (PX.BaseTree.Tree, PX.BaseTree.Clade)):
        obj = as_phylogeny(obj).to_phyloxml()
    elif hasattr(obj, '__iter__'):
        obj = PX.Phyloxml({}, phylogenies=(as_phylogeny(t) for t in obj))
    else:
        raise ValueError("First argument must be a Phyloxml, Phylogeny, "
                         "Tree, or iterable of Trees or Phylogenies.")
    return Writer(obj).write(file, encoding=encoding, indent=indent)
# ---------------------------------------------------------
# Functions I wish ElementTree had
def _local(tag):
"""Extract the local tag from a namespaced tag name."""
if tag[0] == '{':
return tag[tag.index('}')+1:]
return tag
def _split_namespace(tag):
"""Split a tag into namespace and local tag strings."""
try:
return tag[1:].split('}', 1)
except:
return ('', tag)
def _ns(tag, namespace=NAMESPACES['phy']):
    """Format an XML tag with the given namespace, e.g. ``{uri}tag``."""
    return '{%s}%s' % (namespace, tag)
def _get_child_as(parent, tag, construct):
    """Find a child node by tag and pass it through a constructor.

    Returns None if no matching child is found.
    """
    child = parent.find(_ns(tag))
    return construct(child) if child is not None else None
def _get_child_text(parent, tag, construct=unicode):
    """Find a child node by tag; pass its text through a constructor.

    Returns None if no matching child is found or its text is empty.
    """
    child = parent.find(_ns(tag))
    if child is None or not child.text:
        return None
    return construct(child.text)
def _get_children_as(parent, tag, construct):
    """Find child nodes by tag; pass each through a constructor.

    Returns an empty list if no matching child is found.
    """
    return [construct(child) for child in parent.findall(_ns(tag))]
def _get_children_text(parent, tag, construct=unicode):
    """Find child nodes by tag; pass each node's text through a constructor.

    Children with empty text are skipped; returns an empty list if no
    matching child is found.
    """
    return [construct(child.text)
            for child in parent.findall(_ns(tag))
            if child.text]
def _indent(elem, level=0):
    """Add line breaks and indentation to ElementTree in-place.

    Sources:

    - http://effbot.org/zone/element-lib.htm#prettyprint
    - http://infix.se/2007/02/06/gentlemen-indent-your-xml
    """
    i = "\n" + level*" "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + " "
        for e in elem:
            _indent(e, level+1)
            if not e.tail or not e.tail.strip():
                e.tail = i + " "
        # Dedent the tail of the last child so the parent's closing tag
        # lines up with its opening tag.
        if not e.tail or not e.tail.strip():
            e.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
# ---------------------------------------------------------
# INPUT
# ---------------------------------------------------------
def _str2bool(text):
if text == 'true':
return True
if text == 'false':
return False
raise ValueError('String could not be converted to boolean: ' + text)
def _dict_str2bool(dct, keys):
    """Return a copy of *dct* with the given keys converted to booleans.

    Keys absent from *dct* are ignored.
    """
    converted = dict(dct)
    for key in keys:
        if key in converted:
            converted[key] = _str2bool(converted[key])
    return converted
def _int(text):
if text is not None:
try:
return int(text)
except Exception:
return None
def _float(text):
if text is not None:
try:
return float(text)
except Exception:
return None
def _collapse_wspace(text):
"""Replace all spans of whitespace with a single space character.
Also remove leading and trailing whitespace. See "Collapse Whitespace
Policy" in the phyloXML spec glossary:
http://phyloxml.org/documentation/version_100/phyloxml.xsd.html#Glossary
"""
if text is not None:
return ' '.join(text.split())
# NB: Not currently used
def _replace_wspace(text):
"""Replace tab, LF and CR characters with spaces, but don't collapse.
See "Replace Whitespace Policy" in the phyloXML spec glossary:
http://phyloxml.org/documentation/version_100/phyloxml.xsd.html#Glossary
"""
for char in ('\t', '\n', '\r'):
if char in text:
text = text.replace(char, ' ')
return text
class Parser(object):
"""Methods for parsing all phyloXML nodes from an XML stream.
To minimize memory use, the tree of ElementTree parsing events is cleared
after completing each phylogeny, clade, and top-level 'other' element.
Elements below the clade level are kept in memory until parsing of the
current clade is finished -- this shouldn't be a problem because clade is
the only recursive element, and non-clade nodes below this level are of
bounded size.
"""
def __init__(self, file):
    # Get an iterable context for XML parsing events
    context = iter(ElementTree.iterparse(file, events=('start', 'end')))
    # The first event is the 'start' of the document's root element.
    # NOTE(review): ``context.next()`` is Python 2 syntax; on Python 3
    # this would need to be ``next(context)``.
    event, root = context.next()
    self.root = root
    self.context = context
def read(self):
    """Parse the phyloXML file and create a single Phyloxml object."""
    # Root attributes (namespaces stripped from keys) become the
    # Phyloxml's attribute dict.
    phyloxml = PX.Phyloxml(dict((_local(key), val)
                                for key, val in self.root.items()))
    # Depth counter for nested non-phyloXML elements, so only the
    # top-level ones are turned into 'other' objects.
    other_depth = 0
    for event, elem in self.context:
        namespace, localtag = _split_namespace(elem.tag)
        if event == 'start':
            if namespace != NAMESPACES['phy']:
                other_depth += 1
                continue
            if localtag == 'phylogeny':
                phylogeny = self._parse_phylogeny(elem)
                phyloxml.phylogenies.append(phylogeny)
        if event == 'end' and namespace != NAMESPACES['phy']:
            # Deal with items not specified by phyloXML
            other_depth -= 1
            if other_depth == 0:
                # We're directly under the root node -- evaluate
                otr = self.other(elem, namespace, localtag)
                phyloxml.other.append(otr)
                # Free the parsed event tree to keep memory bounded.
                self.root.clear()
    return phyloxml
def parse(self):
    """Parse the phyloXML file incrementally, yielding each phylogeny."""
    target = _ns('phylogeny')
    for event, elem in self.context:
        if event != 'start' or elem.tag != target:
            continue
        yield self._parse_phylogeny(elem)
# Special parsing cases -- incremental, using self.context
def _parse_phylogeny(self, parent):
"""Parse a single phylogeny within the phyloXML tree.
Recursively builds a phylogenetic tree with help from parse_clade, then
clears the XML event history for the phylogeny element and returns
control to the top-level parsing function.
"""
phylogeny = PX.Phylogeny(**_dict_str2bool(parent.attrib,
['rooted', 'rerootable']))
list_types = {
# XML tag, plural attribute
'confidence': 'confidences',
'property': 'properties',
'clade_relation': 'clade_relations',
'sequence_relation': 'sequence_relations',
}
for event, elem in self.context:
namespace, tag = _split_namespace(elem.tag)
if event == 'start' and tag == 'clade':
assert phylogeny.root is None, \
"Phylogeny object should only have 1 clade"
phylogeny.root = self._parse_clade(elem)
continue
if event == 'end':
if tag == 'phylogeny':
parent.clear()
break
# Handle the other non-recursive children
if tag in list_types:
getattr(phylogeny, list_types[tag]).append(
getattr(self, tag)(elem))
# Complex types
elif tag in ('date', 'id'):
setattr(phylogeny, tag, getattr(self, tag)(elem))
# Simple types
elif tag in ('name', 'description'):
setattr(phylogeny, tag, _collapse_wspace(elem.text))
# Unknown tags
elif namespace != NAMESPACES['phy']:
phylogeny.other.append(self.other(elem, namespace, tag))
parent.clear()
else:
# NB: This shouldn't happen in valid files
raise PhyloXMLError('Misidentified tag: ' + tag)
return phylogeny
_clade_complex_types = ['color', 'events', 'binary_characters', 'date']
_clade_list_types = {
'confidence': 'confidences',
'distribution': 'distributions',
'reference': 'references',
'property': 'properties',
}
_clade_tracked_tags = set(_clade_complex_types + _clade_list_types.keys()
+ ['branch_length', 'name', 'node_id', 'width'])
def _parse_clade(self, parent):
"""Parse a Clade node and its children, recursively."""
clade = PX.Clade(**parent.attrib)
if clade.branch_length is not None:
clade.branch_length = float(clade.branch_length)
# NB: Only evaluate nodes at the current level
tag_stack = []
for event, elem in self.context:
namespace, tag = _split_namespace(elem.tag)
if event == 'start':
if tag == 'clade':
clade.clades.append(self._parse_clade(elem))
continue
if tag == 'taxonomy':
clade.taxonomies.append(self._parse_taxonomy(elem))
continue
if tag == 'sequence':
clade.sequences.append(self._parse_sequence(elem))
continue
if tag in self._clade_tracked_tags:
tag_stack.append(tag)
if event == 'end':
if tag == 'clade':
elem.clear()
break
if tag != tag_stack[-1]:
continue
tag_stack.pop()
# Handle the other non-recursive children
if tag in self._clade_list_types:
getattr(clade, self._clade_list_types[tag]).append(
getattr(self, tag)(elem))
elif tag in self._clade_complex_types:
setattr(clade, tag, getattr(self, tag)(elem))
elif tag == 'branch_length':
# NB: possible collision with the attribute
if clade.branch_length is not None:
raise PhyloXMLError(
'Attribute branch_length was already set '
'for this Clade.')
clade.branch_length = _float(elem.text)
elif tag == 'width':
clade.width = _float(elem.text)
elif tag == 'name':
clade.name = _collapse_wspace(elem.text)
elif tag == 'node_id':
clade.node_id = PX.Id(elem.text.strip(),
elem.attrib.get('provider'))
elif namespace != NAMESPACES['phy']:
clade.other.append(self.other(elem, namespace, tag))
elem.clear()
else:
raise PhyloXMLError('Misidentified tag: ' + tag)
return clade
def _parse_sequence(self, parent):
sequence = PX.Sequence(**parent.attrib)
for event, elem in self.context:
namespace, tag = _split_namespace(elem.tag)
if event == 'end':
if tag == 'sequence':
parent.clear()
break
if tag in ('accession', 'mol_seq', 'uri',
'domain_architecture'):
setattr(sequence, tag, getattr(self, tag)(elem))
elif tag == 'annotation':
sequence.annotations.append(self.annotation(elem))
elif tag == 'name':
sequence.name = _collapse_wspace(elem.text)
elif tag in ('symbol', 'location'):
setattr(sequence, tag, elem.text)
elif namespace != NAMESPACES['phy']:
sequence.other.append(self.other(elem, namespace, tag))
parent.clear()
return sequence
def _parse_taxonomy(self, parent):
taxonomy = PX.Taxonomy(**parent.attrib)
for event, elem in self.context:
namespace, tag = _split_namespace(elem.tag)
if event == 'end':
if tag == 'taxonomy':
parent.clear()
break
if tag in ('id', 'uri'):
setattr(taxonomy, tag, getattr(self, tag)(elem))
elif tag == 'common_name':
taxonomy.common_names.append(_collapse_wspace(elem.text))
elif tag == 'synonym':
taxonomy.synonyms.append(elem.text)
elif tag in ('code', 'scientific_name', 'authority', 'rank'):
# ENH: check_str on rank
setattr(taxonomy, tag, elem.text)
elif namespace != NAMESPACES['phy']:
taxonomy.other.append(self.other(elem, namespace, tag))
parent.clear()
return taxonomy
def other(self, elem, namespace, localtag):
return PX.Other(localtag, namespace, elem.attrib,
value=elem.text and elem.text.strip() or None,
children=[self.other(child, *_split_namespace(child.tag))
for child in elem])
# Complex types
def accession(self, elem):
return PX.Accession(elem.text.strip(), elem.get('source'))
def annotation(self, elem):
return PX.Annotation(
desc=_collapse_wspace(_get_child_text(elem, 'desc')),
confidence=_get_child_as(elem, 'confidence', self.confidence),
properties=_get_children_as(elem, 'property', self.property),
uri=_get_child_as(elem, 'uri', self.uri),
**elem.attrib)
def binary_characters(self, elem):
def bc_getter(elem):
return _get_children_text(elem, 'bc')
return PX.BinaryCharacters(
type=elem.get('type'),
gained_count=_int(elem.get('gained_count')),
lost_count=_int(elem.get('lost_count')),
present_count=_int(elem.get('present_count')),
absent_count=_int(elem.get('absent_count')),
# Flatten BinaryCharacterList sub-nodes into lists of strings
gained=_get_child_as(elem, 'gained', bc_getter),
lost=_get_child_as(elem, 'lost', bc_getter),
present=_get_child_as(elem, 'present', bc_getter),
absent=_get_child_as(elem, 'absent', bc_getter))
def clade_relation(self, elem):
return PX.CladeRelation(
elem.get('type'), elem.get('id_ref_0'), elem.get('id_ref_1'),
distance=elem.get('distance'),
confidence=_get_child_as(elem, 'confidence', self.confidence))
def color(self, elem):
red, green, blue = (_get_child_text(elem, color, int) for color in
('red', 'green', 'blue'))
return PX.BranchColor(red, green, blue)
def confidence(self, elem):
return PX.Confidence(
_float(elem.text),
elem.get('type'))
def date(self, elem):
return PX.Date(
unit=elem.get('unit'),
desc=_collapse_wspace(_get_child_text(elem, 'desc')),
value=_get_child_text(elem, 'value', float),
minimum=_get_child_text(elem, 'minimum', float),
maximum=_get_child_text(elem, 'maximum', float),
)
def distribution(self, elem):
return PX.Distribution(
desc=_collapse_wspace(_get_child_text(elem, 'desc')),
points=_get_children_as(elem, 'point', self.point),
polygons=_get_children_as(elem, 'polygon', self.polygon))
def domain(self, elem):
return PX.ProteinDomain(elem.text.strip(),
int(elem.get('from')) - 1,
int(elem.get('to')),
confidence=_float(elem.get('confidence')),
id=elem.get('id'))
def domain_architecture(self, elem):
return PX.DomainArchitecture(
length=int(elem.get('length')),
domains=_get_children_as(elem, 'domain', self.domain))
def events(self, elem):
return PX.Events(
type=_get_child_text(elem, 'type'),
duplications=_get_child_text(elem, 'duplications', int),
speciations=_get_child_text(elem, 'speciations', int),
losses=_get_child_text(elem, 'losses', int),
confidence=_get_child_as(elem, 'confidence', self.confidence))
def id(self, elem):
provider = elem.get('provider') or elem.get('type')
return PX.Id(elem.text.strip(), provider)
def mol_seq(self, elem):
is_aligned = elem.get('is_aligned')
if is_aligned is not None:
is_aligned = _str2bool(is_aligned)
return PX.MolSeq(elem.text.strip(), is_aligned=is_aligned)
def point(self, elem):
return PX.Point(
elem.get('geodetic_datum'),
_get_child_text(elem, 'lat', float),
_get_child_text(elem, 'long', float),
alt=_get_child_text(elem, 'alt', float),
alt_unit=elem.get('alt_unit'))
def polygon(self, elem):
return PX.Polygon(
points=_get_children_as(elem, 'point', self.point))
def property(self, elem):
return PX.Property(elem.text.strip(),
elem.get('ref'), elem.get('applies_to'), elem.get('datatype'),
unit=elem.get('unit'),
id_ref=elem.get('id_ref'))
def reference(self, elem):
return PX.Reference(
doi=elem.get('doi'),
desc=_get_child_text(elem, 'desc'))
def sequence_relation(self, elem):
return PX.SequenceRelation(
elem.get('type'), elem.get('id_ref_0'), elem.get('id_ref_1'),
distance=_float(elem.get('distance')),
confidence=_get_child_as(elem, 'confidence', self.confidence))
def uri(self, elem):
return PX.Uri(elem.text.strip(),
desc=_collapse_wspace(elem.get('desc')),
type=elem.get('type'))
# ---------------------------------------------------------
# OUTPUT
# ---------------------------------------------------------
def _serialize(value):
    """Convert a Python primitive to a phyloXML-compatible Unicode string."""
    # bool must be tested before float-style handling would ever matter:
    # phyloXML wants 'true'/'false', while floats use an uppercase exponent.
    if isinstance(value, bool):
        return unicode(value).lower()
    if isinstance(value, float):
        return unicode(value).upper()
    return unicode(value)
def _clean_attrib(obj, attrs):
    """Create a dictionary from an object's specified, non-None attributes."""
    # Read each attribute exactly once, keep only those that are set.
    pairs = ((key, getattr(obj, key)) for key in attrs)
    return dict((key, _serialize(val)) for key, val in pairs if val is not None)
def _handle_complex(tag, attribs, subnodes, has_text=False):
    """Build a serializer method for a complex phyloXML node type.

    *attribs* names the object attributes emitted as XML attributes;
    *subnodes* is an ordered spec of children -- a bare string for a
    singular child (method and attribute share the name), or a
    (method, plural_attribute) pair for a list-valued child. When
    *has_text* is true, obj.value becomes the element's text.
    """
    def wrapped(self, obj):
        elem = ElementTree.Element(tag, _clean_attrib(obj, attribs))
        for subn in subnodes:
            if not isinstance(subn, basestring):
                # list: singular method, pluralized attribute name
                method, plural = subn
                for item in getattr(obj, plural):
                    elem.append(getattr(self, method)(item))
            else:
                # singular object: method and attribute names are the same
                child = getattr(obj, subn)
                if child is not None:
                    elem.append(getattr(self, subn)(child))
        if has_text:
            elem.text = _serialize(obj.value)
        return elem
    wrapped.__doc__ = "Serialize a %s and its subnodes, in order." % tag
    return wrapped
def _handle_simple(tag):
    """Build a serializer method for a simple text-only phyloXML node."""
    def wrapped(self, obj):
        node = ElementTree.Element(tag)
        node.text = _serialize(obj)
        return node
    wrapped.__doc__ = "Serialize a simple %s node." % tag
    return wrapped
class Writer(object):
    """Methods for serializing a PhyloXML object to XML."""
    def __init__(self, phyloxml):
        """Build an ElementTree from a PhyloXML object."""
        assert isinstance(phyloxml, PX.Phyloxml), "Not a Phyloxml object"
        self._tree = ElementTree.ElementTree(self.phyloxml(phyloxml))
    def write(self, file, encoding='utf-8', indent=True):
        """Write the serialized tree to *file*.
        Returns the number of direct children of the document root.
        """
        if indent:
            _indent(self._tree.getroot())
        self._tree.write(file, encoding)
        return len(self._tree.getroot())
    # Convert classes to ETree elements
    def phyloxml(self, obj):
        """Serialize the top-level Phyloxml object and all its children."""
        elem = ElementTree.Element('phyloxml', obj.attributes)  # Namespaces
        for tree in obj.phylogenies:
            elem.append(self.phylogeny(tree))
        for otr in obj.other:
            elem.append(self.other(otr))
        return elem
    def other(self, obj):
        """Serialize an Other (non-phyloXML) node, recursively."""
        elem = ElementTree.Element(_ns(obj.tag, obj.namespace), obj.attributes)
        elem.text = obj.value
        for child in obj.children:
            elem.append(self.other(child))
        return elem
    # Serializers generated by _handle_complex: (tag, XML attributes,
    # ordered sub-node spec).  A bare name is a singular child; a
    # (method, plural_attribute) pair is a list-valued child.
    phylogeny = _handle_complex('phylogeny',
        ('rooted', 'rerootable', 'branch_length_unit', 'type'),
        ( 'name',
          'id',
          'description',
          'date',
          ('confidence', 'confidences'),
          'clade',
          ('clade_relation', 'clade_relations'),
          ('sequence_relation', 'sequence_relations'),
          ('property', 'properties'),
          ('other', 'other'),
        ))
    clade = _handle_complex('clade', ('id_source',),
        ( 'name',
          'branch_length',
          ('confidence', 'confidences'),
          'width',
          'color',
          'node_id',
          ('taxonomy', 'taxonomies'),
          ('sequence', 'sequences'),
          'events',
          'binary_characters',
          ('distribution', 'distributions'),
          'date',
          ('reference', 'references'),
          ('property', 'properties'),
          ('clade', 'clades'),
          ('other', 'other'),
        ))
    accession = _handle_complex('accession', ('source',),
                                (), has_text=True)
    annotation = _handle_complex('annotation',
        ('ref', 'source', 'evidence', 'type'),
        ( 'desc',
          'confidence',
          ('property', 'properties'),
          'uri',
        ))
    def binary_characters(self, obj):
        """Serialize a binary_characters node and its subnodes."""
        elem = ElementTree.Element('binary_characters',
                                   _clean_attrib(obj,
                                                 ('type', 'gained_count', 'lost_count',
                                                  'present_count', 'absent_count')))
        # Each list attribute becomes a wrapper element holding <bc> children.
        for subn in ('gained', 'lost', 'present', 'absent'):
            subelem = ElementTree.Element(subn)
            for token in getattr(obj, subn):
                subelem.append(self.bc(token))
            elem.append(subelem)
        return elem
    clade_relation = _handle_complex('clade_relation',
        ('id_ref_0', 'id_ref_1', 'distance', 'type'),
        ('confidence',))
    color = _handle_complex('color', (), ('red', 'green', 'blue'))
    confidence = _handle_complex('confidence', ('type',),
                                 (), has_text=True)
    date = _handle_complex('date', ('unit',),
                           ('desc', 'value', 'minimum', 'maximum'))
    distribution = _handle_complex('distribution', (),
        ( 'desc',
          ('point', 'points'),
          ('polygon', 'polygons'),
        ))
    def domain(self, obj):
        """Serialize a domain node.
        The internal 0-based start is converted back to phyloXML's
        1-based 'from' coordinate.
        """
        elem = ElementTree.Element('domain',
                                   {'from': str(obj.start + 1), 'to': str(obj.end)})
        if obj.confidence is not None:
            elem.set('confidence', _serialize(obj.confidence))
        if obj.id is not None:
            elem.set('id', obj.id)
        elem.text = _serialize(obj.value)
        return elem
    domain_architecture = _handle_complex('domain_architecture',
                                          ('length',),
                                          (('domain', 'domains'),))
    events = _handle_complex('events', (),
        ( 'type',
          'duplications',
          'speciations',
          'losses',
          'confidence',
        ))
    id = _handle_complex('id', ('provider',), (), has_text=True)
    mol_seq = _handle_complex('mol_seq', ('is_aligned',),
                              (), has_text=True)
    node_id = _handle_complex('node_id', ('provider',), (), has_text=True)
    point = _handle_complex('point', ('geodetic_datum', 'alt_unit'),
                            ('lat', 'long', 'alt'))
    polygon = _handle_complex('polygon', (), (('point', 'points'),))
    property = _handle_complex('property',
        ('ref', 'unit', 'datatype', 'applies_to', 'id_ref'),
        (), has_text=True)
    reference = _handle_complex('reference', ('doi',), ('desc',))
    sequence = _handle_complex('sequence',
        ('type', 'id_ref', 'id_source'),
        ( 'symbol',
          'accession',
          'name',
          'location',
          'mol_seq',
          'uri',
          ('annotation', 'annotations'),
          'domain_architecture',
          ('other', 'other'),
        ))
    sequence_relation = _handle_complex('sequence_relation',
        ('id_ref_0', 'id_ref_1', 'distance', 'type'),
        ('confidence',))
    taxonomy = _handle_complex('taxonomy',
        ('id_source',),
        ( 'id',
          'code',
          'scientific_name',
          'authority',
          ('common_name', 'common_names'),
          ('synonym', 'synonyms'),
          'rank',
          'uri',
          ('other', 'other'),
        ))
    uri = _handle_complex('uri', ('desc', 'type'), (), has_text=True)
    # Primitive types
    # Floating point
    alt = _handle_simple('alt')
    branch_length = _handle_simple('branch_length')
    lat = _handle_simple('lat')
    long = _handle_simple('long')
    maximum = _handle_simple('maximum')
    minimum = _handle_simple('minimum')
    value = _handle_simple('value')
    width = _handle_simple('width')
    # Integers
    blue = _handle_simple('blue')
    duplications = _handle_simple('duplications')
    green = _handle_simple('green')
    losses = _handle_simple('losses')
    red = _handle_simple('red')
    speciations = _handle_simple('speciations')
    # Strings
    bc = _handle_simple('bc')
    code = _handle_simple('code')
    common_name = _handle_simple('common_name')
    desc = _handle_simple('desc')
    description = _handle_simple('description')
    location = _handle_simple('location')
    name = _handle_simple('name')
    rank = _handle_simple('rank')
    scientific_name = _handle_simple('scientific_name')
    symbol = _handle_simple('symbol')
    synonym = _handle_simple('synonym')
    type = _handle_simple('type')
|
bryback/quickseq
|
genescript/Bio/Phylo/PhyloXMLIO.py
|
Python
|
mit
| 31,591
|
[
"Biopython"
] |
120ae5cab5a6bb71c221a369f5fb65791dd63fec83093e6fc1936745235c5a26
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# Christian Brueffer <christian@brueffer.de> wrote this file. As long as you
# retain this notice you can do whatever you want with this stuff. If we meet
# some day, and you think this stuff is worth it, you can buy me a beer in
# return.
# ----------------------------------------------------------------------------
# This script fixes the reads in a TopHat unmapped.bam to make them compatible
# with other tools, e.g. Picard and samtools.
import os
import pysam
import sys
def get_index_pos(index, unmapped_reads, read):
    """Return the position of *read* (by qname) in *index*, or None.

    *unmapped_reads* is unused; the parameter is kept so existing
    callers passing three arguments keep working.
    """
    # dict.get gives the same lookup-or-None semantics in a single probe.
    return index.get(read.qname)
def main(path, outdir, mapped_file="accepted_hits.bam", unmapped_file="unmapped.bam"):
    """Fix TopHat's unmapped reads and write <unmapped>_fixup.bam to *outdir*.

    Repairs applied to every unmapped read: strip a trailing "/1"-style
    qname suffix, set the mate-is-unmapped flag on both reads of a pair
    found twice in the unmapped file, and zero the mapping quality.
    Unmapped reads whose mate is mapped additionally get their tid/pos
    fields copied over from the mapped mate.
    """
    bam_mapped = pysam.Samfile(os.path.join(path, mapped_file))
    bam_unmapped = pysam.Samfile(os.path.join(path, unmapped_file))
    # NB: the entire unmapped file is held in memory for index-based fixup.
    unmapped_reads = list(bam_unmapped.fetch(until_eof=True))
    # Fix things that relate to all unmapped reads.
    unmapped_dict = {}
    unmapped_index = {}
    for i in range(len(unmapped_reads)):
        read = unmapped_reads[i]
        # remove /1 and /2 suffixes
        if read.qname.find("/") != -1:
            read.qname = read.qname[:-2]
        unmapped_index[read.qname] = i
        # work around "mate is unmapped" bug in TopHat
        if read.qname in unmapped_dict:
            # Second occurrence of the qname: both mates are unmapped.
            unmapped_reads[unmapped_dict[read.qname]].mate_is_unmapped = True
            read.mate_is_unmapped = True
        else:
            unmapped_dict[read.qname] = i
        read.mapq = 0
        unmapped_reads[i] = read
    # Fix things that relate only to unmapped reads with a mapped mate.
    for mapped in bam_mapped:
        if mapped.mate_is_unmapped:
            i = get_index_pos(unmapped_index, unmapped_reads, mapped)
            if i is not None:
                unmapped = unmapped_reads[i]
                # map chromosome TIDs from mapped to unmapped file
                mapped_rname = bam_mapped.getrname(mapped.tid)
                unmapped_new_tid = bam_unmapped.gettid(mapped_rname)
                unmapped.tid = unmapped_new_tid
                unmapped.rnext = unmapped_new_tid
                unmapped.pos = mapped.pos
                unmapped.pnext = 0
                unmapped_reads[i] = unmapped
    bam_mapped.close()
    # for the output file, take the headers from the unmapped file
    base, ext = os.path.splitext(unmapped_file)
    out_filename = "".join([base, "_fixup", ext])
    bam_out = pysam.Samfile(os.path.join(outdir, out_filename), "wb",
                            template=bam_unmapped)
    bam_unmapped.close()
    for read in unmapped_reads:
        bam_out.write(read)
    bam_out.close()
def usage(scriptname):
print "Usage:"
print scriptname, "tophat_output_dir"
sys.exit(1)
if __name__ == "__main__":
    # CLI: fix_tophat_unmapped_reads.py tophat_output_dir [output_dir]
    if len(sys.argv) == 2:
        path = sys.argv[1]
        if os.path.exists(path) and os.path.isdir(path):
            # no tmpdir specified, use the bam dir
            main(path, path)
        else:
            usage(sys.argv[0])
    elif len(sys.argv) == 3:
        path = sys.argv[1]
        outdir = sys.argv[2]
        # Both the input and the output directory must already exist.
        if os.path.exists(path) and os.path.isdir(path) and os.path.exists(outdir) and os.path.isdir(outdir):
            main(path, outdir)
        else:
            usage(sys.argv[0])
    else:
        usage(sys.argv[0])
|
jessicachung/rna_seq_pipeline
|
scripts/fix_tophat_unmapped_reads.py
|
Python
|
mit
| 3,571
|
[
"pysam"
] |
f46f59d18cfdafb63b0ff1b405700a452f29372bb4a83f689d051cb9c0cc248c
|
''' Some tests for filters '''
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from numpy.testing import (assert_equal, assert_allclose,
assert_array_equal, assert_almost_equal)
from pytest import raises as assert_raises
import scipy.ndimage as sndi
from scipy.ndimage.filters import _gaussian_kernel1d
def test_ticket_701():
    """Regression test for ticket #701: scalar and tuple sizes must agree."""
    data = np.arange(4).reshape((2, 2))
    def window_min(window):
        return np.min(window)
    from_tuple = sndi.generic_filter(data, window_min, size=(1, 1))
    # Passing a bare scalar size raised an error before ticket 701 was fixed.
    from_scalar = sndi.generic_filter(data, window_min, size=1)
    assert_equal(from_tuple, from_scalar)
def test_gh_5430():
    """Regression test for gh-5430: numpy integer scalars as sigma."""
    # In py2k an int is implemented using a C long, so which numpy width
    # failed was platform-dependent; in py3k both failed before the fix.
    for int_type in (np.int32, np.int64):
        sigma = int_type(1)
        normalized = sndi._ni_support._normalize_sequence(sigma, 1)
        assert_equal(normalized, [sigma])
    # Plain Python ints and explicit sequences worked before; keep it so.
    assert_equal(sndi._ni_support._normalize_sequence(1, 1), [1])
    assert_equal(sndi._ni_support._normalize_sequence([1, 1], 2), [1, 1])
    # The reporter's original example should now run cleanly.
    noise = np.random.normal(size=(256, 256))
    perlin = np.zeros_like(noise)
    for octave in 2**np.arange(6):
        perlin += sndi.filters.gaussian_filter(noise, octave, mode="wrap") * octave**2
    # This also covers gh-4106: a bare numpy int64 as the rank argument.
    sndi._ni_support._normalize_sequence(np.int64(21), 0)
def test_gaussian_kernel1d():
    """Check _gaussian_kernel1d against the analytic derivative expressions."""
    sigma, radius = 2, 10
    var = sigma * sigma
    grid = np.arange(-radius, radius + 1, dtype=np.double)
    gauss = np.exp(-0.5 * grid * grid / var)
    gauss /= gauss.sum()
    # Orders 0-3: the normalized kernel times the derivative polynomial.
    expected_kernels = [
        gauss,
        -gauss * grid / var,
        gauss * (grid * grid / var - 1) / var,
        gauss * (3 - grid * grid / var) * grid / (var * var),
    ]
    for order, kernel in enumerate(expected_kernels):
        assert_allclose(kernel, _gaussian_kernel1d(sigma, order, radius))
def test_orders_gauss():
    """Check valid and invalid `order` arguments to the Gaussian filters."""
    data = np.zeros((1,))
    # Non-negative orders are accepted; an all-zero input stays zero.
    for order in (0, 3):
        assert_equal(0, sndi.gaussian_filter(data, 1, order=order))
        assert_equal(0, sndi.gaussian_filter1d(data, 1, axis=-1, order=order))
    # Negative orders must be rejected.
    assert_raises(ValueError, sndi.gaussian_filter, data, 1, -1)
    assert_raises(ValueError, sndi.gaussian_filter1d, data, 1, -1, -1)
def test_valid_origins():
    """Regression test for #1311: out-of-range origins must raise.

    For a window of size 3 the valid origin range is -1..1, so
    origin=2 must raise ValueError for every windowed filter.
    """
    func = lambda x: np.mean(x)
    data = np.array([1, 2, 3, 4, 5], dtype=np.float64)
    assert_raises(ValueError, sndi.generic_filter, data, func, size=3,
                  origin=2)
    # NB: the previously-defined `func2 = lambda x, y: ...` was never used
    # (the call below passes `func`), so the dead local has been removed.
    assert_raises(ValueError, sndi.generic_filter1d, data, func,
                  filter_size=3, origin=2)
    assert_raises(ValueError, sndi.percentile_filter, data, 0.2, size=3,
                  origin=2)
    for filter in [sndi.uniform_filter, sndi.minimum_filter,
                   sndi.maximum_filter, sndi.maximum_filter1d,
                   sndi.median_filter, sndi.minimum_filter1d]:
        # This should work, since for size == 3, the valid range for origin is
        # -1 to 1.
        list(filter(data, 3, origin=-1))
        list(filter(data, 3, origin=1))
        # Just check this raises an error instead of silently accepting or
        # segfaulting.
        assert_raises(ValueError, filter, data, 3, origin=2)
def test_multiple_modes():
    """A list of identical per-axis modes must match one shared mode."""
    data = np.array([[1., 0., 0.],
                     [1., 1., 0.],
                     [0., 0., 0.]])
    single = 'reflect'
    repeated = ['reflect', 'reflect']
    # Derivative filters that take only (input, mode).
    for derivative in (sndi.prewitt, sndi.sobel, sndi.laplace):
        assert_equal(derivative(data, mode=single),
                     derivative(data, mode=repeated))
    # Filters with a sigma argument.
    assert_equal(sndi.gaussian_filter(data, 1, mode=single),
                 sndi.gaussian_filter(data, 1, mode=repeated))
    assert_equal(sndi.gaussian_laplace(data, 1, mode=single),
                 sndi.gaussian_laplace(data, 1, mode=repeated))
    assert_equal(sndi.gaussian_gradient_magnitude(data, 1, mode=single),
                 sndi.gaussian_gradient_magnitude(data, 1, mode=repeated))
    # Filters with a size argument.
    assert_equal(sndi.maximum_filter(data, size=5, mode=single),
                 sndi.maximum_filter(data, size=5, mode=repeated))
    assert_equal(sndi.minimum_filter(data, size=5, mode=single),
                 sndi.minimum_filter(data, size=5, mode=repeated))
    assert_equal(sndi.uniform_filter(data, 5, mode=single),
                 sndi.uniform_filter(data, 5, mode=repeated))
def test_multiple_modes_sequentially():
    """Per-axis modes must equal applying the 1d filter axis by axis."""
    data = np.array([[1., 0., 0.],
                     [1., 1., 0.],
                     [0., 0., 0.]])
    modes = ['reflect', 'wrap']
    def axis_by_axis(filter1d, *args, **kwargs):
        # Run the 1d filter along axis 0, then axis 1, with per-axis modes.
        partial = filter1d(data, *args, axis=0, mode=modes[0], **kwargs)
        return filter1d(partial, *args, axis=1, mode=modes[1], **kwargs)
    assert_equal(axis_by_axis(sndi.gaussian_filter1d, 1),
                 sndi.gaussian_filter(data, 1, mode=modes))
    assert_equal(axis_by_axis(sndi.uniform_filter1d, 5),
                 sndi.uniform_filter(data, 5, mode=modes))
    assert_equal(axis_by_axis(sndi.maximum_filter1d, size=5),
                 sndi.maximum_filter(data, size=5, mode=modes))
    assert_equal(axis_by_axis(sndi.minimum_filter1d, size=5),
                 sndi.minimum_filter(data, size=5, mode=modes))
def test_multiple_modes_prewitt():
    """Prewitt filter with per-axis extrapolation modes ['reflect', 'wrap']."""
    data = np.array([[1., 0., 0.],
                     [1., 1., 0.],
                     [0., 0., 0.]])
    expected = np.array([[1., -3., 2.],
                         [1., -2., 1.],
                         [1., -1., 0.]])
    assert_equal(expected, sndi.prewitt(data, mode=['reflect', 'wrap']))
def test_multiple_modes_sobel():
    """Sobel filter with per-axis extrapolation modes ['reflect', 'wrap']."""
    data = np.array([[1., 0., 0.],
                     [1., 1., 0.],
                     [0., 0., 0.]])
    expected = np.array([[1., -4., 3.],
                         [2., -3., 1.],
                         [1., -1., 0.]])
    assert_equal(expected, sndi.sobel(data, mode=['reflect', 'wrap']))
def test_multiple_modes_laplace():
    """Laplace filter with per-axis extrapolation modes ['reflect', 'wrap']."""
    data = np.array([[1., 0., 0.],
                     [1., 1., 0.],
                     [0., 0., 0.]])
    expected = np.array([[-2., 2., 1.],
                         [-2., -3., 2.],
                         [1., 1., 0.]])
    assert_equal(expected, sndi.laplace(data, mode=['reflect', 'wrap']))
def test_multiple_modes_gaussian_laplace():
    """Gaussian-Laplace filter with per-axis modes ['reflect', 'wrap']."""
    data = np.array([[1., 0., 0.],
                     [1., 1., 0.],
                     [0., 0., 0.]])
    expected = np.array([[-0.28438687, 0.01559809, 0.19773499],
                         [-0.36630503, -0.20069774, 0.07483620],
                         [0.15849176, 0.18495566, 0.21934094]])
    result = sndi.gaussian_laplace(data, 1, mode=['reflect', 'wrap'])
    assert_almost_equal(expected, result)
def test_multiple_modes_gaussian_gradient_magnitude():
    """Gaussian gradient magnitude with per-axis modes ['reflect', 'wrap']."""
    data = np.array([[1., 0., 0.],
                     [1., 1., 0.],
                     [0., 0., 0.]])
    expected = np.array([[0.04928965, 0.09745625, 0.06405368],
                         [0.23056905, 0.14025305, 0.04550846],
                         [0.19894369, 0.14950060, 0.06796850]])
    result = sndi.gaussian_gradient_magnitude(data, 1, mode=['reflect', 'wrap'])
    assert_almost_equal(expected, result)
def test_multiple_modes_uniform():
    """Uniform filter with per-axis extrapolation modes ['reflect', 'wrap']."""
    data = np.array([[1., 0., 0.],
                     [1., 1., 0.],
                     [0., 0., 0.]])
    expected = np.array([[0.32, 0.40, 0.48],
                         [0.20, 0.28, 0.32],
                         [0.28, 0.32, 0.40]])
    result = sndi.uniform_filter(data, 5, mode=['reflect', 'wrap'])
    assert_almost_equal(expected, result)
def test_gaussian_truncate():
    """Gaussian filters truncated at different widths: check the support size.
    Only the number of nonzero elements is checked, not the values.
    """
    impulse = np.zeros((100, 100), float)
    impulse[50, 50] = 1
    # sigma=5: truncate=2 gives a 21x21 support; truncate=5 gives 51x51.
    for truncate, width in ((2, 21), (5, 51)):
        nonzeros = (sndi.gaussian_filter(impulse, 5, truncate=truncate) > 0).sum()
        assert_equal(nonzeros, width**2)
    # Truncation with a per-axis sigma sequence.
    blurred = sndi.gaussian_filter(impulse, [0.5, 2.5], truncate=3.5)
    support = blurred > 0
    # Width along axis 0 should be 2*int(2.5*3.5 + 0.5) + 1 = 19.
    assert_equal(support.any(axis=0).sum(), 19)
    # Width along axis 1 should be 2*int(0.5*3.5 + 0.5) + 1 = 5.
    assert_equal(support.any(axis=1).sum(), 5)
    # One-dimensional variant: gaussian_filter1d.
    spike = np.zeros(51)
    spike[25] = 1
    smoothed = sndi.gaussian_filter1d(spike, sigma=2, truncate=3.5)
    assert_equal((smoothed > 0).sum(), 15)
    # gaussian_laplace and gaussian_gradient_magnitude: measure the span
    # of nonzero output positions instead of counting them.
    for func in (sndi.gaussian_laplace, sndi.gaussian_gradient_magnitude):
        nonzero_positions = np.where(func(spike, sigma=2, truncate=3.5) != 0)[0]
        assert_equal(np.ptp(nonzero_positions) + 1, 15)
class TestThreading(object):
    """Check that ndimage filters give identical results with and without threads."""
    def check_func_thread(self, n, fun, args, out):
        """Run *fun* in *n* parallel threads, writing into out[0..n-1]."""
        from threading import Thread
        workers = []
        for idx in range(n):
            workers.append(Thread(target=fun, args=args,
                                  kwargs={'output': out[idx]}))
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()
    def check_func_serial(self, n, fun, args, out):
        """Run *fun* *n* times sequentially, writing into out[0..n-1]."""
        for idx in range(n):
            fun(*args, output=out[idx])
    def test_correlate1d(self):
        data = np.random.randn(5000)
        serial = np.empty((4, data.size))
        threaded = np.empty_like(serial)
        self.check_func_serial(4, sndi.correlate1d, (data, np.arange(5)), serial)
        self.check_func_thread(4, sndi.correlate1d, (data, np.arange(5)), threaded)
        assert_array_equal(serial, threaded)
    def test_correlate(self):
        data = np.random.randn(500, 500)
        kernel = np.random.randn(10, 10)
        serial = np.empty([4] + list(data.shape))
        threaded = np.empty_like(serial)
        self.check_func_serial(4, sndi.correlate, (data, kernel), serial)
        self.check_func_thread(4, sndi.correlate, (data, kernel), threaded)
        assert_array_equal(serial, threaded)
    def test_median_filter(self):
        data = np.random.randn(500, 500)
        serial = np.empty([4] + list(data.shape))
        threaded = np.empty_like(serial)
        self.check_func_serial(4, sndi.median_filter, (data, 3), serial)
        self.check_func_thread(4, sndi.median_filter, (data, 3), threaded)
        assert_array_equal(serial, threaded)
    def test_uniform_filter1d(self):
        data = np.random.randn(5000)
        serial = np.empty((4, data.size))
        threaded = np.empty_like(serial)
        self.check_func_serial(4, sndi.uniform_filter1d, (data, 5), serial)
        self.check_func_thread(4, sndi.uniform_filter1d, (data, 5), threaded)
        assert_array_equal(serial, threaded)
    def test_minmax_filter(self):
        data = np.random.randn(500, 500)
        serial = np.empty([4] + list(data.shape))
        threaded = np.empty_like(serial)
        for extreme_filter in (sndi.maximum_filter, sndi.minimum_filter):
            self.check_func_serial(4, extreme_filter, (data, 3), serial)
            self.check_func_thread(4, extreme_filter, (data, 3), threaded)
            assert_array_equal(serial, threaded)
def test_minmaximum_filter1d():
    """Regression test for gh-3898: 1d min/max filters at several modes."""
    data = np.arange(10)
    # A size-1 window leaves the input unchanged for both filters.
    assert_equal(data, sndi.minimum_filter1d(data, 1))
    assert_equal(data, sndi.maximum_filter1d(data, 1))
    # 'reflect' and 'nearest' give identical results on this monotone ramp.
    for mode in ('reflect', 'nearest'):
        assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7],
                     sndi.minimum_filter1d(data, 5, mode=mode))
        assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9],
                     sndi.maximum_filter1d(data, 5, mode=mode))
    # 'constant' padding with explicit fill values.
    assert_equal([-1, -1, 0, 1, 2, 3, 4, 5, -1, -1],
                 sndi.minimum_filter1d(data, 5, mode='constant', cval=-1))
    assert_equal([10, 10, 4, 5, 6, 7, 8, 9, 10, 10],
                 sndi.maximum_filter1d(data, 5, mode='constant', cval=10))
    # 'wrap' padding.
    assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 0, 0],
                 sndi.minimum_filter1d(data, 5, mode='wrap'))
    assert_equal([9, 9, 4, 5, 6, 7, 8, 9, 9, 9],
                 sndi.maximum_filter1d(data, 5, mode='wrap'))
def test_uniform_filter1d_roundoff_errors():
    """Regression test for gh-6930: roundoff buildup in uniform_filter1d."""
    signal = np.repeat([0, 1, 0], [9, 9, 9])
    for width in range(3, 10):
        filtered = sndi.uniform_filter1d(signal, width)
        # The integer-valued sum exposes accumulated floating-point error.
        assert_equal(filtered.sum(), 10 - width)
def test_footprint_all_zeros():
    """Regression test for gh-6876: an all-zero footprint must raise."""
    data = np.random.randint(0, 100, (100, 100))
    empty_footprint = np.zeros((3, 3), bool)
    # Must raise a clean ValueError instead of segfaulting.
    with assert_raises(ValueError):
        sndi.maximum_filter(data, footprint=empty_footprint)
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/scipy/ndimage/tests/test_filters.py
|
Python
|
mit
| 14,796
|
[
"Gaussian"
] |
891598c78114ac96eb196f966bce246929326ba3b0a620d4e51d6a63d1a78edb
|
import os
from ase import Atom, Atoms
from ase.optimize import BFGS
from ase.io import read
from gpaw import GPAW
from gpaw.test import equal
a = 4. # Size of unit cell (Angstrom)
c = a / 2
d = 0.74 # Experimental bond length
molecule = Atoms('H2',
[(c - d / 2, c, c),
(c + d / 2, c, c)],
cell=(a, a, a),
pbc=False)
calc = GPAW(h=0.2, nbands=1, xc='PBE', txt=None)
molecule.set_calculator(calc)
e1 = molecule.get_potential_energy()
niter1 = calc.get_number_of_iterations()
calc.write('H2.gpw')
calc.write('H2a.gpw', mode='all')
molecule.get_forces()
calc.write('H2f.gpw')
calc.write('H2fa.gpw', mode='all')
from time import time
def timer(func, *args, **kwargs):
    """Call *func* with the given arguments and time the call.

    Returns a ``(result, seconds)`` tuple, where ``seconds`` is the
    elapsed wall-clock time.
    """
    start = time()
    result = func(*args, **kwargs)
    elapsed = time() - start
    return result, elapsed
# Reload the calculation from each of the four restart files and time
# how long a force evaluation takes in each case.
molecule = GPAW('H2.gpw', txt=None).get_atoms()
f1, t1 = timer(molecule.get_forces)
molecule = GPAW('H2a.gpw', txt=None).get_atoms()
f2, t2 = timer(molecule.get_forces)
molecule = GPAW('H2f.gpw', txt=None).get_atoms()
f3, t3 = timer(molecule.get_forces)
molecule = GPAW('H2fa.gpw', txt=None).get_atoms()
f4, t4 = timer(molecule.get_forces)
print 'timing:', t1, t2, t3, t4
# Restarts that stored wave functions and/or forces must be faster.
assert t2 < 0.6 * t1
assert t3 < 0.5
assert t4 < 0.5
print f1
print f2
print f3
print f4
# All four restart paths must reproduce essentially the same forces.
assert sum((f1 - f4).ravel()**2) < 1e-6
assert sum((f2 - f4).ravel()**2) < 1e-6
assert sum((f3 - f4).ravel()**2) < 1e-6
positions = molecule.get_positions()
# x-coordinate      x-coordinate
#       v           v
d0 = positions[1, 0] - positions[0, 0]
#              ^                ^
#          second atom      first atom
print 'experimental bond length:'
print 'hydrogen molecule energy: %7.3f eV' % e1
print 'bondlength : %7.3f Ang' % d0
# Find the theoretical bond length:
relax = BFGS(molecule)
relax.run(fmax=0.05)
e2 = molecule.get_potential_energy()
niter2 = calc.get_number_of_iterations()
positions = molecule.get_positions()
# x-coordinate      x-coordinate
#       v           v
d0 = positions[1, 0] - positions[0, 0]
#              ^                ^
#          second atom      first atom
print 'PBE energy minimum:'
print 'hydrogen molecule energy: %7.3f eV' % e2
print 'bondlength : %7.3f Ang' % d0
# Repeat the relaxation starting from the full (mode='all') restart file;
# both runs must agree on energy and bond length.
molecule = GPAW('H2fa.gpw', txt='H2.txt').get_atoms()
relax = BFGS(molecule)
relax.run(fmax=0.05)
e2q = molecule.get_potential_energy()
niter2q = calc.get_number_of_iterations()
positions = molecule.get_positions()
d0q = positions[1, 0] - positions[0, 0]
assert abs(e2 - e2q) < 2e-6
assert abs(d0q - d0) < 4e-4
f0 = molecule.get_forces()
del relax, molecule
from gpaw.mpi import world
world.barrier()  # synchronize before reading text output file
f = read('H2.txt').get_forces()
assert abs(f - f0).max() < 5e-6  # 5 digits in txt file
# Reference energies for regression checking.
energy_tolerance = 0.00005
niter_tolerance = 0
equal(e1, -6.287873, energy_tolerance)
equal(e2, -6.290744, energy_tolerance)
equal(e2q, -6.290744, energy_tolerance)
|
robwarm/gpaw-symm
|
gpaw/test/relax.py
|
Python
|
gpl-3.0
| 3,003
|
[
"ASE",
"GPAW"
] |
c0bd4b975e5364aad0ffa39d0216bb2680a50ce4b2fc77a9e03c39dac85aa104
|
"""Test check utilities."""
# Authors: MNE Developers
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD-3-Clause
import os
import os.path as op
import sys
import numpy as np
import pytest
from pathlib import Path
import mne
from mne import read_vectorview_selection
from mne.datasets import testing
from mne.io.pick import pick_channels_cov
from mne.utils import (check_random_state, _check_fname, check_fname,
_check_subject, requires_mayavi, traits_test,
_check_mayavi_version, _check_info_inv, _check_option,
check_version, _path_like, _validate_type,
_suggest, _on_missing, requires_nibabel, _safe_input)
# Locations inside the optional MNE testing dataset (the decorated tests
# below are skipped when the dataset is not downloaded).
data_path = testing.data_path(download=False)
base_dir = op.join(data_path, 'MEG', 'sample')
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_event = op.join(base_dir, 'sample_audvis_trunc_raw-eve.fif')
fname_fwd = op.join(base_dir, 'sample_audvis_trunc-meg-vol-7-fwd.fif')
fname_mgz = op.join(data_path, 'subjects', 'sample', 'mri', 'aseg.mgz')
# Peak-to-peak epoch rejection thresholds (gradiometers / magnetometers).
reject = dict(grad=4000e-13, mag=4e-12)
@testing.requires_testing_data
def test_check(tmpdir):
    """Test checking functions.

    Covers check_random_state, _check_fname, check_fname and
    _check_subject with both valid and invalid inputs.
    """
    pytest.raises(ValueError, check_random_state, 'foo')
    pytest.raises(TypeError, _check_fname, 1)
    _check_fname(Path('./foo'))
    fname = str(tmpdir.join('foo'))
    with open(fname, 'wb'):
        pass
    assert op.isfile(fname)
    _check_fname(fname, overwrite='read', must_exist=True)
    orig_perms = os.stat(fname).st_mode
    os.chmod(fname, 0)
    # The unreadable-file check is skipped on Windows (see the platform
    # guard below).
    if not sys.platform.startswith('win'):
        with pytest.raises(PermissionError, match='read permissions'):
            _check_fname(fname, overwrite='read', must_exist=True)
    os.chmod(fname, orig_perms)
    os.remove(fname)
    assert not op.isfile(fname)
    pytest.raises(IOError, check_fname, 'foo', 'tets-dip.x', (), ('.fif',))
    pytest.raises(ValueError, _check_subject, None, None)
    pytest.raises(TypeError, _check_subject, None, 1)
    pytest.raises(TypeError, _check_subject, 1, None)
    # smoke tests for permitted types
    check_random_state(None).choice(1)
    check_random_state(0).choice(1)
    check_random_state(np.random.RandomState(0)).choice(1)
    # Generator support requires NumPy >= 1.17.
    if check_version('numpy', '1.17'):
        check_random_state(np.random.default_rng(0)).choice(1)
@testing.requires_testing_data
@pytest.mark.parametrize('suffix',
                         ('_meg.fif', '_eeg.fif', '_ieeg.fif',
                          '_meg.fif.gz', '_eeg.fif.gz', '_ieeg.fif.gz'))
def test_check_fname_suffixes(suffix, tmpdir):
    """Test checking for valid filename suffixes.

    Round-trips a short raw file under each allowed suffix: saving and
    re-reading must both succeed without a suffix warning/error.
    """
    new_fname = str(tmpdir.join(op.basename(fname_raw)
                                .replace('_raw.fif', suffix)))
    raw = mne.io.read_raw_fif(fname_raw).crop(0, 0.1)
    raw.save(new_fname)
    mne.io.read_raw_fif(new_fname)
@requires_mayavi
@traits_test
def test_check_mayavi():
    """Test mayavi version check."""
    # An impossibly high minimum version must make the check fail.
    pytest.raises(RuntimeError, _check_mayavi_version, '100.0.0')
def _get_data():
    """Read in data used in tests.

    Returns a ``(epochs, data_cov, noise_cov, forward)`` tuple built from
    the sample testing dataset.
    """
    # read forward model
    forward = mne.read_forward_solution(fname_fwd)
    # read data
    raw = mne.io.read_raw_fif(fname_raw, preload=True)
    events = mne.read_events(fname_event)
    event_id, tmin, tmax = 1, -0.1, 0.15
    # decimate for speed: keep every other left-temporal MEG channel
    left_temporal_channels = read_vectorview_selection('Left-temporal')
    picks = mne.pick_types(raw.info, meg=True,
                           selection=left_temporal_channels)
    picks = picks[::2]
    raw.pick_channels([raw.ch_names[ii] for ii in picks])
    del picks
    raw.info.normalize_proj()  # avoid projection warnings
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                        baseline=(None, 0.), preload=True, reject=reject)
    # Noise covariance from the pre-stimulus period, data covariance from
    # the post-stimulus window.
    noise_cov = mne.compute_covariance(epochs, tmin=None, tmax=0.)
    data_cov = mne.compute_covariance(epochs, tmin=0.01, tmax=0.15)
    return epochs, data_cov, noise_cov, forward
@testing.requires_testing_data
def test_check_info_inv():
    """Test checks for common channels across fwd model and cov matrices.

    Bad channels flagged in the info, the data covariance or the noise
    covariance must all be excluded from the returned picks.
    """
    epochs, data_cov, noise_cov, forward = _get_data()
    # make sure same channel lists exist in data to make testing life easier
    assert epochs.info['ch_names'] == data_cov.ch_names
    assert epochs.info['ch_names'] == noise_cov.ch_names
    # check whether bad channels get excluded from the channel selection
    # info
    info_bads = epochs.info.copy()
    info_bads['bads'] = info_bads['ch_names'][1:3]  # include two bad channels
    picks = _check_info_inv(info_bads, forward, noise_cov=noise_cov)
    assert [1, 2] not in picks
    # covariance matrix
    data_cov_bads = data_cov.copy()
    data_cov_bads['bads'] = data_cov_bads.ch_names[0]
    picks = _check_info_inv(epochs.info, forward, data_cov=data_cov_bads)
    assert 0 not in picks
    # noise covariance matrix
    noise_cov_bads = noise_cov.copy()
    noise_cov_bads['bads'] = noise_cov_bads.ch_names[1]
    picks = _check_info_inv(epochs.info, forward, noise_cov=noise_cov_bads)
    assert 1 not in picks
    # test whether reference channels get deleted
    info_ref = epochs.info.copy()
    info_ref['chs'][0]['kind'] = 301  # pretend to have a ref channel
    picks = _check_info_inv(info_ref, forward, noise_cov=noise_cov)
    assert 0 not in picks
    # pick channels in all inputs and make sure common set is returned
    epochs.pick_channels([epochs.ch_names[ii] for ii in range(10)])
    data_cov = pick_channels_cov(data_cov, include=[data_cov.ch_names[ii]
                                                    for ii in range(5, 20)])
    noise_cov = pick_channels_cov(noise_cov, include=[noise_cov.ch_names[ii]
                                                      for ii in range(7, 12)])
    # The intersection of channels 0-9, 5-19 and 7-11 is 7-9.
    picks = _check_info_inv(epochs.info, forward, noise_cov=noise_cov,
                            data_cov=data_cov)
    assert list(range(7, 10)) == picks
def test_check_option():
    """Test checking the value of a parameter against a list of options."""
    permitted = ['valid', 'good', 'ok']
    # Every permitted value must be accepted (truthy return).
    for value in permitted:
        assert _check_option('option', value, permitted)
    assert _check_option('option', 'valid', ['valid'])
    # An invalid value raises with a message naming all allowed values.
    msg = ("Invalid value for the 'option' parameter. Allowed values are "
           "'valid', 'good', and 'ok', but got 'bad' instead.")
    with pytest.raises(ValueError, match=msg):
        _check_option('option', 'bad', permitted)
    # With a single allowed value the wording changes.
    msg = ("Invalid value for the 'option' parameter. The only allowed value "
           "is 'valid', but got 'bad' instead.")
    with pytest.raises(ValueError, match=msg):
        _check_option('option', 'bad', ['valid'])
def test_path_like():
    """Test _path_like()."""
    # Both plain strings and pathlib.Path objects count as path-like;
    # a mapping does not.
    assert _path_like(str(base_dir)) is True
    assert _path_like(Path(base_dir)) is True
    assert _path_like(dict(foo='bar')) is False
def test_validate_type():
    """Test _validate_type."""
    # A plain int qualifies as "int-like"; a bool must be rejected.
    _validate_type(1, 'int-like')
    with pytest.raises(TypeError, match='int-like'):
        _validate_type(False, 'int-like')
@requires_nibabel()
@testing.requires_testing_data
def test_suggest():
    """Test suggestions.

    Near-miss label names should produce "Did you mean ...?" hints based
    on the aseg volume labels.
    """
    names = mne.get_volume_labels_from_aseg(fname_mgz)
    sug = _suggest('', names)
    assert sug == ''  # nothing
    sug = _suggest('Left-cerebellum', names)
    assert sug == " Did you mean 'Left-Cerebellum-Cortex'?"
    sug = _suggest('Cerebellum-Cortex', names)
    assert sug == " Did you mean one of ['Left-Cerebellum-Cortex', 'Right-Cerebellum-Cortex', 'Left-Cerebral-Cortex']?"  # noqa: E501
def test_on_missing():
    """Test _on_missing.

    'raise' raises, 'warn' warns, 'ignore' is silent, and anything else
    is itself an invalid parameter value.
    """
    msg = 'test'
    with pytest.raises(ValueError, match=msg):
        _on_missing('raise', msg)
    with pytest.warns(RuntimeWarning, match=msg):
        _on_missing('warn', msg)
    _on_missing('ignore', msg)
    with pytest.raises(ValueError,
                       match='Invalid value for the \'on_missing\' parameter'):
        _on_missing('foo', msg)
def _matlab_input(msg):
raise EOFError()
def test_safe_input(monkeypatch):
    """Test _safe_input.

    With input() replaced by an EOF-raising stub, _safe_input must raise
    unless a 'use' fallback is supplied.
    """
    monkeypatch.setattr(mne.utils.check, 'input', _matlab_input)
    with pytest.raises(RuntimeError, match='Could not use input'):
        _safe_input('whatever', alt='nothing')
    assert _safe_input('whatever', use='nothing') == 'nothing'
|
pravsripad/mne-python
|
mne/utils/tests/test_check.py
|
Python
|
bsd-3-clause
| 8,752
|
[
"Mayavi"
] |
723033fafc06ea8016915ea6fc2b67e3701dae1845a6a1ccd8ec0b853d460f8e
|
# Copyright 2006-2013 by Peter Cock. All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.AlignIO support for "stockholm" format (used in the PFAM database).
You are expected to use this module via the Bio.AlignIO functions (or the
Bio.SeqIO functions if you want to work directly with the gapped sequences).
For example, consider a Stockholm alignment file containing the following::
# STOCKHOLM 1.0
#=GC SS_cons .................<<<<<<<<...<<<<<<<........>>>>>>>..
AP001509.1 UUAAUCGAGCUCAACACUCUUCGUAUAUCCUC-UCAAUAUGG-GAUGAGGGU
#=GR AP001509.1 SS -----------------<<<<<<<<---..<<-<<-------->>->>..--
AE007476.1 AAAAUUGAAUAUCGUUUUACUUGUUUAU-GUCGUGAAU-UGG-CACGA-CGU
#=GR AE007476.1 SS -----------------<<<<<<<<-----<<.<<-------->>.>>----
#=GC SS_cons ......<<<<<<<.......>>>>>>>..>>>>>>>>...............
AP001509.1 CUCUAC-AGGUA-CCGUAAA-UACCUAGCUACGAAAAGAAUGCAGUUAAUGU
#=GR AP001509.1 SS -------<<<<<--------->>>>>--->>>>>>>>---------------
AE007476.1 UUCUACAAGGUG-CCGG-AA-CACCUAACAAUAAGUAAGUCAGCAGUGAGAU
#=GR AE007476.1 SS ------.<<<<<--------->>>>>.-->>>>>>>>---------------
//
This is a single multiple sequence alignment, so you would probably load this
using the Bio.AlignIO.read() function:
>>> from Bio import AlignIO
>>> align = AlignIO.read("Stockholm/simple.sth", "stockholm")
>>> print(align)
SingleLetterAlphabet() alignment with 2 rows and 104 columns
UUAAUCGAGCUCAACACUCUUCGUAUAUCCUC-UCAAUAUGG-G...UGU AP001509.1
AAAAUUGAAUAUCGUUUUACUUGUUUAU-GUCGUGAAU-UGG-C...GAU AE007476.1
>>> for record in align:
... print("%s %i" % (record.id, len(record)))
AP001509.1 104
AE007476.1 104
This example file is clearly using RNA, so you might want the alignment object
(and the SeqRecord objects it holds) to reflect this, rather than simply using
the default single letter alphabet as shown above. You can do this with an
optional argument to the Bio.AlignIO.read() function:
>>> from Bio import AlignIO
>>> from Bio.Alphabet import generic_rna
>>> align = AlignIO.read("Stockholm/simple.sth", "stockholm",
... alphabet=generic_rna)
>>> print(align)
RNAAlphabet() alignment with 2 rows and 104 columns
UUAAUCGAGCUCAACACUCUUCGUAUAUCCUC-UCAAUAUGG-G...UGU AP001509.1
AAAAUUGAAUAUCGUUUUACUUGUUUAU-GUCGUGAAU-UGG-C...GAU AE007476.1
In addition to the sequences themselves, this example alignment also includes
some GR lines for the secondary structure of the sequences. These are
strings, with one character for each letter in the associated sequence:
>>> for record in align:
... print(record.id)
... print(record.seq)
... print(record.letter_annotations['secondary_structure'])
AP001509.1
UUAAUCGAGCUCAACACUCUUCGUAUAUCCUC-UCAAUAUGG-GAUGAGGGUCUCUAC-AGGUA-CCGUAAA-UACCUAGCUACGAAAAGAAUGCAGUUAAUGU
-----------------<<<<<<<<---..<<-<<-------->>->>..---------<<<<<--------->>>>>--->>>>>>>>---------------
AE007476.1
AAAAUUGAAUAUCGUUUUACUUGUUUAU-GUCGUGAAU-UGG-CACGA-CGUUUCUACAAGGUG-CCGG-AA-CACCUAACAAUAAGUAAGUCAGCAGUGAGAU
-----------------<<<<<<<<-----<<.<<-------->>.>>----------.<<<<<--------->>>>>.-->>>>>>>>---------------
Any general annotation for each row is recorded in the SeqRecord's annotations
dictionary. You can output this alignment in many different file formats
using Bio.AlignIO.write(), or the MultipleSeqAlignment object's format method:
>>> print(align.format("fasta"))
>AP001509.1
UUAAUCGAGCUCAACACUCUUCGUAUAUCCUC-UCAAUAUGG-GAUGAGGGUCUCUAC-A
GGUA-CCGUAAA-UACCUAGCUACGAAAAGAAUGCAGUUAAUGU
>AE007476.1
AAAAUUGAAUAUCGUUUUACUUGUUUAU-GUCGUGAAU-UGG-CACGA-CGUUUCUACAA
GGUG-CCGG-AA-CACCUAACAAUAAGUAAGUCAGCAGUGAGAU
<BLANKLINE>
Most output formats won't be able to hold the annotation possible in a
Stockholm file:
>>> print(align.format("stockholm"))
# STOCKHOLM 1.0
#=GF SQ 2
AP001509.1 UUAAUCGAGCUCAACACUCUUCGUAUAUCCUC-UCAAUAUGG-GAUGAGGGUCUCUAC-AGGUA-CCGUAAA-UACCUAGCUACGAAAAGAAUGCAGUUAAUGU
#=GS AP001509.1 AC AP001509.1
#=GS AP001509.1 DE AP001509.1
#=GR AP001509.1 SS -----------------<<<<<<<<---..<<-<<-------->>->>..---------<<<<<--------->>>>>--->>>>>>>>---------------
AE007476.1 AAAAUUGAAUAUCGUUUUACUUGUUUAU-GUCGUGAAU-UGG-CACGA-CGUUUCUACAAGGUG-CCGG-AA-CACCUAACAAUAAGUAAGUCAGCAGUGAGAU
#=GS AE007476.1 AC AE007476.1
#=GS AE007476.1 DE AE007476.1
#=GR AE007476.1 SS -----------------<<<<<<<<-----<<.<<-------->>.>>----------.<<<<<--------->>>>>.-->>>>>>>>---------------
//
<BLANKLINE>
Note that when writing Stockholm files, AlignIO does not break long sequences
up and interleave them (as in the input file shown above). The standard
allows this simpler layout, and it is more likely to be understood by other
tools.
Finally, as an aside, it can sometimes be useful to use Bio.SeqIO.parse() to
iterate over the alignment rows as SeqRecord objects - rather than working
with Alignment objects. Again, if you want to, you can specify this is RNA:
>>> from Bio import SeqIO
>>> from Bio.Alphabet import generic_rna
>>> for record in SeqIO.parse("Stockholm/simple.sth", "stockholm",
... alphabet=generic_rna):
... print(record.id)
... print(record.seq)
... print(record.letter_annotations['secondary_structure'])
AP001509.1
UUAAUCGAGCUCAACACUCUUCGUAUAUCCUC-UCAAUAUGG-GAUGAGGGUCUCUAC-AGGUA-CCGUAAA-UACCUAGCUACGAAAAGAAUGCAGUUAAUGU
-----------------<<<<<<<<---..<<-<<-------->>->>..---------<<<<<--------->>>>>--->>>>>>>>---------------
AE007476.1
AAAAUUGAAUAUCGUUUUACUUGUUUAU-GUCGUGAAU-UGG-CACGA-CGUUUCUACAAGGUG-CCGG-AA-CACCUAACAAUAAGUAAGUCAGCAGUGAGAU
-----------------<<<<<<<<-----<<.<<-------->>.>>----------.<<<<<--------->>>>>.-->>>>>>>>---------------
Remember that if you slice a SeqRecord, the per-letter-annotions like the
secondary structure string here, are also sliced:
>>> sub_record = record[10:20]
>>> print(sub_record.seq)
AUCGUUUUAC
>>> print(sub_record.letter_annotations['secondary_structure'])
-------<<<
"""
from __future__ import print_function
import sys
# Add path to Bio
sys.path.append('../..')
__docformat__ = "restructuredtext en" # not just plaintext
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Align import MultipleSeqAlignment
from Bio.AlignIO.Interfaces import AlignmentIterator, SequentialAlignmentWriter
class StockholmWriter(SequentialAlignmentWriter):
    """Stockholm/PFAM alignment writer.

    Writes one "# STOCKHOLM 1.0" block per alignment, with each record's
    #=GS/#=GR annotation placed directly below its sequence line.
    """

    # These dictionaries should be kept in sync with those
    # defined in the StockholmIterator class.
    # Maps SeqRecord.letter_annotations keys to PFAM #=GR feature codes.
    pfam_gr_mapping = {"secondary_structure": "SS",
                       "surface_accessibility": "SA",
                       "transmembrane": "TM",
                       "posterior_probability": "PP",
                       "ligand_binding": "LI",
                       "active_site": "AS",
                       "intron": "IN"}
    # Following dictionary deliberately does not cover AC, DE or DR
    # (those are written from SeqRecord.id/description/dbxrefs instead).
    pfam_gs_mapping = {"organism": "OS",
                       "organism_classification": "OC",
                       "look": "LO"}

    def write_alignment(self, alignment):
        """Use this to write (another) single alignment to an open file.

        Note that sequences and their annotation are recorded
        together (rather than having a block of annotation followed
        by a block of aligned sequences).

        Raises ValueError for an empty alignment or zero-length sequences.
        """
        count = len(alignment)

        self._length_of_sequences = alignment.get_alignment_length()
        self._ids_written = []

        # NOTE - For now, the alignment object does not hold any per column
        # or per alignment annotation - only per sequence.

        if count == 0:
            raise ValueError("Must have at least one sequence")
        if self._length_of_sequences == 0:
            raise ValueError("Non-empty sequences are required")

        self.handle.write("# STOCKHOLM 1.0\n")
        self.handle.write("#=GF SQ %i\n" % count)
        for record in alignment:
            self._write_record(record)
        self.handle.write("//\n")

    def _write_record(self, record):
        """Write a single SeqRecord to the file."""
        if self._length_of_sequences != len(record.seq):
            raise ValueError("Sequences must all be the same length")

        # For the case for stockholm to stockholm, try and use record.name
        seq_name = record.id
        if record.name is not None:
            if "accession" in record.annotations:
                if record.id == record.annotations["accession"]:
                    seq_name = record.name

        # In the Stockholm file format, spaces are not allowed in the id
        seq_name = seq_name.replace(" ", "_")

        # Append a /start-end suffix (unless the name already ends in one).
        if "start" in record.annotations \
        and "end" in record.annotations:
            suffix = "/%s-%s" % (str(record.annotations["start"]),
                                 str(record.annotations["end"]))
            if seq_name[-len(suffix):] != suffix:
                seq_name = "%s/%s-%s" % (seq_name,
                                         str(record.annotations["start"]),
                                         str(record.annotations["end"]))

        if seq_name in self._ids_written:
            raise ValueError("Duplicate record identifier: %s" % seq_name)
        self._ids_written.append(seq_name)
        self.handle.write("%s %s\n" % (seq_name, str(record.seq)))

        # The recommended placement for GS lines (per sequence annotation)
        # is above the alignment (as a header block) or just below the
        # corresponding sequence.
        #
        # The recommended placement for GR lines (per sequence per column
        # annotation such as secondary structure) is just below the
        # corresponding sequence.
        #
        # We put both just below the corresponding sequence as this allows
        # us to write the file using a single pass through the records.

        # AC = Accession
        if "accession" in record.annotations:
            self.handle.write("#=GS %s AC %s\n"
                % (seq_name, self.clean(record.annotations["accession"])))
        elif record.id:
            self.handle.write("#=GS %s AC %s\n"
                % (seq_name, self.clean(record.id)))

        # DE = description
        if record.description:
            self.handle.write("#=GS %s DE %s\n"
                % (seq_name, self.clean(record.description)))

        # DE = database links
        for xref in record.dbxrefs:
            self.handle.write("#=GS %s DR %s\n"
                % (seq_name, self.clean(xref)))

        # GS = other per sequence annotation
        for key, value in record.annotations.items():
            if key in self.pfam_gs_mapping:
                data = self.clean(str(value))
                if data:
                    self.handle.write("#=GS %s %s %s\n"
                                      % (seq_name,
                                         self.clean(self.pfam_gs_mapping[key]),
                                         data))
            else:
                # It doesn't follow the PFAM standards, but should we record
                # this data anyway?
                pass

        # GR = per row per column sequence annotation
        for key, value in record.letter_annotations.items():
            if key in self.pfam_gr_mapping and len(str(value)) == len(record.seq):
                data = self.clean(str(value))
                if data:
                    self.handle.write("#=GR %s %s %s\n"
                                      % (seq_name,
                                         self.clean(self.pfam_gr_mapping[key]),
                                         data))
            else:
                # It doesn't follow the PFAM standards, but should we record
                # this data anyway?
                pass
class StockholmIterator(AlignmentIterator):
    """Loads a Stockholm file from PFAM into MultipleSeqAlignment objects.

    The file may contain multiple concatenated alignments, which are loaded
    and returned incrementally.

    This parser will detect if the Stockholm file follows the PFAM
    conventions for sequence specific meta-data (lines starting #=GS
    and #=GR) and populates the SeqRecord fields accordingly.

    Any annotation which does not follow the PFAM conventions is currently
    ignored.

    If an accession is provided for an entry in the meta data, IT WILL NOT
    be used as the record.id (it will be recorded in the record's
    annotations).  This is because some files have (sub) sequences from
    different parts of the same accession (differentiated by different
    start-end positions).

    Wrap-around alignments are not supported - each sequence must be on
    a single line.  However, interlaced sequences should work.

    For more information on the file format, please see:
    http://www.bioperl.org/wiki/Stockholm_multiple_alignment_format
    http://www.cgb.ki.se/cgb/groups/sonnhammer/Stockholm.html

    For consistency with BioPerl and EMBOSS we call this the "stockholm"
    format.
    """

    # These dictionaries should be kept in sync with those
    # defined in the PfamStockholmWriter class.
    # Maps PFAM #=GR feature codes to SeqRecord.letter_annotations keys.
    pfam_gr_mapping = {"SS": "secondary_structure",
                       "SA": "surface_accessibility",
                       "TM": "transmembrane",
                       "PP": "posterior_probability",
                       "LI": "ligand_binding",
                       "AS": "active_site",
                       "IN": "intron"}
    # Following dictionary deliberately does not cover AC, DE or DR
    # (those are handled explicitly in _populate_meta_data).
    pfam_gs_mapping = {"OS": "organism",
                       "OC": "organism_classification",
                       "LO": "look"}

    def __next__(self):
        """Parse and return the next alignment in the file.

        Raises StopIteration at end of file and ValueError on a missing
        Stockholm header or inconsistent sequence lengths.
        """
        try:
            # A previous call may have read ahead into the next alignment's
            # header line; reuse it if so.
            line = self._header
            del self._header
        except AttributeError:
            line = self.handle.readline()
        if not line:
            # Empty file - just give up.
            raise StopIteration
        if not line.strip() == '# STOCKHOLM 1.0':
            raise ValueError("Did not find STOCKHOLM header")

        # Note: If this file follows the PFAM conventions, there should be
        # a line containing the number of sequences, e.g. "#=GF SQ 67"
        # We do not check for this - perhaps we should, and verify that
        # if present it agrees with our parsing.

        seqs = {}
        ids = []
        gs = {}
        gr = {}
        gf = {}
        passed_end_alignment = False
        while True:
            line = self.handle.readline()
            if not line:
                break  # end of file
            line = line.strip()  # remove trailing \n
            if line == '# STOCKHOLM 1.0':
                self._header = line
                break
            elif line == "//":
                # The "//" line indicates the end of the alignment.
                # There may still be more meta-data
                passed_end_alignment = True
            elif line == "":
                # blank line, ignore
                pass
            elif line[0] != "#":
                # Sequence
                # Format: "<seqname> <sequence>"
                assert not passed_end_alignment
                parts = [x.strip() for x in line.split(" ", 1)]
                if len(parts) != 2:
                    # This might be someone attempting to store a zero length sequence?
                    raise ValueError("Could not split line into identifier "
                                      + "and sequence:\n" + line)
                id, seq = parts
                if id not in ids:
                    ids.append(id)
                # Note '.' gap characters are normalised to '-'.
                seqs.setdefault(id, '')
                seqs[id] += seq.replace(".", "-")
            elif len(line) >= 5:
                # Comment line or meta-data
                if line[:5] == "#=GF ":
                    # Generic per-File annotation, free text
                    # Format: #=GF <feature> <free text>
                    feature, text = line[5:].strip().split(None, 1)
                    # Each feature key could be used more than once,
                    # so store the entries as a list of strings.
                    if feature not in gf:
                        gf[feature] = [text]
                    else:
                        gf[feature].append(text)
                elif line[:5] == '#=GC ':
                    # Generic per-Column annotation, exactly 1 char per column
                    # Format: "#=GC <feature> <exactly 1 char per column>"
                    pass
                elif line[:5] == '#=GS ':
                    # Generic per-Sequence annotation, free text
                    # Format: "#=GS <seqname> <feature> <free text>"
                    id, feature, text = line[5:].strip().split(None, 2)
                    # if id not in ids:
                    #    ids.append(id)
                    if id not in gs:
                        gs[id] = {}
                    if feature not in gs[id]:
                        gs[id][feature] = [text]
                    else:
                        gs[id][feature].append(text)
                elif line[:5] == "#=GR ":
                    # Generic per-Sequence AND per-Column markup
                    # Format: "#=GR <seqname> <feature> <exactly 1 char per column>"
                    id, feature, text = line[5:].strip().split(None, 2)
                    # if id not in ids:
                    #    ids.append(id)
                    if id not in gr:
                        gr[id] = {}
                    if feature not in gr[id]:
                        gr[id][feature] = ""
                    gr[id][feature] += text.strip()  # append to any previous entry
                    # TODO - Should we check the length matches the alignment length?
                    #        For interlaced sequences the GR data can be split over
                    #        multiple lines
            # Next line...

        assert len(seqs) <= len(ids)
        # assert len(gs)   <= len(ids)
        # assert len(gr)   <= len(ids)

        self.ids = ids
        self.sequences = seqs
        self.seq_annotation = gs
        self.seq_col_annotation = gr

        if ids and seqs:

            if self.records_per_alignment is not None \
            and self.records_per_alignment != len(ids):
                raise ValueError("Found %i records in this alignment, told to expect %i"
                                 % (len(ids), self.records_per_alignment))

            alignment_length = len(list(seqs.values())[0])
            records = []  # Alignment obj will put them all in a list anyway
            for id in ids:
                seq = seqs[id]
                if alignment_length != len(seq):
                    raise ValueError("Sequences have different lengths, or repeated identifier")
                name, start, end = self._identifier_split(id)
                record = SeqRecord(Seq(seq, self.alphabet),
                                   id=id, name=name, description=id,
                                   annotations={"accession": name})
                # Accession will be overridden by _populate_meta_data if an explicit
                # accession is provided:
                record.annotations["accession"] = name

                if start is not None:
                    record.annotations["start"] = start
                if end is not None:
                    record.annotations["end"] = end

                self._populate_meta_data(id, record)
                records.append(record)
            alignment = MultipleSeqAlignment(records, self.alphabet)

            # TODO - Introduce an annotated alignment class?
            # For now, store the annotation a new private property:
            alignment._annotations = gr

            return alignment
        else:
            raise StopIteration

    def _identifier_split(self, identifier):
        """Return a (name, start, end) tuple from an identifier.

        start/end are ints parsed from a trailing "/start-end" suffix, or
        None when no such suffix is present.
        """
        if '/' in identifier:
            name, start_end = identifier.rsplit("/", 1)
            if start_end.count("-") == 1:
                try:
                    start, end = start_end.split("-")
                    return name, int(start), int(end)
                except ValueError:
                    # Non-integers after final '/' - fall through
                    pass
        return identifier, None, None

    def _get_meta_data(self, identifier, meta_dict):
        """Take an identifier and return dict of all meta-data matching it.

        For example, given "Q9PN73_CAMJE/149-220" will return all matches to
        this or "Q9PN73_CAMJE" which the identifier without its /start-end
        suffix.

        In the example below, the suffix is required to match the AC, but must
        be removed to match the OS and OC meta-data::

            # STOCKHOLM 1.0
            #=GS Q9PN73_CAMJE/149-220  AC Q9PN73
            ...
            Q9PN73_CAMJE/149-220               NKA...
            ...
            #=GS Q9PN73_CAMJE OS Campylobacter jejuni
            #=GS Q9PN73_CAMJE OC Bacteria

        This function will return an empty dictionary if no data is found."""
        name, start, end = self._identifier_split(identifier)
        if name == identifier:
            identifier_keys = [identifier]
        else:
            identifier_keys = [identifier, name]
        answer = {}
        for identifier_key in identifier_keys:
            try:
                for feature_key in meta_dict[identifier_key]:
                    answer[feature_key] = meta_dict[identifier_key][feature_key]
            except KeyError:
                pass
        return answer

    def _populate_meta_data(self, identifier, record):
        """Add meta-data to a SeqRecord's annotations dictionary.

        This function applies the PFAM conventions."""
        seq_data = self._get_meta_data(identifier, self.seq_annotation)
        for feature in seq_data:
            # Note this dictionary contains lists!
            if feature == "AC":  # ACcession number
                assert len(seq_data[feature]) == 1
                record.annotations["accession"] = seq_data[feature][0]
            elif feature == "DE":  # DEscription
                record.description = "\n".join(seq_data[feature])
            elif feature == "DR":  # Database Reference
                # Should we try and parse the strings?
                record.dbxrefs = seq_data[feature]
            elif feature in self.pfam_gs_mapping:
                record.annotations[self.pfam_gs_mapping[feature]] = ", ".join(seq_data[feature])
            else:
                # Ignore it?
                record.annotations["GS:" + feature] = ", ".join(seq_data[feature])

        # Now record the per-letter-annotations
        seq_col_data = self._get_meta_data(identifier, self.seq_col_annotation)
        for feature in seq_col_data:
            # Note this dictionary contains strings!
            if feature in self.pfam_gr_mapping:
                record.letter_annotations[self.pfam_gr_mapping[feature]] = seq_col_data[feature]
            else:
                # Ignore it?
                record.letter_annotations["GR:" + feature] = seq_col_data[feature]
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/AlignIO/StockholmIO.py
|
Python
|
gpl-2.0
| 23,641
|
[
"BioPerl",
"Biopython"
] |
b656f78d78800d5ffba2b768116ce536f369656cde0c938c5f9ee7d8c8f7faf1
|
#!/usr/bin/env python
# Author: Brian Tomlinson
# Contact: brian@brianctomlinson.com, darthlukan@gmail.com
# Description: Simple update notifier for Arch Linux,
# meant to be run via ~/.config/autostart/lupdater.desktop but
# can also be run from the applications menu.
# License: GPLv2
import os
import pwd
import sys
import time
import notify2
import logging
import platform
import subprocess
from PyQt4 import QtGui
class SystemTrayIcon(QtGui.QSystemTrayIcon):
    """Tray icon with a context menu (About / Check Updates / Exit)."""

    def __init__(self, icon, parent=None):
        QtGui.QSystemTrayIcon.__init__(self, icon, parent)
        menu = QtGui.QMenu(parent)
        menu.addAction("About", self.about)
        menu.addAction("Check Updates", self.check_updates)
        menu.addAction("Exit", self.exit)
        self.setContextMenu(menu)

    def exit(self):
        # Quit the whole application from the tray menu.
        sys.exit(0)

    def check_updates(self):
        # Dispatch to the backend matching the distro detected by Setup.
        system = Setup().distro()
        if system['distro'] == 'arch':
            return Setup().arch()
        elif system['distro'] == 'debian':
            return Setup().debian()
        elif system['distro'] == 'redhat':
            return Setup().redhat()
        else:
            return note_set_send(title="Not yet implemented", body="")

    def about(self):
        # Placeholder -- the About dialog is not implemented yet.
        note_set_send(title="Not yet implemented", body="")
class Setup(object):
    """Detect the running distribution and hold package-manager settings."""

    def __init__(self):
        # Shared state used by the package-manager backends.
        self.paclist = []       # lines describing pending updates
        self.critical = []      # updates that require a restart
        self.numupdates = 0
        self.system = {'distro': '',
                       'pkgmgr': '',
                       'repoupd': '',
                       'pkglist': ''}

    def distro(self):
        """Fill and return ``self.system`` for the detected distribution.

        Raises NotImplementedError for unsupported distributions.
        NOTE(review): platform.linux_distribution() was removed in
        Python 3.8 -- confirm the target interpreter still provides it.
        """
        debian = ['ubuntu', 'linuxmint', 'soluos', 'debian', 'peppermint']
        redhat = ['redhat', 'fedora', 'centos']
        distro = platform.linux_distribution()[0].lower()
        if distro == 'arch':
            self.system['distro'] = 'arch'
            self.system['pkgmgr'] = 'pacman'
            self.system['repoupd'] = '-Syy'
            self.system['pkglist'] = '-Qu'
        elif distro in debian:
            self.system['distro'] = 'debian'
            self.system['pkgmgr'] = 'apt-get'
            self.system['repoupd'] = 'update'
            self.system['pkglist'] = '-s upgrade'
        elif distro in redhat:
            self.system['distro'] = 'redhat'
            self.system['pkgmgr'] = 'yum'
            self.system['repoupd'] = 'update'
            self.system['pkglist'] = 'list updates'
        else:
            print('Your distribution is not supported.')
            raise NotImplementedError
        return self.system

    def user(self):
        # Placeholder -- per-user configuration not implemented yet.
        pass

    def arch(self):
        """Run the pacman backend: refresh repos and list updates."""
        p = Pacman()
        p.update()
        p.list_packs()
        return True

    def debian(self):
        """Run the apt backend (currently a stub -- see class Apt)."""
        a = Apt()
        a.update()
        a.list_packs()
        return True

    def redhat(self):
        """Run the yum backend (currently a stub -- see class Redhat)."""
        r = Redhat()
        r.update()
        r.list_packs()
        return True
class Log(object):
    """Thin wrapper around the stdlib logging module for update events."""

    def __init__(self):
        # All entries go to a fixed temp-file log at DEBUG level.
        logging.basicConfig(filename='/tmp/lupdater.log', level=logging.DEBUG)

    def updates(self, numupdates):
        """Record how many package updates were found (or that none were)."""
        if numupdates > 0:
            message = ': lupdater had %s updates available.\n' % numupdates
        else:
            message = ': No updates available, system is up to date.'
        logging.info(time.ctime() + message)

    def critical(self, crit):
        """Record that a critical (restart-required) update was detected."""
        if crit > 0:
            logging.info(time.ctime() + ': Critical update detected, user notified via notify-send.')
class Pacman(Setup):
    """Provides functions to call pacman and update the repos, as well as
    return a list with number of updates. """

    def __init__(self):
        # Borrow the detected system settings and shared lists from Setup.
        setup = Setup()
        self.system = setup.system if False else setup.distro()  # noqa -- see note
        self.paclist = setup.paclist
        self.critical = setup.critical
        self.numupdates = setup.numupdates

    def update(self):
        """Updates the repositories, notifies the user."""
        pkgmgr = self.system['pkgmgr']
        repoupd = self.system['repoupd']
        # Requires passwordless sudo for pacman -Syy to work unattended.
        args = ["/usr/bin/sudo", pkgmgr, repoupd]
        note_set_send(title="Updating repos", body="Getting latest package lists.")
        upd = subprocess.Popen(args, shell=False, stdout=subprocess.PIPE)
        stdout, stderr = upd.communicate()
        return True

    def list_packs(self):
        """Creates a list of packages needing to be updated and counts them,
        displays the count in a notification for user action."""
        pkgmgr = self.system['pkgmgr']
        pkglist = self.system['pkglist']
        args = [pkgmgr, pkglist]
        note_set_send(title="Checking packages", body="...")
        # Clean up the list from previous checks so that we keep an accurate count.
        if len(self.paclist) > 0:
            for i in self.paclist:
                self.paclist.remove(i)
        lst = subprocess.Popen(args, shell=False, stdout=subprocess.PIPE)
        for line in lst.stdout:
            line = str(line, encoding="utf8")
            line.rstrip("\r\n")
            self.paclist.append(line)
        self.numupdates = len(self.paclist)
        if self.numupdates >= 1:
            note_set_send(title="Updates Available!", body="You have %i updates available." % self.numupdates)
            self.check_critical(self.paclist)
        else:
            note_set_send(title="Nothing to do!", body="Your system is up to date.")
        return self.paclist

    def check_critical(self, paclist):
        """Checks specifically for linux kernel packages to let the user know
        of whether they need to reboot for changes to take effect."""
        # TODO: Check for other packages such as modules that require system/service restart.
        if len(paclist) > 0:
            for i in paclist:
                if i.startswith('linux'):
                    self.critical.append(i)
        if len(self.critical) >= 1:
            for i in self.critical:
                note_set_send(title="Critical Update!", body="%s is a critical update and requires a restart." % i)
        return self.critical
class Apt(Setup):
    """Stub backend for Debian-family systems (apt-get); methods are not
    implemented yet and exist to mirror the Pacman backend's interface."""

    def __init__(self):
        setup = Setup()
        self.system = setup.distro()
        self.paclist = setup.paclist
        self.critical = setup.critical
        self.numupdates = setup.numupdates

    def update(self):
        # TODO: run 'apt-get update' (see Pacman.update for the pattern).
        pass

    def list_packs(self):
        # TODO: run 'apt-get -s upgrade' and parse the pending packages.
        pass

    def check_critical(self):
        # TODO: flag kernel/restart-requiring packages.
        pass
class Redhat(Setup):
    """Stub backend for RHEL-family systems (yum); methods are not
    implemented yet and exist to mirror the Pacman backend's interface."""

    def __init__(self):
        setup = Setup()
        self.system = setup.distro()
        self.paclist = setup.paclist
        self.critical = setup.critical
        self.numupdates = setup.numupdates

    def update(self):
        # BUG FIX: was misspelled 'udpate', so Setup.redhat()'s r.update()
        # call raised AttributeError.  Still a stub.
        pass

    def list_packs(self):
        # TODO: run 'yum list updates' and parse the pending packages.
        pass

    def check_critical(self):
        # TODO: flag kernel/restart-requiring packages.
        pass
def note_set_send(title, body):
    """Send a desktop notification over D-Bus via notify2.

    :param title: summary line shown in the notification popup
    :param body: body text shown under the title
    :return: result of notify2.Notification.show()
    """
    notify2.init('lupdater')
    n = notify2.Notification(title, body)
    return n.show()
def main():
    """Entry point: require Python 3, then start the Qt system-tray app."""
    # Use version_info rather than string-prefix matching on sys.version,
    # which is fragile (the old check also kept an unused local around).
    if sys.version_info[0] == 3:
        app = QtGui.QApplication(sys.argv)
        w = QtGui.QWidget()
        trayIcon = SystemTrayIcon(QtGui.QIcon("lupdater.png"), w)
        trayIcon.show()
        sys.exit(app.exec_())
    else:
        sys.exit('This program requires Python 3.x.x!')


if __name__ == '__main__':
    main()
|
darthlukan/lupdater
|
lupdater.py
|
Python
|
gpl-2.0
| 7,248
|
[
"Brian"
] |
862814445a12b9073ac402d6755905b11f4b0fdaa118284e83c015927d1078e2
|
#
# Import an entire NetCDF file into memory
#
from boututils import DataFile
def file_import(name):
    """Read every variable from a NetCDF file into a dictionary.

    Parameters
    ----------
    name : str
        Path of the NetCDF file to open.

    Returns
    -------
    dict
        Maps each variable name found in the file to the data read for it.
    """
    f = DataFile(name)   # Open file
    try:
        varlist = f.list()   # Get list of all variables in file
        data = {}            # Create empty dictionary
        for v in varlist:
            data[v] = f.read(v)
        return data
    finally:
        # BUG FIX: close even when list()/read() raises, so the file
        # handle is not leaked on error.
        f.close()
|
boutproject/BOUT-2.0
|
tools/pylib/boututils/file_import.py
|
Python
|
gpl-3.0
| 328
|
[
"NetCDF"
] |
d6b59940f8680fd5c372dc9fc301b315641852bb8d021aad84e4e418a0ac5616
|
import nose.tools
from unittest import TestCase
from nose import SkipTest
from nose.plugins.attrib import attr
import tempfile
from netCDF4 import Dataset
from ocgis import RequestDataset, OcgOperations
from ocgis.util.large_array import compute
from flyingpigeon.utils import local_path
from tests.common import prepare_env, TESTDATA
prepare_env()
def test_ocgis_import():
    # Smoke test: importing ocgis.constants must succeed.
    from ocgis import constants
def test_cdo_import():
    # Smoke test: the cdo binding must import and instantiate (requires the
    # cdo binary to be on PATH).
    from cdo import Cdo
    cdo = Cdo()
class OCGISTestCase(TestCase):
    """Integration tests exercising ocgis against the CORDEX tasmax test file."""

    @classmethod
    def setUpClass(cls):
        pass

    def test_ocgis_inspect(self):
        # Opening and inspecting the test dataset must not raise.
        rd = RequestDataset(local_path(TESTDATA['cordex_tasmax_nc']), 'tasmax')
        rd.inspect()

    def test_ocgis_su_tasmax(self):
        # NOTE(review): disabled; everything below the SkipTest is unreachable.
        raise SkipTest
        out_dir = tempfile.mkdtemp()
        prefix = 'tasmax_su'
        # icclim "summer days" (SU) index, grouped per year.
        calc_icclim = [{'func':'icclim_SU','name':'SU'}]
        rd = RequestDataset(local_path(TESTDATA['cordex_tasmax_nc']), "tasmax")
        SU_file = OcgOperations(
            dataset=rd,
            calc=calc_icclim,
            calc_grouping=['year'],
            prefix=prefix,
            output_format='nc',
            dir_output=out_dir,
            add_auxiliary_files=False).execute()
        from os.path import join
        # NOTE(review): self.output_format is never set on this TestCase, so if
        # the SkipTest above is removed this line raises AttributeError —
        # presumably the local 'nc' output_format was intended.  TODO confirm.
        result = Dataset(join(out_dir, prefix + '.' + self.output_format))
        # SU variable must be in result
        nose.tools.ok_('SU' in result.variables, result.variables.keys())
        # 5 years
        nose.tools.ok_(len(result.variables['time']) == 5, len(result.variables['time']))

    def test_eur44_mon_with_compute(self):
        # NOTE(review): disabled via SkipTest; the code below is kept as a
        # worked example of tiled computation with ocgis.util.large_array.
        raise SkipTest
        rd = RequestDataset(local_path(TESTDATA['cordex_tasmax_nc']), variable="tasmax")
        calc = [{'func':'icclim_SU','name':'SU'}]
        calc_grouping = ['year']
        out_dir = tempfile.mkdtemp()
        # output must be netCDF. otherwise, all operations should be accepted. it could be fragile with some things, so
        # me know if you encounter any issues.
        ops = OcgOperations(
            dataset=rd,
            calc=calc,
            calc_grouping=calc_grouping,
            output_format='nc',
            dir_output=out_dir)
        # this is an estimate of the request size (in kilobytes) which could be useful.
        print ops.get_base_request_size()['total']
        # the tile dimension splits data into squares. edge effects are handled.
        tile_dimension = 20
        path_nc = compute(ops, tile_dimension, verbose=True)
|
sradanov/flyingpigeon
|
tests/test_ocgis.py
|
Python
|
apache-2.0
| 2,517
|
[
"NetCDF"
] |
e4ecb390eb0c6e5e33161ddb1a0e1c4713888019589998ecacc2d23ace4e7bcc
|
'''
synbiochem (c) University of Manchester 2015
synbiochem is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
class Vertex():
    '''A single graph vertex holding named attributes plus incoming and
    outgoing edge lists.  Each edge is stored as a (vertex, attributes)
    pair on both endpoints.'''

    def __init__(self, name, attributes):
        # The name is folded into the attribute dict (mutating the caller's
        # dict, as before) so __repr__ can read it back.
        attributes['name'] = name
        self._attrs = attributes
        self._edges_in = []
        self._edges_out = []

    def predecessors(self):
        '''Get predecessors.'''
        return self._edges_in

    def attributes(self):
        '''Get attributes.'''
        return self._attrs

    def add_out(self, vertex_to, attributes):
        '''Add edge out.'''
        self._edges_out.append((vertex_to, attributes))

    def add_in(self, vertex_from, attributes):
        '''Add edge in.'''
        self._edges_in.append((vertex_from, attributes))

    def indegree(self):
        '''Get indegree.'''
        return len(self._edges_in)

    def is_root(self):
        '''Is the Vertex a root (no outgoing edges)?'''
        return not self._edges_out

    def __repr__(self):
        return self._attrs['name']
class Graph():
    '''A graph keyed by vertex name, delegating edge bookkeeping to Vertex.'''

    def __init__(self):
        self._vertices = {}

    def find_vertex(self, name):
        '''Find vertex; raise ValueError(name) if absent.'''
        try:
            return self._vertices[name]
        except KeyError:
            raise ValueError(name)

    def add_vertex(self, name, attributes):
        '''Add (or overwrite) a vertex under `name`.'''
        self._vertices[name] = Vertex(name, attributes)

    def add_edge(self, vertex_from, vertex_to, attributes):
        '''Record the edge on both endpoints.'''
        vertex_from.add_out(vertex_to, attributes)
        vertex_to.add_in(vertex_from, attributes)

    def get_roots(self):
        '''Get all vertices with no outgoing edges.'''
        return [vtx for vtx in self._vertices.values() if vtx.is_root()]
def add_vertex(graph, name, attributes=None):
    '''Return the vertex called `name`, creating it (with `attributes`,
    defaulting to an empty dict) when the graph does not have it yet.'''
    try:
        return graph.find_vertex(name)
    except ValueError:
        graph.add_vertex(name, attributes if attributes else {})
        return graph.find_vertex(name)
def add_edge(graph, vertex_from, vertex_to, attributes=None):
    '''Add an edge, substituting an empty attribute dict for a falsy one.'''
    graph.add_edge(vertex_from, vertex_to, attributes if attributes else {})
def get_roots(graph):
    '''Get roots (vertices with no outgoing edges); delegates to the graph.'''
    return graph.get_roots()
|
synbiochem/synbiochem-py
|
synbiochem/utils/graph_utils.py
|
Python
|
mit
| 2,356
|
[
"VisIt"
] |
ff0c3a6be20aa36ef8a9102e4e49b0407589d15777c497dacb73db3f8c4c44a4
|
"""
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Trevor Stephens <trev.stephens@gmail.com>
# Li Li <aiki.nogard@gmail.com>
# License: BSD 3 clause
from numbers import Integral
import numpy as np
from ..externals import six
from ..utils.validation import check_is_fitted
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
class Sentinel(object):
    """Marker whose repr renders as the historical '"tree.dot"' default, so
    generated signatures/docs show the old default filename."""

    def __repr__(self):
        return '"tree.dot"'


SENTINEL = Sentinel()
def export_graphviz(decision_tree, out_file=None, max_depth=None,
                    feature_names=None, class_names=None, label='all',
                    filled=False, leaves_parallel=False, impurity=True,
                    node_ids=False, proportion=False, rotate=False,
                    rounded=False, special_characters=False, precision=3):
    """Export a decision tree in DOT format.

    This function generates a GraphViz representation of the decision tree,
    which is then written into `out_file`. Once exported, graphical renderings
    can be generated using, for example::

        $ dot -Tps tree.dot -o tree.ps      (PostScript format)
        $ dot -Tpng tree.dot -o tree.png    (PNG format)

    The sample counts that are shown are weighted with any sample_weights that
    might be present.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    decision_tree : decision tree regressor or classifier
        The decision tree to be exported to GraphViz.

    out_file : file object or string, optional (default=None)
        Handle or name of the output file. If ``None``, the result is
        returned as a string.

        .. versionchanged:: 0.20
            Default of out_file changed from "tree.dot" to None.

    max_depth : int, optional (default=None)
        The maximum depth of the representation. If None, the tree is fully
        generated.

    feature_names : list of strings, optional (default=None)
        Names of each of the features.

    class_names : list of strings, bool or None, optional (default=None)
        Names of each of the target classes in ascending numerical order.
        Only relevant for classification and not supported for multi-output.
        If ``True``, shows a symbolic representation of the class name.

    label : {'all', 'root', 'none'}, optional (default='all')
        Whether to show informative labels for impurity, etc.
        Options include 'all' to show at every node, 'root' to show only at
        the top root node, or 'none' to not show at any node.

    filled : bool, optional (default=False)
        When set to ``True``, paint nodes to indicate majority class for
        classification, extremity of values for regression, or purity of node
        for multi-output.

    leaves_parallel : bool, optional (default=False)
        When set to ``True``, draw all leaf nodes at the bottom of the tree.

    impurity : bool, optional (default=True)
        When set to ``True``, show the impurity at each node.

    node_ids : bool, optional (default=False)
        When set to ``True``, show the ID number on each node.

    proportion : bool, optional (default=False)
        When set to ``True``, change the display of 'values' and/or 'samples'
        to be proportions and percentages respectively.

    rotate : bool, optional (default=False)
        When set to ``True``, orient tree left to right rather than top-down.

    rounded : bool, optional (default=False)
        When set to ``True``, draw node boxes with rounded corners and use
        Helvetica fonts instead of Times-Roman.

    special_characters : bool, optional (default=False)
        When set to ``False``, ignore special characters for PostScript
        compatibility.

    precision : int, optional (default=3)
        Number of digits of precision for floating point in the values of
        impurity, threshold and value attributes of each node.

    Returns
    -------
    dot_data : string
        String representation of the input tree in GraphViz dot format.
        Only returned if ``out_file`` is None.

        .. versionadded:: 0.18

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn import tree

    >>> clf = tree.DecisionTreeClassifier()
    >>> iris = load_iris()

    >>> clf = clf.fit(iris.data, iris.target)
    >>> tree.export_graphviz(clf,
    ...     out_file='tree.dot')                # doctest: +SKIP
    """

    def get_color(value):
        # Find the appropriate color & intensity for a node
        if colors['bounds'] is None:
            # Classification tree
            color = list(colors['rgb'][np.argmax(value)])
            sorted_values = sorted(value, reverse=True)
            if len(sorted_values) == 1:
                alpha = 0
            else:
                alpha = int(np.round(255 * (sorted_values[0] -
                                            sorted_values[1]) /
                                     (1 - sorted_values[1]), 0))
        else:
            # Regression tree or multi-output
            color = list(colors['rgb'][0])
            alpha = int(np.round(255 * ((value - colors['bounds'][0]) /
                                        (colors['bounds'][1] -
                                         colors['bounds'][0])), 0))

        # Return html color code in #RRGGBBAA format
        color.append(alpha)
        hex_codes = [str(i) for i in range(10)]
        hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
        color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]

        return '#' + ''.join(color)

    def node_to_str(tree, node_id, criterion):
        # Generate the node content string
        if tree.n_outputs == 1:
            value = tree.value[node_id][0, :]
        else:
            value = tree.value[node_id]

        # Should labels be shown?
        labels = (label == 'root' and node_id == 0) or label == 'all'

        # PostScript compatibility for special characters
        if special_characters:
            characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
            node_string = '<'
        else:
            characters = ['#', '[', ']', '<=', '\\n', '"']
            node_string = '"'

        # Write node ID
        if node_ids:
            if labels:
                node_string += 'node '
            node_string += characters[0] + str(node_id) + characters[4]

        # Write decision criteria
        if tree.children_left[node_id] != _tree.TREE_LEAF:
            # Always write node decision criteria, except for leaves
            if feature_names is not None:
                feature = feature_names[tree.feature[node_id]]
            else:
                feature = "X%s%s%s" % (characters[1],
                                       tree.feature[node_id],
                                       characters[2])
            node_string += '%s %s %s%s' % (feature,
                                           characters[3],
                                           round(tree.threshold[node_id],
                                                 precision),
                                           characters[4])

        # Write impurity
        if impurity:
            if isinstance(criterion, _criterion.FriedmanMSE):
                criterion = "friedman_mse"
            elif not isinstance(criterion, six.string_types):
                criterion = "impurity"
            if labels:
                node_string += '%s = ' % criterion
            node_string += (str(round(tree.impurity[node_id], precision)) +
                            characters[4])

        # Write node sample count
        if labels:
            node_string += 'samples = '
        if proportion:
            percent = (100. * tree.n_node_samples[node_id] /
                       float(tree.n_node_samples[0]))
            node_string += (str(round(percent, 1)) + '%' +
                            characters[4])
        else:
            node_string += (str(tree.n_node_samples[node_id]) +
                            characters[4])

        # Write node class distribution / regression value
        if proportion and tree.n_classes[0] != 1:
            # For classification this will show the proportion of samples
            value = value / tree.weighted_n_node_samples[node_id]
        if labels:
            node_string += 'value = '
        if tree.n_classes[0] == 1:
            # Regression
            value_text = np.around(value, precision)
        elif proportion:
            # Classification
            value_text = np.around(value, precision)
        elif np.all(np.equal(np.mod(value, 1), 0)):
            # Classification without floating-point weights
            value_text = value.astype(int)
        else:
            # Classification with floating-point weights
            value_text = np.around(value, precision)
        # Strip whitespace
        value_text = str(value_text.astype('S32')).replace("b'", "'")
        value_text = value_text.replace("' '", ", ").replace("'", "")
        if tree.n_classes[0] == 1 and tree.n_outputs == 1:
            value_text = value_text.replace("[", "").replace("]", "")
        value_text = value_text.replace("\n ", characters[4])
        node_string += value_text + characters[4]

        # Write node majority class
        if (class_names is not None and
                tree.n_classes[0] != 1 and
                tree.n_outputs == 1):
            # Only done for single-output classification trees
            if labels:
                node_string += 'class = '
            if class_names is not True:
                class_name = class_names[np.argmax(value)]
            else:
                class_name = "y%s%s%s" % (characters[1],
                                          np.argmax(value),
                                          characters[2])
            node_string += class_name

        # Clean up any trailing newlines
        if node_string[-2:] == '\\n':
            node_string = node_string[:-2]
        if node_string[-5:] == '<br/>':
            node_string = node_string[:-5]

        return node_string + characters[5]

    def recurse(tree, node_id, criterion, parent=None, depth=0):
        if node_id == _tree.TREE_LEAF:
            raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)

        left_child = tree.children_left[node_id]
        right_child = tree.children_right[node_id]

        # Add node with description
        if max_depth is None or depth <= max_depth:

            # Collect ranks for 'leaf' option in plot_options
            if left_child == _tree.TREE_LEAF:
                ranks['leaves'].append(str(node_id))
            elif str(depth) not in ranks:
                ranks[str(depth)] = [str(node_id)]
            else:
                ranks[str(depth)].append(str(node_id))

            out_file.write('%d [label=%s'
                           % (node_id,
                              node_to_str(tree, node_id, criterion)))

            if filled:
                # Fetch appropriate color for node
                if 'rgb' not in colors:
                    # Initialize colors and bounds if required
                    colors['rgb'] = _color_brew(tree.n_classes[0])
                    if tree.n_outputs != 1:
                        # Find max and min impurities for multi-output
                        colors['bounds'] = (np.min(-tree.impurity),
                                            np.max(-tree.impurity))
                    elif (tree.n_classes[0] == 1 and
                            len(np.unique(tree.value)) != 1):
                        # Find max and min values in leaf nodes for regression
                        colors['bounds'] = (np.min(tree.value),
                                            np.max(tree.value))
                if tree.n_outputs == 1:
                    node_val = (tree.value[node_id][0, :] /
                                tree.weighted_n_node_samples[node_id])
                    if tree.n_classes[0] == 1:
                        # Regression
                        node_val = tree.value[node_id][0, :]
                else:
                    # If multi-output color node by impurity
                    node_val = -tree.impurity[node_id]
                out_file.write(', fillcolor="%s"' % get_color(node_val))
            out_file.write('] ;\n')

            if parent is not None:
                # Add edge to parent
                out_file.write('%d -> %d' % (parent, node_id))
                if parent == 0:
                    # Draw True/False labels if parent is root node
                    angles = np.array([45, -45]) * ((rotate - .5) * -2)
                    out_file.write(' [labeldistance=2.5, labelangle=')
                    if node_id == 1:
                        out_file.write('%d, headlabel="True"]' % angles[0])
                    else:
                        out_file.write('%d, headlabel="False"]' % angles[1])
                out_file.write(' ;\n')

            if left_child != _tree.TREE_LEAF:
                recurse(tree, left_child, criterion=criterion, parent=node_id,
                        depth=depth + 1)
                recurse(tree, right_child, criterion=criterion, parent=node_id,
                        depth=depth + 1)

        else:
            ranks['leaves'].append(str(node_id))

            out_file.write('%d [label="(...)"' % node_id)
            if filled:
                # color cropped nodes grey
                out_file.write(', fillcolor="#C0C0C0"')
            # BUG FIX: was "out_file.write('] ;\n' % node_id)" — the format
            # string has no conversion specifier, so the '%' raised TypeError
            # whenever max_depth pruning emitted a "(...)" placeholder node.
            out_file.write('] ;\n')

            if parent is not None:
                # Add edge to parent
                out_file.write('%d -> %d ;\n' % (parent, node_id))

    check_is_fitted(decision_tree, 'tree_')
    own_file = False
    return_string = False
    try:
        if isinstance(out_file, six.string_types):
            if six.PY3:
                out_file = open(out_file, "w", encoding="utf-8")
            else:
                out_file = open(out_file, "wb")
            own_file = True

        if out_file is None:
            return_string = True
            out_file = six.StringIO()

        if isinstance(precision, Integral):
            if precision < 0:
                raise ValueError("'precision' should be greater or equal to 0."
                                 " Got {} instead.".format(precision))
        else:
            raise ValueError("'precision' should be an integer. Got {}"
                             " instead.".format(type(precision)))

        # Check length of feature_names before getting into the tree node
        # Raise error if length of feature_names does not match
        # n_features_ in the decision_tree
        if feature_names is not None:
            if len(feature_names) != decision_tree.n_features_:
                raise ValueError("Length of feature_names, %d "
                                 "does not match number of features, %d"
                                 % (len(feature_names),
                                    decision_tree.n_features_))

        # The depth of each node for plotting with 'leaf' option
        ranks = {'leaves': []}
        # The colors to render each node with
        colors = {'bounds': None}

        out_file.write('digraph Tree {\n')

        # Specify node aesthetics
        out_file.write('node [shape=box')
        rounded_filled = []
        if filled:
            rounded_filled.append('filled')
        if rounded:
            rounded_filled.append('rounded')
        if len(rounded_filled) > 0:
            out_file.write(', style="%s", color="black"'
                           % ", ".join(rounded_filled))
        if rounded:
            out_file.write(', fontname=helvetica')
        out_file.write('] ;\n')

        # Specify graph & edge aesthetics
        if leaves_parallel:
            out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
        if rounded:
            out_file.write('edge [fontname=helvetica] ;\n')
        if rotate:
            out_file.write('rankdir=LR ;\n')

        # Now recurse the tree and add node & edge attributes
        recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)

        # If required, draw leaf nodes at same depth as each other
        if leaves_parallel:
            for rank in sorted(ranks):
                out_file.write("{rank=same ; " +
                               "; ".join(r for r in ranks[rank]) + "} ;\n")
        out_file.write("}")

        if return_string:
            return out_file.getvalue()

    finally:
        if own_file:
            out_file.close()
|
vortex-ape/scikit-learn
|
sklearn/tree/export.py
|
Python
|
bsd-3-clause
| 17,978
|
[
"Brian"
] |
48b6a7dc2985c30e6c131bfbee35679e45c0d319235899ab3a6effce248be344
|
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# Stuff for handling Servers
#
# system modules
import string
import sys
from spacewalk.common import rhnFlags
from spacewalk.common.rhnConfig import CFG
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnException import rhnFault, rhnException
from spacewalk.common.rhnTranslate import _
from spacewalk.server import rhnChannel, rhnUser, rhnSQL, rhnLib, rhnAction, \
rhnVirtualization
from search_notify import SearchNotify
# Local Modules
import server_kickstart
import server_lib
import server_token
from server_certificate import Certificate, gen_secret
from server_wrapper import ServerWrapper
class Server(ServerWrapper):
    """Main Server class: an in-memory view of one rhnServer row plus the
    registration/entitlement/channel operations performed on it."""

    def __init__(self, user, arch=None, org_id=None):
        # user   -- rhnUser owning the registration (may be None on token paths)
        # arch   -- architecture label, validated/normalized via set_arch()
        # org_id -- organization id, used only when no user object is given
        ServerWrapper.__init__(self)
        self.user = user
        # Use the handy TableRow
        self.server = rhnSQL.Row("rhnServer", "id")
        self.server["release"] = ""
        self.server["os"] = "Red Hat Linux"
        self.is_rpm_managed = 0
        self.set_arch(arch)
        # We only get this passed in when we create a new
        # entry. Usually a reload will create a dummy entry first and
        # then call self.loadcert()
        if user:
            self.server["org_id"] = user.customer["id"]
        elif org_id:
            self.server["org_id"] = org_id
        self.cert = None
        # Also, at this point we know that this is a real server
        self.type = "REAL"
        self.default_description()
        # custom info values
        self.custom_info = None
        # uuid
        self.uuid = None
        self.registration_number = None
# Resolves an architecture label to its rhnServerArch id plus a flag telling
# whether its packages are RPM-managed (arch_type 'rpm'); used by set_arch().
_query_lookup_arch = rhnSQL.Statement("""
select sa.id,
case when at.label = 'rpm' then 1 else 0 end is_rpm_managed
from rhnServerArch sa,
rhnArchType at
where sa.label = :archname
and sa.arch_type_id = at.id
""")
def set_arch(self, arch):
    """Normalize `arch`, look it up in rhnServerArch and store its id.

    A None arch leaves server_arch_id unset (dummy/reload paths).
    Raises rhnFault(24) when the normalized architecture is unknown.
    """
    self.archname = arch
    # try to detect the archid
    if arch is None:
        return
    arch = rhnLib.normalize_server_arch(arch)
    h = rhnSQL.prepare(self._query_lookup_arch)
    h.execute(archname=arch)
    data = h.fetchone_dict()
    if not data:
        # Log it to disk, it may show interesting things
        log_error("Attempt to create server with invalid arch `%s'" %
                  arch)
        raise rhnFault(24,
                       _("Architecture `%s' is not supported") % arch)
    self.server["server_arch_id"] = data["id"]
    self.is_rpm_managed = data['is_rpm_managed']
def default_description(self):
    """Fill server['description'] with the initial registration parameters."""
    self.server["description"] = (
        "Initial Registration Parameters:\n"
        "OS: %s\n"
        "Release: %s\n"
        "CPU Arch: %s" % (self.server["os"],
                          self.server["release"],
                          self.archname))
def __repr__(self):
    """Debug representation: object identity plus cert and row contents."""
    # misa: looks like id can return negative numbers, so use %d
    # instead of %x
    # For the gory details,
    # http://mail.python.org/pipermail/python-dev/2005-February/051559.html
    return "<Server Class at %d: %s>\n" % (
        id(self), {
            "self.cert": self.cert,
            "self.server": self.server.data,
        })

# str() and repr() render identically.
__str__ = __repr__
# Return a Digital Certificate that can be placed in a file on the
# client side.
def system_id(self):
    """Build (lazily, once) and return the client's systemid certificate."""
    log_debug(3, self.server, self.cert)
    if self.cert is None:
        # need to instantiate it
        cert = Certificate()
        cert["system_id"] = self.server["digital_server_id"]
        cert["os_release"] = self.server["release"]
        cert["operating_system"] = self.server["os"]
        cert["architecture"] = self.archname
        cert["profile_name"] = self.server["name"]
        cert["description"] = self.server["description"]
        if self.user:
            cert["username"] = self.user.contact["login"]
        cert["type"] = self.type
        cert.set_secret(self.server["secret"])
        self.cert = cert
    return self.cert.certificate()
# return the id of this system
def getid(self):
    """Return the server id, reserving a fresh sequence value on first use."""
    if not self.server.has_key("id"):
        sysid = rhnSQL.Sequence("rhn_server_id_seq")()
        self.server["digital_server_id"] = "ID-%09d" % sysid
        # we can't reset the id column, so we need to poke into
        # internals. kind of illegal, but it works...
        self.server.data["id"] = (sysid, 0)
    else:
        sysid = self.server["id"]
    return sysid
# change the base channel of a server
def change_base_channel(self, new_rel):
    """Re-point this server's channel subscriptions after an OS release change.

    Custom base channels are left untouched (bz 442355) unless
    CFG.RESET_BASE_CHANNEL is set; otherwise the best-guess channels for the
    new release are subscribed and an event is added to the server history.
    Always returns 1.
    """
    log_debug(3, self.server["id"], new_rel)
    old_rel = self.server["release"]
    current_channels = rhnChannel.channels_for_server(self.server["id"])
    # Extract the base channel off of
    old_base = filter(lambda x: not x['parent_channel'],
                      current_channels)

    # Quick sanity check
    base_channels_count = len(old_base)
    if base_channels_count == 1:
        old_base = old_base[0]
    elif base_channels_count == 0:
        old_base = None
    else:
        raise rhnException("Server %s subscribed to multiple base channels"
                           % (self.server["id"], ))

    # bz 442355
    # Leave custom base channels alone, don't alter any of the channel subscriptions
    if not CFG.RESET_BASE_CHANNEL and old_base and rhnChannel.isCustomChannel(old_base["id"]):
        log_debug(3,
                  "Custom base channel detected, will not alter channel subscriptions")
        self.server["release"] = new_rel
        self.server.save()
        msg = """The Red Hat Satellite Update Agent has detected a
change in the base version of the operating system running
on your system, additionaly you are subscribed to a custom
channel as your base channel. Due to this configuration
your channel subscriptions will not be altered.
"""
        self.add_history("Updated system release from %s to %s" % (
            old_rel, new_rel), msg)
        self.save_history_byid(self.server["id"])
        return 1

    s = rhnChannel.LiteServer().init_from_server(self)
    s.release = new_rel
    s.arch = self.archname
    # Let get_server_channels deal with the errors and raise rhnFault
    target_channels = rhnChannel.guess_channels_for_server(s, none_ok=True)
    if target_channels:
        target_base = filter(lambda x: not x['parent_channel'],
                             target_channels)[0]
    else:
        target_base = None

    channels_to_subscribe = []
    channels_to_unsubscribe = []
    if old_base and target_base and old_base['id'] == target_base['id']:
        # Same base channel. Preserve the currently subscribed child
        # channels, just add the ones that are missing
        hash = {}
        for c in current_channels:
            hash[c['id']] = c
        for c in target_channels:
            channel_id = c['id']
            if hash.has_key(channel_id):
                # Already subscribed to this one
                del hash[channel_id]
                continue
            # Have to subscribe to this one
            channels_to_subscribe.append(c)
        # We don't want to lose subscriptions to prior channels, so don't
        # do anything with hash.values()
    else:
        # Different base channel
        channels_to_unsubscribe = current_channels
        channels_to_subscribe = target_channels

    rhnSQL.transaction("change_base_channel")
    self.server["release"] = new_rel
    self.server.save()
    if not (channels_to_subscribe or channels_to_unsubscribe):
        # Nothing to do, just add the history entry
        self.add_history("Updated system release from %s to %s" % (
            old_rel, new_rel))
        self.save_history_byid(self.server["id"])
        return 1

    # XXX: need a way to preserve existing subscriptions to
    # families so we can restore access to non-public ones.
    rhnChannel.unsubscribe_channels(self.server["id"],
                                    channels_to_unsubscribe)
    rhnChannel.subscribe_channels(self.server["id"],
                                  channels_to_subscribe)
    # now that we changed, recompute the errata cache for this one
    rhnSQL.Procedure("queue_server")(self.server["id"])
    # Make a history note
    sub_channels = rhnChannel.channels_for_server(self.server["id"])
    if sub_channels:
        channel_list = map(lambda a: a["name"], sub_channels)
        msg = """The Red Hat Satellite Update Agent has detected a
change in the base version of the operating system running
on your system and has updated your channel subscriptions
to reflect that.
Your server has been automatically subscribed to the following
channels:\n%s\n""" % (string.join(channel_list, "\n"),)
    else:
        msg = """*** ERROR: ***
While trying to subscribe this server to software channels:
There are no channels serving release %s""" % new_rel
    self.add_history("Updated system release from %s to %s" % (
        old_rel, new_rel), msg)
    self.save_history_byid(self.server["id"])
    return 1
def take_snapshot(self, reason):
    """Record a snapshot of the server's state; delegates to server_lib."""
    return server_lib.snapshot_server(self.server['id'], reason)
# returns true iff the base channel assigned to this system
# has been end-of-life'd
def base_channel_is_eol(self):
    """Return 1 if the subscribed base channel is past end-of-life, else None."""
    h = rhnSQL.prepare("""
select 1
from rhnChannel c, rhnServerChannel sc
where sc.server_id = :server_id
and sc.channel_id = c.id
and c.parent_channel IS NULL
and sysdate - c.end_of_life > 0
""")
    h.execute(server_id=self.getid())
    ret = h.fetchone_dict()
    if ret:
        return 1
    return None
# Fetches all (label, value) custom-info pairs attached to a server;
# used by load_custom_info().
_query_server_custom_info = rhnSQL.Statement("""
select cdk.label,
scdv.value
from rhnCustomDataKey cdk,
rhnServerCustomDataValue scdv
where scdv.server_id = :server_id
and scdv.key_id = cdk.id
""")
def load_custom_info(self):
    """Populate self.custom_info with this server's custom key/value pairs."""
    self.custom_info = {}
    h = rhnSQL.prepare(self._query_server_custom_info)
    h.execute(server_id=self.getid())
    rows = h.fetchall_dict()
    if not rows:
        log_debug(4, "no custom info values")
        return
    for entry in rows:
        self.custom_info[entry['label']] = entry['value']
# load additional server information from the token definition
def load_token(self):
    """Fill in creator/org fields from the active registration token.

    Returns 0 whether or not a token is present in rhnFlags.
    """
    # Fetch token
    tokens_obj = rhnFlags.get("registration_token")
    if not tokens_obj:
        # No tokens present
        return 0

    # make sure we have reserved a server_id. most likely if this
    # is a new server object (just created from
    # registration.new_system) then we have no associated a
    # server["id"] yet -- and getid() will reserve that for us.
    self.getid()
    # pull in the extra information needed to fill in the
    # required registration fields using tokens
    user_id = tokens_obj.get_user_id()
    org_id = tokens_obj.get_org_id()
    self.user = rhnUser.User("", "")
    if user_id is not None:
        self.user.reload(user_id)
    self.server["creator_id"] = user_id
    self.server["org_id"] = org_id
    return 0
# perform the actions required by the token (subscribing to
# channels, server groups, etc)
def use_token(self):
    """Apply the active registration token's actions and record history.

    Returns 0; a no-op when no token is present in rhnFlags.
    """
    # Fetch token
    tokens_obj = rhnFlags.get("registration_token")
    if not tokens_obj:
        # No token present
        return 0

    is_rereg_token = tokens_obj.is_rereg_token

    # We get back a history of what is being done in the
    # registration process
    history = server_token.process_token(self.server, self.archname,
                                         tokens_obj, self.virt_type)

    if is_rereg_token:
        event_name = "Reactivation via Token"
        event_text = "System reactivated"
    else:
        event_name = "Subscription via Token"
        event_text = "System created"

    token_name = tokens_obj.get_names()

    # now record that history nicely
    self.add_history(event_name,
                     "%s with token <strong>%s</strong><br />\n%s" %
                     (event_text, token_name, history))
    self.save_history_byid(self.server["id"])

    # 6/23/05 wregglej 157262, use get_kickstart session_id() to see if we're in the middle of a kickstart.
    ks_id = tokens_obj.get_kickstart_session_id()

    # 4/5/05 wregglej, Added for bugzilla: 149932. Actions need to be flushed on reregistration.
    # 6/23/05 wregglej 157262, don't call flush_actions() if we're in the middle of a kickstart.
    # It would cause all of the remaining kickstart actions to get flushed, which is bad.
    if is_rereg_token and ks_id is None:
        self.flush_actions()

    # XXX: will need to call self.save() later to commit all that
    return 0
def disable_token(self):
tokens_obj = rhnFlags.get('registration_token')
if not tokens_obj:
# Nothing to do
return
if not tokens_obj.is_rereg_token:
# Not a re-registration token - nothing to do
return
# Re-registration token - we know for sure there is only one
token_server_id = tokens_obj.get_server_id()
if token_server_id != self.getid():
# Token is not associated with this server (it may actually not be
# associated with any server)
return
server_token.disable_token(tokens_obj)
# save() will commit this
# Auto-entitlement: attempt to entitle this server to the highest
# entitlement that is available
    def autoentitle(self):
        """Entitle this server to the best available entitlement level.

        Walks the (currently single-entry) hierarchy and attempts each
        level. ORA-20287 ("server cannot be entitled at this level") is
        tolerated per level; any other database error raises
        rhnSystemEntitlementException. When no base entitlement could
        be granted at all, raises rhnNoSystemEntitlementsException.
        """
        entitlement_hierarchy = ['enterprise_entitled']
        any_base_entitlements = 0
        for entitlement in entitlement_hierarchy:
            try:
                self._entitle(entitlement)
                any_base_entitlements = 1
            except rhnSQL.SQLSchemaError, e:
                if e.errno == 20287:
                    # ORA-20287: (invalid_entitlement) - The server can not be
                    # entitled to the specified level
                    #
                    # ignore for now, since any_base_entitlements will throw
                    # an error at the end if not set
                    continue
                # Should not normally happen
                log_error("Failed to entitle", self.server["id"], entitlement,
                          e.errmsg)
                raise server_lib.rhnSystemEntitlementException("Unable to entitle"), None, sys.exc_info()[2]
            except rhnSQL.SQLError, e:
                log_error("Failed to entitle", self.server["id"], entitlement,
                          str(e))
                raise server_lib.rhnSystemEntitlementException("Unable to entitle"), None, sys.exc_info()[2]
        # for/else: reached when the loop was not exited by an exception
        else:
            if any_base_entitlements:
                # All is fine
                return
            else:
                raise server_lib.rhnNoSystemEntitlementsException, None, sys.exc_info()[2]
def _entitle(self, entitlement):
system_entitlements = server_lib.check_entitlement(self.server["id"])
system_entitlements = system_entitlements.keys()
if entitlement not in system_entitlements:
entitle_server = rhnSQL.Procedure("rhn_entitlements.entitle_server")
entitle_server(self.server['id'], entitlement)
def create_perm_cache(self):
log_debug(4)
create_perms = rhnSQL.Procedure("rhn_cache.update_perms_for_server")
create_perms(self.server['id'])
    def gen_secret(self):
        """Generate a fresh shared secret for this server.

        Any previously issued client certificate is invalidated,
        because the secret it was signed against has changed.
        """
        # Running this invalidates the cert
        self.cert = None
        self.server["secret"] = gen_secret()
_query_update_uuid = rhnSQL.Statement("""
update rhnServerUuid set uuid = :uuid
where server_id = :server_id
""")
_query_insert_uuid = rhnSQL.Statement("""
insert into rhnServerUuid (server_id, uuid)
values (:server_id, :uuid)
""")
def update_uuid(self, uuid, commit=1):
log_debug(3, uuid)
# XXX Should determine a way to do this dinamically
uuid_col_length = 36
if uuid is not None:
uuid = str(uuid)
if not uuid:
log_debug('Nothing to do')
return
uuid = uuid[:uuid_col_length]
server_id = self.server['id']
log_debug(4, "Trimmed uuid", uuid, server_id)
# Update this server's UUID (unique client identifier)
h = rhnSQL.prepare(self._query_update_uuid)
ret = h.execute(server_id=server_id, uuid=uuid)
log_debug(4, "execute returned", ret)
if ret != 1:
# Row does not exist, have to create it
h = rhnSQL.prepare(self._query_insert_uuid)
h.execute(server_id=server_id, uuid=uuid)
if commit:
rhnSQL.commit()
# Save this record in the database
    def __save(self, channel):
        """Persist this server profile (create-or-update core logic).

        For an existing profile only the server row is saved. For a
        brand-new one this also: generates a secret, creates the
        server/setup rows, subscribes channels (the explicit `channel`
        when given, defaults otherwise), auto-entitles and joins the
        creator's default groups when no registration token drives the
        process. In all cases it then emits virtualization
        notifications if applicable and refreshes uuid, permission
        cache, packages, hardware and history. Returns 0. Intended to
        be called only via save(), which provides the savepoint.
        """
        if self.server.real:
            server_id = self.server["id"]
            self.server.save()
        else:  # create new entry
            self.gen_secret()
            server_id = self.getid()
            org_id = self.server["org_id"]
            if self.user:
                user_id = self.user.getid()
            else:
                user_id = None
            # some more default values
            self.server["auto_update"] = "N"
            if self.user and not self.server.has_key("creator_id"):
                # save the link to the user that created it if we have
                # that information
                self.server["creator_id"] = self.user.getid()
            # and create the server entry
            self.server.create(server_id)
            server_lib.create_server_setup(server_id, org_id)
            have_reg_token = rhnFlags.test("registration_token")
            # if we're using a token, then the following channel
            # subscription request can allow no matches since the
            # token code will fix up or fail miserably later.
            # subscribe the server to applicable channels
            # bretm 02/17/2007 -- TODO: refactor activation key codepaths
            # to allow us to not have to pass in none_ok=1 in any case
            #
            # This can now throw exceptions which will be caught at a higher level
            if channel is not None:
                channel_info = dict(rhnChannel.channel_info(channel))
                log_debug(4, "eus channel id %s" % str(channel_info))
                rhnChannel.subscribe_sql(server_id, channel_info['id'])
            else:
                rhnChannel.subscribe_server_channels(self,
                                                     none_ok=have_reg_token,
                                                     user_id=user_id)
            if not have_reg_token:
                # Attempt to auto-entitle, can throw the following exceptions:
                #   rhnSystemEntitlementException
                #   rhnNoSystemEntitlementsException
                self.autoentitle()
                # If a new server that was registered by an user (i.e. not
                # with a registration token), look for this user's default
                # groups
                self.join_groups()
            server_lib.join_rhn(org_id)
        # Handle virtualization specific bits
        if self.virt_uuid is not None and \
           self.virt_type is not None:
            rhnVirtualization._notify_guest(self.getid(),
                                            self.virt_uuid, self.virt_type)
        # Update the uuid - but don't commit yet
        self.update_uuid(self.uuid, commit=0)
        self.create_perm_cache()
        # And save the extra profile data...
        self.save_packages_byid(server_id, schedule=1)
        self.save_hardware_byid(server_id)
        self.save_history_byid(server_id)
        return 0
# This is a wrapper for the above class that allows us to rollback
# any changes in case we don't succeed completely
    def save(self, commit=1, channel=None):
        """Save the server profile inside its own savepoint.

        Wraps __save() in a "save_server" savepoint: on any failure the
        savepoint is rolled back and the exception re-raised. On
        success and with `commit` true, the transaction is committed
        and the search daemon is notified (notification failures are
        only logged). Returns 0.
        """
        log_debug(3)
        # attempt to preserve pending changes before we were called,
        # so we set up our own transaction checkpoint
        rhnSQL.transaction("save_server")
        try:
            self.__save(channel)
        except:  # roll back to what we have before and raise again
            rhnSQL.rollback("save_server")
            # shoot the exception up the chain
            raise
        else:  # if we want to commit, commit all pending changes
            if commit:
                rhnSQL.commit()
                try:
                    search = SearchNotify()
                    search.notify()
                except Exception, e:
                    log_error("Exception caught from SearchNotify.notify().", e)
        return 0
# Reload the current configuration from database using a server id.
    def reload(self, server, reload_all=0):
        """Reload this object's state from the database by server id.

        Resolves the architecture label and drops the cached cert and
        user. With `reload_all` set, package and hardware profiles are
        reloaded as well. Returns 0 on success, -1 when a profile
        reload fails; raises rhnFault(29) when the server row does not
        exist and rhnException on a dangling architecture reference.
        """
        log_debug(4, server, "reload_all = %d" % reload_all)
        if not self.server.load(int(server)):
            log_error("Could not find server record for reload", server)
            raise rhnFault(29, "Could not find server record in the database")
        self.cert = None
        # it is lame that we have to do this
        h = rhnSQL.prepare("""
        select label from rhnServerArch where id = :archid
        """)
        h.execute(archid=self.server["server_arch_id"])
        data = h.fetchone_dict()
        if not data:
            raise rhnException("Found server with invalid numeric "
                               "architecture reference",
                               self.server.data)
        self.archname = data['label']
        # we don't know this one anymore (well, we could look for, but
        # why would we do that?)
        self.user = None
        # XXX: Fix me
        if reload_all:
            if not self.reload_packages_byid(self.server["id"]) == 0:
                return -1
            if not self.reload_hardware_byid(self.server["id"]) == 0:
                return -1
        return 0
# Use the values we find in the cert to cause a reload of this
# server from the database.
    def loadcert(self, cert, load_user=1):
        """Reload this server object from an already-verified client cert.

        Looks up the server id from the certificate's system_id and
        delegates to reload(). Returns -1 when `cert` is not a
        Certificate or the server cannot be found, otherwise the
        reload() result (0 on success). When `load_user` is set, also
        resolves the owning user from the cert's username; self.user
        may legitimately end up None and callers must handle that.
        """
        log_debug(4, cert)
        # certificate is presumed to be already verified
        if not isinstance(cert, Certificate):
            return -1
        # reload the whole thing based on the cert data
        server = cert["system_id"]
        row = server_lib.getServerID(server)
        if row is None:
            return -1
        sid = row["id"]
        # standard reload based on an ID
        ret = self.reload(sid)
        if not ret == 0:
            return ret
        # the reload() will never be able to fill in the username. It
        # would require from the database standpoint insuring that for
        # a given server we can have only one owner at any given time.
        # cert includes it and it's valid because it has been verified
        # through checksuming before we got here
        self.user = None
        # Load the user if at all possible. If it's not possible,
        # self.user will be None, which should be a handled case wherever
        # self.user is used.
        if load_user:
            # Load up the username associated with this profile
            self.user = rhnUser.search(cert["username"])
        # 4/27/05 wregglej - Commented out this block because it was causing problems
        # with rhn_check/up2date when the user that registered the system was deleted.
        # if not self.user:
        #    log_error("Invalid username for server id",
        #              cert["username"], server, cert["profile_name"])
        #    raise rhnFault(9, "Invalid username '%s' for server id %s" %(
        #        cert["username"], server))
        # XXX: make sure that the database thinks that the server
        # registrnt is the same as this certificate thinks. The
        # certificate passed checksum checks, but it never hurts to be
        # too careful now with satellites and all.
        return 0
# Is this server entitled?
def check_entitlement(self):
if not self.server.has_key("id"):
return None
log_debug(3, self.server["id"])
return server_lib.check_entitlement(self.server['id'])
def checkin(self, commit=1):
""" convenient wrapper for these thing until we clean the code up """
if not self.server.has_key("id"):
return 0 # meaningless if rhnFault not raised
return server_lib.checkin(self.server["id"], commit)
def throttle(self):
""" convenient wrapper for these thing until we clean the code up """
if not self.server.has_key("id"):
return 1 # meaningless if rhnFault not raised
return server_lib.throttle(self.server)
def set_qos(self):
""" convenient wrapper for these thing until we clean the code up """
if not self.server.has_key("id"):
return 1 # meaningless if rhnFault not raised
return server_lib.set_qos(self.server["id"])
def join_groups(self):
""" For a new server, join server groups """
# Sanity check - we should always have a user
if not self.user:
raise rhnException("User not specified")
server_id = self.getid()
user_id = self.user.getid()
h = rhnSQL.prepare("""
select system_group_id
from rhnUserDefaultSystemGroups
where user_id = :user_id
""")
h.execute(user_id=user_id)
while 1:
row = h.fetchone_dict()
if not row:
break
server_group_id = row['system_group_id']
log_debug(5, "Subscribing server to group %s" % server_group_id)
server_lib.join_server_group(server_id, server_group_id)
    def fetch_registration_message(self):
        """Return the channel registration message for this server."""
        return rhnChannel.system_reg_message(self)
    def process_kickstart_info(self):
        """Hook a freshly (re)registered system into its kickstart session.

        Marks stale kickstart sessions as failed (recording history for
        each). When the current registration token carries a kickstart
        session id, flushes pending actions and schedules the
        post-kickstart sync, tools-channel subscription and virt
        package install, finally marking the session 'registered'.
        No-op without a token or session id.
        """
        log_debug(4)
        tokens_obj = rhnFlags.get("registration_token")
        if not tokens_obj:
            log_debug(4, "no registration token found")
            # Nothing to do here
            return
        # If there are kickstart sessions associated with this system (other
        # than, possibly, the current one), mark them as failed
        history = server_kickstart.terminate_kickstart_sessions(self.getid())
        for k, v in history:
            self.add_history(k, v)
        kickstart_session_id = tokens_obj.get_kickstart_session_id()
        if kickstart_session_id is None:
            log_debug(4, "No kickstart_session_id associated with token %s (%s)"
                      % (tokens_obj.get_names(), tokens_obj.tokens))
            # Nothing to do here
            return
        # Flush server actions
        self.flush_actions()
        server_id = self.getid()
        action_id = server_kickstart.schedule_kickstart_sync(server_id,
                                                             kickstart_session_id)
        server_kickstart.subscribe_to_tools_channel(server_id,
                                                    kickstart_session_id)
        server_kickstart.schedule_virt_pkg_install(server_id,
                                                   kickstart_session_id)
        # Update the next action to the newly inserted one
        server_kickstart.update_ks_session_table(kickstart_session_id,
                                                 'registered', action_id, server_id)
    def flush_actions(self):
        """Cancel all Queued or Picked Up actions for this server.

        Used on re-registration / kickstart so that stale actions do
        not execute against the rebuilt system. Each action is marked
        Failed (status 3) with result code -100.
        """
        server_id = self.getid()
        h = rhnSQL.prepare("""
        select action_id
        from rhnServerAction
        where server_id = :server_id
        and status in (0, 1) -- Queued or Picked Up
        """)
        h.execute(server_id=server_id)
        while 1:
            row = h.fetchone_dict()
            if not row:
                break
            action_id = row['action_id']
            rhnAction.update_server_action(server_id=server_id,
                                           action_id=action_id, status=3, result_code=-100,
                                           result_message="Action canceled: system kickstarted or reregistered")  # 4/6/05 wregglej, added the "or reregistered" part.
def server_locked(self):
""" Returns true is the server is locked (for actions that are blocked) """
server_id = self.getid()
h = rhnSQL.prepare("""
select 1
from rhnServerLock
where server_id = :server_id
""")
h.execute(server_id=server_id)
row = h.fetchone_dict()
if row:
return 1
return 0
def register_push_client(self):
""" insert or update rhnPushClient for this server_id """
server_id = self.getid()
ret = server_lib.update_push_client_registration(server_id)
return ret
def register_push_client_jid(self, jid):
""" update the JID in the corresponing entry from rhnPushClient """
server_id = self.getid()
ret = server_lib.update_push_client_jid(server_id, jid)
return ret
|
xkollar/spacewalk
|
backend/server/rhnServer/server_class.py
|
Python
|
gpl-2.0
| 30,794
|
[
"CDK"
] |
1ca20f2f8f895db1811f4fef931c729ba4be2c5c09ae1b2cf2b366fe329885c6
|
#!/usr/bin/env python
import vtk
def main():
    """Merge a vtkPolyData point cloud and a vtkUnstructuredGrid into one
    unstructured grid with vtkAppendFilter, then render the combined
    points (Python 2 VTK example).
    """
    # Create 5 points (vtkPolyData)
    pointSource = vtk.vtkPointSource()
    pointSource.SetNumberOfPoints(5)
    pointSource.Update()
    polydata = pointSource.GetOutput()
    print "points in polydata are",polydata.GetNumberOfPoints()
    # Create 2 points in a vtkUnstructuredGrid
    points = vtk.vtkPoints()
    points.InsertNextPoint(0,0,0)
    points.InsertNextPoint(0,0,1)
    ug = vtk.vtkUnstructuredGrid()
    ug.SetPoints(points)
    print "points in unstructured grid are",ug.GetNumberOfPoints()
    # Combine the two data sets
    appendFilter = vtk.vtkAppendFilter()
    appendFilter.AddInputData(polydata)
    appendFilter.AddInputData(ug)
    appendFilter.Update()
    # NOTE(review): this first assignment is immediately overwritten by
    # the filter output on the next line; kept as-is in this doc-only pass.
    combined = vtk.vtkUnstructuredGrid()
    combined = appendFilter.GetOutput()
    print "Combined points are", combined.GetNumberOfPoints()
    # Create a mapper and actor
    colors = vtk.vtkNamedColors()
    mapper = vtk.vtkDataSetMapper()
    mapper.SetInputConnection(appendFilter.GetOutputPort())
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().SetPointSize(5)
    # Create a renderer, render window, and interactor
    renderer = vtk.vtkRenderer()
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.AddRenderer(renderer)
    renderWindowInteractor = vtk.vtkRenderWindowInteractor()
    renderWindowInteractor.SetRenderWindow(renderWindow)
    # Add the actor to the scene
    renderer.AddActor(actor)
    renderer.SetBackground(colors.GetColor3d("SlateGray"))
    # Render and interact
    renderWindow.Render()
    renderWindowInteractor.Start()
if __name__ == '__main__':
    main()
|
lorensen/VTKExamples
|
src/Python/Filtering/AppendFilter.py
|
Python
|
apache-2.0
| 1,730
|
[
"VTK"
] |
ead1332d8a391d3e0710026ea95713ac5117797ddaf8dc3ebdfeef4d6a06107a
|
# -*- coding: utf-8 -*-
#
# Zesty Technology documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 25 13:45:35 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Zesty Technology'
copyright = u'2017, Zesty Technology'
author = u'Brian Gilbert & Lykle Schepers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Use the Read the Docs theme; imported here (not at file top) so the
# html_theme_path can be derived from the installed package location.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ZestyTechnologydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'ZestyTechnology.tex', u'Zesty Technology Documentation',
     u'Brian Gilbert', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'zestytechnology', u'Zesty Technology Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'ZestyTechnology', u'Zesty Technology Documentation',
     author, 'ZestyTechnology', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
|
ZestyTechnology/zesty-docs
|
docs/conf.py
|
Python
|
agpl-3.0
| 5,385
|
[
"Brian"
] |
b2b5825d76b38b3c63709a0ebf2e141777d64d6b460252cdedd8ff9a65607b11
|
import pymongo
from pymongo import MongoClient
import uuid
import unittest
import pprint
from pymatgen.db.query_engine import QueryEngine, QueryResults
from pymatgen.db.tests import common
has_mongo = common.has_mongo()
class SandboxTest(unittest.TestCase):
    """Exercise QueryEngine's query_post / result_post hooks against a
    throwaway MongoDB collection of per-sandbox e_above_hull values.
    Skipped entirely when no MongoDB server is reachable.
    """
    SBX = "testing"  # sandbox id used throughout the fixtures
    N = 100          # number of fixture documents inserted in setUp

    def qtx(self, crit, props):
        """query_post hook: rewrite a top-level e_above_hull criterion
        into its sandbox-specific form and project only our sandbox.
        """
        # NOTE(review): when props is None, a fresh local dict is
        # created here, so the "sbxd" projection added below is
        # discarded by the caller — confirm this is intended.
        if props == None:
            props = {}
        crit["sbxd.e_above_hull"] = crit["e_above_hull"]
        props["sbxd"] = {"$elemMatch": {"id": self.SBX}}
        del crit["e_above_hull"]

    def rtx(self, doc):
        """result_post hook: hoist our sandbox's e_above_hull to the
        top level and tag the document with a marker field."""
        doc["add_fake_field"] = "test value"
        for item in doc["sbxd"]:
            if item["id"] == self.SBX:
                doc["e_above_hull"] = item["e_above_hull"]
        return doc

    def setUp(self):
        # Try a real mongodb
        if has_mongo:
            self.conn = pymongo.MongoClient()
            self.db_name = "test"
            self.db = self.conn[self.db_name]
            # Random collection name so parallel runs do not collide
            self.coll_name = f"sandboxes_{uuid.uuid4()}"
            self.coll = self.db[self.coll_name]
            for i in range(self.N):
                # core sandbox gets positive values, ours negative —
                # lets the tests filter on e_above_hull <= 0
                core_v, sbx_v = 0.1 * (i + 1), -0.1 * (i + 1)
                doc = {
                    "task_id": f"mp-{1000 + i:d}",
                    "sbxd": [
                        {"id": "core", "e_above_hull": core_v},
                        {"id": self.SBX, "e_above_hull": sbx_v},
                    ],
                    "sbxn": ["core", self.SBX],
                }
                doc.update({"state": "successful"})
                if i < 2:
                    pprint.pprint(doc)
                self.coll.insert_one(doc)

    def tearDown(self):
        # Drop the per-run collection created in setUp
        if has_mongo:
            self.db.drop_collection(self.coll_name)

    @unittest.skipUnless(has_mongo, "requires MongoDB server")
    def test_no_post_funcs(self):
        """Without hooks, documents come back untouched."""
        qe = QueryEngine(
            connection=self.conn,
            database=self.db_name,
            collection=self.coll_name,
            aliases={},
            query_post=[],
            result_post=[],
        )
        cursor = qe.query()
        self.assertTrue(isinstance(cursor, QueryResults))
        n = 0
        for rec in cursor:
            pprint.pprint(f"RESULT: {rec}")
            # No Post proccessing should be done
            self.assertTrue("e_above_hull" not in rec)
            self.assertTrue("add_fake_field" not in rec)
            self.assertTrue("sbxd" in rec)
            n += 1
        # should find all tasks
        self.assertEqual(n, self.N)

    @unittest.skipUnless(has_mongo, "requires MongoDB server")
    def test_mongo_find(self):
        """Hooks applied, criteria only."""
        qe = QueryEngine(
            connection=self.conn,
            database=self.db_name,
            collection=self.coll_name,
            aliases={},
            query_post=[self.qtx],
            result_post=[self.rtx],
        )
        result = self._test_find(qe, criteria={"e_above_hull": {"$lte": 0.0}}, properties={})

    @unittest.skipUnless(has_mongo, "requires MongoDB server")
    def test_with_properties(self):
        """Hooks applied together with an explicit projection list."""
        qe = QueryEngine(
            connection=self.conn,
            database=self.db_name,
            collection=self.coll_name,
            aliases={},
            query_post=[self.qtx],
            result_post=[self.rtx],
        )
        result = self._test_find(
            qe,
            criteria={"e_above_hull": {"$lte": 0.0}},
            properties=["e_above_hull", "sbxd", "add_fake_field"],
        )

    def _test_find(self, qe, properties, criteria):
        """Shared assertions: every returned doc was transformed by rtx
        and all N fixtures match the sandboxed criterion."""
        cursor = qe.query(properties=properties, criteria=criteria)
        self.assertTrue(isinstance(cursor, QueryResults))
        n = 0
        for rec in cursor:
            pprint.pprint(f"RESULT: {rec}")
            self.assertTrue(rec["e_above_hull"] < 0)
            self.assertEqual(rec["add_fake_field"], "test value")
            n += 1
        # should find all tasks
        self.assertEqual(n, self.N)

    @unittest.skipUnless(has_mongo, "requires MongoDB server")
    def test_queryresult(self):
        """QueryResults supports sort(), len() and indexing with hooks."""
        qe = QueryEngine(
            connection=self.conn,
            database=self.db_name,
            collection=self.coll_name,
            aliases={},
            query_post=[self.qtx],
            result_post=[self.rtx],
        )
        result = qe.query(criteria={"e_above_hull": {"$lte": 0.0}}).sort("sbxd.e_above_hull", pymongo.ASCENDING)
        self.assertTrue(isinstance(result, QueryResults))
        self.assertEqual(len(result), self.N)
        self.assertTrue(result[0]["e_above_hull"] < 0)
if __name__ == "__main__":
unittest.main()
|
materialsproject/pymatgen-db
|
pymatgen/db/tests/test_postfuncs.py
|
Python
|
mit
| 4,608
|
[
"pymatgen"
] |
b3acd6c6f8be166954eb4894cb3e15ef12187ee97d68ac5fe853aecf276a706b
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# jobfeasible - Check current job feasibility for queued job
# Copyright (C) 2003-2011 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import cgi
import cgitb
# Render uncaught exceptions as HTML pages instead of a bare server error
cgitb.enable()
# Thin CGI shim: the actual logic lives in shared.functionality.jobfeasible
from shared.functionality.jobfeasible import main
from shared.cgiscriptstub import run_cgi_script
run_cgi_script(main)
|
heromod/migrid
|
mig/cgi-bin/jobfeasible.py
|
Python
|
gpl-2.0
| 1,099
|
[
"Brian"
] |
ce35a7a0989ed6927e56f8e03fae2e2599b222c98af8a521b212974acc026943
|
"""Collaborative Password Database"""
__author__ = "Brian Wiborg <baccenfutter@c-base.org>"
__license__ = "GNU/GPLv2"
__version__ = "0.1.4-alpha"
|
baccenfutter/cpassdb
|
cpassdb/__init__.py
|
Python
|
gpl-2.0
| 146
|
[
"Brian"
] |
d24de4ec11a87c83e4eb5a43843e1f548931d241399ff281665248b2b2bb1305
|
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import os
import re
import string
from socket import inet_pton, AF_INET6, error as socket_error
from viper.common.abstracts import Module
from viper.common.objects import File
from viper.core.session import __sessions__
from viper.core.database import Database
from viper.core.storage import get_sample_path
# Pre-compiled patterns used by the extract_* helpers below. Compiling
# once at import time keeps per-string scanning cheap.
DOMAIN_REGEX = re.compile('([a-z0-9][a-z0-9\-]{0,61}[a-z0-9]\.)+[a-z0-9][a-z0-9\-]*[a-z0-9]', re.IGNORECASE)
IPV4_REGEX = re.compile('[1-2]?[0-9]?[0-9]\.[1-2]?[0-9]?[0-9]\.[1-2]?[0-9]?[0-9]\.[1-2]?[0-9]?[0-9]')
# Permissive IPv6 shape check; candidates are additionally validated
# with socket.inet_pton before being accepted.
IPV6_REGEX = re.compile('((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}'
                        '|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9'
                        'A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25['
                        '0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3'
                        '})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|['
                        '1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,'
                        '4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:'
                        '))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-'
                        '5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]'
                        '{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d'
                        '\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7}'
                        ')|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d'
                        '\d|[1-9]?\d)){3}))|:)))(%.+)?', re.IGNORECASE | re.S)
PDB_REGEX = re.compile('\.pdb$', re.IGNORECASE)
URL_REGEX = re.compile('http(s){0,1}://', re.IGNORECASE)
GET_POST_REGEX = re.compile('(GET|POST) ')
HOST_REGEX = re.compile('Host: ')
USERAGENT_REGEX = re.compile('(Mozilla|curl|Wget|Opera)/.+\(.+\;.+\)', re.IGNORECASE)
EMAIL_REGEX = re.compile('[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,}', re.IGNORECASE)
REGKEY_REGEX = re.compile('(HKEY_CLASSES_ROOT|HKEY_CURRENT_USER|HKEY_LOCAL_MACHINE|HKEY_USERS|HKEY_CURRENT_CONFIG|HKCR|HKCU|HKLM|HKU|HKCC)(/|\x5c\x5c)', re.IGNORECASE)
REGKEY2_REGEX = re.compile('(CurrentVersion|Software\\Microsoft|Windows NT|Microsoft\\Interface)')
FILE_REGEX = re.compile('\w+\.(EXE|DLL|BAT|PS|INI|PIF|SCR|DOC|DOCX|DOCM|PPT|PPTX|PPTS|XLS|XLT|XLSX|XLTX|XLSM|XLTM|ZIP|RAR)$', re.U | re.IGNORECASE)
# Known top-level domains (upper-cased) used to cut down false-positive
# domain and e-mail matches.
TLD = [
    'AC', 'ACADEMY', 'ACTOR', 'AD', 'AE', 'AERO', 'AF', 'AG', 'AGENCY', 'AI', 'AL', 'AM', 'AN', 'AO', 'AQ', 'AR',
    'ARPA', 'AS', 'ASIA', 'AT', 'AU', 'AW', 'AX', 'AZ', 'BA', 'BAR', 'BARGAINS', 'BB', 'BD', 'BE', 'BERLIN', 'BEST',
    'BF', 'BG', 'BH', 'BI', 'BID', 'BIKE', 'BIZ', 'BJ', 'BLUE', 'BM', 'BN', 'BO', 'BOUTIQUE', 'BR', 'BS', 'BT',
    'BUILD', 'BUILDERS', 'BUZZ', 'BV', 'BW', 'BY', 'BZ', 'CA', 'CAB', 'CAMERA', 'CAMP', 'CARDS', 'CAREERS', 'CAT',
    'CATERING', 'CC', 'CD', 'CENTER', 'CEO', 'CF', 'CG', 'CH', 'CHEAP', 'CHRISTMAS', 'CI', 'CK', 'CL', 'CLEANING',
    'CLOTHING', 'CLUB', 'CM', 'CN', 'CO', 'CODES', 'COFFEE', 'COM', 'COMMUNITY', 'COMPANY', 'COMPUTER', 'CONDOS',
    'CONSTRUCTION', 'CONTRACTORS', 'COOL', 'COOP', 'CR', 'CRUISES', 'CU', 'CV', 'CW', 'CX', 'CY', 'CZ', 'DANCE',
    'DATING', 'DE', 'DEMOCRAT', 'DIAMONDS', 'DIRECTORY', 'DJ', 'DK', 'DM', 'DNP', 'DO', 'DOMAINS', 'DZ', 'EC',
    'EDU', 'EDUCATION', 'EE', 'EG', 'EMAIL', 'ENTERPRISES', 'EQUIPMENT', 'ER', 'ES', 'ESTATE', 'ET', 'EU', 'EVENTS',
    'EXPERT', 'EXPOSED', 'FARM', 'FI', 'FISH', 'FJ', 'FK', 'FLIGHTS', 'FLORIST', 'FM', 'FO', 'FOUNDATION', 'FR',
    'FUTBOL', 'GA', 'GALLERY', 'GB', 'GD', 'GE', 'GF', 'GG', 'GH', 'GI', 'GIFT', 'GL', 'GLASS', 'GM', 'GN', 'GOV',
    'GP', 'GQ', 'GR', 'GRAPHICS', 'GS', 'GT', 'GU', 'GUITARS', 'GURU', 'GW', 'GY', 'HK', 'HM', 'HN', 'HOLDINGS',
    'HOLIDAY', 'HOUSE', 'HR', 'HT', 'HU', 'ID', 'IE', 'IL', 'IM', 'IMMOBILIEN', 'IN', 'INDUSTRIES', 'INFO', 'INK',
    'INSTITUTE', 'INT', 'INTERNATIONAL', 'IO', 'IQ', 'IR', 'IS', 'IT', 'JE', 'JM', 'JO', 'JOBS', 'JP', 'KAUFEN',
    'KE', 'KG', 'KH', 'KI', 'KIM', 'KITCHEN', 'KIWI', 'KM', 'KN', 'KOELN', 'KP', 'KR', 'KRED', 'KW', 'KY', 'KZ',
    'LA', 'LAND', 'LB', 'LC', 'LI', 'LIGHTING', 'LIMO', 'LINK', 'LK', 'LR', 'LS', 'LT', 'LU', 'LUXURY', 'LV', 'LY',
    'MA', 'MAISON', 'MANAGEMENT', 'MANGO', 'MARKETING', 'MC', 'MD', 'ME', 'MENU', 'MG', 'MH', 'MIL', 'MK', 'ML',
    'MM', 'MN', 'MO', 'MOBI', 'MODA', 'MONASH', 'MP', 'MQ', 'MR', 'MS', 'MT', 'MU', 'MUSEUM', 'MV', 'MW', 'MX',
    'MY', 'MZ', 'NA', 'NAGOYA', 'NAME', 'NC', 'NE', 'NET', 'NEUSTAR', 'NF', 'NG', 'NI', 'NINJA', 'NL', 'NO', 'NP',
    'NR', 'NU', 'NZ', 'OKINAWA', 'OM', 'ONION', 'ONL', 'ORG', 'PA', 'PARTNERS', 'PARTS', 'PE', 'PF', 'PG', 'PH',
    'PHOTO', 'PHOTOGRAPHY', 'PHOTOS', 'PICS', 'PINK', 'PK', 'PL', 'PLUMBING', 'PM', 'PN', 'POST', 'PR', 'PRO',
    'PRODUCTIONS', 'PROPERTIES', 'PS', 'PT', 'PUB', 'PW', 'PY', 'QA', 'QPON', 'RE', 'RECIPES', 'RED', 'RENTALS',
    'REPAIR', 'REPORT', 'REVIEWS', 'RICH', 'RO', 'RS', 'RU', 'RUHR', 'RW', 'SA', 'SB', 'SC', 'SD', 'SE', 'SEXY',
    'SG', 'SH', 'SHIKSHA', 'SHOES', 'SI', 'SINGLES', 'SJ', 'SK', 'SL', 'SM', 'SN', 'SO', 'SOCIAL', 'SOLAR',
    'SOLUTIONS', 'SR', 'ST', 'SU', 'SUPPLIES', 'SUPPLY', 'SUPPORT', 'SV', 'SX', 'SY', 'SYSTEMS', 'SZ', 'TATTOO',
    'TC', 'TD', 'TECHNOLOGY', 'TEL', 'TF', 'TG', 'TH', 'TIENDA', 'TIPS', 'TJ', 'TK', 'TL', 'TM', 'TN', 'TO',
    'TODAY', 'TOKYO', 'TOOLS', 'TP', 'TR', 'TRAINING', 'TRAVEL', 'TT', 'TV', 'TW', 'TZ', 'UA', 'UG', 'UK', 'UNO',
    'US', 'UY', 'UZ', 'VA', 'VACATIONS', 'VC', 'VE', 'VENTURES', 'VG', 'VI', 'VIAJES', 'VILLAS', 'VISION', 'VN',
    'VOTE', 'VOTING', 'VOTO', 'VOYAGE', 'VU', 'WANG', 'WATCH', 'WED', 'WF', 'WIEN', 'WIKI', 'WORKS', 'WS',
    'XN--3BST00M', 'XN--3DS443G', 'XN--3E0B707E', 'XN--45BRJ9C', 'XN--55QW42G', 'XN--55QX5D', 'XN--6FRZ82G',
    'XN--6QQ986B3XL', 'XN--80AO21A', 'XN--80ASEHDB', 'XN--80ASWG', 'XN--90A3AC', 'XN--C1AVG', 'XN--CG4BKI',
    'XN--CLCHC0EA0B2G2A9GCD', 'XN--D1ACJ3B', 'XN--FIQ228C5HS', 'XN--FIQ64B', 'XN--FIQS8S', 'XN--FIQZ9S',
    'XN--FPCRJ9C3D', 'XN--FZC2C9E2C', 'XN--GECRJ9C', 'XN--H2BRJ9C', 'XN--I1B6B1A6A2E', 'XN--IO0A7I', 'XN--J1AMH',
    'XN--J6W193G', 'XN--KPRW13D', 'XN--KPRY57D', 'XN--L1ACC', 'XN--LGBBAT1AD8J', 'XN--MGB9AWBF', 'XN--MGBA3A4F16A',
    'XN--MGBAAM7A8H', 'XN--MGBAB2BD', 'XN--MGBAYH7GPA', 'XN--MGBBH1A71E', 'XN--MGBC0A9AZCG', 'XN--MGBERP4A5D4AR',
    'XN--MGBX4CD0AB', 'XN--NGBC5AZD', 'XN--NQV7F', 'XN--NQV7FS00EMA', 'XN--O3CW4H', 'XN--OGBPF8FL', 'XN--P1AI',
    'XN--PGBS0DH', 'XN--Q9JYB4C', 'XN--RHQV96G', 'XN--S9BRJ9C', 'XN--UNUP4Y', 'XN--WGBH1C', 'XN--WGBL6A',
    'XN--XKC2AL3HYE2A', 'XN--XKC2DL3A5EE0H', 'XN--YFRO4I67O', 'XN--YGBI2AMMX', 'XN--ZFR164B', 'XXX', 'XYZ', 'YE',
    'YT', 'ZA', 'ZM', 'ZONE', 'ZW']
class Strings(Module):
    # Viper module metadata: command name, help text and credits
    cmd = 'strings'
    description = 'Extract strings from file'
    authors = ['nex', 'Brian Wallace', 'Christophe Vandeplas']
    def __init__(self):
        """Declare the module's command-line options on the shared parser."""
        super(Strings, self).__init__()
        self.parser.add_argument('-a', '--all', action='store_true', help='Print all strings')
        self.parser.add_argument('-F', '--files', action='store_true', help='Extract filenames from strings')
        self.parser.add_argument('-H', '--hosts', action='store_true', help='Extract IP addresses and domains from strings')
        self.parser.add_argument('-N', '--network', action='store_true', help='Extract various network related strings')
        self.parser.add_argument('-I', '--interesting', action='store_true', help='Extract various interesting strings')
        self.parser.add_argument('-s', '--scan', action='store_true', help='Scan all files in the project with all the scanners')
def extract_hosts(self, strings):
results = []
for entry in strings:
to_add = False
if IPV4_REGEX.search(entry):
to_add = True
elif IPV6_REGEX.search(entry):
try:
inet_pton(AF_INET6, entry)
except socket_error:
continue
else:
to_add = True
elif DOMAIN_REGEX.search(entry):
if entry[entry.rfind('.') + 1:].upper() in TLD:
to_add = True
if to_add:
if entry not in results:
results.append(entry)
return results
def extract_network(self, strings):
results = []
for entry in strings:
to_add = False
if URL_REGEX.search(entry):
to_add = True
if GET_POST_REGEX.search(entry):
to_add = True
if HOST_REGEX.search(entry):
to_add = True
if USERAGENT_REGEX.search(entry):
to_add = True
if EMAIL_REGEX.search(entry):
if entry[entry.rfind('.') + 1:].upper() in TLD:
to_add = True
if to_add:
if entry not in results:
results.append(entry)
return results
def extract_files(self, strings):
results = []
for entry in strings:
to_add = False
if FILE_REGEX.search(entry):
to_add = True
if to_add:
if entry not in results:
results.append(entry)
return results
def extract_interesting(self, strings):
results = []
for entry in strings:
to_add = False
if PDB_REGEX.search(entry):
to_add = True
if REGKEY_REGEX.search(entry):
to_add = True
if REGKEY2_REGEX.search(entry):
to_add = True
if to_add:
if entry not in results:
results.append(entry)
return results
def get_strings(self, f, min=4):
'''
String implementation see http://stackoverflow.com/a/17197027/6880819
Extended with Unicode support
'''
results = []
result = ""
counter = 1
wide_word = False
for c in f.data:
# already have something, check if the second byte is a null
if counter == 2 and c == "\x00":
wide_word = True
counter += 1
continue
# every 2 chars we allow a 00
if wide_word and c == "\x00" and not counter % 2:
counter += 1
continue
# valid char, go to next - newlines are to be considered as the end of the string
if c in string.printable and c not in ['\n', '\r']:
result += c
counter += 1
continue
if len(result) >= min:
results.append(result)
# reset the variables
result = ""
counter = 1
wide_word = False
if len(result) >= min: # catch result at EOF
results.append(result)
return results
def process_strings(self, strings, sample_name=""):
if sample_name:
prefix = '{} - '.format(sample_name)
else:
prefix = ''
if self.args.all:
self.log('success', '{}All strings:'.format(prefix))
for entry in strings:
self.log('', entry)
if self.args.hosts:
results = self.extract_hosts(strings)
if results:
self.log('success', '{}IP addresses and domains:'.format(prefix))
for result in results:
self.log('item', result)
if self.args.network:
results = self.extract_network(strings)
if results:
self.log('success', '{}Network related:'.format(prefix))
for result in results:
self.log('item', result)
if self.args.files:
results = self.extract_files(strings)
if results:
self.log('success', '{}Filenames:'.format(prefix))
for result in results:
self.log('item', result)
if self.args.interesting:
results = self.extract_interesting(strings)
if results:
self.log('success', '{}Various interesting strings:'.format(prefix))
for result in results:
self.log('item', result)
def run(self):
super(Strings, self).run()
if self.args is None:
return
if not (self.args.all or self.args.files or self.args.hosts or self.args.network or self.args.interesting):
self.log('error', 'At least one of the parameters is required')
self.usage()
return
if self.args.scan:
db = Database()
samples = db.find(key='all')
for sample in samples:
sample_path = get_sample_path(sample.sha256)
strings = self.get_strings(File(sample_path))
self.process_strings(strings, sample.name)
else:
if not __sessions__.is_set():
self.log('error', "No open session")
return
if os.path.exists(__sessions__.current.file.path):
strings = self.get_strings(__sessions__.current.file)
self.process_strings(strings)
|
MeteorAdminz/viper
|
viper/modules/strings.py
|
Python
|
bsd-3-clause
| 13,823
|
[
"Brian"
] |
992f42bbbf147e23a199a686e6f8a50bce12908f9e353330bb92ecc75bb63f9b
|
#
# Copyright (C) 2017-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Testmodule for the observable accumulator.
"""
import sys
import unittest as ut
import numpy as np
import espressomd # pylint: disable=import-error
import espressomd.observables
import espressomd.accumulators
class AccumulatorTest(ut.TestCase):

    """
    Check the mean/variance accumulator against plain numpy results.
    """

    def setUp(self):
        # Fixed seed so the sampled target positions are reproducible.
        np.random.seed(seed=162)
        self.system = espressomd.System(box_l=[10.0] * 3)
        self.system.cell_system.skin = 0.4
        self.system.time_step = 0.01
        self.system.part.add(id=0, pos=[0.0, 0.0, 0.0])
        self.system.integrator.run(steps=0)
        self.pos_obs = espressomd.observables.ParticlePositions(ids=(0,))
        self.pos_obs_acc = espressomd.accumulators.MeanVarianceCalculator(obs=self.pos_obs)
        self.system.auto_update_accumulators.add(self.pos_obs_acc)
        # Positions the particle will be moved through during the test.
        self.positions = np.copy(self.system.box_l * np.random.rand(10, 3))

    def test_accumulator(self):
        """Accumulated mean/variance must equal numpy's mean()/var(ddof=1)."""
        for target in self.positions:
            self.system.part[0].pos = target
            self.system.integrator.run(1)
        self.assertEqual(self.pos_obs, self.pos_obs_acc.get_params()['obs'])
        np.testing.assert_allclose(self.pos_obs_acc.get_mean(),
                                   np.mean(self.positions, axis=0), atol=1e-4)
        np.testing.assert_allclose(self.pos_obs_acc.get_variance(),
                                   np.var(self.positions, axis=0, ddof=1),
                                   atol=1e-4)
if __name__ == "__main__":
    # Run the test case through a runner so the process exit status
    # reflects whether all tests passed.
    tests = ut.TestLoader().loadTestsFromTestCase(AccumulatorTest)
    suite = ut.TestSuite(tests)
    result = ut.TextTestRunner(verbosity=4).run(suite)
    sys.exit(not result.wasSuccessful())
|
hmenke/espresso
|
testsuite/python/accumulator.py
|
Python
|
gpl-3.0
| 2,519
|
[
"ESPResSo"
] |
ac61f1355252cfc55a09fa1625df052508485b71d1c19ea6f5d3e47cf4629e34
|
""" Test functions for stats module
"""
import warnings
import re
import sys
import pickle
import os
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_warns,
assert_array_less, suppress_warnings)
import pytest
from pytest import raises as assert_raises
import numpy
import numpy as np
from numpy import typecodes, array
from numpy.lib.recfunctions import rec_append_fields
from scipy import special
from scipy._lib._util import check_random_state
from scipy.integrate import IntegrationWarning
import scipy.stats as stats
from scipy.stats._distn_infrastructure import argsreduce
import scipy.stats.distributions
from scipy.special import xlogy
from .test_continuous_basic import distcont
# python -OO strips docstrings: sys.flags.optimize is 2 under -OO, so this
# flag lets tests that inspect docstrings detect that mode and skip.
DOCSTRINGS_STRIPPED = sys.flags.optimize > 1
def _assert_hasattr(a, b, msg=None):
if msg is None:
msg = '%s does not have attribute %s' % (a, b)
assert_(hasattr(a, b), msg=msg)
def test_api_regression():
    # Regression test for gh-3802
    # (https://github.com/scipy/scipy/issues/3802):
    # scipy.stats.distributions must keep exposing the *_gen distribution
    # classes; f_gen is checked as a representative.
    _assert_hasattr(scipy.stats.distributions, 'f_gen')
def check_vonmises_pdf_periodic(k, l, s, x):
    """The von Mises pdf must be periodic with period 2*pi*scale."""
    frozen = stats.vonmises(k, loc=l, scale=s)
    period = 2 * numpy.pi * s
    assert_almost_equal(frozen.pdf(x), frozen.pdf(x % period))
def check_vonmises_cdf_periodic(k, l, s, x):
    """The fractional part of the von Mises cdf must be periodic."""
    frozen = stats.vonmises(k, loc=l, scale=s)
    period = 2 * numpy.pi * s
    assert_almost_equal(frozen.cdf(x) % 1, frozen.cdf(x % period) % 1)
def test_vonmises_pdf_periodic():
    # Exercise several shape values and sample points; each helper call
    # checks one (loc, scale) combination for pdf/cdf periodicity.
    for k in [0.1, 1, 101]:
        for x in [0, 1, numpy.pi, 10, 100]:
            for loc, scale in [(0, 1), (1, 1), (0, 10)]:
                check_vonmises_pdf_periodic(k, loc, scale, x)
            for loc, scale in [(0, 1), (1, 1), (0, 10)]:
                check_vonmises_cdf_periodic(k, loc, scale, x)
def test_vonmises_line_support():
    """vonmises_line must be supported exactly on [-pi, pi]."""
    support = (stats.vonmises_line.a, stats.vonmises_line.b)
    assert_equal(support, (-np.pi, np.pi))
def test_vonmises_numerical():
    """Even for very large kappa the cdf at the center must be 0.5."""
    assert_almost_equal(stats.vonmises(800).cdf(0), 0.5)
@pytest.mark.parametrize('dist',
                         ['alpha', 'betaprime',
                          'fatiguelife', 'invgamma', 'invgauss', 'invweibull',
                          'johnsonsb', 'levy', 'levy_l', 'lognorm', 'gilbrat',
                          'powerlognorm', 'rayleigh', 'wald'])
def test_support(dist):
    """gh-6235: pdf must vanish (and logpdf be -inf) at the support edges."""
    shape_args = dict(distcont)[dist]
    distfn = getattr(stats, dist)
    for endpoint in (distfn.a, distfn.b):
        assert_almost_equal(distfn.pdf(endpoint, *shape_args), 0)
        assert_equal(distfn.logpdf(endpoint, *shape_args), -np.inf)
class TestRandInt(object):
    """Tests for the discrete uniform distribution ``stats.randint``."""

    def setup_method(self):
        np.random.seed(1234)

    def test_rvs(self):
        # Samples must fall in [low, high) and keep the requested shape.
        samples = stats.randint.rvs(5, 30, size=100)
        assert_(numpy.all((samples >= 5) & (samples < 30)))
        assert_(len(samples) == 100)
        samples = stats.randint.rvs(5, 30, size=(2, 50))
        assert_(numpy.shape(samples) == (2, 50))
        assert_(samples.dtype.char in typecodes['AllInteger'])
        single = stats.randint.rvs(15, 46)
        assert_((single >= 15) & (single < 46))
        assert_(isinstance(single, numpy.ScalarType), msg=repr(type(single)))
        frozen_samples = stats.randint(15, 46).rvs(3)
        assert_(frozen_samples.dtype.char in typecodes['AllInteger'])

    def test_pdf(self):
        # The pmf of randint(5, 30) is flat: 1/25 inside [5, 30), 0 outside.
        support = numpy.r_[0:36]
        expected = numpy.where((support >= 5) & (support < 30),
                               1.0 / (30 - 5), 0)
        assert_array_almost_equal(stats.randint.pmf(support, 5, 30), expected)

    def test_cdf(self):
        # The cdf is a staircase reaching 1 at the top of the support.
        x = np.linspace(0, 36, 100)
        k = numpy.floor(x)
        expected = numpy.select([k >= 30, k >= 5],
                                [1.0, (k - 5.0 + 1) / (30 - 5.0)], 0)
        assert_array_almost_equal(stats.randint.cdf(x, 5, 30), expected,
                                  decimal=12)
class TestBinom(object):
    """Tests for the binomial distribution."""

    def setup_method(self):
        np.random.seed(1234)

    def test_rvs(self):
        samples = stats.binom.rvs(10, 0.75, size=(2, 50))
        assert_(numpy.all(samples >= 0) & numpy.all(samples <= 10))
        assert_(numpy.shape(samples) == (2, 50))
        assert_(samples.dtype.char in typecodes['AllInteger'])
        single = stats.binom.rvs(10, 0.75)
        assert_(isinstance(single, int))
        frozen_samples = stats.binom(10, 0.75).rvs(3)
        assert_(isinstance(frozen_samples, numpy.ndarray))
        assert_(frozen_samples.dtype.char in typecodes['AllInteger'])

    def test_pmf(self):
        # Regression test for Ticket #1842: the degenerate p=1 and p=0
        # cases must yield a pmf of exactly 1 at the certain outcome.
        assert_allclose(stats.binom.pmf(100, 100, 1), 1.0, rtol=1e-15, atol=0)
        assert_allclose(stats.binom.pmf(0, 100, 0), 1.0, rtol=1e-15, atol=0)

    def test_entropy(self):
        # binom(2, 0.5) has pmf (1/4, 1/2, 1/4); compare the entropy
        # against the explicit Shannon entropy of that vector.
        probs = np.array([0.25, 0.5, 0.25])
        expected = -sum(xlogy(probs, probs))
        assert_allclose(stats.binom(2, 0.5).entropy(), expected)
        # Degenerate distributions carry no entropy.
        assert_equal(stats.binom(2, 0.0).entropy(), 0.0)
        assert_equal(stats.binom(2, 1.0).entropy(), 0.0)

    def test_warns_p0(self):
        # No spurious RuntimeWarnings may be emitted for p=0; see gh-3817.
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            assert_equal(stats.binom(n=2, p=0).mean(), 0)
            assert_equal(stats.binom(n=2, p=0).std(), 0)
class TestBernoulli(object):
    """Tests for the Bernoulli distribution."""

    def setup_method(self):
        np.random.seed(1234)

    def test_rvs(self):
        samples = stats.bernoulli.rvs(0.75, size=(2, 50))
        assert_(numpy.all(samples >= 0) & numpy.all(samples <= 1))
        assert_(numpy.shape(samples) == (2, 50))
        assert_(samples.dtype.char in typecodes['AllInteger'])
        single = stats.bernoulli.rvs(0.75)
        assert_(isinstance(single, int))
        frozen_samples = stats.bernoulli(0.75).rvs(3)
        assert_(isinstance(frozen_samples, numpy.ndarray))
        assert_(frozen_samples.dtype.char in typecodes['AllInteger'])

    def test_entropy(self):
        # Entropy of bernoulli(p) is -p*log(p) - (1-p)*log(1-p).
        expected = -0.25 * np.log(0.25) - 0.75 * np.log(0.75)
        assert_allclose(stats.bernoulli(0.25).entropy(), expected)
        # The degenerate cases p=0 and p=1 have zero entropy.
        assert_equal(stats.bernoulli(0.0).entropy(), 0.0)
        assert_equal(stats.bernoulli(1.0).entropy(), 0.0)
class TestBradford(object):
    # gh-6216: ppf must invert cdf even for tiny quantiles.
    def test_cdf_ppf(self):
        c = 0.1
        x = np.logspace(-20, -4)
        roundtrip = stats.bradford.ppf(stats.bradford.cdf(x, c), c)
        assert_allclose(x, roundtrip)
class TestNBinom(object):
    """Tests for the negative binomial distribution."""

    def setup_method(self):
        np.random.seed(1234)

    def test_rvs(self):
        samples = stats.nbinom.rvs(10, 0.75, size=(2, 50))
        assert_(numpy.all(samples >= 0))
        assert_(numpy.shape(samples) == (2, 50))
        assert_(samples.dtype.char in typecodes['AllInteger'])
        single = stats.nbinom.rvs(10, 0.75)
        assert_(isinstance(single, int))
        frozen_samples = stats.nbinom(10, 0.75).rvs(3)
        assert_(isinstance(frozen_samples, numpy.ndarray))
        assert_(frozen_samples.dtype.char in typecodes['AllInteger'])

    def test_pmf(self):
        # Regression test for ticket 1779: exp(logpmf) must agree with pmf.
        assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)),
                        stats.nbinom.pmf(700, 721, 0.52))
        # logpmf(0, 1, 1) must be 0, not nan (regression test for gh-4029).
        assert_equal(stats.nbinom.logpmf(0, 1, 1), 0)
class TestGenInvGauss(object):
    """Tests for the generalized inverse Gaussian distribution.

    The rvs tests exercise the different sampling branches of the
    implementation and validate the samples with a Kolmogorov-Smirnov
    goodness-of-fit test (accept when p > 0.05).
    """
    def setup_method(self):
        np.random.seed(1234)
    @pytest.mark.slow
    def test_rvs_with_mode_shift(self):
        # ratio_unif w/ mode shift
        gig = stats.geninvgauss(2.3, 1.5)
        _, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
        assert_equal(p > 0.05, True)
    @pytest.mark.slow
    def test_rvs_without_mode_shift(self):
        # ratio_unif w/o mode shift
        gig = stats.geninvgauss(0.9, 0.75)
        _, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
        assert_equal(p > 0.05, True)
    @pytest.mark.slow
    def test_rvs_new_method(self):
        # new algorithm of Hoermann / Leydold
        gig = stats.geninvgauss(0.1, 0.2)
        _, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
        assert_equal(p > 0.05, True)
    @pytest.mark.slow
    def test_rvs_p_zero(self):
        def my_ks_check(p, b):
            # helper: True when a KS test does not reject the sample
            gig = stats.geninvgauss(p, b)
            rvs = gig.rvs(size=1500, random_state=1234)
            return stats.kstest(rvs, gig.cdf)[1] > 0.05
        # boundary cases when p = 0
        assert_equal(my_ks_check(0, 0.2), True)  # new algo
        assert_equal(my_ks_check(0, 0.9), True)  # ratio_unif w/o shift
        assert_equal(my_ks_check(0, 1.5), True)  # ratio_unif with shift
    def test_rvs_negative_p(self):
        # if p negative, return inverse
        assert_equal(
            stats.geninvgauss(-1.5, 2).rvs(size=10, random_state=1234),
            1 / stats.geninvgauss(1.5, 2).rvs(size=10, random_state=1234))
    def test_invgauss(self):
        # test that invgauss is special case
        ig = stats.geninvgauss.rvs(size=1500, p=-0.5, b=1, random_state=1234)
        assert_equal(stats.kstest(ig, 'invgauss', args=[1])[1] > 0.15, True)
        # test pdf and cdf
        mu, x = 100, np.linspace(0.01, 1, 10)
        pdf_ig = stats.geninvgauss.pdf(x, p=-0.5, b=1 / mu, scale=mu)
        assert_allclose(pdf_ig, stats.invgauss(mu).pdf(x))
        cdf_ig = stats.geninvgauss.cdf(x, p=-0.5, b=1 / mu, scale=mu)
        assert_allclose(cdf_ig, stats.invgauss(mu).cdf(x))
    def test_pdf_R(self):
        # test against R package GIGrvg
        # x <- seq(0.01, 5, length.out = 10)
        # GIGrvg::dgig(x, 0.5, 1, 1)
        vals_R = np.array([2.081176820e-21, 4.488660034e-01, 3.747774338e-01,
                           2.693297528e-01, 1.905637275e-01, 1.351476913e-01,
                           9.636538981e-02, 6.909040154e-02, 4.978006801e-02,
                           3.602084467e-02])
        x = np.linspace(0.01, 5, 10)
        assert_allclose(vals_R, stats.geninvgauss.pdf(x, 0.5, 1))
    def test_pdf_zero(self):
        # pdf at 0 is 0, needs special treatment to avoid 1/x in pdf
        assert_equal(stats.geninvgauss.pdf(0, 0.5, 0.5), 0)
        # if x is large and p is moderate, make sure that pdf does not
        # overflow because of x**(p-1); exp(-b*x) forces pdf to zero
        assert_equal(stats.geninvgauss.pdf(2e6, 50, 2), 0)
class TestNormInvGauss(object):
    """Tests for the normal inverse Gaussian distribution."""

    def setup_method(self):
        np.random.seed(1234)

    def test_cdf_R(self):
        # Reference values generated with R's GeneralizedHyperbolic package:
        #   require("GeneralizedHyperbolic")
        #   x_test <- c(-7, -5, 0, 8, 15)
        #   GeneralizedHyperbolic::pnig(x_test, mu = 0, a = 1, b = 0.5)
        r_cdf = np.array([8.034920282e-07, 2.512671945e-05, 3.186661051e-01,
                          9.988650664e-01, 9.999848769e-01])
        x_test = np.array([-7, -5, 0, 8, 15])
        assert_allclose(stats.norminvgauss.cdf(x_test, a=1, b=0.5), r_cdf,
                        atol=1e-9)

    def test_pdf_R(self):
        # Reference values from R's dnig at the same points as test_cdf_R.
        r_pdf = np.array([1.359600783e-06, 4.413878805e-05, 4.555014266e-01,
                          7.450485342e-04, 8.917889931e-06])
        x_test = np.array([-7, -5, 0, 8, 15])
        assert_allclose(stats.norminvgauss.pdf(x_test, a=1, b=0.5), r_pdf,
                        atol=1e-9)

    def test_stats(self):
        # Closed-form moments with gamma = sqrt(a**2 - b**2).
        a, b = 1, 0.5
        gamma = np.sqrt(a**2 - b**2)
        expected = (b / gamma, a**2 / gamma**3,
                    3.0 * b / (a * np.sqrt(gamma)),
                    3.0 * (1 + 4 * b**2 / a**2) / gamma)
        assert_equal(expected, stats.norminvgauss.stats(a, b, moments='mvsk'))

    def test_ppf(self):
        # ppf and cdf must be inverses across the probability range.
        a, b = 1, 0.5
        q = np.array([0.001, 0.5, 0.999])
        assert_allclose(q, stats.norminvgauss.cdf(
            stats.norminvgauss.ppf(q, a, b), a, b))
class TestGeom(object):
    """Tests for the geometric distribution."""

    def setup_method(self):
        np.random.seed(1234)

    def test_rvs(self):
        samples = stats.geom.rvs(0.75, size=(2, 50))
        assert_(numpy.all(samples >= 0))
        assert_(numpy.shape(samples) == (2, 50))
        assert_(samples.dtype.char in typecodes['AllInteger'])
        single = stats.geom.rvs(0.75)
        assert_(isinstance(single, int))
        frozen_samples = stats.geom(0.75).rvs(3)
        assert_(isinstance(frozen_samples, numpy.ndarray))
        assert_(frozen_samples.dtype.char in typecodes['AllInteger'])

    def test_pmf(self):
        # P(X = k) = p * (1-p)**(k-1); for p = 0.5 that is 2**-k.
        assert_array_almost_equal(stats.geom.pmf([1, 2, 3], 0.5),
                                  [0.5, 0.25, 0.125])

    def test_logpmf(self):
        # Regression test for ticket 1793: log(pmf) must agree with logpmf.
        assert_allclose(np.log(stats.geom.pmf([1, 2, 3], 0.5)),
                        stats.geom.logpmf([1, 2, 3], 0.5),
                        rtol=1e-15, atol=0)
        # Regression test for gh-4028: logpmf(1, 1) must be 0, not nan.
        assert_equal(stats.geom.logpmf(1, 1), 0.0)

    def test_cdf_sf(self):
        expected = array([0.5, 0.75, 0.875])
        assert_array_almost_equal(stats.geom.cdf([1, 2, 3], 0.5), expected)
        assert_array_almost_equal(stats.geom.sf([1, 2, 3], 0.5), 1 - expected)

    def test_logcdf_logsf(self):
        expected = array([0.5, 0.75, 0.875])
        assert_array_almost_equal(stats.geom.logcdf([1, 2, 3], 0.5),
                                  np.log(expected))
        assert_array_almost_equal(stats.geom.logsf([1, 2, 3], 0.5),
                                  np.log1p(-expected))

    def test_ppf(self):
        assert_array_almost_equal(stats.geom.ppf([0.5, 0.75, 0.875], 0.5),
                                  array([1.0, 2.0, 3.0]))

    def test_ppf_underflow(self):
        # Tiny probabilities must not make the ppf underflow below 1.
        assert_allclose(stats.geom.ppf(1e-20, 1e-20), 1.0, atol=1e-14)
class TestPlanck(object):
    """Tests for the Planck (discrete exponential) distribution."""

    def setup_method(self):
        np.random.seed(1234)

    def test_sf(self):
        expected = array([4.5399929762484854e-05,
                          3.0590232050182579e-07,
                          2.0611536224385579e-09])
        assert_array_almost_equal(stats.planck.sf([1, 2, 3], 5.), expected)

    def test_logsf(self):
        # logsf must stay finite and exact even for huge arguments
        # (the expected values are -lambda*(k+1)).
        expected = array([-1001000., -2001000., -3001000.])
        assert_array_almost_equal(
            stats.planck.logsf([1000., 2000., 3000.], 1000.), expected)
class TestGennorm(object):
    """The generalized normal reduces to known laws for special beta."""

    def test_laplace(self):
        # beta = 1 recovers the Laplace distribution.
        points = [1, 2, 3]
        assert_almost_equal(stats.gennorm.pdf(points, 1),
                            stats.laplace.pdf(points))

    def test_norm(self):
        # beta = 2 recovers the normal distribution with scale 1/sqrt(2).
        points = [1, 2, 3]
        assert_almost_equal(stats.gennorm.pdf(points, 2),
                            stats.norm.pdf(points, scale=2**-.5))
class TestHalfgennorm(object):
    """The half generalized normal reduces to known special cases."""

    def test_expon(self):
        # beta = 1 recovers the exponential distribution.
        points = [1, 2, 3]
        assert_almost_equal(stats.halfgennorm.pdf(points, 1),
                            stats.expon.pdf(points))

    def test_halfnorm(self):
        # beta = 2 recovers the half normal with scale 1/sqrt(2).
        points = [1, 2, 3]
        assert_almost_equal(stats.halfgennorm.pdf(points, 2),
                            stats.halfnorm.pdf(points, scale=2**-.5))

    def test_gennorm(self):
        # On the positive half line halfgennorm is twice gennorm.
        points = [1, 2, 3]
        assert_almost_equal(stats.halfgennorm.pdf(points, .497324),
                            2 * stats.gennorm.pdf(points, .497324))
class TestTruncnorm(object):
    """Tests for the truncated normal distribution, mostly regression
    tests for accuracy in the extreme tails (gh-1489, gh-2477, gh-9403)."""
    def setup_method(self):
        np.random.seed(1234)
    def test_ppf_ticket1131(self):
        # out-of-range probabilities must map to nan, 0 -> lower bound,
        # 1 -> upper bound (with loc=3, scale=2 the bounds are 1 and 5)
        vals = stats.truncnorm.ppf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
                                   loc=[3]*7, scale=2)
        expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])
        assert_array_almost_equal(vals, expected)
    def test_isf_ticket1131(self):
        # mirror image of test_ppf_ticket1131 for the inverse sf
        vals = stats.truncnorm.isf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
                                   loc=[3]*7, scale=2)
        expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])
        assert_array_almost_equal(vals, expected)
    def test_gh_2477_small_values(self):
        # Check a case that worked in the original issue.
        low, high = -11, -10
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
        # Check a case that failed in the original issue.
        low, high = 10, 11
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
    # @pytest.mark.xfail(reason="truncnorm rvs is known to fail at extreme tails")
    def test_gh_2477_large_values(self):
        # Check a case that used to fail because of extreme tailness.
        low, high = 100, 101
        with np.errstate(divide='ignore'):
            x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        print(low, x.min(), x.max(), high)
        # NOTE(review): the trailing ", str([...])" below builds a tuple and
        # discards it -- the message was presumably meant to be assert_'s
        # msg argument; as written it has no effect.
        assert_(low <= x.min() <= x.max() <= high), str([low, high, x])
        # Check some additional extreme tails
        low, high = 1000, 1001
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
        low, high = 10000, 10001
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
    def test_gh_9403_nontail_values(self):
        # check cdf/sf/pdf/ppf for supports entirely inside one tail,
        # for both the positive and the mirrored negative interval
        for low, high in [[3, 4], [-4, -3]]:
            xvals = np.array([-np.inf, low, high, np.inf])
            xmid = (high+low)/2.0
            cdfs = stats.truncnorm.cdf(xvals, low, high)
            sfs = stats.truncnorm.sf(xvals, low, high)
            pdfs = stats.truncnorm.pdf(xvals, low, high)
            expected_cdfs = np.array([0, 0, 1, 1])
            expected_sfs = np.array([1.0, 1.0, 0.0, 0.0])
            expected_pdfs = np.array([0, 3.3619772, 0.1015229, 0])
            if low < 0:
                # mirrored interval: pdf values swap at the endpoints
                expected_pdfs = np.array([0, 0.1015229, 3.3619772, 0])
            assert_almost_equal(cdfs, expected_cdfs)
            assert_almost_equal(sfs, expected_sfs)
            assert_almost_equal(pdfs, expected_pdfs)
            assert_almost_equal(np.log(expected_pdfs[1]/expected_pdfs[2]), low+0.5)
            pvals = np.array([0, 0.5, 1.0])
            ppfs = stats.truncnorm.ppf(pvals, low, high)
            expected_ppfs = np.array([low, np.sign(low)*3.1984741, high])
            assert_almost_equal(ppfs, expected_ppfs)
            if low < 0:
                assert_almost_equal(stats.truncnorm.sf(xmid, low, high), 0.8475544278436675)
                assert_almost_equal(stats.truncnorm.cdf(xmid, low, high), 0.1524455721563326)
            else:
                assert_almost_equal(stats.truncnorm.cdf(xmid, low, high), 0.8475544278436675)
                assert_almost_equal(stats.truncnorm.sf(xmid, low, high), 0.1524455721563326)
            pdf = stats.truncnorm.pdf(xmid, low, high)
            assert_almost_equal(np.log(pdf/expected_pdfs[2]), (xmid+0.25)/2)
    def test_gh_9403_medium_tail_values(self):
        # same structure as the nontail test, further out in the tails
        for low, high in [[39, 40], [-40, -39]]:
            xvals = np.array([-np.inf, low, high, np.inf])
            xmid = (high+low)/2.0
            cdfs = stats.truncnorm.cdf(xvals, low, high)
            sfs = stats.truncnorm.sf(xvals, low, high)
            pdfs = stats.truncnorm.pdf(xvals, low, high)
            expected_cdfs = np.array([0, 0, 1, 1])
            expected_sfs = np.array([1.0, 1.0, 0.0, 0.0])
            expected_pdfs = np.array([0, 3.90256074e+01, 2.73349092e-16, 0])
            if low < 0:
                expected_pdfs = np.array([0, 2.73349092e-16, 3.90256074e+01, 0])
            assert_almost_equal(cdfs, expected_cdfs)
            assert_almost_equal(sfs, expected_sfs)
            assert_almost_equal(pdfs, expected_pdfs)
            assert_almost_equal(np.log(expected_pdfs[1]/expected_pdfs[2]), low+0.5)
            pvals = np.array([0, 0.5, 1.0])
            ppfs = stats.truncnorm.ppf(pvals, low, high)
            expected_ppfs = np.array([low, np.sign(low)*39.01775731, high])
            assert_almost_equal(ppfs, expected_ppfs)
            cdfs = stats.truncnorm.cdf(ppfs, low, high)
            assert_almost_equal(cdfs, pvals)
            if low < 0:
                assert_almost_equal(stats.truncnorm.sf(xmid, low, high), 0.9999999970389126)
                assert_almost_equal(stats.truncnorm.cdf(xmid, low, high), 2.961048103554866e-09)
            else:
                assert_almost_equal(stats.truncnorm.cdf(xmid, low, high), 0.9999999970389126)
                assert_almost_equal(stats.truncnorm.sf(xmid, low, high), 2.961048103554866e-09)
            pdf = stats.truncnorm.pdf(xmid, low, high)
            assert_almost_equal(np.log(pdf/expected_pdfs[2]), (xmid+0.25)/2)
            # symmetry: cdf/sf/pdf on [low, high] must mirror [-high, -low]
            xvals = np.linspace(low, high, 11)
            xvals2 = -xvals[::-1]
            assert_almost_equal(stats.truncnorm.cdf(xvals, low, high), stats.truncnorm.sf(xvals2, -high, -low)[::-1])
            assert_almost_equal(stats.truncnorm.sf(xvals, low, high), stats.truncnorm.cdf(xvals2, -high, -low)[::-1])
            assert_almost_equal(stats.truncnorm.pdf(xvals, low, high), stats.truncnorm.pdf(xvals2, -high, -low)[::-1])
    def _test_moments_one_range(self, a, b, expected):
        # helper: compare (mean, var, skew, kurt) on one truncation range
        m0, v0, s0, k0 = expected[:4]
        m, v, s, k = stats.truncnorm.stats(a, b, moments='mvsk')
        assert_almost_equal(m, m0)
        assert_almost_equal(v, v0)
        assert_almost_equal(s, s0)
        assert_almost_equal(k, k0)
    @pytest.mark.xfail_on_32bit("reduced accuracy with 32bit platforms.")
    def test_moments(self):
        # Values validated by changing TRUNCNORM_TAIL_X so as to evaluate
        # using both the _norm_XXX() and _norm_logXXX() functions, and by
        # removing the _stats and _munp methods in truncnorm to force
        # numerical quadrature.
        self._test_moments_one_range(-30, 30, [0, 1, 0.0, 0.0])
        self._test_moments_one_range(-10, 10, [0, 1, 0.0, 0.0])
        self._test_moments_one_range(-3, 3, [0, 0.97333692, 0.0, -0.17111444])
        self._test_moments_one_range(-2, 2, [0, 0.7737413, 0.0, -0.63446328])
        self._test_moments_one_range(0, np.inf, [0.79788456, 0.36338023, 0.99527175, 0.8691773])
        self._test_moments_one_range(-1, 3, [0.2827861, 0.61614174, 0.53930185, -0.20582065])
        self._test_moments_one_range(-3, 1, [-0.2827861, 0.61614174, -0.53930185, -0.20582065])
        self._test_moments_one_range(-10, -9, [-9.10845629, 0.01144881, -1.89856073, 5.07334611])
        self._test_moments_one_range(-20, -19, [-19.05234395, 0.00272507, -1.9838686, 5.87208674])
        self._test_moments_one_range(-30, -29, [-29.03440124, 0.00118066, -1.99297727, 5.9303358])
        self._test_moments_one_range(-40, -39, [-39.02560741993262, 0.0006548, -1.99631464, 5.61677584])
        self._test_moments_one_range(39, 40, [39.02560741993262, 0.0006548, 1.99631464, 5.61677584])
    def test_9902_moments(self):
        # half-normal case: mean sqrt(2/pi), variance 1 - 2/pi
        m, v = stats.truncnorm.stats(0, np.inf, moments='mv')
        assert_almost_equal(m, 0.79788456)
        assert_almost_equal(v, 0.36338023)
    def test_gh_1489_trac_962_rvs(self):
        # Check the original example.
        low, high = 10, 15
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
class TestHypergeom(object):
    """Tests for the hypergeometric distribution, with emphasis on
    numerical precision for very large population sizes."""
    def setup_method(self):
        np.random.seed(1234)
    def test_rvs(self):
        vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))
        assert_(numpy.all(vals >= 0) &
                numpy.all(vals <= 3))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.hypergeom.rvs(20, 3, 10)
        assert_(isinstance(val, int))
        val = stats.hypergeom(20, 3, 10).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_precision(self):
        # comparison number from mpmath
        M = 2500
        n = 50
        N = 500
        tot = M
        good = n
        hgpmf = stats.hypergeom.pmf(2, tot, good, N)
        assert_almost_equal(hgpmf, 0.0010114963068932233, 11)
    def test_args(self):
        # test correct output for corner cases of arguments
        # see gh-2325
        assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
    def test_cdf_above_one(self):
        # for some values of parameters, hypergeom cdf was >1, see gh-2238
        assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0)
    def test_precision2(self):
        # Test hypergeom precision for large numbers. See #1218.
        # Results compared with those from R.
        oranges = 9.9e4
        pears = 1.1e5
        fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4
        quantile = 2e4
        res = [stats.hypergeom.sf(quantile, oranges + pears, oranges, eaten)
               for eaten in fruits_eaten]
        expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,
                             8.265601e-11, 0.1237904, 1])
        assert_allclose(res, expected, atol=0, rtol=5e-7)
        # Test with array_like first argument
        quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]
        res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)
        expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]
        assert_allclose(res2, expected2, atol=0, rtol=5e-7)
    def test_entropy(self):
        # Simple tests of entropy.
        hg = stats.hypergeom(4, 1, 1)
        h = hg.entropy()
        expected_p = np.array([0.75, 0.25])
        expected_h = -np.sum(xlogy(expected_p, expected_p))
        assert_allclose(h, expected_h)
        # degenerate case: a single possible outcome has zero entropy
        hg = stats.hypergeom(1, 1, 1)
        h = hg.entropy()
        assert_equal(h, 0.0)
    def test_logsf(self):
        # Test logsf for very large numbers. See issue #4982
        # Results compare with those from R (v3.2.0):
        # phyper(k, n, M-n, N, lower.tail=FALSE, log.p=TRUE)
        # -2239.771
        k = 1e4
        M = 1e7
        n = 1e6
        N = 5e4
        result = stats.hypergeom.logsf(k, M, n, N)
        expected = -2239.771  # From R
        assert_almost_equal(result, expected, decimal=3)
        k = 1
        M = 1600
        n = 600
        N = 300
        result = stats.hypergeom.logsf(k, M, n, N)
        expected = -2.566567e-68  # From R
        assert_almost_equal(result, expected, decimal=15)
    def test_logcdf(self):
        # Test logcdf for very large numbers. See issue #8692
        # Results compare with those from R (v3.3.2):
        # phyper(k, n, M-n, N, lower.tail=TRUE, log.p=TRUE)
        # -5273.335
        k = 1
        M = 1e7
        n = 1e6
        N = 5e4
        result = stats.hypergeom.logcdf(k, M, n, N)
        expected = -5273.335  # From R
        assert_almost_equal(result, expected, decimal=3)
        # Same example as in issue #8692
        k = 40
        M = 1600
        n = 50
        N = 300
        result = stats.hypergeom.logcdf(k, M, n, N)
        expected = -7.565148879229e-23  # From R
        assert_almost_equal(result, expected, decimal=15)
        k = 125
        M = 1600
        n = 250
        N = 500
        result = stats.hypergeom.logcdf(k, M, n, N)
        expected = -4.242688e-12  # From R
        assert_almost_equal(result, expected, decimal=15)
        # test broadcasting robustness based on reviewer
        # concerns in PR 9603; using an array version of
        # the example from issue #8692
        k = np.array([40, 40, 40])
        M = 1600
        n = 50
        N = 300
        result = stats.hypergeom.logcdf(k, M, n, N)
        expected = np.full(3, -7.565148879229e-23)  # filled from R result
        assert_almost_equal(result, expected, decimal=15)
class TestLoggamma(object):
    """Tests for the log-gamma distribution."""

    def test_stats(self):
        # The following precomputed values are from the table in section 2.2
        # of "A Statistical Study of Log-Gamma Distribution", by Ping Shing
        # Chan (thesis, McMaster University, 1993).
        table = np.array([
            # c, mean, var, skew, exc. kurt.
            0.5, -1.9635, 4.9348, -1.5351, 4.0000,
            1.0, -0.5772, 1.6449, -1.1395, 2.4000,
            12.0, 2.4427, 0.0869, -0.2946, 0.1735,
            ]).reshape(-1, 5)
        for c, mean, var, skew, kurt in table:
            # Spell the moments string 'mvsk' to match the comparison order
            # below.  (scipy always returns mean, variance, skewness,
            # kurtosis in that fixed order regardless of the character
            # order, so the previous 'msvk' spelling behaved identically
            # but read as if it requested a different order.)
            computed = stats.loggamma.stats(c, moments='mvsk')
            assert_array_almost_equal(computed, [mean, var, skew, kurt],
                                      decimal=4)
class TestLogistic(object):
    # gh-6226: ppf/isf must invert cdf/sf over a wide range.
    def test_cdf_ppf(self):
        x = np.linspace(-20, 20)
        assert_allclose(x, stats.logistic.ppf(stats.logistic.cdf(x)))

    def test_sf_isf(self):
        x = np.linspace(-20, 20)
        assert_allclose(x, stats.logistic.isf(stats.logistic.sf(x)))

    def test_extreme_values(self):
        # p is chosen so that 1 - (1 - p) == p in double precision
        p = 9.992007221626409e-16
        desired = 34.53957599234088
        assert_allclose(stats.logistic.ppf(1 - p), desired)
        assert_allclose(stats.logistic.isf(p), desired)
class TestLogser(object):
    """Tests for the logarithmic series distribution."""

    def setup_method(self):
        np.random.seed(1234)

    def test_rvs(self):
        samples = stats.logser.rvs(0.75, size=(2, 50))
        assert_(numpy.all(samples >= 1))
        assert_(numpy.shape(samples) == (2, 50))
        assert_(samples.dtype.char in typecodes['AllInteger'])
        single = stats.logser.rvs(0.75)
        assert_(isinstance(single, int))
        frozen_samples = stats.logser(0.75).rvs(3)
        assert_(isinstance(frozen_samples, numpy.ndarray))
        assert_(frozen_samples.dtype.char in typecodes['AllInteger'])

    def test_pmf_small_p(self):
        # The expected value was computed using mpmath:
        #   >>> import mpmath
        #   >>> mpmath.mp.dps = 64
        #   >>> k = 4
        #   >>> p = mpmath.mpf('1e-20')
        #   >>> float(-(p**k)/k/mpmath.log(1-p))
        #   2.5e-61
        # For very small p, log(1-p) is approximately -p, so the pmf
        # reduces to p**(k-1) / k.
        assert_allclose(stats.logser.pmf(4, 1e-20), 2.5e-61)

    def test_mean_small_p(self):
        # The expected mean was computed using mpmath:
        #   >>> import mpmath
        #   >>> mpmath.dps = 60
        #   >>> p = mpmath.mpf('1e-8')
        #   >>> float(-p / ((1 - p)*mpmath.log(1 - p)))
        #   1.000000005
        assert_allclose(stats.logser.mean(1e-8), 1.000000005)
class TestPareto(object):
    """Tests for the Pareto distribution: finiteness pattern of the
    first four moments as the shape parameter b crosses 1, 2, 3 and 4,
    and tail accuracy of sf()."""
    def test_stats(self):
        # Check the stats() method with some simple values. Also check
        # that the calculations do not trigger RuntimeWarnings.
        # The n-th moment exists only for b > n, so mean/var/skew/kurt
        # become finite one at a time as b increases past 1, 2, 3, 4.
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            m, v, s, k = stats.pareto.stats(0.5, moments='mvsk')
            assert_equal(m, np.inf)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            m, v, s, k = stats.pareto.stats(1.0, moments='mvsk')
            assert_equal(m, np.inf)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            m, v, s, k = stats.pareto.stats(1.5, moments='mvsk')
            assert_equal(m, 3.0)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            m, v, s, k = stats.pareto.stats(2.0, moments='mvsk')
            assert_equal(m, 2.0)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            m, v, s, k = stats.pareto.stats(2.5, moments='mvsk')
            assert_allclose(m, 2.5 / 1.5)
            assert_allclose(v, 2.5 / (1.5*1.5*0.5))
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            m, v, s, k = stats.pareto.stats(3.0, moments='mvsk')
            assert_allclose(m, 1.5)
            assert_allclose(v, 0.75)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            m, v, s, k = stats.pareto.stats(3.5, moments='mvsk')
            assert_allclose(m, 3.5 / 2.5)
            assert_allclose(v, 3.5 / (2.5*2.5*1.5))
            assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5))
            assert_equal(k, np.nan)
            m, v, s, k = stats.pareto.stats(4.0, moments='mvsk')
            assert_allclose(m, 4.0 / 3.0)
            assert_allclose(v, 4.0 / 18.0)
            assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0))
            assert_equal(k, np.nan)
            m, v, s, k = stats.pareto.stats(4.5, moments='mvsk')
            assert_allclose(m, 4.5 / 3.5)
            assert_allclose(v, 4.5 / (3.5*3.5*2.5))
            assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5))
            assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5))
    def test_sf(self):
        # sf must follow the closed form (scale/x)**b far in the tail
        # without underflowing.
        x = 1e9
        b = 2
        scale = 1.5
        p = stats.pareto.sf(x, b, loc=0, scale=scale)
        expected = (scale/x)**b  # 2.25e-18
        assert_allclose(p, expected)
class TestGenpareto(object):
    """Tests for the generalized Pareto distribution.

    Covers support computation, the limiting cases c == 0 (exponential)
    and c == -1 (uniform on [0, 1]), behaviour at x = inf, continuity of
    pdf/cdf/ppf/isf as c crosses 0 and -1, cdf/ppf round-trips, logsf
    tail accuracy, closed-form moments, and small-c variance (gh-11168).
    """
    def test_ab(self):
        # c >= 0: a, b = [0, inf]
        for c in [1., 0.]:
            c = np.asarray(c)
            a, b = stats.genpareto._get_support(c)
            assert_equal(a, 0.)
            assert_(np.isposinf(b))
        # c < 0: a=0, b=1/|c|
        c = np.asarray(-2.)
        a, b = stats.genpareto._get_support(c)
        assert_allclose([a, b], [0., 0.5])
    def test_c0(self):
        # with c=0, genpareto reduces to the exponential distribution
        rv = stats.genpareto(c=0.)
        x = np.linspace(0, 10., 30)
        assert_allclose(rv.pdf(x), stats.expon.pdf(x))
        assert_allclose(rv.cdf(x), stats.expon.cdf(x))
        assert_allclose(rv.sf(x), stats.expon.sf(x))
        q = np.linspace(0., 1., 10)
        assert_allclose(rv.ppf(q), stats.expon.ppf(q))
    def test_cm1(self):
        # with c=-1, genpareto reduces to the uniform distr on [0, 1]
        rv = stats.genpareto(c=-1.)
        x = np.linspace(0, 10., 30)
        assert_allclose(rv.pdf(x), stats.uniform.pdf(x))
        assert_allclose(rv.cdf(x), stats.uniform.cdf(x))
        assert_allclose(rv.sf(x), stats.uniform.sf(x))
        q = np.linspace(0., 1., 10)
        assert_allclose(rv.ppf(q), stats.uniform.ppf(q))
        # logpdf(1., c=-1) should be zero
        assert_allclose(rv.logpdf(1), 0)
    def test_x_inf(self):
        # make sure x=inf is handled gracefully
        rv = stats.genpareto(c=0.1)
        assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
        assert_(np.isneginf(rv.logpdf(np.inf)))
        rv = stats.genpareto(c=0.)
        assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
        assert_(np.isneginf(rv.logpdf(np.inf)))
        rv = stats.genpareto(c=-1.)
        assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
        assert_(np.isneginf(rv.logpdf(np.inf)))
    def test_c_continuity(self):
        # pdf is continuous at c=0, -1
        x = np.linspace(0, 10, 30)
        for c in [0, -1]:
            pdf0 = stats.genpareto.pdf(x, c)
            for dc in [1e-14, -1e-14]:
                pdfc = stats.genpareto.pdf(x, c + dc)
                assert_allclose(pdf0, pdfc, atol=1e-12)
            cdf0 = stats.genpareto.cdf(x, c)
            # Bug fix: perturb c in *both* directions.  This previously
            # read [1e-14, 1e-14], which checked the approach from above
            # twice and never the approach from below.
            for dc in [1e-14, -1e-14]:
                cdfc = stats.genpareto.cdf(x, c + dc)
                assert_allclose(cdf0, cdfc, atol=1e-12)
    def test_c_continuity_ppf(self):
        # ppf is continuous as c crosses 0 and -1
        q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
                  np.linspace(0.01, 1, 30, endpoint=False),
                  1. - np.logspace(1e-12, 0.01, base=0.1)]
        for c in [0., -1.]:
            ppf0 = stats.genpareto.ppf(q, c)
            for dc in [1e-14, -1e-14]:
                ppfc = stats.genpareto.ppf(q, c + dc)
                assert_allclose(ppf0, ppfc, atol=1e-12)
    def test_c_continuity_isf(self):
        # isf is continuous as c crosses 0 and -1
        q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
                  np.linspace(0.01, 1, 30, endpoint=False),
                  1. - np.logspace(1e-12, 0.01, base=0.1)]
        for c in [0., -1.]:
            isf0 = stats.genpareto.isf(q, c)
            for dc in [1e-14, -1e-14]:
                isfc = stats.genpareto.isf(q, c + dc)
                assert_allclose(isf0, isfc, atol=1e-12)
    def test_cdf_ppf_roundtrip(self):
        # this should pass with machine precision. hat tip @pbrod
        q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
                  np.linspace(0.01, 1, 30, endpoint=False),
                  1. - np.logspace(1e-12, 0.01, base=0.1)]
        for c in [1e-8, -1e-18, 1e-15, -1e-15]:
            assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c),
                            q, atol=1e-15)
    def test_logsf(self):
        # deep-tail log-survival value against an independent reference
        logp = stats.genpareto.logsf(1e10, .01, 0, 1)
        assert_allclose(logp, -1842.0680753952365)
    # Values in 'expected_stats' are
    # [mean, variance, skewness, excess kurtosis].
    @pytest.mark.parametrize(
        'c, expected_stats',
        [(0, [1, 1, 2, 6]),
         (1/4, [4/3, 32/9, 10/np.sqrt(2), np.nan]),
         (1/9, [9/8, (81/64)*(9/7), (10/9)*np.sqrt(7), 754/45]),
         (-1, [1/2, 1/12, 0, -6/5])])
    def test_stats(self, c, expected_stats):
        result = stats.genpareto.stats(c, moments='mvsk')
        assert_allclose(result, expected_stats, rtol=1e-13, atol=1e-15)
    def test_var(self):
        # Regression test for gh-11168.
        v = stats.genpareto.var(1e-8)
        assert_allclose(v, 1.000000040000001, rtol=1e-13)
class TestPearson3(object):
    """Tests for the Pearson type III distribution (skew-parameterised
    gamma): rvs shape/dtype and pdf/cdf reference values."""
    def setup_method(self):
        # Fixed seed so the rvs-based checks below are deterministic.
        np.random.seed(1234)
    def test_rvs(self):
        # Samples have the requested shape and a float dtype; scalar
        # draws come back as plain Python floats.
        vals = stats.pearson3.rvs(0.1, size=(2, 50))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllFloat'])
        val = stats.pearson3.rvs(0.5)
        assert_(isinstance(val, float))
        val = stats.pearson3(0.5).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllFloat'])
        assert_(len(val) == 3)
    def test_pdf(self):
        # Reference pdf values for scalar/array x and skew parameters.
        vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2])
        assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]),
                        atol=1e-6)
        vals = stats.pearson3.pdf(-3, 0.1)
        assert_allclose(vals, np.array([0.00313791]), atol=1e-6)
        vals = stats.pearson3.pdf([-3, -2, -1, 0, 1], 0.1)
        assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092,
                                        0.39885918, 0.23413173]), atol=1e-6)
    def test_cdf(self):
        # Reference cdf values for scalar/array x and skew parameters.
        vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2])
        assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]),
                        atol=1e-6)
        vals = stats.pearson3.cdf(-3, 0.1)
        assert_allclose(vals, [0.00082256], atol=1e-6)
        vals = stats.pearson3.cdf([-3, -2, -1, 0, 1], 0.1)
        assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,
                               5.06649130e-01, 8.41442111e-01], atol=1e-6)
class TestKappa4(object):
    """Tests for the four-parameter kappa distribution: for special
    values of (h, k) it must reduce to known distributions
    (generalized Pareto, GEV, exponential, Gumbel, logistic, uniform)."""
    def test_cdf_genpareto(self):
        # h = 1 and k != 0 is generalized Pareto
        x = [0.0, 0.1, 0.2, 0.5]
        h = 1.0
        for k in [-1.9, -1.0, -0.5, -0.2, -0.1, 0.1, 0.2, 0.5, 1.0,
                  1.9]:
            vals = stats.kappa4.cdf(x, h, k)
            # shape parameter is opposite what is expected
            vals_comp = stats.genpareto.cdf(x, -k)
            assert_allclose(vals, vals_comp)
    def test_cdf_genextreme(self):
        # h = 0 and k != 0 is generalized extreme value
        x = np.linspace(-5, 5, 10)
        h = 0.0
        k = np.linspace(-3, 3, 10)
        vals = stats.kappa4.cdf(x, h, k)
        vals_comp = stats.genextreme.cdf(x, k)
        assert_allclose(vals, vals_comp)
    def test_cdf_expon(self):
        # h = 1 and k = 0 is exponential
        x = np.linspace(0, 10, 10)
        h = 1.0
        k = 0.0
        vals = stats.kappa4.cdf(x, h, k)
        vals_comp = stats.expon.cdf(x)
        assert_allclose(vals, vals_comp)
    def test_cdf_gumbel_r(self):
        # h = 0 and k = 0 is gumbel_r
        x = np.linspace(-5, 5, 10)
        h = 0.0
        k = 0.0
        vals = stats.kappa4.cdf(x, h, k)
        vals_comp = stats.gumbel_r.cdf(x)
        assert_allclose(vals, vals_comp)
    def test_cdf_logistic(self):
        # h = -1 and k = 0 is logistic
        x = np.linspace(-5, 5, 10)
        h = -1.0
        k = 0.0
        vals = stats.kappa4.cdf(x, h, k)
        vals_comp = stats.logistic.cdf(x)
        assert_allclose(vals, vals_comp)
    def test_cdf_uniform(self):
        # h = 1 and k = 1 is uniform
        x = np.linspace(-5, 5, 10)
        h = 1.0
        k = 1.0
        vals = stats.kappa4.cdf(x, h, k)
        vals_comp = stats.uniform.cdf(x)
        assert_allclose(vals, vals_comp)
    def test_integers_ctor(self):
        # regression test for gh-7416: _argcheck fails for integer h and k
        # in numpy 1.12
        stats.kappa4(1, 2)
class TestPoisson(object):
    """Tests for the Poisson distribution: pmf reference values, the
    mu = 0 edge case, rvs shape/dtype, and closed-form moments."""
    def setup_method(self):
        # Fixed seed so the rvs-based checks below are deterministic.
        np.random.seed(1234)
    def test_pmf_basic(self):
        # Basic case
        ln2 = np.log(2)
        vals = stats.poisson.pmf([0, 1, 2], ln2)
        expected = [0.5, ln2/2, ln2**2/4]
        assert_allclose(vals, expected)
    def test_mu0(self):
        # Edge case: mu=0 — all mass at k=0, degenerate interval.
        vals = stats.poisson.pmf([0, 1, 2], 0)
        expected = [1, 0, 0]
        assert_array_equal(vals, expected)
        interval = stats.poisson.interval(0.95, 0)
        assert_equal(interval, (0, 0))
    def test_rvs(self):
        # Samples are non-negative integers with the requested shape.
        vals = stats.poisson.rvs(0.5, size=(2, 50))
        assert_(numpy.all(vals >= 0))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.poisson.rvs(0.5)
        assert_(isinstance(val, int))
        val = stats.poisson(0.5).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_stats(self):
        # mean = var = mu, skew = 1/sqrt(mu), excess kurtosis = 1/mu;
        # mu = 0 gives infinite skew/kurtosis.
        mu = 16.0
        result = stats.poisson.stats(mu, moments='mvsk')
        assert_allclose(result, [mu, mu, np.sqrt(1.0/mu), 1.0/mu])
        mu = np.array([0.0, 1.0, 2.0])
        result = stats.poisson.stats(mu, moments='mvsk')
        expected = (mu, mu, [np.inf, 1, 1/np.sqrt(2)], [np.inf, 1, 0.5])
        assert_allclose(result, expected)
class TestKSTwo(object):
    """Tests for kstwo, the exact two-sided Kolmogorov-Smirnov
    one-sample statistic distribution: cdf/sf exact values, internal
    consistency (cdf+sf == 1, ppf/isf round-trips), convergence toward
    kstwobign, and reference values from Simard & L'Ecuyer (2011)."""
    def setup_method(self):
        np.random.seed(1234)
    def test_cdf(self):
        for n in [1, 2, 3, 10, 100, 1000]:
            # Test x-values:
            #  0, 1/2n, where the cdf should be 0
            #  1/n, where the cdf should be n!/n^n
            #  0.5, where the cdf should match ksone.cdf
            # 1-1/n, where cdf = 1-2/n^n
            # 1, where cdf == 1
            # (E.g. Exact values given by Eqn 1 in Simard / L'Ecuyer)
            x = np.array([0, 0.5/n, 1/n, 0.5, 1-1.0/n, 1])
            v1 = (1.0/n)**n
            lg = scipy.special.gammaln(n+1)
            elg = (np.exp(lg) if v1 != 0 else 0)
            expected = np.array([0, 0, v1 * elg,
                                 1 - 2*stats.ksone.sf(0.5, n),
                                 max(1 - 2*v1, 0.0),
                                 1.0])
            vals_cdf = stats.kstwo.cdf(x, n)
            assert_allclose(vals_cdf, expected)
    def test_sf(self):
        # NOTE(review): this linspace is dead — x is immediately
        # reassigned inside the loop below.
        x = np.linspace(0, 1, 11)
        for n in [1, 2, 3, 10, 100, 1000]:
            # Same x values as in test_cdf, and use sf = 1 - cdf
            x = np.array([0, 0.5/n, 1/n, 0.5, 1-1.0/n, 1])
            v1 = (1.0/n)**n
            lg = scipy.special.gammaln(n+1)
            elg = (np.exp(lg) if v1 != 0 else 0)
            expected = np.array([1.0, 1.0,
                                 1 - v1 * elg,
                                 2*stats.ksone.sf(0.5, n),
                                 min(2*v1, 1.0), 0])
            vals_sf = stats.kstwo.sf(x, n)
            assert_allclose(vals_sf, expected)
    def test_cdf_sqrtn(self):
        # For fixed a, cdf(a/sqrt(n), n) -> kstwobign(a) as n->infinity
        # cdf(a/sqrt(n), n) is an increasing function of n (and a)
        # Check that the function is indeed increasing (allowing for some
        # small floating point and algorithm differences.)
        x = np.linspace(0, 2, 11)[1:]
        ns = [50, 100, 200, 400, 1000, 2000]
        for _x in x:
            xn = _x / np.sqrt(ns)
            probs = stats.kstwo.cdf(xn, ns)
            diffs = np.diff(probs)
            assert_array_less(diffs, 1e-8)
    def test_cdf_sf(self):
        # cdf and sf must be complementary.
        x = np.linspace(0, 1, 11)
        for n in [1, 2, 3, 10, 100, 1000]:
            vals_cdf = stats.kstwo.cdf(x, n)
            vals_sf = stats.kstwo.sf(x, n)
            assert_array_almost_equal(vals_cdf, 1 - vals_sf)
    def test_cdf_sf_sqrtn(self):
        # Same complementarity check in the 1/sqrt(n)-scaled regime.
        x = np.linspace(0, 1, 11)
        for n in [1, 2, 3, 10, 100, 1000]:
            xn = x / np.sqrt(n)
            vals_cdf = stats.kstwo.cdf(xn, n)
            vals_sf = stats.kstwo.sf(xn, n)
            assert_array_almost_equal(vals_cdf, 1 - vals_sf)
    def test_ppf_of_cdf(self):
        x = np.linspace(0, 1, 11)
        for n in [1, 2, 3, 10, 100, 1000]:
            # Restrict to the support interior (x > 1/2n).
            xn = x[x > 0.5/n]
            vals_cdf = stats.kstwo.cdf(xn, n)
            # CDFs close to 1 are better dealt with using the SF
            cond = (0 < vals_cdf) & (vals_cdf < 0.99)
            vals = stats.kstwo.ppf(vals_cdf, n)
            assert_allclose(vals[cond], xn[cond], rtol=1e-4)
    def test_isf_of_sf(self):
        x = np.linspace(0, 1, 11)
        for n in [1, 2, 3, 10, 100, 1000]:
            xn = x[x > 0.5/n]
            vals_isf = stats.kstwo.isf(xn, n)
            cond = (0 < vals_isf) & (vals_isf < 1.0)
            vals = stats.kstwo.sf(vals_isf, n)
            assert_allclose(vals[cond], xn[cond], rtol=1e-4)
    def test_ppf_of_cdf_sqrtn(self):
        x = np.linspace(0, 1, 11)
        for n in [1, 2, 3, 10, 100, 1000]:
            xn = (x / np.sqrt(n))[x > 0.5/n]
            vals_cdf = stats.kstwo.cdf(xn, n)
            cond = (0 < vals_cdf) & (vals_cdf < 1.0)
            vals = stats.kstwo.ppf(vals_cdf, n)
            assert_allclose(vals[cond], xn[cond])
    def test_isf_of_sf_sqrtn(self):
        x = np.linspace(0, 1, 11)
        for n in [1, 2, 3, 10, 100, 1000]:
            xn = (x / np.sqrt(n))[x > 0.5/n]
            vals_sf = stats.kstwo.sf(xn, n)
            # SFs close to 1 are better dealt with using the CDF
            cond = (0 < vals_sf) & (vals_sf < 0.95)
            vals = stats.kstwo.isf(vals_sf, n)
            assert_allclose(vals[cond], xn[cond])
    def test_ppf(self):
        # ppf followed by cdf must reproduce the probabilities.
        probs = np.linspace(0, 1, 11)[1:]
        for n in [1, 2, 3, 10, 100, 1000]:
            xn = stats.kstwo.ppf(probs, n)
            vals_cdf = stats.kstwo.cdf(xn, n)
            assert_allclose(vals_cdf, probs)
    def test_simard_lecuyer_table1(self):
        # Compute the cdf for values near the mean of the distribution.
        # The mean u ~ log(2)*sqrt(pi/(2n))
        # Compute for x in [u/4, u/3, u/2, u, 2u, 3u]
        # This is the computation of Table 1 of Simard, R., L'Ecuyer, P. (2011)
        #  "Computing the Two-Sided Kolmogorov-Smirnov Distribution".
        # Except that the values below are not from the published table, but
        # were generated using an independent SageMath implementation of
        # Durbin's algorithm (with the exponentiation and scaling of
        # Marsaglia/Tsang/Wang's version) using 500 bit arithmetic.
        # Some of the values in the published table have relative
        # errors greater than 1e-4.
        ns = [10, 50, 100, 200, 500, 1000]
        ratios = np.array([1.0/4, 1.0/3, 1.0/2, 1, 2, 3])
        expected = np.array([
            [1.92155292e-08, 5.72933228e-05, 2.15233226e-02, 6.31566589e-01, 9.97685592e-01, 9.99999942e-01],
            [2.28096224e-09, 1.99142563e-05, 1.42617934e-02, 5.95345542e-01, 9.96177701e-01, 9.99998662e-01],
            [1.00201886e-09, 1.32673079e-05, 1.24608594e-02, 5.86163220e-01, 9.95866877e-01, 9.99998240e-01],
            [4.93313022e-10, 9.52658029e-06, 1.12123138e-02, 5.79486872e-01, 9.95661824e-01, 9.99997964e-01],
            [2.37049293e-10, 6.85002458e-06, 1.01309221e-02, 5.73427224e-01, 9.95491207e-01, 9.99997750e-01],
            [1.56990874e-10, 5.71738276e-06, 9.59725430e-03, 5.70322692e-01, 9.95409545e-01, 9.99997657e-01]
        ])
        for idx, n in enumerate(ns):
            x = ratios * np.log(2) * np.sqrt(np.pi/2/n)
            vals_cdf = stats.kstwo.cdf(x, n)
            assert_allclose(vals_cdf, expected[idx], rtol=1e-5)
class TestZipf(object):
    """Tests for the Zipf (zeta) discrete distribution."""
    def setup_method(self):
        # Fixed seed so the rvs-based checks below are deterministic.
        np.random.seed(1234)
    def test_rvs(self):
        # Samples are integers >= 1 with the requested shape/dtype.
        vals = stats.zipf.rvs(1.5, size=(2, 50))
        assert_(numpy.all(vals >= 1))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.zipf.rvs(1.5)
        assert_(isinstance(val, int))
        val = stats.zipf(1.5).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_moments(self):
        # n-th moment is finite iff a > n + 1
        m, v = stats.zipf.stats(a=2.8)
        assert_(np.isfinite(m))
        assert_equal(v, np.inf)
        s, k = stats.zipf.stats(a=4.8, moments='sk')
        assert_(not np.isfinite([s, k]).all())
class TestDLaplace(object):
    """Tests for the discrete Laplace distribution: rvs dtype/shape and
    moments checked against direct summation of the pmf."""
    def setup_method(self):
        # Fixed seed so the rvs-based checks below are deterministic.
        np.random.seed(1234)
    def test_rvs(self):
        vals = stats.dlaplace.rvs(1.5, size=(2, 50))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.dlaplace.rvs(1.5)
        assert_(isinstance(val, int))
        val = stats.dlaplace(1.5).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
        assert_(stats.dlaplace.rvs(0.8) is not None)
    def test_stats(self):
        # compare the explicit formulas w/ direct summation using pmf
        a = 1.
        dl = stats.dlaplace(a)
        m, v, s, k = dl.stats('mvsk')
        # truncate the infinite sum at |x| <= N; the tail is negligible
        N = 37
        xx = np.arange(-N, N+1)
        pp = dl.pmf(xx)
        m2, m4 = np.sum(pp*xx**2), np.sum(pp*xx**4)
        assert_equal((m, s), (0, 0))
        assert_allclose((v, k), (m2, m4/m2**2 - 3.), atol=1e-14, rtol=1e-8)
    def test_stats2(self):
        # closed-form moments at a = log(2)
        a = np.log(2.)
        dl = stats.dlaplace(a)
        m, v, s, k = dl.stats('mvsk')
        assert_equal((m, s), (0., 0.))
        assert_allclose((v, k), (4., 3.25))
class TestInvGamma(object):
    """Tests for the inverse-gamma distribution: moment finiteness
    (gh-1866) and cdf/ppf, sf/isf round-trips (gh-6245)."""
    def test_invgamma_inf_gh_1866(self):
        # invgamma's moments are only finite for a>n
        # specific numbers checked w/ boost 1.54
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            mvsk = stats.invgamma.stats(a=19.31, moments='mvsk')
            expected = [0.05461496450, 0.0001723162534, 1.020362676,
                        2.055616582]
            assert_allclose(mvsk, expected)
            a = [1.1, 3.1, 5.6]
            mvsk = stats.invgamma.stats(a=a, moments='mvsk')
            expected = ([10., 0.476190476, 0.2173913043],       # mmm
                        [np.inf, 0.2061430632, 0.01312749422],  # vvv
                        [np.nan, 41.95235392, 2.919025532],     # sss
                        [np.nan, np.nan, 24.51923076])          # kkk
            for x, y in zip(mvsk, expected):
                assert_almost_equal(x, y)
    def test_cdf_ppf(self):
        # gh-6245
        x = np.logspace(-2.6, 0)
        y = stats.invgamma.cdf(x, 1)
        xx = stats.invgamma.ppf(y, 1)
        assert_allclose(x, xx)
    def test_sf_isf(self):
        # gh-6245
        if sys.maxsize > 2**32:
            x = np.logspace(2, 100)
        else:
            # invgamma roundtrip on 32-bit systems has relative accuracy
            # ~1e-15 until x=1e+15, and becomes inf above x=1e+18
            x = np.logspace(2, 18)
        y = stats.invgamma.sf(x, 1)
        xx = stats.invgamma.isf(y, 1)
        assert_allclose(x, xx, rtol=1.0)
class TestF(object):
    """Tests for the F distribution: pdf at the left endpoint,
    finiteness of moments, warning behaviour, and broadcasting of
    stats() over the two shape parameters."""
    def test_endpoints(self):
        # Compute the pdf at the left endpoint dst.a.
        # (Fix: removed a leftover debug loop that recomputed the same
        # values and print()-ed them to stdout on every test run.)
        data = [[stats.f, (2, 1), 1.0]]
        ans = [_f.pdf(_f.a, *_args) for _f, _args, _ in data]
        correct = [_correct_ for _f, _args, _correct_ in data]
        assert_array_almost_equal(ans, correct)
    def test_f_moments(self):
        # n-th moment of F distributions is only finite for n < dfd / 2
        m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')
        assert_(np.isfinite(m))
        assert_(np.isfinite(v))
        assert_(np.isfinite(s))
        assert_(not np.isfinite(k))
    def test_moments_warnings(self):
        # no warnings should be generated for dfd = 2, 4, 6, 8 (div by zero)
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            stats.f.stats(dfn=[11]*4, dfd=[2, 4, 6, 8], moments='mvsk')
    def test_stats_broadcast(self):
        # stats() must broadcast dfn (a column) against dfd (a row);
        # expected values follow the closed-form F-distribution moments.
        dfn = np.array([[3], [11]])
        dfd = np.array([11, 12])
        m, v, s, k = stats.f.stats(dfn=dfn, dfd=dfd, moments='mvsk')
        m2 = [dfd / (dfd - 2)]*2
        assert_allclose(m, m2)
        v2 = 2 * dfd**2 * (dfn + dfd - 2) / dfn / (dfd - 2)**2 / (dfd - 4)
        assert_allclose(v, v2)
        s2 = ((2*dfn + dfd - 2) * np.sqrt(8*(dfd - 4)) /
              ((dfd - 6) * np.sqrt(dfn*(dfn + dfd - 2))))
        assert_allclose(s, s2)
        k2num = 12 * (dfn * (5*dfd - 22) * (dfn + dfd - 2) +
                      (dfd - 4) * (dfd - 2)**2)
        k2den = dfn * (dfd - 6) * (dfd - 8) * (dfn + dfd - 2)
        k2 = k2num / k2den
        assert_allclose(k, k2)
def test_rvgeneric_std():
    # Regression test for #1191: std() must broadcast over an array of
    # shape parameters.
    expected = [1.29099445, 1.22474487]
    assert_array_almost_equal(stats.t.std([5, 6]), expected)
def test_moments_t():
    # Regression test for #8786: moments of the t distribution at and
    # just above the degrees-of-freedom thresholds 1, 2, 3 and 4, where
    # mean/variance/skewness/kurtosis successively become defined.
    mvsk_cases = [
        (1, (np.inf, np.nan, np.nan, np.nan)),
        (1.01, (0.0, np.inf, np.nan, np.nan)),
        (2, (0.0, np.inf, np.nan, np.nan)),
        (2.01, (0.0, 2.01/(2.01-2.0), np.nan, np.inf)),
    ]
    for df, expected in mvsk_cases:
        assert_equal(stats.t.stats(df=df, moments='mvsk'), expected)
    sk_cases = [
        (3, (np.nan, np.inf)),
        (3.01, (0.0, np.inf)),
        (4, (0.0, np.inf)),
        (4.01, (0.0, 6.0/(4.01 - 4.0))),
    ]
    for df, expected in sk_cases:
        assert_equal(stats.t.stats(df=df, moments='sk'), expected)
class TestRvDiscrete(object):
    """Tests for rv_discrete built from explicit (xk, pk) value tables:
    sampling, entropy, pmf/cdf/ppf, expect(), and input validation."""
    def setup_method(self):
        # Fixed seed so the sampling-frequency check is deterministic.
        np.random.seed(1234)
    def test_rvs(self):
        # Empirical frequencies of sampled states should be close to pk.
        states = [-1, 0, 1, 2, 3, 4]
        probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0]
        samples = 1000
        r = stats.rv_discrete(name='sample', values=(states, probability))
        x = r.rvs(size=samples)
        assert_(isinstance(x, numpy.ndarray))
        for s, p in zip(states, probability):
            assert_(abs(sum(x == s)/float(samples) - p) < 0.05)
        x = r.rvs()
        assert_(isinstance(x, int))
    def test_entropy(self):
        # Basic tests of entropy.
        pvals = np.array([0.25, 0.45, 0.3])
        p = stats.rv_discrete(values=([0, 1, 2], pvals))
        expected_h = -sum(xlogy(pvals, pvals))
        h = p.entropy()
        assert_allclose(h, expected_h)
        # A degenerate (deterministic) distribution has zero entropy.
        p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))
        h = p.entropy()
        assert_equal(h, 0.0)
    def test_pmf(self):
        # pmf is pk on the support and exactly 0 elsewhere, and must
        # broadcast over array-valued x.
        xk = [1, 2, 4]
        pk = [0.5, 0.3, 0.2]
        rv = stats.rv_discrete(values=(xk, pk))
        x = [[1., 4.],
             [3., 2]]
        assert_allclose(rv.pmf(x),
                        [[0.5, 0.2],
                         [0., 0.3]], atol=1e-14)
    def test_cdf(self):
        # cdf is a right-continuous step function.
        xk = [1, 2, 4]
        pk = [0.5, 0.3, 0.2]
        rv = stats.rv_discrete(values=(xk, pk))
        x_values = [-2, 1., 1.1, 1.5, 2.0, 3.0, 4, 5]
        expected = [0, 0.5, 0.5, 0.5, 0.8, 0.8, 1, 1]
        assert_allclose(rv.cdf(x_values), expected, atol=1e-14)
        # also check scalar arguments
        assert_allclose([rv.cdf(xx) for xx in x_values],
                        expected, atol=1e-14)
    def test_ppf(self):
        # ppf maps each quantile to the smallest xk with cdf >= q.
        xk = [1, 2, 4]
        pk = [0.5, 0.3, 0.2]
        rv = stats.rv_discrete(values=(xk, pk))
        q_values = [0.1, 0.5, 0.6, 0.8, 0.9, 1.]
        expected = [1, 1, 2, 2, 4, 4]
        assert_allclose(rv.ppf(q_values), expected, atol=1e-14)
        # also check scalar arguments
        assert_allclose([rv.ppf(q) for q in q_values],
                        expected, atol=1e-14)
    def test_cdf_ppf_next(self):
        # copied and special cased from test_discrete_basic
        vals = ([1, 2, 4, 7, 8], [0.1, 0.2, 0.3, 0.3, 0.1])
        rv = stats.rv_discrete(values=vals)
        assert_array_equal(rv.ppf(rv.cdf(rv.xk[:-1]) + 1e-8),
                           rv.xk[1:])
    def test_expect(self):
        # expect() of the identity equals sum(xk * pk).
        xk = [1, 2, 4, 6, 7, 11]
        pk = [0.1, 0.2, 0.2, 0.2, 0.2, 0.1]
        rv = stats.rv_discrete(values=(xk, pk))
        assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)
    def test_multidimension(self):
        # Multidimensional xk/pk of matching shape are accepted.
        xk = np.arange(12).reshape((3, 4))
        pk = np.array([[0.1, 0.1, 0.15, 0.05],
                       [0.1, 0.1, 0.05, 0.05],
                       [0.1, 0.1, 0.05, 0.05]])
        rv = stats.rv_discrete(values=(xk, pk))
        assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)
    def test_bad_input(self):
        # Mismatched lengths, probabilities that don't sum to 1, and
        # negative probabilities must all be rejected.
        xk = [1, 2, 3]
        pk = [0.5, 0.5]
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
        pk = [1, 2, 3]
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
        xk = [1, 2, 3]
        pk = [0.5, 1.2, -0.7]
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
        xk = [1, 2, 3, 4, 5]
        pk = [0.3, 0.3, 0.3, 0.3, -0.2]
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
    def test_shape_rv_sample(self):
        # tests added for gh-9565
        # mismatch of 2d inputs
        xk, pk = np.arange(4).reshape((2, 2)), np.full((2, 3), 1/6)
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
        # same number of elements, but shapes not compatible
        xk, pk = np.arange(6).reshape((3, 2)), np.full((2, 3), 1/6)
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
        # same shapes => no error
        xk, pk = np.arange(6).reshape((3, 2)), np.full((3, 2), 1/6)
        assert_equal(stats.rv_discrete(values=(xk, pk)).pmf(0), 1/6)
class TestSkewNorm(object):
    """Tests for the skew-normal distribution: reduction to the normal
    at a = 0, rvs shapes, moments vs. large samples, and cdf/sf
    accuracy in both tails (gh-7746)."""
    def setup_method(self):
        # check_random_state is imported at module level; fixed seed
        # keeps the sampling-based moment checks deterministic.
        self.rng = check_random_state(1234)
    def test_normal(self):
        # When the skewness is 0 the distribution is normal
        x = np.linspace(-5, 5, 100)
        assert_array_almost_equal(stats.skewnorm.pdf(x, a=0),
                                  stats.norm.pdf(x))
    def test_rvs(self):
        shape = (3, 4, 5)
        x = stats.skewnorm.rvs(a=0.75, size=shape, random_state=self.rng)
        assert_equal(shape, x.shape)
        x = stats.skewnorm.rvs(a=-3, size=shape, random_state=self.rng)
        assert_equal(shape, x.shape)
    def test_moments(self):
        # Sample moments of a large sample should approximate the
        # analytic moments (2 decimals) for both signs of the skew.
        X = stats.skewnorm.rvs(a=4, size=int(1e6), loc=5, scale=2,
                               random_state=self.rng)
        expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]
        computed = stats.skewnorm.stats(a=4, loc=5, scale=2, moments='mvsk')
        assert_array_almost_equal(computed, expected, decimal=2)
        X = stats.skewnorm.rvs(a=-4, size=int(1e6), loc=5, scale=2,
                               random_state=self.rng)
        expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]
        computed = stats.skewnorm.stats(a=-4, loc=5, scale=2, moments='mvsk')
        assert_array_almost_equal(computed, expected, decimal=2)
    def test_cdf_large_x(self):
        # Regression test for gh-7746.
        # The x values are large enough that the closest 64 bit floating
        # point representation of the exact CDF is 1.0.
        p = stats.skewnorm.cdf([10, 20, 30], -1)
        assert_allclose(p, np.ones(3), rtol=1e-14)
        p = stats.skewnorm.cdf(25, 2.5)
        assert_allclose(p, 1.0, rtol=1e-14)
    def test_cdf_sf_small_values(self):
        # Triples are [x, a, cdf(x, a)].  These values were computed
        # using CDF[SkewNormDistribution[0, 1, a], x] in Wolfram Alpha.
        cdfvals = [
            [-8, 1, 3.870035046664392611e-31],
            [-4, 2, 8.1298399188811398e-21],
            [-2, 5, 1.55326826787106273e-26],
            [-9, -1, 2.257176811907681295e-19],
            [-10, -4, 1.523970604832105213e-23],
        ]
        for x, a, cdfval in cdfvals:
            p = stats.skewnorm.cdf(x, a)
            assert_allclose(p, cdfval, rtol=1e-8)
            # For the skew normal distribution, sf(-x, -a) = cdf(x, a).
            p = stats.skewnorm.sf(-x, -a)
            assert_allclose(p, cdfval, rtol=1e-8)
class TestExpon(object):
def test_zero(self):
assert_equal(stats.expon.pdf(0), 1)
def test_tail(self): # Regression test for ticket 807
assert_equal(stats.expon.cdf(1e-18), 1e-18)
assert_equal(stats.expon.isf(stats.expon.sf(40)), 40)
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.expon.fit, x)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.expon.fit, x)
class TestNorm(object):
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.norm.fit, x)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.norm.fit, x)
def test_bad_keyword_arg(self):
x = [1, 2, 3]
assert_raises(TypeError, stats.norm.fit, x, plate="shrimp")
class TestUniform(object):
"""gh-10300"""
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.uniform.fit, x)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.uniform.fit, x)
class TestExponNorm(object):
    """Tests for the exponentially modified normal (EMG) distribution:
    closed-form moments, fit-input validation, and overflow safety of
    the pdf for extreme arguments."""
    def test_moments(self):
        # Some moment test cases based on non-loc/scaled formula
        def get_moms(lam, sig, mu):
            # See wikipedia for these formulae
            #  where it is listed as an exponentially modified gaussian
            opK2 = 1.0 + 1 / (lam*sig)**2
            exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5)
            exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2)
            return [mu + 1/lam, sig*sig + 1.0/(lam*lam), exp_skew, exp_kurt]
        # scipy parameterises the distribution by K = 1/(lam*sig).
        mu, sig, lam = 0, 1, 1
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))
        mu, sig, lam = -3, 2, 0.1
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))
        mu, sig, lam = 0, 3, 1
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))
        mu, sig, lam = -5, 11, 3.5
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))
    def test_nan_raises_error(self):
        # see gh-issue 10300
        x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
        assert_raises(RuntimeError, stats.exponnorm.fit, x, floc=0, fscale=1)
    def test_inf_raises_error(self):
        # see gh-issue 10300
        x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
        assert_raises(RuntimeError, stats.exponnorm.fit, x, floc=0, fscale=1)
    def test_extremes_x(self):
        # Test for extreme values against overflows
        assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0)
        assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0)
        assert_almost_equal(stats.exponnorm.pdf(1, 0.01), 0.0)
        assert_almost_equal(stats.exponnorm.pdf(-900, 0.01), 0.0)
        assert_almost_equal(stats.exponnorm.pdf(+900, 0.01), 0.0)
class TestGenExpon(object):
    """Basic pdf/cdf sanity checks for the generalized exponential
    distribution."""
    def test_pdf_unity_area(self):
        from scipy.integrate import simps
        # PDF should integrate to one
        grid = numpy.arange(0, 10, 0.01)
        density = stats.genexpon.pdf(grid, 0.5, 0.5, 2.0)
        assert_almost_equal(simps(density, dx=0.01), 1, 1)
    def test_cdf_bounds(self):
        # CDF values must always lie in [0, 1].
        cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
        assert_(numpy.all((0 <= cdf) & (cdf <= 1)))
class TestExponpow(object):
    """Tail-accuracy checks for the exponential power distribution."""
    def test_tail(self):
        # Near the origin, cdf(x, b) behaves like x**b (1e-10**2 == 1e-20).
        assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)
        # isf must invert sf far into the tail.
        b = .8
        assert_almost_equal(stats.exponpow.isf(stats.exponpow.sf(5, b), b),
                            5)
class TestSkellam(object):
    """Tests for the Skellam distribution (difference of two Poissons)
    against reference pmf/cdf values computed in R."""
    def test_pmf(self):
        # comparison to R
        k = numpy.arange(-10, 15)
        mu1, mu2 = 10, 5
        skpmfR = numpy.array(
                 [4.2254582961926893e-005, 1.1404838449648488e-004,
                  2.8979625801752660e-004, 6.9177078182101231e-004,
                  1.5480716105844708e-003, 3.2412274963433889e-003,
                  6.3373707175123292e-003, 1.1552351566696643e-002,
                  1.9606152375042644e-002, 3.0947164083410337e-002,
                  4.5401737566767360e-002, 6.1894328166820688e-002,
                  7.8424609500170578e-002, 9.2418812533573133e-002,
                  1.0139793148019728e-001, 1.0371927988298846e-001,
                  9.9076583077406091e-002, 8.8546660073089561e-002,
                  7.4187842052486810e-002, 5.8392772862200251e-002,
                  4.3268692953013159e-002, 3.0248159818374226e-002,
                  1.9991434305603021e-002, 1.2516877303301180e-002,
                  7.4389876226229707e-003])
        assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15)
    def test_cdf(self):
        # comparison to R, only 5 decimals
        k = numpy.arange(-10, 15)
        mu1, mu2 = 10, 5
        skcdfR = numpy.array(
                 [6.4061475386192104e-005, 1.7810985988267694e-004,
                  4.6790611790020336e-004, 1.1596768997212152e-003,
                  2.7077485103056847e-003, 5.9489760066490718e-003,
                  1.2286346724161398e-002, 2.3838698290858034e-002,
                  4.3444850665900668e-002, 7.4392014749310995e-002,
                  1.1979375231607835e-001, 1.8168808048289900e-001,
                  2.6011268998306952e-001, 3.5253150251664261e-001,
                  4.5392943399683988e-001, 5.5764871387982828e-001,
                  6.5672529695723436e-001, 7.4527195703032389e-001,
                  8.1945979908281064e-001, 8.7785257194501087e-001,
                  9.2112126489802404e-001, 9.5136942471639818e-001,
                  9.7136085902200120e-001, 9.8387773632530240e-001,
                  9.9131672394792536e-001])
        assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)
class TestLognorm(object):
    """Tests for the lognormal distribution: pdf behaviour at x = 0
    (Ticket #1471, gh-5202) and sf underflow (gh-5940)."""
    def test_pdf(self):
        # Regression test for Ticket #1471: avoid nan with 0/0 situation
        # Also make sure there are no warnings at x=0, cf gh-5202
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            pdf = stats.lognorm.pdf([0, 0.5, 1], 1)
            assert_array_almost_equal(pdf, [0.0, 0.62749608, 0.39894228])
    def test_logcdf(self):
        # Regression test for gh-5940: sf et al would underflow too early
        # lognorm.sf(x, s) must agree with norm.sf(log(x)/s).
        x2, mu, sigma = 201.68, 195, 0.149
        assert_allclose(stats.lognorm.sf(x2-mu, s=sigma),
                        stats.norm.sf(np.log(x2-mu)/sigma))
        assert_allclose(stats.lognorm.logsf(x2-mu, s=sigma),
                        stats.norm.logsf(np.log(x2-mu)/sigma))
class TestBeta(object):
    """Tests for the beta distribution: logpdf corner cases at x = 0
    (Ticket #1326, #1866) and fit() keyword validation."""
    def test_logpdf(self):
        # Regression test for Ticket #1326: avoid nan with 0*log(0) situation
        logpdf = stats.beta.logpdf(0, 1, 0.5)
        assert_almost_equal(logpdf, -0.69314718056)
        logpdf = stats.beta.logpdf(0, 0.5, 1)
        assert_almost_equal(logpdf, np.inf)
    def test_logpdf_ticket_1866(self):
        # logpdf must stay finite for large shape parameters, and
        # pdf must equal exp(logpdf).
        alpha, beta = 267, 1472
        x = np.array([0.2, 0.5, 0.6])
        b = stats.beta(alpha, beta)
        assert_allclose(b.logpdf(x).sum(), -1201.699061824062)
        assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
    def test_fit_bad_keyword_args(self):
        # Unknown fit keywords must raise TypeError.
        x = [0.1, 0.5, 0.6]
        assert_raises(TypeError, stats.beta.fit, x, floc=0, fscale=1,
                      plate="shrimp")
    def test_fit_duplicated_fixed_parameter(self):
        # At most one of 'f0', 'fa' or 'fix_a' can be given to the fit method.
        # More than one raises a ValueError.
        x = [0.1, 0.5, 0.6]
        assert_raises(ValueError, stats.beta.fit, x, fa=0.5, fix_a=0.5)
class TestBetaPrime(object):
    """Tests for the beta-prime distribution: logpdf finiteness and the
    dedicated cdf implementation (gh-4030)."""
    def test_logpdf(self):
        # logpdf stays finite for large shapes; pdf == exp(logpdf).
        alpha, beta = 267, 1472
        x = np.array([0.2, 0.5, 0.6])
        b = stats.betaprime(alpha, beta)
        assert_(np.isfinite(b.logpdf(x)).all())
        assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
    def test_cdf(self):
        # regression test for gh-4030: Implementation of
        # scipy.stats.betaprime.cdf()
        x = stats.betaprime.cdf(0, 0.2, 0.3)
        assert_equal(x, 0.0)
        alpha, beta = 267, 1472
        x = np.array([0.2, 0.5, 0.6])
        cdfs = stats.betaprime.cdf(x, alpha, beta)
        assert_(np.isfinite(cdfs).all())
        # check the new cdf implementation vs generic one:
        # (_cdf_single is the private quadrature-based fallback)
        gen_cdf = stats.rv_continuous._cdf_single
        cdfs_g = [gen_cdf(stats.betaprime, val, alpha, beta) for val in x]
        assert_allclose(cdfs, cdfs_g, atol=0, rtol=2e-12)
class TestGamma(object):
    """Spot checks of the gamma distribution against R and corner cases."""

    def test_pdf(self):
        # a few test cases to compare with R's dgamma
        assert_almost_equal(stats.gamma.pdf(90, 394, scale=1./5), 0.002312341)
        assert_almost_equal(stats.gamma.pdf(3, 10, scale=1./5), 0.1620358)

    def test_logpdf(self):
        # Regression test for Ticket #1326: the 0*log(0) corner case at
        # x=0, a=1 must give 0, not nan.
        assert_almost_equal(stats.gamma.logpdf(0, 1), 0)

    def test_fit_bad_keyword_args(self):
        # Unknown keyword arguments to fit() must raise TypeError.
        assert_raises(TypeError, stats.gamma.fit, [0.1, 0.5, 0.6],
                      floc=0, plate="shrimp")
class TestChi2(object):
    # regression tests after precision improvements, ticket:1041, not verified
    def test_precision(self):
        assert_almost_equal(stats.chi2.pdf(1000, 1000),
                            8.919133934753128e-003, decimal=14)
        assert_almost_equal(stats.chi2.pdf(100, 100),
                            0.028162503162596778, decimal=14)

    def test_ppf(self):
        # Expected values computed with mpmath.
        cases = [
            # (quantile, df, expected)
            (2e-47, 4.8, 1.098472479575179840604902808e-19),
            (0.5, 4.8, 4.15231407598589358660093156),
            (2e-77, 13, 1.0106330688195199050507943e-11),
            (0.1, 13, 7.041504580095461859307179763),
        ]
        for q, df, expected in cases:
            assert_allclose(stats.chi2.ppf(q, df), expected, rtol=1e-10)
class TestGumbelL(object):
    # gh-6228: round-trip and log-function consistency for gumbel_l
    def test_cdf_ppf(self):
        pts = np.linspace(-100, -4)
        roundtrip = stats.gumbel_l.ppf(stats.gumbel_l.cdf(pts))
        assert_allclose(pts, roundtrip)

    def test_logcdf_logsf(self):
        # exp(logcdf) must agree with -expm1(logsf) in the far left tail
        pts = np.linspace(-100, -4)
        via_logcdf = np.exp(stats.gumbel_l.logcdf(pts))
        via_logsf = -special.expm1(stats.gumbel_l.logsf(pts))
        assert_allclose(via_logcdf, via_logsf)

    def test_sf_isf(self):
        pts = np.linspace(-20, 5)
        roundtrip = stats.gumbel_l.isf(stats.gumbel_l.sf(pts))
        assert_allclose(pts, roundtrip)
class TestLevyStable(object):
    """Tests for the alpha-stable (``levy_stable``) distribution:
    McCulloch quantile-based parameter estimation, and pdf/cdf values
    checked against Nolan's ``stablec`` reference output.
    """

    def test_fit(self):
        # construct data to have percentiles that match
        # example in McCulloch 1986.
        x = [-.05413, -.05413,
             0., 0., 0., 0.,
             .00533, .00533, .00533, .00533, .00533,
             .03354, .03354, .03354, .03354, .03354,
             .05309, .05309, .05309, .05309, .05309]
        alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x)
        assert_allclose(alpha1, 1.48, rtol=0, atol=0.01)
        assert_almost_equal(beta1, -.22, 2)
        assert_almost_equal(scale1, 0.01717, 4)
        # to 2 dps due to rounding error in McCulloch86
        assert_almost_equal(loc1, 0.00233, 2)

        # cover alpha=2 scenario
        x2 = x + [.05309, .05309, .05309, .05309, .05309]
        alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(x2)
        assert_equal(alpha2, 2)
        assert_equal(beta2, -1)
        assert_almost_equal(scale2, .02503, 4)
        assert_almost_equal(loc2, .03354, 4)

    @pytest.mark.slow
    def test_pdf_nolan_samples(self):
        """Test pdf values against Nolan's stablec.exe output;
        see http://fs2.american.edu/jpnolan/www/stable/stable.html

        There's a known limitation of Nolan's executable for alpha < 0.2.

        Repeat following with beta = -1, -.5, 0, .5 and 1
            stablec.exe <<
            1 # pdf
            1 # Nolan S equivalent to S0 in scipy
            .25,2,.25 # alpha
            -1,-1,0 # beta
            -10,10,1 # x
            1,0 # gamma, delta
            2 # output file
        """
        data = np.load(os.path.abspath(os.path.join(
            os.path.dirname(__file__), 'data/stable-pdf-sample-data.npy')))
        data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta')

        # support numpy 1.8.2 for travis
        npisin = np.isin if hasattr(np, "isin") else np.in1d

        # Each entry: [default_method, fft_min_points, decimal_places,
        # row-filter]; filters restrict to the regions where the given
        # evaluation method is known to be accurate.
        tests = [
            # best selects
            ['best', None, 8, None],
            # quadrature is accurate for most alpha except 0.25; perhaps
            # limitation of Nolan stablec?
            # we reduce size of x to speed up computation as numerical
            # integration slow.
            ['quadrature', None, 8,
             lambda r: ((r['alpha'] > 0.25) &
                        (npisin(r['x'], [-10, -5, 0, 5, 10])))],
            # zolatarev is accurate except at alpha==1, beta != 0
            ['zolotarev', None, 8, lambda r: r['alpha'] != 1],
            ['zolotarev', None, 8,
             lambda r: (r['alpha'] == 1) & (r['beta'] == 0)],
            ['zolotarev', None, 1,
             lambda r: (r['alpha'] == 1) & (r['beta'] != 0)],
            # fft accuracy reduces as alpha decreases, fails at low values
            # of alpha and x=0
            ['fft', 0, 4, lambda r: r['alpha'] > 1],
            ['fft', 0, 3, lambda r: (r['alpha'] < 1) & (r['alpha'] > 0.25)],
            # not useful here
            ['fft', 0, 1, lambda r: (r['alpha'] == 0.25) & (r['x'] != 0)],
        ]
        for ix, (default_method, fft_min_points,
                 decimal_places, filter_func) in enumerate(tests):
            stats.levy_stable.pdf_default_method = default_method
            stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points
            subdata = (data[filter_func(data)] if filter_func is not None
                       else data)
            with suppress_warnings() as sup:
                sup.record(RuntimeWarning,
                           "Density calculation unstable for alpha=1 "
                           "and beta!=0.*")
                sup.record(RuntimeWarning,
                           "Density calculations experimental for FFT "
                           "method.*")
                p = stats.levy_stable.pdf(subdata['x'], subdata['alpha'],
                                          subdata['beta'], scale=1, loc=0)
                subdata2 = rec_append_fields(subdata, 'calc', p)
                failures = subdata2[
                    (np.abs(p - subdata['p']) >=
                     1.5*10.**(-decimal_places)) | np.isnan(p)]
                assert_almost_equal(
                    p, subdata['p'], decimal_places,
                    "pdf test %s failed with method '%s'\n%s"
                    % (ix, default_method, failures),
                    verbose=False)

    @pytest.mark.slow
    def test_cdf_nolan_samples(self):
        """Test cdf values against Nolan's stablec.exe output;
        see http://fs2.american.edu/jpnolan/www/stable/stable.html

        There's a known limitation of Nolan's executable for alpha < 0.2.

        Repeat following with beta = -1, -.5, 0, .5 and 1
            stablec.exe <<
            2 # cdf
            1 # Nolan S equivalent to S0 in scipy
            .25,2,.25 # alpha
            -1,-1,0 # beta
            -10,10,1 # x
            1,0 # gamma, delta
            2 # output file
        """
        data = np.load(os.path.abspath(os.path.join(
            os.path.dirname(__file__), 'data/stable-cdf-sample-data.npy')))
        data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta')
        tests = [
            # zolatarev is accurate for all values
            ['zolotarev', None, 8, None],
            # fft accuracy poor, very poor alpha < 1
            ['fft', 0, 2, lambda r: r['alpha'] > 1],
        ]
        for ix, (default_method, fft_min_points,
                 decimal_places, filter_func) in enumerate(tests):
            # NOTE(review): the pdf_* knobs are set here even though the
            # cdf is being exercised -- confirm whether cdf_default_method
            # was intended.
            stats.levy_stable.pdf_default_method = default_method
            stats.levy_stable.pdf_fft_min_points_threshold = fft_min_points
            subdata = (data[filter_func(data)] if filter_func is not None
                       else data)
            with suppress_warnings() as sup:
                sup.record(RuntimeWarning, 'FFT method is considered ' +
                           'experimental for cumulative distribution ' +
                           'function evaluations.*')
                p = stats.levy_stable.cdf(subdata['x'], subdata['alpha'],
                                          subdata['beta'], scale=1, loc=0)
                subdata2 = rec_append_fields(subdata, 'calc', p)
                failures = subdata2[
                    (np.abs(p - subdata['p']) >=
                     1.5*10.**(-decimal_places)) | np.isnan(p)]
                assert_almost_equal(
                    p, subdata['p'], decimal_places,
                    "cdf test %s failed with method '%s'\n%s"
                    % (ix, default_method, failures),
                    verbose=False)

    def test_pdf_alpha_equals_one_beta_non_zero(self):
        """Sample points extracted from Tables and Graphs of Stable
        Probability Density Functions - Donald R Holt - 1973 - p 187.
        """
        xs = np.array([0, 0, 0, 0,
                       1, 1, 1, 1,
                       2, 2, 2, 2,
                       3, 3, 3, 3,
                       4, 4, 4, 4])
        density = np.array([.3183, .3096, .2925, .2622,
                            .1591, .1587, .1599, .1635,
                            .0637, .0729, .0812, .0955,
                            .0318, .0390, .0458, .0586,
                            .0187, .0236, .0285, .0384])
        betas = np.array([0, .25, .5, 1,
                          0, .25, .5, 1,
                          0, .25, .5, 1,
                          0, .25, .5, 1,
                          0, .25, .5, 1])
        tests = [
            ['quadrature', None, 4],
            # ['fft', 0, 4],
            ['zolotarev', None, 1],
        ]
        with np.errstate(all='ignore'), suppress_warnings() as sup:
            sup.filter(category=RuntimeWarning,
                       message="Density calculation unstable.*")
            for default_method, fft_min_points, decimal_places in tests:
                stats.levy_stable.pdf_default_method = default_method
                stats.levy_stable.pdf_fft_min_points_threshold = \
                    fft_min_points
                # stats.levy_stable.fft_grid_spacing = 0.0001
                pdf = stats.levy_stable.pdf(xs, 1, betas, scale=1, loc=0)
                assert_almost_equal(pdf, density, decimal_places,
                                    default_method)

    def test_stats(self):
        # For alpha < 2 the variance is infinite and higher moments are
        # undefined.  Uses np.nan (lowercase) for consistency with the
        # rest of the file; the capitalized np.NaN alias is deprecated
        # and removed in NumPy 2.0.
        param_sets = [
            [(1.48, -.22, 0, 1), (0, np.inf, np.nan, np.nan)],
            [(2, .9, 10, 1.5), (10, 4.5, 0, 0)]
        ]
        for args, exp_stats in param_sets:
            calc_stats = stats.levy_stable.stats(
                args[0], args[1], loc=args[2], scale=args[3],
                moments='mvsk')
            assert_almost_equal(calc_stats, exp_stats)
class TestArrayArgument(object):
    # ticket:992 -- array-valued loc/scale must broadcast against `size`
    def setup_method(self):
        np.random.seed(1234)

    def test_noexception(self):
        sample = stats.norm.rvs(loc=np.arange(5), scale=np.ones(5),
                                size=(10, 5))
        assert_equal(sample.shape, (10, 5))
class TestDocstring(object):
    def test_docstrings(self):
        # See ticket #761: distribution docstrings should mention their
        # own name.  Docstrings are absent when run with `python -OO`,
        # hence the None guards.
        for dist, name in [(stats.rayleigh, "rayleigh"),
                           (stats.bernoulli, "bernoulli")]:
            if dist.__doc__ is not None:
                assert_(name in dist.__doc__.lower())

    def test_no_name_arg(self):
        # If name is not given, construction shouldn't fail.  See #1508.
        stats.rv_continuous()
        stats.rv_discrete()
class TestEntropy(object):
    # Tests of stats.entropy: Shannon entropy and relative entropy
    # (Kullback-Leibler divergence), including `base` and `axis` handling.
    def test_entropy_positive(self):
        # See ticket #497: entropy(pk, pk) == 0, KL divergence >= 0.
        pk = [0.5, 0.2, 0.3]
        qk = [0.1, 0.25, 0.65]
        eself = stats.entropy(pk, pk)
        edouble = stats.entropy(pk, qk)
        assert_(0.0 == eself)
        assert_(edouble >= 0.0)

    def test_entropy_base(self):
        # uniform over 16 states has 4 bits of entropy in base 2
        pk = np.ones(16, float)
        S = stats.entropy(pk, base=2.)
        assert_(abs(S - 4.) < 1.e-5)

        # changing the log base rescales the result by log(base)
        qk = np.ones(16, float)
        qk[:8] = 2.
        S = stats.entropy(pk, qk)
        S2 = stats.entropy(pk, qk, base=2.)
        assert_(abs(S/S2 - np.log(2.)) < 1.e-5)

    def test_entropy_zero(self):
        # Test for PR-479: zero-probability entries contribute 0, not nan.
        assert_almost_equal(stats.entropy([0, 1, 2]), 0.63651416829481278,
                            decimal=12)

    def test_entropy_2d(self):
        # 2-D input reduces along axis 0 by default (one value per column)
        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
        qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
        assert_array_almost_equal(stats.entropy(pk, qk),
                                  [0.1933259, 0.18609809])

    def test_entropy_2d_zero(self):
        # a zero in qk gives an infinite KL divergence ...
        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
        qk = [[0.0, 0.1], [0.3, 0.6], [0.5, 0.3]]
        assert_array_almost_equal(stats.entropy(pk, qk),
                                  [np.inf, 0.18609809])

        # ... unless the matching pk entry is zero as well.
        pk[0][0] = 0.0
        assert_array_almost_equal(stats.entropy(pk, qk),
                                  [0.17403988, 0.18609809])

    def test_entropy_base_2d_nondefault_axis(self):
        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
        assert_array_almost_equal(stats.entropy(pk, axis=1),
                                  [0.63651417, 0.63651417, 0.66156324])

    def test_entropy_2d_nondefault_axis(self):
        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
        qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
        assert_array_almost_equal(stats.entropy(pk, qk, axis=1),
                                  [0.231049, 0.231049, 0.127706])

    def test_entropy_raises_value_error(self):
        # pk and qk must have matching shapes
        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
        qk = [[0.1, 0.2], [0.6, 0.3]]
        assert_raises(ValueError, stats.entropy, pk, qk)

    def test_base_entropy_with_axis_0_is_equal_to_default(self):
        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
        assert_array_almost_equal(stats.entropy(pk, axis=0),
                                  stats.entropy(pk))

    def test_entropy_with_axis_0_is_equal_to_default(self):
        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
        qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
        assert_array_almost_equal(stats.entropy(pk, qk, axis=0),
                                  stats.entropy(pk, qk))

    def test_base_entropy_transposed(self):
        # axis=1 on pk is the same as axis=0 (default) on pk.T
        pk = np.array([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])
        assert_array_almost_equal(stats.entropy(pk.T).T,
                                  stats.entropy(pk, axis=1))

    def test_entropy_transposed(self):
        pk = np.array([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])
        qk = np.array([[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]])
        assert_array_almost_equal(stats.entropy(pk.T, qk.T).T,
                                  stats.entropy(pk, qk, axis=1))
def TestArgsreduce():
a = array([1, 3, 2, 1, 2, 3, 3])
b, c = argsreduce(a > 1, a, 2)
assert_array_equal(b, [3, 2, 2, 3, 3])
assert_array_equal(c, [2, 2, 2, 2, 2])
b, c = argsreduce(2 > 1, a, 2)
assert_array_equal(b, a[0])
assert_array_equal(c, [2])
b, c = argsreduce(a > 0, a, 2)
assert_array_equal(b, a)
assert_array_equal(c, [2] * numpy.size(a))
class TestFitMethod(object):
    """Tests of distribution ``fit`` methods, mostly exercising the
    fixed-parameter keywords (``floc``, ``fscale``, ``f0``/``fa``...)."""

    # NOTE(review): `skip` appears unused within this class -- possibly a
    # leftover from an earlier version of these tests.
    skip = ['ncf', 'ksone', 'kstwo']

    def setup_method(self):
        np.random.seed(1234)

    # skip these b/c deprecated, or only loc and scale arguments
    fitSkipNonFinite = ['frechet_l', 'frechet_r', 'expon', 'norm', 'uniform', ]

    @pytest.mark.parametrize('dist,args', distcont)
    def test_fit_w_non_finite_data_values(self, dist, args):
        """gh-10300: fit must raise on data containing nan or inf."""
        if dist in self.fitSkipNonFinite:
            pytest.skip("%s fit known to fail or deprecated" % dist)
        x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
        y = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
        distfunc = getattr(stats, dist)
        assert_raises(RuntimeError, distfunc.fit, x, floc=0, fscale=1)
        assert_raises(RuntimeError, distfunc.fit, y, floc=0, fscale=1)

    def test_fix_fit_2args_lognorm(self):
        # Regression test for #1551.
        np.random.seed(12345)
        with np.errstate(all='ignore'):
            x = stats.lognorm.rvs(0.25, 0., 20.0, size=20)
            # with loc and scale fixed, the MLE of the shape is the RMS of
            # log(x) - log(scale)
            expected_shape = np.sqrt(((np.log(x) - np.log(20))**2).mean())
            assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)),
                            [expected_shape, 0, 20], atol=1e-8)

    def test_fix_fit_norm(self):
        x = np.arange(1, 6)

        loc, scale = stats.norm.fit(x)
        assert_almost_equal(loc, 3)
        assert_almost_equal(scale, np.sqrt(2))

        loc, scale = stats.norm.fit(x, floc=2)
        assert_equal(loc, 2)
        assert_equal(scale, np.sqrt(3))

        loc, scale = stats.norm.fit(x, fscale=2)
        assert_almost_equal(loc, 3)
        assert_equal(scale, 2)

    def test_fix_fit_gamma(self):
        x = np.arange(1, 6)
        meanlog = np.log(x).mean()

        # A basic test of gamma.fit with floc=0.
        floc = 0
        a, loc, scale = stats.gamma.fit(x, floc=floc)
        # the fitted shape satisfies the MLE condition
        # log(a) - digamma(a) == log(mean(x)) - mean(log(x))
        s = np.log(x.mean()) - meanlog
        assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)

        # Regression tests for gh-2514.
        # The problem was that if `floc=0` was given, any other fixed
        # parameters were ignored.
        f0 = 1
        floc = 0
        a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
        assert_equal(a, f0)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)

        f0 = 2
        floc = 0
        a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
        assert_equal(a, f0)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)

        # loc and scale fixed.
        floc = 0
        fscale = 2
        a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale)
        assert_equal(loc, floc)
        assert_equal(scale, fscale)
        c = meanlog - np.log(fscale)
        assert_almost_equal(special.digamma(a), c)

    def test_fix_fit_beta(self):
        # Test beta.fit when both floc and fscale are given.

        def mlefunc(a, b, x):
            # Zeros of this function are critical points of
            # the maximum likelihood function.
            n = len(x)
            s1 = np.log(x).sum()
            s2 = np.log(1-x).sum()
            psiab = special.psi(a + b)
            func = [s1 - n * (-psiab + special.psi(a)),
                    s2 - n * (-psiab + special.psi(b))]
            return func

        # Basic test with floc and fscale given.
        x = np.array([0.125, 0.25, 0.5])
        a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1)
        assert_equal(loc, 0)
        assert_equal(scale, 1)
        assert_allclose(mlefunc(a, b, x), [0, 0], atol=1e-6)

        # Basic test with f0, floc and fscale given.
        # This is also a regression test for gh-2514.
        x = np.array([0.125, 0.25, 0.5])
        a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1)
        assert_equal(a, 2)
        assert_equal(loc, 0)
        assert_equal(scale, 1)
        da, db = mlefunc(a, b, x)
        assert_allclose(db, 0, atol=1e-5)

        # Same floc and fscale values as above, but reverse the data
        # and fix b (f1).
        x2 = 1 - x
        a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1)
        assert_equal(b2, 2)
        assert_equal(loc2, 0)
        assert_equal(scale2, 1)
        da, db = mlefunc(a2, b2, x2)
        assert_allclose(da, 0, atol=1e-5)
        # a2 of this test should equal b from above.
        assert_almost_equal(a2, b)

        # Check for detection of data out of bounds when floc and fscale
        # are given.
        assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1)
        y = np.array([0, .5, 1])
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1)
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2)
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2)

        # Check that attempting to fix all the parameters raises a ValueError.
        assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1,
                      floc=2, fscale=3)

    def test_expon_fit(self):
        x = np.array([2, 2, 4, 4, 4, 4, 4, 8])

        loc, scale = stats.expon.fit(x)
        assert_equal(loc, 2)    # x.min()
        assert_equal(scale, 2)  # x.mean() - x.min()

        loc, scale = stats.expon.fit(x, fscale=3)
        assert_equal(loc, 2)    # x.min()
        assert_equal(scale, 3)  # fscale

        loc, scale = stats.expon.fit(x, floc=0)
        assert_equal(loc, 0)    # floc
        assert_equal(scale, 4)  # x.mean() - loc

    def test_lognorm_fit(self):
        x = np.array([1.5, 3, 10, 15, 23, 59])
        lnxm1 = np.log(x - 1)

        shape, loc, scale = stats.lognorm.fit(x, floc=1)
        assert_allclose(shape, lnxm1.std(), rtol=1e-12)
        assert_equal(loc, 1)
        assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)

        shape, loc, scale = stats.lognorm.fit(x, floc=1, fscale=6)
        assert_allclose(shape, np.sqrt(((lnxm1 - np.log(6))**2).mean()),
                        rtol=1e-12)
        assert_equal(loc, 1)
        assert_equal(scale, 6)

        shape, loc, scale = stats.lognorm.fit(x, floc=1, fix_s=0.75)
        assert_equal(shape, 0.75)
        assert_equal(loc, 1)
        assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)

    def test_uniform_fit(self):
        x = np.array([1.0, 1.1, 1.2, 9.0])

        loc, scale = stats.uniform.fit(x)
        assert_equal(loc, x.min())
        assert_equal(scale, x.ptp())

        loc, scale = stats.uniform.fit(x, floc=0)
        assert_equal(loc, 0)
        assert_equal(scale, x.max())

        loc, scale = stats.uniform.fit(x, fscale=10)
        assert_equal(loc, 0)
        assert_equal(scale, 10)

        # fixed values that cannot contain the data are rejected
        assert_raises(ValueError, stats.uniform.fit, x, floc=2.0)
        assert_raises(ValueError, stats.uniform.fit, x, fscale=5.0)

    def test_fshapes(self):
        # take a beta distribution, with shapes='a, b', and make sure that
        # fa is equivalent to f0, and fb is equivalent to f1
        a, b = 3., 4.
        x = stats.beta.rvs(a, b, size=100, random_state=1234)
        res_1 = stats.beta.fit(x, f0=3.)
        res_2 = stats.beta.fit(x, fa=3.)
        assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)

        res_2 = stats.beta.fit(x, fix_a=3.)
        assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)

        res_3 = stats.beta.fit(x, f1=4.)
        res_4 = stats.beta.fit(x, fb=4.)
        assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)

        res_4 = stats.beta.fit(x, fix_b=4.)
        assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)

        # cannot specify both positional and named args at the same time
        assert_raises(ValueError, stats.beta.fit, x, fa=1, f0=2)

        # check that attempting to fix all parameters raises a ValueError
        assert_raises(ValueError, stats.beta.fit, x, fa=0, f1=1,
                      floc=2, fscale=3)

        # check that specifying floc, fscale and fshapes works for
        # beta and gamma which override the generic fit method
        res_5 = stats.beta.fit(x, fa=3., floc=0, fscale=1)
        aa, bb, ll, ss = res_5
        assert_equal([aa, ll, ss], [3., 0, 1])

        # gamma distribution
        a = 3.
        data = stats.gamma.rvs(a, size=100)
        aa, ll, ss = stats.gamma.fit(data, fa=a)
        assert_equal(aa, a)

    def test_extra_params(self):
        # unknown parameters should raise rather than be silently ignored
        dist = stats.exponnorm
        data = dist.rvs(K=2, size=100)
        dct = dict(enikibeniki=-101)
        assert_raises(TypeError, dist.fit, data, **dct)
class TestFrozen(object):
    # Test that a frozen distribution gives the same results as the original
    # object.
    #
    # Only tested for the normal distribution (with loc and scale specified)
    # and for the gamma distribution (with a shape parameter specified).
    def setup_method(self):
        np.random.seed(1234)

    def test_norm(self):
        # every public method of the frozen norm must match the unfrozen
        # call with explicit loc/scale
        dist = stats.norm
        frozen = stats.norm(loc=10.0, scale=3.0)

        result_f = frozen.pdf(20.0)
        result = dist.pdf(20.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.cdf(20.0)
        result = dist.cdf(20.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.ppf(0.25)
        result = dist.ppf(0.25, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.isf(0.25)
        result = dist.isf(0.25, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.sf(10.0)
        result = dist.sf(10.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.median()
        result = dist.median(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.mean()
        result = dist.mean(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.var()
        result = dist.var(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.std()
        result = dist.std(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.entropy()
        result = dist.entropy(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.moment(2)
        result = dist.moment(2, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        # the support endpoints are exposed on the frozen object too
        assert_equal(frozen.a, dist.a)
        assert_equal(frozen.b, dist.b)

    def test_gamma(self):
        # same as test_norm but for a distribution with a shape parameter
        a = 2.0
        dist = stats.gamma
        frozen = stats.gamma(a)

        result_f = frozen.pdf(20.0)
        result = dist.pdf(20.0, a)
        assert_equal(result_f, result)

        result_f = frozen.cdf(20.0)
        result = dist.cdf(20.0, a)
        assert_equal(result_f, result)

        result_f = frozen.ppf(0.25)
        result = dist.ppf(0.25, a)
        assert_equal(result_f, result)

        result_f = frozen.isf(0.25)
        result = dist.isf(0.25, a)
        assert_equal(result_f, result)

        result_f = frozen.sf(10.0)
        result = dist.sf(10.0, a)
        assert_equal(result_f, result)

        result_f = frozen.median()
        result = dist.median(a)
        assert_equal(result_f, result)

        result_f = frozen.mean()
        result = dist.mean(a)
        assert_equal(result_f, result)

        result_f = frozen.var()
        result = dist.var(a)
        assert_equal(result_f, result)

        result_f = frozen.std()
        result = dist.std(a)
        assert_equal(result_f, result)

        result_f = frozen.entropy()
        result = dist.entropy(a)
        assert_equal(result_f, result)

        result_f = frozen.moment(2)
        result = dist.moment(2, a)
        assert_equal(result_f, result)

        assert_equal(frozen.a, frozen.dist.a)
        assert_equal(frozen.b, frozen.dist.b)

    def test_regression_ticket_1293(self):
        # Create a frozen distribution.
        frozen = stats.lognorm(1)
        # Call one of its methods that does not take any keyword arguments.
        m1 = frozen.moment(2)
        # Now call a method that takes a keyword argument.
        frozen.stats(moments='mvsk')
        # Call moment(2) again.
        # After calling stats(), the following was raising an exception.
        # So this test passes if the following does not raise an exception.
        m2 = frozen.moment(2)
        # The following should also be true, of course.  But it is not
        # the focus of this test.
        assert_equal(m1, m2)

    def test_ab(self):
        # test that the support of a frozen distribution
        # (i) remains frozen even if it changes for the original one
        # (ii) is actually correct if the shape parameters are such that
        #      the values of [a, b] are not the default [0, inf]
        # take a genpareto as an example where the support
        # depends on the value of the shape parameter:
        # for c > 0: a, b = 0, inf
        # for c < 0: a, b = 0, -1/c
        c = -0.1
        rv = stats.genpareto(c=c)
        a, b = rv.dist._get_support(c)
        assert_equal([a, b], [0., 10.])

        c = 0.1
        stats.genpareto.pdf(0, c=c)
        assert_equal(rv.dist._get_support(c), [0, np.inf])

        # NOTE(review): the following repeats the block above almost
        # verbatim -- presumably to re-check the support after the pdf
        # call; confirm before simplifying.
        c = -0.1
        rv = stats.genpareto(c=c)
        a, b = rv.dist._get_support(c)
        assert_equal([a, b], [0., 10.])

        c = 0.1
        stats.genpareto.pdf(0, c)  # this should NOT change genpareto.b
        assert_equal((rv.dist.a, rv.dist.b), stats.genpareto._get_support(c))

        # freezing creates an independent dist instance
        rv1 = stats.genpareto(c=0.1)
        assert_(rv1.dist is not rv.dist)

        # c >= 0: a, b = [0, inf]
        for c in [1., 0.]:
            c = np.asarray(c)
            rv = stats.genpareto(c=c)
            a, b = rv.a, rv.b
            assert_equal(a, 0.)
            assert_(np.isposinf(b))

        # c < 0: a=0, b=1/|c|
        c = np.asarray(-2.)
        a, b = stats.genpareto._get_support(c)
        assert_allclose([a, b], [0., 0.5])

    def test_rv_frozen_in_namespace(self):
        # Regression test for gh-3522
        assert_(hasattr(stats.distributions, 'rv_frozen'))

    def test_random_state(self):
        # only check that the random_state attribute exists,
        frozen = stats.norm()
        assert_(hasattr(frozen, 'random_state'))

        # ... that it can be set,
        frozen.random_state = 42
        assert_equal(frozen.random_state.get_state(),
                     np.random.RandomState(42).get_state())

        # ... and that .rvs method accepts it as an argument
        rndm = np.random.RandomState(1234)
        frozen.rvs(size=8, random_state=rndm)

    def test_pickling(self):
        # test that a frozen instance pickles and unpickles
        # (this method is a clone of common_tests.check_pickling)
        beta = stats.beta(2.3098496451481823, 0.62687954300963677)
        poiss = stats.poisson(3.)
        sample = stats.rv_discrete(values=([0, 1, 2, 3],
                                           [0.1, 0.2, 0.3, 0.4]))

        for distfn in [beta, poiss, sample]:
            distfn.random_state = 1234
            distfn.rvs(size=8)
            s = pickle.dumps(distfn)
            r0 = distfn.rvs(size=8)

            unpickled = pickle.loads(s)
            r1 = unpickled.rvs(size=8)
            # identical seeds must give identical streams across the
            # pickle round-trip
            assert_equal(r0, r1)

            # also smoke test some methods
            medians = [distfn.ppf(0.5), unpickled.ppf(0.5)]
            assert_equal(medians[0], medians[1])
            assert_equal(distfn.cdf(medians[0]),
                         unpickled.cdf(medians[1]))

    def test_expect(self):
        # smoke test the expect method of the frozen distribution
        # only take a gamma w/loc and scale and poisson with loc specified
        def func(x):
            return x

        gm = stats.gamma(a=2, loc=3, scale=4)
        gm_val = gm.expect(func, lb=1, ub=2, conditional=True)
        gamma_val = stats.gamma.expect(func, args=(2,), loc=3, scale=4,
                                       lb=1, ub=2, conditional=True)
        assert_allclose(gm_val, gamma_val)

        p = stats.poisson(3, loc=4)
        p_val = p.expect(func)
        poisson_val = stats.poisson.expect(func, args=(3,), loc=4)
        assert_allclose(p_val, poisson_val)
class TestExpect(object):
    # Test for expect method.
    #
    # Uses normal distribution and beta distribution for finite bounds, and
    # hypergeom for discrete distribution with finite support
    def test_norm(self):
        # E[(X-5)^2] is the variance; E[X] is the mean
        v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2)
        assert_almost_equal(v, 4, decimal=14)

        m = stats.norm.expect(lambda x: (x), loc=5, scale=2)
        assert_almost_equal(m, 5, decimal=14)

        # probability mass between the 5% and 95% quantiles is 0.9 ...
        lb = stats.norm.ppf(0.05, loc=5, scale=2)
        ub = stats.norm.ppf(0.95, loc=5, scale=2)
        prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub)
        assert_almost_equal(prob90, 0.9, decimal=14)

        # ... and 1.0 when conditioned on being inside the bounds
        prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub,
                                    conditional=True)
        assert_almost_equal(prob90c, 1., decimal=14)

    def test_beta(self):
        # case with finite support interval
        v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10, 5),
                              loc=5, scale=2)
        assert_almost_equal(v, 1./18., decimal=13)

        m = stats.beta.expect(lambda x: x, args=(10, 5), loc=5., scale=2.)
        assert_almost_equal(m, 19/3., decimal=13)

        ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2)
        lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2)
        prob90 = stats.beta.expect(lambda x: 1., args=(10, 10), loc=5.,
                                   scale=2., lb=lb, ub=ub, conditional=False)
        assert_almost_equal(prob90, 0.9, decimal=13)

        prob90c = stats.beta.expect(lambda x: 1, args=(10, 10), loc=5,
                                    scale=2, lb=lb, ub=ub, conditional=True)
        assert_almost_equal(prob90c, 1., decimal=13)

    def test_hypergeom(self):
        # test case with finite bounds

        # without specifying bounds
        m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.)
        m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.)
        assert_almost_equal(m, m_true, decimal=13)

        v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),
                                   loc=5.)
        assert_almost_equal(v, v_true, decimal=14)

        # with bounds, bounds equal to shifted support
        v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2,
                                          args=(20, 10, 8),
                                          loc=5., lb=5, ub=13)
        assert_almost_equal(v_bounds, v_true, decimal=14)

        # drop boundary points
        prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum()
        prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
                                             loc=5., lb=6, ub=12)
        assert_almost_equal(prob_bounds, prob_true, decimal=13)

        # conditional
        prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5.,
                                         lb=6, ub=12, conditional=True)
        assert_almost_equal(prob_bc, 1, decimal=14)

        # check simple integral
        prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
                                        lb=0, ub=8)
        assert_almost_equal(prob_b, 1, decimal=13)

    def test_poisson(self):
        # poisson, use lower bound only
        prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3,
                                           conditional=False)
        prob_b_true = 1-stats.poisson.cdf(2, 2)
        assert_almost_equal(prob_bounds, prob_b_true, decimal=14)

        prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2,
                                       conditional=True)
        assert_almost_equal(prob_lb, 1, decimal=14)

    def test_genhalflogistic(self):
        # genhalflogistic, changes upper bound of support in _argcheck
        # regression test for gh-2622
        halflog = stats.genhalflogistic
        # check consistency when calling expect twice with the same input
        res1 = halflog.expect(args=(1.5,))
        halflog.expect(args=(0.5,))
        res2 = halflog.expect(args=(1.5,))
        assert_almost_equal(res1, res2, decimal=14)

    def test_rice_overflow(self):
        # rice.pdf(999, 0.74) was inf since special.i0 silentyly overflows
        # check that using i0e fixes it
        assert_(np.isfinite(stats.rice.pdf(999, 0.74)))

        assert_(np.isfinite(stats.rice.expect(lambda x: 1, args=(0.74,))))
        assert_(np.isfinite(stats.rice.expect(lambda x: 2, args=(0.74,))))
        assert_(np.isfinite(stats.rice.expect(lambda x: 3, args=(0.74,))))

    def test_logser(self):
        # test a discrete distribution with infinite support and loc
        p, loc = 0.3, 3
        res_0 = stats.logser.expect(lambda k: k, args=(p,))
        # check against the correct answer (sum of a geom series)
        assert_allclose(res_0,
                        p / (p - 1.) / np.log(1. - p), atol=1e-15)

        # now check it with `loc`
        res_l = stats.logser.expect(lambda k: k, args=(p,), loc=loc)
        assert_allclose(res_l, res_0 + loc, atol=1e-15)

    def test_skellam(self):
        # Use a discrete distribution w/ bi-infinite support. Compute two
        # first moments and compare to known values (cf skellam.stats)
        p1, p2 = 18, 22
        m1 = stats.skellam.expect(lambda x: x, args=(p1, p2))
        m2 = stats.skellam.expect(lambda x: x**2, args=(p1, p2))
        assert_allclose(m1, p1 - p2, atol=1e-12)
        assert_allclose(m2 - m1**2, p1 + p2, atol=1e-12)

    def test_randint(self):
        # Use a discrete distribution w/ parameter-dependent support, which
        # is larger than the default chunksize
        lo, hi = 0, 113
        res = stats.randint.expect(lambda x: x, (lo, hi))
        assert_allclose(res,
                        sum(_ for _ in range(lo, hi)) / (hi - lo), atol=1e-15)

    def test_zipf(self):
        # Test that there is no infinite loop even if the sum diverges
        assert_warns(RuntimeWarning, stats.zipf.expect,
                     lambda x: x**2, (2,))

    def test_discrete_kwds(self):
        # check that discrete expect accepts keywords to control the summation
        n0 = stats.poisson.expect(lambda x: 1, args=(2,))
        n1 = stats.poisson.expect(lambda x: 1, args=(2,),
                                  maxcount=1001, chunksize=32, tolerance=1e-8)
        assert_almost_equal(n0, n1, decimal=14)

    def test_moment(self):
        # test the .moment() method: compute a higher moment and compare to
        # a known value
        def poiss_moment5(mu):
            return mu**5 + 10*mu**4 + 25*mu**3 + 15*mu**2 + mu

        for mu in [5, 7]:
            m5 = stats.poisson.moment(5, mu)
            assert_allclose(m5, poiss_moment5(mu), rtol=1e-10)
class TestNct(object):
def test_nc_parameter(self):
# Parameter values c<=0 were not enabled (gh-2402).
# For negative values c and for c=0 results of rv.cdf(0) below were nan
rv = stats.nct(5, 0)
assert_equal(rv.cdf(0), 0.5)
rv = stats.nct(5, -1)
assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10)
def test_broadcasting(self):
res = stats.nct.pdf(5, np.arange(4, 7)[:, None],
np.linspace(0.1, 1, 4))
expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997],
[0.00217142, 0.00395366, 0.00683888, 0.01126276],
[0.00153078, 0.00291093, 0.00525206, 0.00900815]])
assert_allclose(res, expected, rtol=1e-5)
def test_variance_gh_issue_2401(self):
# Computation of the variance of a non-central t-distribution resulted
# in a TypeError: ufunc 'isinf' not supported for the input types,
# and the inputs could not be safely coerced to any supported types
# according to the casting rule 'safe'
rv = stats.nct(4, 0)
assert_equal(rv.var(), 2.0)
def test_nct_inf_moments(self):
# n-th moment of nct only exists for df > n
m, v, s, k = stats.nct.stats(df=1.9, nc=0.3, moments='mvsk')
assert_(np.isfinite(m))
assert_equal([v, s, k], [np.inf, np.nan, np.nan])
m, v, s, k = stats.nct.stats(df=3.1, nc=0.3, moments='mvsk')
assert_(np.isfinite([m, v, s]).all())
assert_equal(k, np.nan)
class TestRice(object):
    def test_rice_zero_b(self):
        # gh-2164: the rice distribution should accept b=0
        pts = [0.2, 1., 5.]
        for method in (stats.rice.pdf, stats.rice.logpdf,
                       stats.rice.cdf, stats.rice.logcdf):
            assert_(np.isfinite(method(pts, b=0.)).all())

        assert_(np.isfinite(stats.rice.ppf([0.1, 0.1, 0.5, 0.9],
                                           b=0.)).all())
        assert_(np.isfinite(stats.rice.stats(0, moments='mvsk')).all())

        # furthermore, pdf is continuous as b\to 0
        # rice.pdf(x, b\to 0) = x exp(-x^2/2) + O(b^2)
        # see e.g. Abramovich & Stegun 9.6.7 & 9.6.10
        assert_allclose(stats.rice.pdf(pts, 0), stats.rice.pdf(pts, 1e-8),
                        atol=1e-8, rtol=0)

    def test_rice_rvs(self):
        # scalar and array-shaped draws both work
        assert_equal(stats.rice.rvs(b=3.).size, 1)
        assert_equal(stats.rice.rvs(b=3., size=(3, 5)).shape, (3, 5))
class TestErlang(object):
    def setup_method(self):
        np.random.seed(1234)

    def test_erlang_runtimewarning(self):
        # erlang should generate a RuntimeWarning if a non-integer
        # shape parameter is used.
        with warnings.catch_warnings():
            # escalate the warning so assert_raises can see it
            warnings.simplefilter("error", RuntimeWarning)

            # the non-integer shape parameter 1.3 triggers the warning
            assert_raises(RuntimeWarning,
                          stats.erlang.rvs, 1.3, loc=0, scale=1, size=4)

            # fit() with `f0` fixed to an integer must stay silent and
            # return the same values as gamma.fit(...)
            data = [0.5, 1.0, 2.0, 4.0]
            assert_allclose(stats.erlang.fit(data, f0=1),
                            stats.gamma.fit(data, f0=1), rtol=1e-3)

    def test_gh_pr_10949_argcheck(self):
        # erlang and gamma must agree on invalid shape parameters too
        assert_equal(stats.erlang.pdf(0.5, a=[1, -1]),
                     stats.gamma.pdf(0.5, a=[1, -1]))
class TestRayleigh(object):
    # Regression tests for gh-6227: logpdf/logsf must stay accurate far
    # in the tail, where pdf/sf themselves underflow to zero.
    def test_logpdf(self):
        assert_allclose(stats.rayleigh.logpdf(50), -1246.0879769945718)

    def test_logsf(self):
        # sf(x) = exp(-x**2/2), so logsf(50) is exactly -1250.
        assert_allclose(stats.rayleigh.logsf(50), -1250)
class TestExponWeib(object):
    def test_pdf_logpdf(self):
        # Regression test for gh-3508: logpdf must stay accurate where
        # pdf underflows toward zero.
        x, a, c = 0.1, 1.0, 100.0
        # Expected values were computed with mpmath.
        assert_allclose([stats.exponweib.pdf(x, a, c),
                         stats.exponweib.logpdf(x, a, c)],
                        [1.0000000000000054e-97, -223.35075402042244])

    def test_a_is_1(self):
        # For issue gh-3508: with a=1, exponweib reduces to weibull_min,
        # for both pdf and logpdf.
        x = np.logspace(-4, -1, 4)
        c = 100
        assert_allclose(stats.exponweib.pdf(x, 1, c),
                        stats.weibull_min.pdf(x, c))
        assert_allclose(stats.exponweib.logpdf(x, 1, c),
                        stats.weibull_min.logpdf(x, c))

    def test_a_is_1_c_is_1(self):
        # When a = 1 and c = 1, the distribution is a standard
        # exponential.
        x = np.logspace(-8, 1, 10)
        assert_allclose(stats.exponweib.pdf(x, 1, 1), stats.expon.pdf(x))
        assert_allclose(stats.exponweib.logpdf(x, 1, 1),
                        stats.expon.logpdf(x))
class TestWeibull(object):
    def test_logpdf(self):
        # gh-6217: logpdf at the left endpoint must be finite (0), not nan.
        y = stats.weibull_min.logpdf(0, 1)
        assert_equal(y, 0)
    def test_with_maxima_distrib(self):
        # Tests for weibull_min and weibull_max.
        # The expected values were computed using the symbolic algebra
        # program 'maxima' with the package 'distrib', which has
        # 'pdf_weibull' and 'cdf_weibull'.  The mapping between the
        # scipy and maxima functions is as follows:
        # -----------------------------------------------------------------
        # scipy                              maxima
        # ---------------------------------  ------------------------------
        # weibull_min.pdf(x, a, scale=b)     pdf_weibull(x, a, b)
        # weibull_min.logpdf(x, a, scale=b)  log(pdf_weibull(x, a, b))
        # weibull_min.cdf(x, a, scale=b)     cdf_weibull(x, a, b)
        # weibull_min.logcdf(x, a, scale=b)  log(cdf_weibull(x, a, b))
        # weibull_min.sf(x, a, scale=b)      1 - cdf_weibull(x, a, b)
        # weibull_min.logsf(x, a, scale=b)   log(1 - cdf_weibull(x, a, b))
        #
        # weibull_max.pdf(x, a, scale=b)     pdf_weibull(-x, a, b)
        # weibull_max.logpdf(x, a, scale=b)  log(pdf_weibull(-x, a, b))
        # weibull_max.cdf(x, a, scale=b)     1 - cdf_weibull(-x, a, b)
        # weibull_max.logcdf(x, a, scale=b)  log(1 - cdf_weibull(-x, a, b))
        # weibull_max.sf(x, a, scale=b)      cdf_weibull(-x, a, b)
        # weibull_max.logsf(x, a, scale=b)   log(cdf_weibull(-x, a, b))
        # -----------------------------------------------------------------
        x = 1.5
        a = 2.0
        b = 3.0
        # weibull_min
        # For these parameters (x/b)**a = 0.25, hence the constants below.
        p = stats.weibull_min.pdf(x, a, scale=b)
        assert_allclose(p, np.exp(-0.25)/3)
        lp = stats.weibull_min.logpdf(x, a, scale=b)
        assert_allclose(lp, -0.25 - np.log(3))
        c = stats.weibull_min.cdf(x, a, scale=b)
        assert_allclose(c, -special.expm1(-0.25))
        lc = stats.weibull_min.logcdf(x, a, scale=b)
        assert_allclose(lc, np.log(-special.expm1(-0.25)))
        s = stats.weibull_min.sf(x, a, scale=b)
        assert_allclose(s, np.exp(-0.25))
        ls = stats.weibull_min.logsf(x, a, scale=b)
        assert_allclose(ls, -0.25)
        # Also test using a large value x, for which computing the survival
        # function using the CDF would result in 0.
        s = stats.weibull_min.sf(30, 2, scale=3)
        assert_allclose(s, np.exp(-100))
        ls = stats.weibull_min.logsf(30, 2, scale=3)
        assert_allclose(ls, -100)
        # weibull_max
        # Mirror-image checks: weibull_max(x) corresponds to
        # weibull_min(-x), so cdf/sf swap roles relative to the above.
        x = -1.5
        p = stats.weibull_max.pdf(x, a, scale=b)
        assert_allclose(p, np.exp(-0.25)/3)
        lp = stats.weibull_max.logpdf(x, a, scale=b)
        assert_allclose(lp, -0.25 - np.log(3))
        c = stats.weibull_max.cdf(x, a, scale=b)
        assert_allclose(c, np.exp(-0.25))
        lc = stats.weibull_max.logcdf(x, a, scale=b)
        assert_allclose(lc, -0.25)
        s = stats.weibull_max.sf(x, a, scale=b)
        assert_allclose(s, -special.expm1(-0.25))
        ls = stats.weibull_max.logsf(x, a, scale=b)
        assert_allclose(ls, np.log(-special.expm1(-0.25)))
        # Also test using a value of x close to 0, for which computing the
        # survival function using the CDF would result in 0.
        s = stats.weibull_max.sf(-1e-9, 2, scale=3)
        assert_allclose(s, -special.expm1(-1/9000000000000000000))
        ls = stats.weibull_max.logsf(-1e-9, 2, scale=3)
        assert_allclose(ls, np.log(-special.expm1(-1/9000000000000000000)))
class TestRdist(object):
    def test_rdist_cdf_gh1285(self):
        # ppf followed by cdf must round-trip even for a large shape
        # parameter; this exercises the workaround in rdist._cdf for
        # issue gh-1285.
        dist = stats.rdist
        probs = [0.001, 0.5, 0.999]
        assert_almost_equal(dist.cdf(dist.ppf(probs, 541.0), 541.0),
                            probs, decimal=5)

    def test_rdist_beta(self):
        # rdist(c) on [-1, 1] is a rescaled beta(c/2, c/2) on [0, 1].
        pts = np.linspace(-0.99, 0.99, 10)
        c = 2.7
        assert_almost_equal(stats.rdist(c).pdf(pts),
                            0.5*stats.beta(c/2, c/2).pdf((pts + 1)/2))
class TestTrapz(object):
    def test_reduces_to_triang(self):
        # When both corner parameters coincide, the trapezoidal
        # distribution degenerates to the triangular one.
        modes = [0, 0.3, 0.5, 1]
        for mode in modes:
            x = [0, mode, 1]
            assert_almost_equal(stats.trapz.pdf(x, mode, mode),
                                stats.triang.pdf(x, mode))
            assert_almost_equal(stats.trapz.cdf(x, mode, mode),
                                stats.triang.cdf(x, mode))
    def test_reduces_to_uniform(self):
        # Corners at 0 and 1 make the plateau span the whole support,
        # i.e. a uniform distribution.
        x = np.linspace(0, 1, 10)
        assert_almost_equal(stats.trapz.pdf(x, 0, 1), stats.uniform.pdf(x))
        assert_almost_equal(stats.trapz.cdf(x, 0, 1), stats.uniform.cdf(x))
    def test_cases(self):
        # edge cases
        assert_almost_equal(stats.trapz.pdf(0, 0, 0), 2)
        assert_almost_equal(stats.trapz.pdf(1, 1, 1), 2)
        assert_almost_equal(stats.trapz.pdf(0.5, 0, 0.8),
                            1.11111111111111111)
        assert_almost_equal(stats.trapz.pdf(0.5, 0.2, 1.0),
                            1.11111111111111111)
        # straightforward case
        assert_almost_equal(stats.trapz.pdf(0.1, 0.2, 0.8), 0.625)
        assert_almost_equal(stats.trapz.pdf(0.5, 0.2, 0.8), 1.25)
        assert_almost_equal(stats.trapz.pdf(0.9, 0.2, 0.8), 0.625)
        assert_almost_equal(stats.trapz.cdf(0.1, 0.2, 0.8), 0.03125)
        assert_almost_equal(stats.trapz.cdf(0.2, 0.2, 0.8), 0.125)
        assert_almost_equal(stats.trapz.cdf(0.5, 0.2, 0.8), 0.5)
        assert_almost_equal(stats.trapz.cdf(0.9, 0.2, 0.8), 0.96875)
        assert_almost_equal(stats.trapz.cdf(1.0, 0.2, 0.8), 1.0)
    def test_trapz_vect(self):
        # test that array-valued shapes and arguments are handled
        c = np.array([0.1, 0.2, 0.3])
        d = np.array([0.5, 0.6])[:, None]
        x = np.array([0.15, 0.25, 0.9])
        v = stats.trapz.pdf(x, c, d)
        # Compare the broadcast result against scalar evaluations at
        # every broadcast position.
        cc, dd, xx = np.broadcast_arrays(c, d, x)
        res = np.empty(xx.size, dtype=xx.dtype)
        ind = np.arange(xx.size)
        for i, x1, c1, d1 in zip(ind, xx.ravel(), cc.ravel(), dd.ravel()):
            res[i] = stats.trapz.pdf(x1, c1, d1)
        assert_allclose(v, res.reshape(v.shape), atol=1e-15)
class TestTriang(object):
    def test_edge_cases(self):
        # The degenerate modes c=0 and c=1 must evaluate cleanly;
        # errstate(all='raise') turns any stray warning into a failure.
        with np.errstate(all='raise'):
            assert_equal(stats.triang.pdf(0, 0), 2.)
            assert_equal(stats.triang.pdf(0.5, 0), 1.)
            assert_equal(stats.triang.pdf(1, 0), 0.)
            assert_equal(stats.triang.pdf(0, 1), 0)
            assert_equal(stats.triang.pdf(0.5, 1), 1.)
            assert_equal(stats.triang.pdf(1, 1), 2)
            assert_equal(stats.triang.cdf(0., 0.), 0.)
            assert_equal(stats.triang.cdf(0.5, 0.), 0.75)
            assert_equal(stats.triang.cdf(1.0, 0.), 1.0)
            assert_equal(stats.triang.cdf(0., 1.), 0.)
            assert_equal(stats.triang.cdf(0.5, 1.), 0.25)
            assert_equal(stats.triang.cdf(1., 1.), 1)
class TestMielke(object):
    def test_moments(self):
        k, s = 4.642, 0.597
        # The n-th moment of mielke(k, s) exists only for n < s, so any
        # s <= 1 makes even the first moment infinite.
        assert_equal(stats.mielke(k, s).moment(1), np.inf)
        assert_equal(stats.mielke(k, 1.0).moment(1), np.inf)
        assert_(np.isfinite(stats.mielke(k, 1.01).moment(1)))

    def test_burr_equivalence(self):
        # mielke(k, s) coincides with burr(s, k/s).
        grid = np.linspace(0.01, 100, 50)
        k, s = 2.45, 5.32
        assert_allclose(stats.burr.pdf(grid, s, k/s),
                        stats.mielke.pdf(grid, k, s))
class TestBurr(object):
    def test_endpoints_7491(self):
        # gh-7491
        # Compute the pdf at the left endpoint dst.a.
        # Each row: (distribution, shape args, expected pdf at the left
        # support endpoint).
        data = [
            [stats.fisk, (1,), 1],
            [stats.burr, (0.5, 2), 1],
            [stats.burr, (1, 1), 1],
            [stats.burr, (2, 0.5), 1],
            [stats.burr12, (1, 0.5), 0.5],
            [stats.burr12, (1, 1), 1.0],
            [stats.burr12, (1, 2), 2.0]]
        ans = [_f.pdf(_f.a, *_args) for _f, _args, _ in data]
        correct = [_correct_ for _f, _args, _correct_ in data]
        assert_array_almost_equal(ans, correct)
        # logpdf at the endpoint must agree with log(pdf) there.
        ans = [_f.logpdf(_f.a, *_args) for _f, _args, _ in data]
        correct = [np.log(_correct_) for _f, _args, _correct_ in data]
        assert_array_almost_equal(ans, correct)
    def test_burr_stats_9544(self):
        # gh-9544.  Test from gh-9978
        c, d = 5.0, 3
        mean, variance = stats.burr(c, d).stats()
        # mean = sc.beta(3 + 1/5, 1. - 1/5) * 3 = 1.4110263...
        # var =  sc.beta(3 + 2 / 5, 1. - 2 / 5) * 3 - (sc.beta(3 + 1 / 5, 1. - 1 / 5) * 3) ** 2
        mean_hc, variance_hc = 1.4110263183925857, 0.22879948026191643
        assert_allclose(mean, mean_hc)
        assert_allclose(variance, variance_hc)
    def test_burr_nan_mean_var_9544(self):
        # gh-9544.  Test from gh-9978
        # The n-th moment of burr(c, d) exists only for n < c*d... here
        # the relevant threshold is c: moments of order >= c are nan.
        c, d = 0.5, 3
        mean, variance = stats.burr(c, d).stats()
        assert_(np.isnan(mean))
        assert_(np.isnan(variance))
        c, d = 1.5, 3
        mean, variance = stats.burr(c, d).stats()
        assert_(np.isfinite(mean))
        assert_(np.isnan(variance))
        c, d = 0.5, 3
        e1, e2, e3, e4 = stats.burr._munp(np.array([1, 2, 3, 4]), c, d)
        assert_(np.isnan(e1))
        assert_(np.isnan(e2))
        assert_(np.isnan(e3))
        assert_(np.isnan(e4))
        c, d = 1.5, 3
        e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
        assert_(np.isfinite(e1))
        assert_(np.isnan(e2))
        assert_(np.isnan(e3))
        assert_(np.isnan(e4))
        c, d = 2.5, 3
        e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
        assert_(np.isfinite(e1))
        assert_(np.isfinite(e2))
        assert_(np.isnan(e3))
        assert_(np.isnan(e4))
        c, d = 3.5, 3
        e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
        assert_(np.isfinite(e1))
        assert_(np.isfinite(e2))
        assert_(np.isfinite(e3))
        assert_(np.isnan(e4))
        c, d = 4.5, 3
        e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
        assert_(np.isfinite(e1))
        assert_(np.isfinite(e2))
        assert_(np.isfinite(e3))
        assert_(np.isfinite(e4))
def test_540_567():
    # Regression test for tickets 540 and 567: norm.cdf used to return
    # nan for these particular arguments.
    cases = [
        ((-1.7624320982,), {}, 0.03899815971089126),
        ((-1.7624320983,), {}, 0.038998159702449846),
        ((1.38629436112,), dict(loc=0.950273420309, scale=0.204423758009),
         0.98353464004309321),
    ]
    for args, kwds, expected in cases:
        assert_almost_equal(stats.norm.cdf(*args, **kwds), expected,
                            decimal=10, err_msg='test_540_567')
def test_regression_ticket_1316():
    # The following was raising an exception, because _construct_default_doc()
    # did not handle the default keyword extradoc=None.  See ticket #1316.
    stats._continuous_distns.gamma_gen(name='gamma')
def test_regression_ticket_1326():
    # adjust to avoid nan with 0*log(0)
    # chi2 pdf at the left endpoint with df=2 is exp(0)/2 = 0.5.
    assert_almost_equal(stats.chi2.pdf(0.0, 2), 0.5, 14)
def test_regression_tukey_lambda():
    # Make sure that Tukey-Lambda distribution correctly handles
    # non-positive lambdas: the pdf must be nonzero (and not all-nan)
    # over the support.
    x = np.linspace(-5.0, 5.0, 101)
    # np.errstate is the idiomatic, exception-safe replacement for the
    # manual np.seterr/try/finally dance the file used here; log-of-zero
    # divide warnings are expected for some lambdas, so suppress them.
    with np.errstate(divide='ignore'):
        for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]:
            p = stats.tukeylambda.pdf(x, lam)
            assert_((p != 0.0).all())
            assert_(~np.isnan(p).all())
        # A positive lambda (2.0) has bounded support, so part of p[2]
        # is exactly zero while the rest is nonzero.
        lam = np.array([[-1.0], [0.0], [2.0]])
        p = stats.tukeylambda.pdf(x, lam)
    assert_(~np.isnan(p).all())
    assert_((p[0] != 0.0).all())
    assert_((p[1] != 0.0).all())
    assert_((p[2] != 0.0).any())
    assert_((p[2] == 0.0).any())
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped")
def test_regression_ticket_1421():
    # Discrete distributions must document pmf (not pdf) in their
    # docstrings; see ticket #1421.
    assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__)
    assert_('pmf(x,' in stats.poisson.__doc__)
def test_nan_arguments_gh_issue_1362():
    # A nan argument must propagate to a nan result for every method,
    # rather than raising.  Regression test for gh-1362.
    with np.errstate(invalid='ignore'):
        # Continuous: nan shape parameter for Student's t.
        for name in ('logcdf', 'cdf', 'logsf', 'sf',
                     'pdf', 'logpdf', 'ppf', 'isf'):
            assert_(np.isnan(getattr(stats.t, name)(1, np.nan)))
        # Discrete: nan quantile/probability for bernoulli.
        for name in ('logcdf', 'cdf', 'logsf', 'sf',
                     'pmf', 'logpmf', 'ppf', 'isf'):
            assert_(np.isnan(getattr(stats.bernoulli, name)(np.nan, 0.5)))
def test_frozen_fit_ticket_1536():
    # Fitting with one parameter frozen (floc/fscale/f0) must still
    # recover the remaining parameters.  Regression test for ticket 1536.
    np.random.seed(5678)
    true = np.array([0.25, 0., 0.5])
    x = stats.lognorm.rvs(true[0], true[1], true[2], size=100)
    # np.errstate replaces the manual np.seterr/try/finally dance; the
    # frozen-loc fit works with log(x - floc), which can emit harmless
    # divide warnings during optimization.
    with np.errstate(divide='ignore'):
        params = np.array(stats.lognorm.fit(x, floc=0.))
    assert_almost_equal(params, true, decimal=2)
    params = np.array(stats.lognorm.fit(x, fscale=0.5, loc=0))
    assert_almost_equal(params, true, decimal=2)
    params = np.array(stats.lognorm.fit(x, f0=0.25, loc=0))
    assert_almost_equal(params, true, decimal=2)
    params = np.array(stats.lognorm.fit(x, f0=0.25, floc=0))
    assert_almost_equal(params, true, decimal=2)
    # With loc frozen, the ML scale of a normal is the RMS deviation
    # from floc (not the usual standard deviation about the sample mean).
    np.random.seed(5678)
    loc = 1
    floc = 0.9
    x = stats.norm.rvs(loc, 2., size=100)
    params = np.array(stats.norm.fit(x, floc=floc))
    expected = np.array([floc, np.sqrt(((x-floc)**2).mean())])
    assert_almost_equal(params, expected, decimal=4)
def test_regression_ticket_1530():
    # Check the starting value works for Cauchy distribution fit.
    np.random.seed(654321)
    rvs = stats.cauchy.rvs(size=100)
    params = stats.cauchy.fit(rvs)
    expected = (0.045, 1.142)
    assert_almost_equal(params, expected, decimal=1)
def test_gh_pr_4806():
    # Check starting values for Cauchy distribution fit.
    # The fit must cope with data far from the origin: loc should track
    # the offset and scale stay near that of the unshifted sample.
    np.random.seed(1234)
    x = np.random.randn(42)
    for offset in 10000.0, 1222333444.0:
        loc, scale = stats.cauchy.fit(x + offset)
        assert_allclose(loc, offset, atol=1.0)
        assert_allclose(scale, 0.6, atol=1.0)
def test_tukeylambda_stats_ticket_1545():
    # Some test for the variance and kurtosis of the Tukey Lambda distr.
    # See test_tukeylamdba_stats.py for more tests.
    mv = stats.tukeylambda.stats(0, moments='mvsk')
    # Known exact values:
    expected = [0, np.pi**2/3, 0, 1.2]
    assert_almost_equal(mv, expected, decimal=10)
    mv = stats.tukeylambda.stats(3.13, moments='mvsk')
    # 'expected' computed with mpmath.
    expected = [0, 0.0269220858861465102, 0, -0.898062386219224104]
    assert_almost_equal(mv, expected, decimal=10)
    mv = stats.tukeylambda.stats(0.14, moments='mvsk')
    # 'expected' computed with mpmath.
    expected = [0, 2.11029702221450250, 0, -0.02708377353223019456]
    assert_almost_equal(mv, expected, decimal=10)
def test_poisson_logpmf_ticket_1436():
    # logpmf must remain finite far in the tail, where pmf underflows.
    assert_(np.isfinite(stats.poisson.logpmf(1500, 200)))
def test_powerlaw_stats():
    """Test the powerlaw stats function.
    This unit test is also a regression test for ticket 1548.
    The exact values are:
    mean:
        mu = a / (a + 1)
    variance:
        sigma**2 = a / ((a + 2) * (a + 1) ** 2)
    skewness:
        One formula (see https://en.wikipedia.org/wiki/Skewness) is
            gamma_1 = (E[X**3] - 3*mu*E[X**2] + 2*mu**3) / sigma**3
        A short calculation shows that E[X**k] is a / (a + k), so gamma_1
        can be implemented as
            n = a/(a+3) - 3*(a/(a+1))*a/(a+2) + 2*(a/(a+1))**3
            d = sqrt(a/((a+2)*(a+1)**2)) ** 3
            gamma_1 = n/d
        Either by simplifying, or by a direct calculation of mu_3 / sigma**3,
        one gets the more concise formula:
            gamma_1 = -2.0 * ((a - 1) / (a + 3)) * sqrt((a + 2) / a)
    kurtosis: (See https://en.wikipedia.org/wiki/Kurtosis)
        The excess kurtosis is
            gamma_2 = mu_4 / sigma**4 - 3
        A bit of calculus and algebra (sympy helps) shows that
            mu_4 = 3*a*(3*a**2 - a + 2) / ((a+1)**4 * (a+2) * (a+3) * (a+4))
        so
            gamma_2 = 3*(3*a**2 - a + 2) * (a+2) / (a*(a+3)*(a+4)) - 3
        which can be rearranged to
            gamma_2 = 6 * (a**3 - a**2 - 6*a + 2) / (a*(a+3)*(a+4))
    """
    # Each case: (shape a, exact (mean, var, skew, kurtosis) from the
    # formulas in the docstring).
    cases = [(1.0, (0.5, 1./12, 0.0, -1.2)),
             (2.0, (2./3, 2./36, -0.56568542494924734, -0.6))]
    for a, exact_mvsk in cases:
        mvsk = stats.powerlaw.stats(a, moments="mvsk")
        assert_array_almost_equal(mvsk, exact_mvsk)
def test_powerlaw_edge():
    # Regression test for gh-3986: logpdf at the left endpoint with a=1.
    p = stats.powerlaw.logpdf(0, 1)
    assert_equal(p, 0.0)
def test_exponpow_edge():
    # Regression test for gh-3982.
    p = stats.exponpow.logpdf(0, 1)
    assert_equal(p, 0.0)
    # Check pdf and logpdf at x = 0 for other values of b.
    # b < 1 diverges at 0, b == 1 is finite, b > 1 vanishes.
    p = stats.exponpow.pdf(0, [0.25, 1.0, 1.5])
    assert_equal(p, [np.inf, 1.0, 0.0])
    p = stats.exponpow.logpdf(0, [0.25, 1.0, 1.5])
    assert_equal(p, [np.inf, 0.0, -np.inf])
def test_gengamma_edge():
    # Regression test for gh-3985.
    p = stats.gengamma.pdf(0, 1, 1)
    assert_equal(p, 1.0)
    # Regression tests for gh-4724: negative-order moments.
    # E[X**-2] for gamma(a) is 1/((a-1)(a-2)).
    p = stats.gengamma._munp(-2, 200, 1.)
    assert_almost_equal(p, 1./199/198)
    p = stats.gengamma._munp(-2, 10, 1.)
    assert_almost_equal(p, 1./9/8)
def test_ksone_fit_freeze():
    # Regression test for ticket #1638: ksone.fit used to hang on this
    # data set.  The test passes if fit() returns at all.
    d = np.array(
        [-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649,
         -0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294,
         0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048,
         0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724,
         0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662,
         0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391,
         -0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725,
         -0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199,
         -0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746,
         -0.06037974, 0.37670779, -0.21684405])
    try:
        olderr = np.seterr(invalid='ignore')
        with suppress_warnings() as sup:
            # The quadrature and truncation warnings below are expected
            # side effects of the fit, not failures.
            sup.filter(IntegrationWarning,
                       "The maximum number of subdivisions .50. has been "
                       "achieved.")
            sup.filter(RuntimeWarning,
                       "floating point number truncated to an integer")
            stats.ksone.fit(d)
    finally:
        np.seterr(**olderr)
def test_norm_logcdf():
    # Test precision of the logcdf of the normal distribution.
    # This precision was enhanced in ticket 1614.
    x = -np.asarray(list(range(0, 120, 4)))
    # Values from R
    expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300,
                -131.69539607, -203.91715537, -292.09872100, -396.25241451,
                -516.38564863, -652.50322759, -804.60844201, -972.70364403,
                -1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068,
                -2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493,
                -3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522,
                -4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548,
                -6277.63751711, -6733.67260303]
    assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8)
    # also test the complex-valued code path
    assert_allclose(stats.norm().logcdf(x + 1e-14j).real, expected, atol=1e-8)
    # test the accuracy: d(logcdf)/dx = pdf / cdf \equiv exp(logpdf - logcdf)
    # (complex-step differentiation gives the derivative without
    # subtractive cancellation)
    deriv = (stats.norm.logcdf(x + 1e-10j)/1e-10).imag
    deriv_expected = np.exp(stats.norm.logpdf(x) - stats.norm.logcdf(x))
    assert_allclose(deriv, deriv_expected, atol=1e-10)
def test_levy_cdf_ppf():
    # Test levy.cdf, including small arguments.
    x = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001])
    # Expected values were calculated separately with mpmath.
    # E.g.
    # >>> mpmath.mp.dps = 100
    # >>> x = mpmath.mp.mpf('0.01')
    # >>> cdf = mpmath.erfc(mpmath.sqrt(1/(2*x)))
    expected = np.array([0.9747728793699604,
                         0.3173105078629141,
                         0.1572992070502851,
                         0.0015654022580025495,
                         1.523970604832105e-23,
                         1.795832784800726e-219])
    y = stats.levy.cdf(x)
    assert_allclose(y, expected, rtol=1e-10)
    # ppf(expected) should get us back to x.
    xx = stats.levy.ppf(expected)
    assert_allclose(xx, x, rtol=1e-13)
def test_hypergeom_interval_1802():
    # these two had endless loops
    assert_equal(stats.hypergeom.interval(.95, 187601, 43192, 757),
                 (152.0, 197.0))
    assert_equal(stats.hypergeom.interval(.945, 187601, 43192, 757),
                 (152.0, 197.0))
    # this was working also before
    assert_equal(stats.hypergeom.interval(.94, 187601, 43192, 757),
                 (153.0, 196.0))
    # degenerate case .a == .b
    # (sampling 8 from a population where every item is a success)
    assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8)
    assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8)
def test_distribution_too_many_args():
    np.random.seed(1234)
    # Check that a TypeError is raised when too many args are given to a method
    # Regression test for ticket 1815.
    # gamma has one shape parameter, so any call supplying more than
    # shape + loc + scale positionally/by keyword must fail.
    x = np.linspace(0.1, 0.7, num=5)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, scale=0.5)
    # These should not give errors
    stats.gamma.pdf(x, 2, 3)  # loc=3
    stats.gamma.pdf(x, 2, 3, 4)  # loc=3, scale=4
    stats.gamma.stats(2., 3)
    stats.gamma.stats(2., 3, 4)
    stats.gamma.stats(2., 3, 4, 'mv')
    stats.gamma.rvs(2., 3, 4, 5)
    stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.)
    # Also for a discrete distribution
    stats.geom.pmf(x, 2, loc=3)  # no error, loc=3
    assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4)
    assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4)
    # And for distributions with 0, 2 and 3 args respectively
    assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0)
    assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0)
    assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1)
    assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0)
    assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5)
    stats.ncf.pdf(x, 3, 4, 5, 6, 1.0)  # 3 args, plus loc/scale
def test_ncx2_tails_ticket_955():
    # Trac #955 -- check that the cdf computed by special functions
    # matches the integrated pdf
    a = stats.ncx2.cdf(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
    b = stats.ncx2._cdfvec(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
    assert_allclose(a, b, rtol=1e-3, atol=0)
def test_ncx2_tails_pdf():
    # ncx2.pdf does not return nans in extreme tails(example from gh-1577)
    # NB: this is to check that nan_to_num is not needed in ncx2.pdf
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "divide by zero encountered in log")
        assert_equal(stats.ncx2.pdf(1, np.arange(340, 350), 2), 0)
        logval = stats.ncx2.logpdf(1, np.arange(340, 350), 2)
    assert_(np.isneginf(logval).all())
@pytest.mark.parametrize('method, expected', [
    ('cdf', np.array([2.497951336e-09, 3.437288941e-10])),
    ('pdf', np.array([1.238579980e-07, 1.710041145e-08])),
    ('logpdf', np.array([-15.90413011, -17.88416331])),
    ('ppf', np.array([4.865182052, 7.017182271]))
])
def test_ncx2_zero_nc(method, expected):
    # gh-5441
    # ncx2 with nc=0 is identical to chi2
    # Comparison to R (v3.5.1)
    # > options(digits=10)
    # > pchisq(0.1, df=10, ncp=c(0,4))
    # > dchisq(0.1, df=10, ncp=c(0,4))
    # > dchisq(0.1, df=10, ncp=c(0,4), log=TRUE)
    # > qchisq(0.1, df=10, ncp=c(0,4))
    result = getattr(stats.ncx2, method)(0.1, nc=[0, 4], df=10)
    assert_allclose(result, expected, atol=1e-15)
def test_ncx2_zero_nc_rvs():
    # gh-5441
    # ncx2 with nc=0 is identical to chi2
    # (same random_state, so the draws must coincide exactly)
    result = stats.ncx2.rvs(df=10, nc=0, random_state=1)
    expected = stats.chi2.rvs(df=10, random_state=1)
    assert_allclose(result, expected, atol=1e-15)
def test_foldnorm_zero():
    # Parameter value c=0 was not enabled, see gh-2399.
    rv = stats.foldnorm(0, scale=1)
    assert_equal(rv.cdf(0), 0)  # rv.cdf(0) previously resulted in: nan
def test_stats_shapes_argcheck():
    # stats method was failing for vector shapes if some of the values
    # were outside of the allowed range, see gh-2678
    # Invalid entries must come back as nan alongside the valid ones.
    mv3 = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5)  # 0 is not a legal `a`
    mv2 = stats.invgamma.stats([0.5, 1.0], 1, 0.5)
    mv2_augmented = tuple(np.r_[np.nan, _] for _ in mv2)
    assert_equal(mv2_augmented, mv3)
    # -1 is not a legal shape parameter
    mv3 = stats.lognorm.stats([2, 2.4, -1])
    mv2 = stats.lognorm.stats([2, 2.4])
    mv2_augmented = tuple(np.r_[_, np.nan] for _ in mv2)
    assert_equal(mv2_augmented, mv3)
    # FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix.
    # stats method with multiple shape parameters is not properly vectorized
    # anyway, so some distributions may or may not fail.
# Test subclassing distributions w/ explicit shapes
# Helper subclasses of rv_continuous used by the TestSubclassing* classes
# below to exercise the shape-parameter inspection machinery.
class _distr_gen(stats.rv_continuous):
    # One shape parameter, _pdf only.
    def _pdf(self, x, a):
        return 42
class _distr2_gen(stats.rv_continuous):
    # One shape parameter, _cdf only (pdf falls back to a numerical
    # derivative of this).
    def _cdf(self, x, a):
        return 42 * a + x
class _distr3_gen(stats.rv_continuous):
    # Deliberately inconsistent: _pdf takes two shapes, _cdf takes one.
    def _pdf(self, x, a, b):
        return a + b
    def _cdf(self, x, a):
        # Different # of shape params from _pdf, to be able to check that
        # inspection catches the inconsistency."""
        return 42 * a + x
class _distr6_gen(stats.rv_continuous):
    # Two shape parameters (both _pdf and _cdf defined, consistent shapes.)
    def _pdf(self, x, a, b):
        return a*x + b
    def _cdf(self, x, a, b):
        return 42 * a + x
class TestSubclassingExplicitShapes(object):
# Construct a distribution w/ explicit shapes parameter and test it.
def test_correct_shapes(self):
dummy_distr = _distr_gen(name='dummy', shapes='a')
assert_equal(dummy_distr.pdf(1, a=1), 42)
def test_wrong_shapes_1(self):
dummy_distr = _distr_gen(name='dummy', shapes='A')
assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1))
def test_wrong_shapes_2(self):
dummy_distr = _distr_gen(name='dummy', shapes='a, b, c')
dct = dict(a=1, b=2, c=3)
assert_raises(TypeError, dummy_distr.pdf, 1, **dct)
def test_shapes_string(self):
# shapes must be a string
dct = dict(name='dummy', shapes=42)
assert_raises(TypeError, _distr_gen, **dct)
def test_shapes_identifiers_1(self):
# shapes must be a comma-separated list of valid python identifiers
dct = dict(name='dummy', shapes='(!)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_2(self):
dct = dict(name='dummy', shapes='4chan')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_3(self):
dct = dict(name='dummy', shapes='m(fti)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_nodefaults(self):
dct = dict(name='dummy', shapes='a=2')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_args(self):
dct = dict(name='dummy', shapes='*args')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_kwargs(self):
dct = dict(name='dummy', shapes='**kwargs')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_keywords(self):
# python keywords cannot be used for shape parameters
dct = dict(name='dummy', shapes='a, b, c, lambda')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_signature(self):
# test explicit shapes which agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a')
assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2)
def test_shapes_signature_inconsistent(self):
# test explicit shapes which do not agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a, b')
assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2))
def test_star_args(self):
# test _pdf with only starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg
dist = _dist_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33)
assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33)
assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33))
def test_star_args_2(self):
# test _pdf with named & starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, offset, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg + offset
dist = _dist_gen(shapes='offset, extra_kwarg')
assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33),
stats.norm.pdf(0.5)*33 + 111)
assert_equal(dist.pdf(0.5, 111, 33),
stats.norm.pdf(0.5)*33 + 111)
def test_extra_kwarg(self):
# **kwargs to _pdf are ignored.
# this is a limitation of the framework (_pdf(x, *goodargs))
class _distr_gen(stats.rv_continuous):
def _pdf(self, x, *args, **kwargs):
# _pdf should handle *args, **kwargs itself. Here "handling"
# is ignoring *args and looking for ``extra_kwarg`` and using
# that.
extra_kwarg = kwargs.pop('extra_kwarg', 1)
return stats.norm._pdf(x) * extra_kwarg
dist = _distr_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1))
def shapes_empty_string(self):
# shapes='' is equivalent to shapes=None
class _dist_gen(stats.rv_continuous):
def _pdf(self, x):
return stats.norm.pdf(x)
dist = _dist_gen(shapes='')
assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5))
class TestSubclassingNoShapes(object):
    # Construct a distribution w/o explicit shapes parameter and test it.
    # Shape parameters must then be inferred by inspecting _pdf/_cdf.
    def test_only__pdf(self):
        dummy_distr = _distr_gen(name='dummy')
        assert_equal(dummy_distr.pdf(1, a=1), 42)
    def test_only__cdf(self):
        # _pdf is determined from _cdf by taking numerical derivative
        dummy_distr = _distr2_gen(name='dummy')
        assert_almost_equal(dummy_distr.pdf(1, a=1), 1)
    @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
    def test_signature_inspection(self):
        # check that _pdf signature inspection works correctly, and is used in
        # the class docstring
        dummy_distr = _distr_gen(name='dummy')
        assert_equal(dummy_distr.numargs, 1)
        assert_equal(dummy_distr.shapes, 'a')
        res = re.findall(r'logpdf\(x, a, loc=0, scale=1\)',
                         dummy_distr.__doc__)
        assert_(len(res) == 1)
    @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
    def test_signature_inspection_2args(self):
        # same for 2 shape params and both _pdf and _cdf defined
        dummy_distr = _distr6_gen(name='dummy')
        assert_equal(dummy_distr.numargs, 2)
        assert_equal(dummy_distr.shapes, 'a, b')
        res = re.findall(r'logpdf\(x, a, b, loc=0, scale=1\)',
                         dummy_distr.__doc__)
        assert_(len(res) == 1)
    def test_signature_inspection_2args_incorrect_shapes(self):
        # both _pdf and _cdf defined, but shapes are inconsistent: raises
        assert_raises(TypeError, _distr3_gen, name='dummy')
    def test_defaults_raise(self):
        # default arguments should raise
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a=42):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
    def test_starargs_raise(self):
        # without explicit shapes, *args are not allowed
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a, *args):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
    def test_kwargs_raise(self):
        # without explicit shapes, **kwargs are not allowed
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a, **kwargs):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
def test_docstrings():
    # Generated distribution docstrings must not contain artifacts of
    # the templating machinery (stray commas/colons).
    badones = [r',\s*,', r'\(\s*,', r'^\s*:']
    for distname in stats.__all__:
        dist = getattr(stats, distname)
        if isinstance(dist, (stats.rv_discrete, stats.rv_continuous)):
            for regex in badones:
                assert_(re.search(regex, dist.__doc__) is None)
def test_infinite_input():
    # Methods must handle an infinite argument without returning nan.
    assert_almost_equal(stats.skellam.sf(np.inf, 10, 11), 0)
    assert_almost_equal(stats.ncx2._cdf(np.inf, 8, 0.1), 1)
# The following four tests check that ppf/isf inverts cdf/sf accurately
# even for probabilities deep in the tails.
def test_lomax_accuracy():
    # regression test for gh-4033
    p = stats.lomax.ppf(stats.lomax.cdf(1e-100, 1), 1)
    assert_allclose(p, 1e-100)
def test_gompertz_accuracy():
    # Regression test for gh-4031
    p = stats.gompertz.ppf(stats.gompertz.cdf(1e-100, 1), 1)
    assert_allclose(p, 1e-100)
def test_truncexpon_accuracy():
    # regression test for gh-4035
    p = stats.truncexpon.ppf(stats.truncexpon.cdf(1e-100, 1), 1)
    assert_allclose(p, 1e-100)
def test_rayleigh_accuracy():
    # regression test for gh-4034
    p = stats.rayleigh.isf(stats.rayleigh.sf(9, 1), 1)
    assert_almost_equal(p, 9.0, decimal=15)
def test_genextreme_give_no_warnings():
    """Regression test for gh-6219: shape c = 0 must not emit warnings."""
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        stats.genextreme.cdf(.5, 0)
        stats.genextreme.pdf(.5, 0)
        stats.genextreme.ppf(.5, 0)
        stats.genextreme.logpdf(-np.inf, 0.0)
        assert_equal(len(caught), 0)
def test_genextreme_entropy():
    # regression test for gh-5181: closed-form entropy of the GEV family,
    # h = c*euler_gamma + ln(scale) + euler_gamma + 1 for shape c.
    euler_gamma = 0.5772156649015329
    # (shape, scale, expected entropy)
    cases = [(-1.0, 1, 2*euler_gamma + 1),
             (0, 1, euler_gamma + 1),
             (-2.0, 10, euler_gamma*3 + np.log(10) + 1),
             (10, 1, -9*euler_gamma + 1),
             (-10, 1, 11*euler_gamma + 1)]
    for shape, scale, expected in cases:
        assert_allclose(stats.genextreme.entropy(shape, scale=scale),
                        expected, rtol=1e-14)
    # shape c == 1: the gamma terms cancel and the entropy is exactly 1.
    assert_equal(stats.genextreme.entropy(1.0), 1)
def test_genextreme_sf_isf():
    # Expected survival-function values were computed with mpmath at 1000
    # digits using the Wikipedia formula (whose xi has the opposite sign
    # convention to scipy's shape parameter c):
    #
    #     t  = (1 + xi*(x - mu)/sigma)**(-1/xi)    (xi != 0)
    #     t  = exp(-(x - mu)/sigma)                (xi == 0)
    #     sf = 1 - exp(-t)
    #
    # (x, shape c, expected sf)
    cases = [(1e8, -0.125, 1.6777205262585625e-57),
             (7.98, 0.125, 1.52587890625e-21),
             (7.98, 0, 0.00034218086528426593)]
    for x, shape, expected_sf in cases:
        sf = stats.genextreme.sf(x, shape)
        assert_allclose(sf, expected_sf)
        # isf must invert sf even this far into the tail
        assert_allclose(stats.genextreme.isf(sf, shape), x)
def test_burr12_ppf_small_arg():
    # ppf must stay accurate for probabilities near zero.  The reference
    # quantile was computed with mpmath at 100 digits via the closed form
    #     ((1 - prob)**(-1/d) - 1)**(1/c)
    # with c = 2, d = 3.
    tiny_prob = 1e-16
    assert_allclose(stats.burr12.ppf(tiny_prob, 2, 3),
                    5.7735026918962575e-09)
def test_crystalball_function():
    """
    All values are calculated using the independent implementation of the
    ROOT framework (see https://root.cern.ch/).
    Corresponding ROOT code is given in the comments.
    """
    # 20 evaluation points on [-5, 5); the right endpoint is dropped so the
    # grid matches the half-open C++ loops in the ROOT snippets below.
    X = np.linspace(-5.0, 5.0, 21)[:-1]
    # --- pdf checks (reference values printed to 6 significant digits,
    #     hence the loose rtol=0.001) ---
    # for(float x = -5.0; x < 5.0; x+=0.5)
    #     std::cout << ROOT::Math::crystalball_pdf(x, 1.0, 2.0, 1.0) << ", ";
    calculated = stats.crystalball.pdf(X, beta=1.0, m=2.0)
    expected = np.array([0.0202867, 0.0241428, 0.0292128, 0.0360652, 0.045645,
                         0.059618, 0.0811467, 0.116851, 0.18258, 0.265652,
                         0.301023, 0.265652, 0.18258, 0.097728, 0.0407391,
                         0.013226, 0.00334407, 0.000658486, 0.000100982,
                         1.20606e-05])
    assert_allclose(expected, calculated, rtol=0.001)
    # for(float x = -5.0; x < 5.0; x+=0.5)
    #     std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 1.0) << ", ";
    calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0)
    expected = np.array([0.0019648, 0.00279754, 0.00417592, 0.00663121,
                         0.0114587, 0.0223803, 0.0530497, 0.12726, 0.237752,
                         0.345928, 0.391987, 0.345928, 0.237752, 0.12726,
                         0.0530497, 0.0172227, 0.00435458, 0.000857469,
                         0.000131497, 1.57051e-05])
    assert_allclose(expected, calculated, rtol=0.001)
    # Non-default location and scale.
    # for(float x = -5.0; x < 5.0; x+=0.5) {
    #     std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 2.0, 0.5);
    #     std::cout << ", ";
    # }
    calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)
    expected = np.array([0.00785921, 0.0111902, 0.0167037, 0.0265249,
                         0.0423866, 0.0636298, 0.0897324, 0.118876, 0.147944,
                         0.172964, 0.189964, 0.195994, 0.189964, 0.172964,
                         0.147944, 0.118876, 0.0897324, 0.0636298, 0.0423866,
                         0.0265249])
    assert_allclose(expected, calculated, rtol=0.001)
    # --- cdf checks against the same ROOT reference ---
    # for(float x = -5.0; x < 5.0; x+=0.5)
    #     std::cout << ROOT::Math::crystalball_cdf(x, 1.0, 2.0, 1.0) << ", ";
    calculated = stats.crystalball.cdf(X, beta=1.0, m=2.0)
    expected = np.array([0.12172, 0.132785, 0.146064, 0.162293, 0.18258,
                         0.208663, 0.24344, 0.292128, 0.36516, 0.478254,
                         0.622723, 0.767192, 0.880286, 0.94959, 0.982834,
                         0.995314, 0.998981, 0.999824, 0.999976, 0.999997])
    assert_allclose(expected, calculated, rtol=0.001)
    # for(float x = -5.0; x < 5.0; x+=0.5)
    #     std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 1.0) << ", ";
    calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0)
    expected = np.array([0.00442081, 0.00559509, 0.00730787, 0.00994682,
                         0.0143234, 0.0223803, 0.0397873, 0.0830763, 0.173323,
                         0.320592, 0.508717, 0.696841, 0.844111, 0.934357,
                         0.977646, 0.993899, 0.998674, 0.999771, 0.999969,
                         0.999997])
    assert_allclose(expected, calculated, rtol=0.001)
    # for(float x = -5.0; x < 5.0; x+=0.5) {
    #     std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 2.0, 0.5);
    #     std::cout << ", ";
    # }
    calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)
    expected = np.array([0.0176832, 0.0223803, 0.0292315, 0.0397873, 0.0567945,
                         0.0830763, 0.121242, 0.173323, 0.24011, 0.320592,
                         0.411731, 0.508717, 0.605702, 0.696841, 0.777324,
                         0.844111, 0.896192, 0.934357, 0.960639, 0.977646])
    assert_allclose(expected, calculated, rtol=0.001)
def test_crystalball_function_moments():
    """
    All values are calculated using the pdf formula and the integrate function
    of Mathematica
    """
    # The last two (beta, m) pairs test the special case m == beta**2
    # (m=4 with beta=2, m=9 with beta=3), where the power-law tail
    # integral needs separate handling.
    beta = np.array([2.0, 1.0, 3.0, 2.0, 3.0])
    m = np.array([3.0, 3.0, 2.0, 4.0, 9.0])
    # The distribution should be correctly normalised
    expected_0th_moment = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
    calculated_0th_moment = stats.crystalball._munp(0, beta, m)
    assert_allclose(expected_0th_moment, calculated_0th_moment, rtol=0.001)
    # calculated using wolframalpha.com
    # e.g. for beta = 2 and m = 3 we calculate the norm like this:
    #   integrate exp(-x^2/2) from -2 to infinity +
    #   integrate (3/2)^3*exp(-2^2/2)*(3/2-2-x)^(-3) from -infinity to -2
    # The raw (unnormalised) moment integrals `a` below are divided by this
    # norm; np.inf marks parameter combinations whose moment diverges.
    norm = np.array([2.5511, 3.01873, 2.51065, 2.53983, 2.507410455])
    a = np.array([-0.21992, -3.03265, np.inf, -0.135335, -0.003174])
    expected_1th_moment = a / norm
    calculated_1th_moment = stats.crystalball._munp(1, beta, m)
    assert_allclose(expected_1th_moment, calculated_1th_moment, rtol=0.001)
    a = np.array([np.inf, np.inf, np.inf, 3.2616, 2.519908])
    expected_2th_moment = a / norm
    calculated_2th_moment = stats.crystalball._munp(2, beta, m)
    assert_allclose(expected_2th_moment, calculated_2th_moment, rtol=0.001)
    a = np.array([np.inf, np.inf, np.inf, np.inf, -0.0577668])
    expected_3th_moment = a / norm
    calculated_3th_moment = stats.crystalball._munp(3, beta, m)
    assert_allclose(expected_3th_moment, calculated_3th_moment, rtol=0.001)
    a = np.array([np.inf, np.inf, np.inf, np.inf, 7.78468])
    expected_4th_moment = a / norm
    calculated_4th_moment = stats.crystalball._munp(4, beta, m)
    assert_allclose(expected_4th_moment, calculated_4th_moment, rtol=0.001)
    a = np.array([np.inf, np.inf, np.inf, np.inf, -1.31086])
    expected_5th_moment = a / norm
    calculated_5th_moment = stats.crystalball._munp(5, beta, m)
    assert_allclose(expected_5th_moment, calculated_5th_moment, rtol=0.001)
def test_ncf_variance():
    # Regression test for gh-10658 (incorrect variance formula for ncf).
    # The reference value Var[NoncentralFRatioDistribution[2, 6, 4]] = 42.75
    # can be cross-checked with Wolfram Alpha or with Boost's noncentral F
    # implementation.
    variance = stats.ncf.var(2, 6, 4)
    assert_allclose(variance, 42.75, rtol=1e-14)
class TestHistogram(object):
    """Tests for stats.rv_histogram built from np.histogram output."""

    def setup_method(self):
        np.random.seed(1234)
        # We have 8 bins
        # [1,2), [2,3), [3,4), [4,5), [5,6), [6,7), [7,8), [8,9)
        # But actually np.histogram will put the last 9 also in the [8,9) bin!
        # Therefore there is a slight difference below for the last bin, from
        # what you might have expected.
        histogram = np.histogram([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5,
                                  6, 6, 6, 6, 7, 7, 7, 8, 8, 9], bins=8)
        self.template = stats.rv_histogram(histogram)
        # Second template: a histogram approximation of N(loc=1, scale=2.5),
        # used to compare against the exact normal distribution.
        data = stats.norm.rvs(loc=1.0, scale=2.5, size=10000, random_state=123)
        norm_histogram = np.histogram(data, bins=50)
        self.norm_template = stats.rv_histogram(norm_histogram)

    def test_pdf(self):
        # 25 samples total, so each bin's density is count/25 (bin width 1).
        values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
                           5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
        pdf_values = np.asarray([0.0/25.0, 0.0/25.0, 1.0/25.0, 1.0/25.0,
                                 2.0/25.0, 2.0/25.0, 3.0/25.0, 3.0/25.0,
                                 4.0/25.0, 4.0/25.0, 5.0/25.0, 5.0/25.0,
                                 4.0/25.0, 4.0/25.0, 3.0/25.0, 3.0/25.0,
                                 3.0/25.0, 3.0/25.0, 0.0/25.0, 0.0/25.0])
        assert_allclose(self.template.pdf(values), pdf_values)
        # Test explicitly the corner cases:
        # As stated above the pdf in the bin [8,9) is greater than
        # one would naively expect because np.histogram putted the 9
        # into the [8,9) bin.
        assert_almost_equal(self.template.pdf(8.0), 3.0/25.0)
        assert_almost_equal(self.template.pdf(8.5), 3.0/25.0)
        # 9 is outside our defined bins [8,9) hence the pdf is already 0
        # for a continuous distribution this is fine, because a single value
        # does not have a finite probability!
        assert_almost_equal(self.template.pdf(9.0), 0.0/25.0)
        assert_almost_equal(self.template.pdf(10.0), 0.0/25.0)
        # Histogram approximation of the normal pdf (coarse rtol).
        x = np.linspace(-2, 2, 10)
        assert_allclose(self.norm_template.pdf(x),
                        stats.norm.pdf(x, loc=1.0, scale=2.5), rtol=0.1)

    def test_cdf_ppf(self):
        # cdf is the piecewise-linear integral of the step pdf above.
        values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
                           5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
        cdf_values = np.asarray([0.0/25.0, 0.0/25.0, 0.0/25.0, 0.5/25.0,
                                 1.0/25.0, 2.0/25.0, 3.0/25.0, 4.5/25.0,
                                 6.0/25.0, 8.0/25.0, 10.0/25.0, 12.5/25.0,
                                 15.0/25.0, 17.0/25.0, 19.0/25.0, 20.5/25.0,
                                 22.0/25.0, 23.5/25.0, 25.0/25.0, 25.0/25.0])
        assert_allclose(self.template.cdf(values), cdf_values)
        # First three and last two values in cdf_value are not unique
        assert_allclose(self.template.ppf(cdf_values[2:-1]), values[2:-1])
        # Test of cdf and ppf are inverse functions
        x = np.linspace(1.0, 9.0, 100)
        assert_allclose(self.template.ppf(self.template.cdf(x)), x)
        x = np.linspace(0.0, 1.0, 100)
        assert_allclose(self.template.cdf(self.template.ppf(x)), x)
        x = np.linspace(-2, 2, 10)
        assert_allclose(self.norm_template.cdf(x),
                        stats.norm.cdf(x, loc=1.0, scale=2.5), rtol=0.1)

    def test_rvs(self):
        # Empirical cdf of a large sample should match the histogram cdf.
        N = 10000
        sample = self.template.rvs(size=N, random_state=123)
        assert_equal(np.sum(sample < 1.0), 0.0)
        assert_allclose(np.sum(sample <= 2.0), 1.0/25.0 * N, rtol=0.2)
        assert_allclose(np.sum(sample <= 2.5), 2.0/25.0 * N, rtol=0.2)
        assert_allclose(np.sum(sample <= 3.0), 3.0/25.0 * N, rtol=0.1)
        assert_allclose(np.sum(sample <= 3.5), 4.5/25.0 * N, rtol=0.1)
        assert_allclose(np.sum(sample <= 4.0), 6.0/25.0 * N, rtol=0.1)
        assert_allclose(np.sum(sample <= 4.5), 8.0/25.0 * N, rtol=0.1)
        assert_allclose(np.sum(sample <= 5.0), 10.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 5.5), 12.5/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 6.0), 15.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 6.5), 17.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 7.0), 19.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 7.5), 20.5/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 8.0), 22.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 8.5), 23.5/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05)
        assert_equal(np.sum(sample > 9.0), 0.0)

    def test_munp(self):
        # Raw moments of the histogram template vs the underlying normal.
        for n in range(4):
            assert_allclose(self.norm_template._munp(n),
                            stats.norm._munp(n, 1.0, 2.5), rtol=0.05)

    def test_entropy(self):
        assert_allclose(self.norm_template.entropy(),
                        stats.norm.entropy(loc=1.0, scale=2.5), rtol=0.05)
def test_loguniform():
    # The "loguniform" alias must actually sample log-uniformly: the
    # base-10 logs of the draws should spread evenly across the three
    # decades [1e-3, 1e0].
    dist = stats.loguniform(10 ** -3, 10 ** 0)
    draws = dist.rvs(size=10000, random_state=42)
    counts, _ = np.histogram(np.log10(draws), bins=10)
    assert 900 <= counts.min() <= counts.max() <= 1100
    assert np.abs(np.median(counts) - 1000) <= 10
class TestArgus(object):
def test_argus_rvs_large_chi(self):
# test that the algorithm can handle large values of chi
x = stats.argus.rvs(50, size=500, random_state=325)
assert_almost_equal(stats.argus(50).mean(), x.mean(), decimal=4)
def test_argus_rvs_ratio_uniforms(self):
# test that the ratio of uniforms algorithms works for chi > 2.611
x = stats.argus.rvs(3.5, size=1500, random_state=1535)
assert_almost_equal(stats.argus(3.5).mean(), x.mean(), decimal=3)
assert_almost_equal(stats.argus(3.5).std(), x.std(), decimal=3)
|
aeklant/scipy
|
scipy/stats/tests/test_distributions.py
|
Python
|
bsd-3-clause
| 165,416
|
[
"Gaussian"
] |
2848bd3bdd670a537097ef55723363c850bd2d2d0fc5bdf3f04a059e76e0f177
|
# -*- coding: utf-8 -*-
from pyaxiom.netcdf import CFDataset
class OrthogonalMultidimensionalTimeseriesProfile(CFDataset):
    """CF Discrete Sampling Geometry: orthogonal multidimensional
    timeSeriesProfile.

    All stations share one 1-D time coordinate and one distinct 1-D depth
    coordinate (i.e. the dataset is not ragged).
    """

    @classmethod
    def is_mine(cls, dsg):
        """Return True if *dsg* matches this geometry; never raises."""
        try:
            assert dsg.featureType.lower() == 'timeseriesprofile'
            # Every coordinate axis kind must be present at least once.
            for axes in (dsg.t_axes(), dsg.x_axes(),
                         dsg.y_axes(), dsg.z_axes()):
                assert len(axes) >= 1
            # A single 1-D time coordinate and a single, distinct 1-D
            # vertical coordinate mark the layout as orthogonal.
            time_var = dsg.t_axes()[0]
            depth_var = dsg.z_axes()[0]
            assert len(time_var.dimensions) == 1
            assert len(depth_var.dimensions) == 1
            assert time_var.dimensions != depth_var.dimensions
            # Ragged representations carry index variables; none may exist.
            assert not dsg.get_variables_by_attributes(
                sample_dimension=lambda x: x is not None
            )
            assert not dsg.get_variables_by_attributes(
                instance_dimension=lambda x: x is not None
            )
        except BaseException:
            return False
        return True

    def from_dataframe(self, df, variable_attributes=None, global_attributes=None):
        """Not implemented yet for this geometry type."""
        variable_attributes = variable_attributes or {}
        global_attributes = global_attributes or {}
        raise NotImplementedError

    def calculated_metadata(self, df=None, geometries=True, clean_cols=True, clean_rows=True):
        """Not implemented yet for this geometry type."""
        raise NotImplementedError

    def to_dataframe(self):
        """Not implemented yet for this geometry type."""
        raise NotImplementedError
|
axiom-data-science/pyaxiom
|
pyaxiom/netcdf/sensors/dsg/timeseriesProfile/om.py
|
Python
|
mit
| 1,732
|
[
"NetCDF"
] |
51aa6312af54f514a19eb24e4011451ddf417036d68f59df17f4e305fde72b16
|
import numpy as np
import loudness as ln
class BinauralInhibitionMG2007:
    """Binaural inhibition stage of the Moore & Glasberg (2007) model.

    Each ear's specific-loudness pattern is smoothed across frequency
    with a Gaussian weighting function; the ratio of the two smoothed
    patterns then yields a per-channel inhibition gain for each ear.
    """

    def __init__(self, cams):
        # Build the Gaussian weighting function on the (uniform) Cam grid.
        n_filters = cams.size
        spacing = cams[1] - cams[0]
        offsets = np.arange(n_filters) * spacing
        half_kernel = np.exp(-(0.08 * offsets) ** 2)
        # Mirror to obtain the symmetric two-sided kernel (2*n - 1 taps).
        self.fullGaussian = np.hstack((half_kernel[::-1], half_kernel[1:]))

    def process(self, specificLoudness):
        """Compute inhibited specific loudness for an (nChannels, 2) input."""
        # Smooth each ear's pattern.  With the two-sided kernel,
        # mode='valid' returns exactly nChannels points and is identical
        # to slicing the full convolution at [n-1:-n+1].
        self.smoothSpecificLoudness = np.empty(specificLoudness.shape)
        for ear in (0, 1):
            self.smoothSpecificLoudness[:, ear] = np.convolve(
                self.fullGaussian, specificLoudness[:, ear], mode='valid')
        # Floor tiny/negative values so the ratios below cannot divide by 0.
        np.clip(self.smoothSpecificLoudness, 1e-12, None,
                out=self.smoothSpecificLoudness)
        left, right = (self.smoothSpecificLoudness[:, 0],
                       self.smoothSpecificLoudness[:, 1])
        # Inhibition gains from the sech of the cross-ear loudness ratio.
        inhib_left = 2.0 / (1 + (1.0 / np.cosh(right / left)) ** 1.5978)
        inhib_right = 2.0 / (1 + (1.0 / np.cosh(left / right)) ** 1.5978)
        # Apply gains to the original (unsmoothed) specific loudness.
        self.inhibitedSpecificLoudness = specificLoudness.copy()
        self.inhibitedSpecificLoudness[:, 0] /= inhib_left
        self.inhibitedSpecificLoudness[:, 1] /= inhib_right
# python side: reference implementation evaluated on a 1.8-39 Cam grid
cams = np.arange(1.8, 39, 0.1)
model = BinauralInhibitionMG2007(cams)
# input specific loudness patterns (one per ear)
# NOTE(review): the RNG is unseeded, so each run compares a different
# random pattern — results are not reproducible across runs.
pattern = np.random.rand(cams.size, 2)
model.process(pattern)
# loudness side: feed the same pattern through the C++ module via a
# SignalBank shaped (ears, channels, samples) = (2, nChannels, 1)
specificLoudnessBank = ln.SignalBank()
specificLoudnessBank.initialize(2, cams.size, 1, 1)
specificLoudnessBank.setChannelSpacingInCams(0.1)
specificLoudnessBank.setSignals(pattern.T.reshape((2, pattern.shape[0], 1)))
binauralModel = ln.BinauralInhibitionMG2007()
binauralModel.initialize(specificLoudnessBank)
binauralModel.process(specificLoudnessBank)
inhibitedSpecificLoudnessBank = binauralModel.getOutput()
# reshape back to (nChannels, 2) to match the python-side layout
inhibitedSpecificLoudness =\
    inhibitedSpecificLoudnessBank.getSignals().reshape((2, cams.size)).T
print ("Python vs loudness equality test applied to "
       + "inhibited specific loudness patterns: %r "
       % np.allclose(inhibitedSpecificLoudness,
                     model.inhibitedSpecificLoudness))
|
faroit/loudness
|
python/tests/test_BinauralInhibitionMG2007.py
|
Python
|
gpl-3.0
| 2,403
|
[
"Gaussian"
] |
53f6568470ae151626bac3cb859c6b3984d50e878cdf3ac4ceef1915353084c5
|
""" DIRAC API Base Class """
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.Core.Utilities.List import sortList
from DIRAC.Core.Security.ProxyInfo import getProxyInfo, formatProxyInfoAsString
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getDNForUsername
from DIRAC.Core.Utilities.Version import getCurrentVersion
import pprint, sys
COMPONENT_NAME = 'API'
def _printFormattedDictList( dictList, fields, uniqueField, orderBy ):
  """ Will print ordered the supplied field of a list of dictionaries

      :param dictList: list of dictionaries, each expected to contain every
                       key in `fields`, plus `uniqueField` and `orderBy`
      :param fields: keys to print, in column order
      :param uniqueField: key whose value identifies a row uniquely
      :param orderBy: key whose value determines the primary row ordering
  """
  orderDict = {}
  fieldWidths = {}
  dictFields = {}
  # First pass: compute the widest value per column and group the rows'
  # unique identifiers by their orderBy value.
  for myDict in dictList:
    for field in fields:
      fieldValue = myDict[field]
      if not fieldWidths.has_key( field ):
        fieldWidths[field] = len( str( field ) )
      if len( str( fieldValue ) ) > fieldWidths[field]:
        fieldWidths[field] = len( str( fieldValue ) )
    orderValue = myDict[orderBy]
    if not orderDict.has_key( orderValue ):
      orderDict[orderValue] = []
    orderDict[orderValue].append( myDict[uniqueField] )
    dictFields[myDict[uniqueField]] = myDict
  # Header row: each column padded to its width plus a 5-space gutter.
  headString = "%s" % fields[0].ljust( fieldWidths[fields[0]] + 5 )
  for field in fields[1:]:
    headString = "%s %s" % ( headString, field.ljust( fieldWidths[field] + 5 ) )
  print headString
  # Second pass: emit rows sorted by orderBy value, then by unique id.
  for orderValue in sortList( orderDict.keys() ):
    uniqueFields = orderDict[orderValue]
    for uniqueField in sortList( uniqueFields ):
      myDict = dictFields[uniqueField]
      outStr = "%s" % str( myDict[fields[0]] ).ljust( fieldWidths[fields[0]] + 5 )
      for field in fields[1:]:
        outStr = "%s %s" % ( outStr, str( myDict[field] ).ljust( fieldWidths[field] + 5 ) )
      print outStr
#TODO: some of these can just be functions, and moved out of here
class API( object ):
  """ An utilities class for APIs

      Provides shared plumbing for DIRAC API classes: a sub-logger, a
      pretty-printer, a per-method error dictionary and helpers to format
      and report errors uniformly.
  """

  #############################################################################
  def __init__( self ):
    """ c'tor
    """
    self._printFormattedDictList = _printFormattedDictList
    self.log = gLogger.getSubLogger( COMPONENT_NAME )
    self.section = COMPONENT_NAME
    self.pPrint = pprint.PrettyPrinter()
    #Global error dictionary
    self.errorDict = {}
    self.setup = gConfig.getValue( '/DIRAC/Setup', 'Unknown' )
    self.diracInfo = getCurrentVersion()['Value']

  #############################################################################
  def _errorReport( self, error, message = None ):
    """Internal function to return errors and exit with an S_ERROR()

       The full `error` is logged as a warning; `message` (defaulting to
       `error`) is what the caller receives inside the S_ERROR.
    """
    if not message:
      message = error
    self.log.warn( error )
    return S_ERROR( message )

  #############################################################################
  def _prettyPrint( self, myObject ):
    """Helper function to pretty print an object. """
    print self.pPrint.pformat( myObject )

  #############################################################################
  def _getCurrentUser( self ):
    # Resolve the username of the proxy found in the local environment,
    # verifying that the proxy carries a group and maps to a known DN.
    res = getProxyInfo( False, False )
    if not res['OK']:
      return self._errorReport( 'No proxy found in local environment', res['Message'] )
    proxyInfo = res['Value']
    gLogger.debug( formatProxyInfoAsString( proxyInfo ) )
    if not proxyInfo.has_key( 'group' ):
      return self._errorReport( 'Proxy information does not contain the group', res['Message'] )
    res = getDNForUsername( proxyInfo['username'] )
    if not res['OK']:
      return self._errorReport( 'Failed to get proxies for user', res['Message'] )
    return S_OK( proxyInfo['username'] )

  #############################################################################
  def _reportError( self, message, name = '', **kwargs ):
    """Internal Function. Gets caller method name and arguments, formats the
       information and adds an error to the global error dictionary to be
       returned to the user.
    """
    className = name
    if not name:
      className = __name__
    # Name of the method that called _reportError (one frame up the stack).
    methodName = sys._getframe( 1 ).f_code.co_name
    arguments = []
    for key in kwargs:
      if kwargs[key]:
        arguments.append( '%s = %s ( %s )' % ( key, kwargs[key], type( kwargs[key] ) ) )
    finalReport = """Problem with %s.%s() call:
Arguments: %s
Message: %s
""" % ( className, methodName, '/'.join( arguments ), message )
    # Accumulate reports per calling method for later retrieval.
    if self.errorDict.has_key( methodName ):
      tmp = self.errorDict[methodName]
      tmp.append( finalReport )
      self.errorDict[methodName] = tmp
    else:
      self.errorDict[methodName] = [finalReport]
    self.log.verbose( finalReport )
    return S_ERROR( finalReport )
|
avedaee/DIRAC
|
Core/Base/API.py
|
Python
|
gpl-3.0
| 4,564
|
[
"DIRAC"
] |
6271b8564171d2907c51874458d1c44b5dc646765eaeca5c7ad6c19312771b55
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import logging
from monty.json import MSONable
from pymatgen.core import Structure
from pymatgen.analysis.defects.corrections import FreysoldtCorrection, \
KumagaiCorrection, BandFillingCorrection, BandEdgeShiftingCorrection
from pymatgen.analysis.defects.core import Vacancy
"""
This module implements DefectCompatibility analysis for consideration of
defects
"""
__author__ = "Danny Broberg, Shyam Dwaraknath"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyam Dwaraknath"
__email__ = "shyamd@lbl.gov"
__status__ = "Development"
__date__ = "Mar 15, 2018"
logger = logging.getLogger(__name__)
class DefectCompatibility(MSONable):
"""
The DefectCompatibility class evaluates corrections and delocalization
metrics on a DefectEntry. It can only parse based on the available
parameters that already exist in the parameters dict of the DefectEntry.
required settings in defect_entry.parameters for various types of analysis/correction:
freysoldt: [ "dielectric", "axis_grid", "bulk_planar_averages", "defect_planar_averages",
"initial_defect_structure", "defect_frac_sc_coords"]
kumagai: [ "dielectric", "bulk_atomic_site_averages", "defect_atomic_site_averages",
"site_matching_indices", "initial_defect_structure", "defect_frac_sc_coords"]
bandfilling: ["eigenvalues", "kpoint_weights", "potalign", "vbm", "cbm"]
bandshifting: ["hybrid_cbm", "hybrid_vbm", "vbm", "cbm"]
defect relaxation/structure analysis: ["final_defect_structure", "initial_defect_structure",
"sampling_radius", "defect_frac_sc_coords"]
"""
    def __init__(self,
                 plnr_avg_var_tol=0.0001,
                 plnr_avg_minmax_tol=0.1,
                 atomic_site_var_tol=0.005,
                 atomic_site_minmax_tol=0.1,
                 tot_relax_tol=1.0,
                 perc_relax_tol=50.,
                 defect_tot_relax_tol=2.,
                 preferred_cc='freysoldt',
                 free_chg_cutoff=2.1,
                 use_bandfilling=True,
                 use_bandedgeshift=True):
        """
        Initializes the DefectCompatibility class
        Each argument helps decide whether a DefectEntry is flagged as compatible or not
        Args:
            plnr_avg_var_tol (float): compatibility tolerance for variance of the sampling region
                in the planar averaged electrostatic potential (FreysoldtCorrection)
            plnr_avg_minmax_tol (float): compatibility tolerance for max/min difference of the
                sampling region in the planar averaged electrostatic potential (FreysoldtCorrection)
            atomic_site_var_tol (float): compatibility tolerance for variance of the sampling
                region in the atomic site averaged electrostatic potential (KumagaiCorrection)
            atomic_site_minmax_tol (float): compatibility tolerance for max/min difference
                of the sampling region in the atomic site averaged electrostatic
                potential (KumagaiCorrection)
            tot_relax_tol (float): compatibility tolerance for total integrated relaxation
                amount outside of a given radius from the defect (in Angstrom).
                Radius is supplied as 'sampling_radius' within parameters of DefectEntry.
            perc_relax_tol (float): compatibility tolerance for percentage of total relaxation
                outside of a given radius from the defect (percentage amount),
                assuming a total integration relaxation greater than 1 Angstrom.
                Radius is supplied as 'sampling_radius' within parameters of DefectEntry.
            defect_tot_relax_tol (float): compatibility tolerance for displacement of defect site
                itself (in Angstrom).
            preferred_cc (str): Charge correction that is preferred to be used.
                If only one is available based on metadata, then that charge correction will be used.
                Options are: 'freysoldt' and 'kumagai'
            free_chg_cutoff (float): compatibility tolerance for total amount of host band occupation
                outside of band edges, given by eigenvalue data. Extra occupation in the CB would be
                free electrons, while lost occupation in VB would be free holes.
            use_bandfilling (bool): Whether to include BandFillingCorrection or not (assuming
                sufficient metadata is supplied to perform BandFillingCorrection).
            use_bandedgeshift (bool): Whether to perform a BandEdgeShiftingCorrection or not (assuming
                sufficient metadata is supplied to perform BandEdgeShiftingCorrection).
        """
        # Planar-average potential (Freysoldt) delocalization tolerances.
        self.plnr_avg_var_tol = plnr_avg_var_tol
        self.plnr_avg_minmax_tol = plnr_avg_minmax_tol
        # Atomic-site potential (Kumagai) delocalization tolerances.
        self.atomic_site_var_tol = atomic_site_var_tol
        self.atomic_site_minmax_tol = atomic_site_minmax_tol
        # Structural relaxation tolerances.
        self.tot_relax_tol = tot_relax_tol
        self.perc_relax_tol = perc_relax_tol
        self.defect_tot_relax_tol = defect_tot_relax_tol
        # Correction-selection behavior.
        self.preferred_cc = preferred_cc
        self.free_chg_cutoff = free_chg_cutoff
        self.use_bandfilling = use_bandfilling
        self.use_bandedgeshift = use_bandedgeshift
    def process_entry(self, defect_entry, perform_corrections=True):
        """
        Process a given DefectEntry with qualifiers given from initialization of class.
        Order of processing is:
        1) perform all possible defect corrections with information given
        2) consider delocalization analyses based on qualifier metrics
        given initialization of class. If delocalized, flag entry as delocalized
        3) update corrections to defect entry and flag as delocalized
        Corrections are applied based on:
        i) if free charges are more than free_chg_cutoff then will not apply charge correction,
        because it no longer is applicable
        ii) use charge correction set by preferred_cc
        iii) only use BandFilling correction if use_bandfilling is set to True
        iv) only use BandEdgeShift correction if use_bandedgeshift is set to True
        """
        # Structures may arrive as serialized dicts (e.g. from a database);
        # rehydrate them to Structure objects before any analysis.
        for struct_key in ["bulk_sc_structure", "initial_defect_structure", "final_defect_structure"]:
            if struct_key in defect_entry.parameters.keys() and isinstance(defect_entry.parameters[struct_key], dict):
                defect_entry.parameters[struct_key] = Structure.from_dict(defect_entry.parameters[struct_key])
        if perform_corrections:
            self.perform_all_corrections(defect_entry)
        self.delocalization_analysis(defect_entry)
        # apply corrections based on delocalization analysis
        corrections = {}
        skip_charge_corrections = False
        # (i) too many free carriers -> point-charge corrections no longer apply
        if "num_hole_vbm" in defect_entry.parameters.keys():
            if (self.free_chg_cutoff < defect_entry.parameters["num_hole_vbm"]) or (
                    self.free_chg_cutoff < defect_entry.parameters["num_elec_cbm"]):
                logger.info('Will not use charge correction because too many free charges')
                skip_charge_corrections = True
        if skip_charge_corrections:
            corrections.update({'charge_correction': 0.})
        else:
            # (ii) pick the preferred charge correction, falling back to
            # whichever one actually produced metadata.
            if ('freysoldt' in self.preferred_cc.lower()) and ('freysoldt_meta' in defect_entry.parameters.keys()):
                frey_meta = defect_entry.parameters['freysoldt_meta']
                frey_corr = frey_meta["freysoldt_electrostatic"] + frey_meta["freysoldt_potential_alignment_correction"]
                corrections.update({'charge_correction': frey_corr})
            elif ('kumagai_meta' in defect_entry.parameters.keys()):
                kumagai_meta = defect_entry.parameters['kumagai_meta']
                kumagai_corr = kumagai_meta["kumagai_electrostatic"] + \
                    kumagai_meta["kumagai_potential_alignment_correction"]
                corrections.update({'charge_correction': kumagai_corr})
            else:
                logger.info('Could not use any charge correction because insufficient metadata was supplied.')
        # (iii) band filling correction, if enabled and computed
        if self.use_bandfilling:
            if "bandfilling_meta" in defect_entry.parameters.keys():
                bfc_corr = defect_entry.parameters["bandfilling_meta"]["bandfilling_correction"]
                corrections.update({'bandfilling_correction': bfc_corr})
            else:
                logger.info('Could not use band filling correction because insufficient metadata was supplied.')
        else:
            corrections.update({'bandfilling_correction': 0.})
        # (iv) band edge shifting correction, if enabled and computed
        if self.use_bandedgeshift and ("bandshift_meta" in defect_entry.parameters.keys()):
            corrections.update({
                'bandedgeshifting_correction':
                    defect_entry.parameters["bandshift_meta"]["bandedgeshifting_correction"]})
            # also want to update relevant data for phase diagram
            defect_entry.parameters.update({
                'phasediagram_meta': {
                    'vbm': defect_entry.parameters['hybrid_vbm'],
                    'gap': defect_entry.parameters['hybrid_cbm'] - defect_entry.parameters['hybrid_vbm']
                }
            })
        else:
            corrections.update({'bandedgeshifting_correction': 0.})
            if (type(defect_entry.parameters['vbm']) == float) and (type(defect_entry.parameters['cbm']) == float):
                # still want to have vbm and gap ready for phase diagram
                defect_entry.parameters.update({
                    'phasediagram_meta': {
                        'vbm': defect_entry.parameters['vbm'],
                        'gap': defect_entry.parameters['cbm'] - defect_entry.parameters['vbm']
                    }
                })
        defect_entry.corrections.update(corrections)
        return defect_entry
    def perform_all_corrections(self, defect_entry):
        """Run every correction for which the entry carries sufficient
        metadata (Freysoldt, Kumagai, band filling, band edge shifting),
        storing each correction's metadata in ``defect_entry.parameters``.
        Returns the (mutated) defect_entry."""
        # consider running freysoldt correction
        required_frey_params = ["dielectric", "axis_grid", "bulk_planar_averages", "defect_planar_averages",
                                "initial_defect_structure", "defect_frac_sc_coords"]
        run_freysoldt = (len(set(defect_entry.parameters.keys()).intersection(required_frey_params))
                         == len(required_frey_params))
        if not run_freysoldt:
            logger.info('Insufficient DefectEntry parameters exist for Freysoldt Correction.')
        else:
            defect_entry = self.perform_freysoldt(defect_entry)
        # consider running kumagai correction
        required_kumagai_params = ["dielectric", "bulk_atomic_site_averages", "defect_atomic_site_averages",
                                   "site_matching_indices", "initial_defect_structure", "defect_frac_sc_coords"]
        run_kumagai = (len(set(defect_entry.parameters.keys()).intersection(required_kumagai_params))
                       == len(required_kumagai_params))
        if not run_kumagai:
            logger.info('Insufficient DefectEntry parameters exist for Kumagai Correction.')
        else:
            # NOTE(review): the broad except silences any failure mode of the
            # Kumagai correction; the entry then proceeds without it.
            try:
                defect_entry = self.perform_kumagai(defect_entry)
            except Exception:
                logger.info("Kumagai correction error occured! Wont perform correction.")
        # add potalign based on preferred correction setting if it does not already exist in defect entry
        if self.preferred_cc == 'freysoldt':
            if 'freysoldt_meta' in defect_entry.parameters.keys():
                potalign = defect_entry.parameters['freysoldt_meta']['freysoldt_potalign']
                defect_entry.parameters['potalign'] = potalign
            elif 'kumagai_meta' in defect_entry.parameters.keys():
                logger.info('WARNING: was not able to use potalign from Freysoldt correction, '
                            'using Kumagai value for purposes of band filling correction.')
                potalign = defect_entry.parameters['kumagai_meta']['kumagai_potalign']
                defect_entry.parameters['potalign'] = potalign
        else:
            if 'kumagai_meta' in defect_entry.parameters.keys():
                potalign = defect_entry.parameters['kumagai_meta']['kumagai_potalign']
                defect_entry.parameters['potalign'] = potalign
            elif 'freysoldt_meta' in defect_entry.parameters.keys():
                logger.info('WARNING: was not able to use potalign from Kumagai correction, '
                            'using Freysoldt value for purposes of band filling correction.')
                potalign = defect_entry.parameters['freysoldt_meta']['freysoldt_potalign']
                defect_entry.parameters['potalign'] = potalign
        # consider running band filling correction
        required_bandfilling_params = ["eigenvalues", "kpoint_weights", "potalign", "vbm", "cbm"]
        run_bandfilling = (len(set(defect_entry.parameters.keys()).intersection(required_bandfilling_params))
                           == len(required_bandfilling_params))
        if run_bandfilling:
            # keys may be present but unset; all three must carry values
            if (defect_entry.parameters['vbm'] is None) or (defect_entry.parameters['cbm'] is None) \
                    or (defect_entry.parameters['potalign'] is None):
                run_bandfilling = False
        if not run_bandfilling:
            logger.info('Insufficient DefectEntry parameters exist for BandFilling Correction.')
        else:
            defect_entry = self.perform_bandfilling(defect_entry)
        # consider running band edge shifting correction
        required_bandedge_shifting_params = ["hybrid_cbm", "hybrid_vbm", "vbm", "cbm"]
        run_bandedge_shifting = (
            len(set(defect_entry.parameters.keys()).intersection(required_bandedge_shifting_params))
            == len(required_bandedge_shifting_params))
        if not run_bandedge_shifting:
            logger.info('Insufficient DefectEntry parameters exist for BandShifting Correction.')
        else:
            defect_entry = self.perform_band_edge_shifting(defect_entry)
        return defect_entry
def perform_freysoldt(self, defect_entry):
    """Run the Freysoldt finite-size charge correction on a defect entry.

    Builds a FreysoldtCorrection from ``defect_entry.parameters['dielectric']``,
    computes the correction, and stores the resulting metadata under
    ``defect_entry.parameters['freysoldt_meta']``.

    :param defect_entry: DefectEntry with a 'dielectric' parameter set.
    :return: the same DefectEntry with 'freysoldt_meta' added to its parameters.
    """
    FC = FreysoldtCorrection(defect_entry.parameters['dielectric'])
    freycorr = FC.get_correction(defect_entry)
    # Copy so later edits to the dict do not mutate the correction object's
    # own metadata record.
    freysoldt_meta = FC.metadata.copy()
    # NOTE(review): assumes get_correction() has populated
    # defect_entry.parameters["potalign"] as a side effect -- confirm.
    freysoldt_meta["freysoldt_potalign"] = defect_entry.parameters["potalign"]
    freysoldt_meta["freysoldt_electrostatic"] = freycorr["freysoldt_electrostatic"]
    freysoldt_meta["freysoldt_potential_alignment_correction"] = freycorr["freysoldt_potential_alignment"]
    defect_entry.parameters.update({'freysoldt_meta': freysoldt_meta})
    return defect_entry
def perform_kumagai(self, defect_entry):
    """Run the Kumagai finite-size charge correction on a defect entry.

    Optional 'gamma' and 'sampling_radius' parameters of the entry are
    forwarded to KumagaiCorrection when present; otherwise the correction's
    own defaults apply.

    :param defect_entry: DefectEntry with a 'dielectric' parameter set.
    :return: the same DefectEntry with 'kumagai_meta' added to its parameters.
    """
    gamma = defect_entry.parameters['gamma'] if 'gamma' in defect_entry.parameters.keys() else None
    sampling_radius = defect_entry.parameters[
        'sampling_radius'] if 'sampling_radius' in defect_entry.parameters.keys() else None
    KC = KumagaiCorrection(defect_entry.parameters['dielectric'],
                           sampling_radius=sampling_radius, gamma=gamma)
    kumagaicorr = KC.get_correction(defect_entry)
    # Shallow copy of the correction's metadata so additions below do not
    # mutate KC.metadata itself.
    kumagai_meta = {k: v for k, v in KC.metadata.items()}
    # NOTE(review): assumes get_correction() has populated
    # defect_entry.parameters["potalign"] as a side effect -- confirm.
    kumagai_meta["kumagai_potalign"] = defect_entry.parameters["potalign"]
    kumagai_meta["kumagai_electrostatic"] = kumagaicorr["kumagai_electrostatic"]
    kumagai_meta["kumagai_potential_alignment_correction"] = kumagaicorr["kumagai_potential_alignment"]
    defect_entry.parameters.update({'kumagai_meta': kumagai_meta})
    return defect_entry
def perform_bandfilling(self, defect_entry):
    """Run the band-filling (Moss-Burstein) correction on a defect entry.

    Updates 'bandfilling_meta' with the computed correction and mirrors the
    free-carrier counts into top-level parameters for later use by the
    band-edge shifting correction.

    :param defect_entry: DefectEntry with eigenvalue/kpoint/potalign data.
    :return: the same DefectEntry with updated parameters.
    """
    BFC = BandFillingCorrection()
    bfc_dict = BFC.get_correction(defect_entry)
    # Work on a copy so the original metadata dict in parameters is not
    # mutated in place before the update below.
    bandfilling_meta = defect_entry.parameters['bandfilling_meta'].copy()
    bandfilling_meta.update({'bandfilling_correction': bfc_dict['bandfilling_correction']})
    defect_entry.parameters.update({'bandfilling_meta': bandfilling_meta,
                                    # also update free holes and electrons for shallow level shifting correction...
                                    'num_hole_vbm': bandfilling_meta['num_hole_vbm'],
                                    'num_elec_cbm': bandfilling_meta['num_elec_cbm']})
    return defect_entry
def perform_band_edge_shifting(self, defect_entry):
    """Run the band-edge shifting correction on a defect entry.

    Merges the computed correction dict into the entry's existing
    'bandshift_meta' parameter.

    :param defect_entry: DefectEntry with hybrid/GGA band edge parameters.
    :return: the same DefectEntry with updated 'bandshift_meta'.
    """
    BEC = BandEdgeShiftingCorrection()
    bec_dict = BEC.get_correction(defect_entry)
    # Copy before merging so the pre-existing metadata dict is not mutated.
    bandshift_meta = defect_entry.parameters['bandshift_meta'].copy()
    bandshift_meta.update(bec_dict)
    defect_entry.parameters.update({"bandshift_meta": bandshift_meta})
    return defect_entry
def delocalization_analysis(self, defect_entry):
    """
    Do delocalization analysis. To do this, one considers:
        i) sampling region of planar averaged electrostatic potential (freysoldt approach)
        ii) sampling region of atomic site averaged potentials (kumagai approach)
        iii) structural relaxation amount outside of radius considered in kumagai approach (default is wigner seitz
        radius)
        iv) if defect is not a vacancy type -> track to see how much the defect has moved

    calculations that fail delocalization get "is_compatible" set to False in parameters
    also parameters receives a "delocalization_meta" with following dict:
        plnr_avg = {'is_compatible': True/False, 'metadata': metadata used for determining this}
        atomic_site = {'is_compatible': True/False, 'metadata': metadata used for determining this}
        structure_relax = {'is_compatible': True/False, 'metadata': metadata used for determining this}
        defectsite_relax = {'is_compatible': True/False, 'metadata': metadata used for determining this}
    """
    defect_entry.parameters.update(
        {'is_compatible': True})  # this will be switched to False if delocalization is detected
    # Each check below only runs if the corresponding correction metadata
    # was produced earlier; missing data downgrades to an info message.
    if 'freysoldt_meta' in defect_entry.parameters.keys():
        defect_entry = self.check_freysoldt_delocalized(defect_entry)
    else:
        logger.info('Insufficient information provided for performing Freysoldt '
                    'correction delocalization analysis.\n'
                    'Cannot perform planar averaged electrostatic potential '
                    'compatibility analysis.')
    if 'kumagai_meta' in defect_entry.parameters.keys():
        defect_entry = self.check_kumagai_delocalized(defect_entry)
    else:
        logger.info('Insufficient information provided for performing Kumagai '
                    'correction delocalization analysis.\n'
                    'Cannot perform atomic site averaged electrostatic '
                    'potential compatibility analysis.')
    # Structural-relaxation delocalization requires all four of these keys.
    req_struct_delocal_params = ["final_defect_structure", "initial_defect_structure",
                                 "sampling_radius", "defect_frac_sc_coords"]
    run_struct_delocal = True if \
        len(set(defect_entry.parameters.keys()).intersection(req_struct_delocal_params)) \
        == len(req_struct_delocal_params) else False
    if run_struct_delocal:
        defect_entry = self.check_final_relaxed_structure_delocalized(defect_entry)
    else:
        logger.info('Insufficient information provided in defect_entry.parameters. '
                    'Cannot perform full structure site relaxation compatibility analysis.')
    return defect_entry
def check_freysoldt_delocalized(self, defect_entry):
    """Check Freysoldt planar-averaged potential sampling for delocalization.

    For each lattice axis, the variance and the min/max window of the
    potential-correction uncertainty statistics are compared against the
    tolerances on this compatibility object (``plnr_avg_var_tol`` and
    ``plnr_avg_minmax_tol``).  If any axis fails either criterion the
    entry is flagged incompatible.

    :param defect_entry: DefectEntry whose parameters contain
        'freysoldt_meta' with per-axis 'pot_corr_uncertainty_md' stats.
    :return: the same DefectEntry with 'delocalization_meta' (and possibly
        'is_compatible') updated in its parameters.
    """
    plnr_avg_analyze_meta = {}
    plnr_avg_allows_compatible = True
    for ax in range(3):
        freystats = defect_entry.parameters['freysoldt_meta']['pot_corr_uncertainty_md'][ax]['stats']

        # Direct boolean comparisons instead of `True if ... else False`.
        frey_variance_compatible = freystats['variance'] <= self.plnr_avg_var_tol
        frey_window = abs(freystats['minmax'][1] - freystats['minmax'][0])
        frey_minmax_compatible = frey_window <= self.plnr_avg_minmax_tol

        plnr_avg_analyze_meta.update({ax: {'frey_variance_compatible': frey_variance_compatible,
                                           'frey_variance': freystats['variance'],
                                           'plnr_avg_var_tol': self.plnr_avg_var_tol,
                                           'frey_minmax_compatible': frey_minmax_compatible,
                                           'frey_minmax_window': frey_window,
                                           'plnr_avg_minmax_tol': self.plnr_avg_minmax_tol}})

        # One failing axis marks the whole entry delocalized.
        if not (frey_variance_compatible and frey_minmax_compatible):
            plnr_avg_allows_compatible = False

    defect_entry.parameters.setdefault('delocalization_meta', {})
    defect_entry.parameters['delocalization_meta'].update({
        'plnr_avg': {
            'is_compatible': plnr_avg_allows_compatible,
            'metadata': plnr_avg_analyze_meta}})

    if not plnr_avg_allows_compatible:
        defect_entry.parameters.update({'is_compatible': False})

    return defect_entry
def check_kumagai_delocalized(self, defect_entry):
    """Check Kumagai atomic-site-averaged potentials for delocalization.

    Compares the variance and min/max window of the Kumagai correction's
    uncertainty statistics against ``atomic_site_var_tol`` and
    ``atomic_site_minmax_tol``; a failure flags the entry incompatible.

    :param defect_entry: DefectEntry whose parameters contain
        'kumagai_meta' with 'pot_corr_uncertainty_md' stats.
    :return: the same DefectEntry with 'delocalization_meta' (and possibly
        'is_compatible') updated in its parameters.
    """
    atomic_site_analyze_meta = {}
    kumagaistats = defect_entry.parameters['kumagai_meta']['pot_corr_uncertainty_md']['stats']

    # Direct boolean comparisons instead of `True if ... else False`.
    kumagai_variance_compatible = kumagaistats['variance'] <= self.atomic_site_var_tol
    kumagai_window = abs(kumagaistats['minmax'][1] - kumagaistats['minmax'][0])
    kumagai_minmax_compatible = kumagai_window <= self.atomic_site_minmax_tol

    atomic_site_analyze_meta.update({'kumagai_variance_compatible': kumagai_variance_compatible,
                                     'kumagai_variance': kumagaistats['variance'],
                                     'atomic_site_var_tol': self.atomic_site_var_tol,
                                     'kumagai_minmax_compatible': kumagai_minmax_compatible,
                                     'kumagai_minmax_window': kumagai_window,
                                     'plnr_avg_minmax_tol': self.atomic_site_minmax_tol})

    atomic_site_allows_compatible = kumagai_variance_compatible and kumagai_minmax_compatible

    defect_entry.parameters.setdefault('delocalization_meta', {})
    defect_entry.parameters['delocalization_meta'].update({
        'atomic_site':
            {'is_compatible': atomic_site_allows_compatible,
             'metadata': atomic_site_analyze_meta}})

    if not atomic_site_allows_compatible:
        defect_entry.parameters.update({'is_compatible': False})

    return defect_entry
def check_final_relaxed_structure_delocalized(self, defect_entry):
    """Check structural relaxation for delocalization.

    Two tests are performed:
      1. *structure_relax*: total and percentage atomic displacement outside
         the sampling radius must stay within ``tot_relax_tol`` /
         ``perc_relax_tol``.
      2. *defectsite_relax*: for non-vacancy defects, the defect site's own
         displacement must stay within ``defect_tot_relax_tol``.

    NOTE this assumes initial and final structures have sites indexed in
    the same way.

    :param defect_entry: DefectEntry with 'initial_defect_structure',
        'final_defect_structure', 'sampling_radius' and
        'defect_frac_sc_coords' parameters.
    :return: the same DefectEntry with 'delocalization_meta' (and possibly
        'is_compatible') updated in its parameters.
    """
    structure_relax_analyze_meta = {}
    initial_defect_structure = defect_entry.parameters['initial_defect_structure']
    final_defect_structure = defect_entry.parameters['final_defect_structure']
    radius_to_sample = defect_entry.parameters['sampling_radius']
    def_frac_coords = defect_entry.parameters['defect_frac_sc_coords']

    initsites = [site.frac_coords for site in initial_defect_structure]
    finalsites = [site.frac_coords for site in final_defect_structure]
    distmatrix = initial_defect_structure.lattice.get_all_distances(finalsites, initsites)

    # calculate distance moved as a function of the distance from the defect
    distdata = []
    totpert = 0.
    defindex = None
    for ind, site in enumerate(initial_defect_structure.sites):
        # Sites within 0.01 of the defect's fractional coordinates are the
        # defect site itself; it is handled separately below.
        if site.distance_and_image_from_frac_coords(def_frac_coords)[0] < 0.01:
            defindex = ind
            continue

        # distmatrix[ind, ind] is this site's displacement between the
        # initial and final structures (same index in both, per the NOTE).
        totpert += distmatrix[ind, ind]
        # append [distance to defect, distance traveled, index in structure]
        distance_to_defect = \
            initial_defect_structure.lattice.get_distance_and_image(def_frac_coords, initsites[ind])[0]
        distdata.append([distance_to_defect, distmatrix[ind, ind], int(ind)])

    if defindex is None and not isinstance(defect_entry.defect, Vacancy):
        raise ValueError("fractional coordinate for defect could not be "
                         "identified in initial_defect_structure")

    distdata.sort()
    tot_relax_outside_rad = 0.
    perc_relax_outside_rad = 0.
    for record in distdata:
        perc_relax = 100 * record[1] / totpert if totpert else 0.
        record.append(perc_relax)  # percentage contribution to total relaxation
        if record[0] > radius_to_sample:
            tot_relax_outside_rad += record[1]
            perc_relax_outside_rad += record[3]

    # Direct boolean expressions instead of `True if ... else False` /
    # `False if ... else True`.
    structure_tot_relax_compatible = tot_relax_outside_rad <= self.tot_relax_tol
    structure_perc_relax_compatible = not (
        perc_relax_outside_rad > self.perc_relax_tol and totpert >= 1.)

    structure_relax_analyze_meta.update({'structure_tot_relax_compatible': structure_tot_relax_compatible,
                                         'tot_relax_outside_rad': tot_relax_outside_rad,
                                         'tot_relax_tol': self.tot_relax_tol,
                                         'structure_perc_relax_compatible': structure_perc_relax_compatible,
                                         'perc_relax_outside_rad': perc_relax_outside_rad,
                                         'perc_relax_tol': self.perc_relax_tol,
                                         'full_structure_relax_data': distdata,
                                         'defect_index': defindex})

    structure_relax_allows_compatible = (
        structure_tot_relax_compatible and structure_perc_relax_compatible)

    # NEXT: do single defect delocalization analysis (requires similar data, so might as well run in tandem
    # with structural delocalization)
    defectsite_relax_analyze_meta = {}
    if isinstance(defect_entry.defect, Vacancy):
        # Vacancies have no defect site in the final structure to track.
        defectsite_relax_allows_compatible = True
        defectsite_relax_analyze_meta.update({'relax_amount': None,
                                              'defect_tot_relax_tol': self.defect_tot_relax_tol})
    else:
        defect_relax_amount = distmatrix[defindex, defindex]
        defectsite_relax_allows_compatible = defect_relax_amount <= self.defect_tot_relax_tol
        defectsite_relax_analyze_meta.update({'relax_amount': defect_relax_amount,
                                              'defect_tot_relax_tol': self.defect_tot_relax_tol})

    defect_entry.parameters.setdefault('delocalization_meta', {})
    defect_entry.parameters['delocalization_meta'].update({
        'defectsite_relax':
            {'is_compatible': defectsite_relax_allows_compatible,
             'metadata': defectsite_relax_analyze_meta}})
    defect_entry.parameters['delocalization_meta'].update({
        'structure_relax':
            {'is_compatible': structure_relax_allows_compatible,
             'metadata': structure_relax_analyze_meta}})

    if not (structure_relax_allows_compatible and defectsite_relax_allows_compatible):
        defect_entry.parameters.update({'is_compatible': False})

    return defect_entry
|
fraricci/pymatgen
|
pymatgen/analysis/defects/defect_compatibility.py
|
Python
|
mit
| 28,273
|
[
"pymatgen"
] |
fa78f77dc0daccf74b7adce8249a390e254bbd17b0c1d692daa50a775219a87f
|
#!/usr/bin/env python
"""The X12.map.source module emits Python source definitions from an :mod:`X12.parse` structure.
There are two flavors of Python source that can be produced:
- :class:`PythonVisitor` creates a single definition.
A single Python constructor defines the entire message.
- :class:`FlatPythonVisitor` builds a flat definition.
A number of individual constructors are assembled
into a workable parser.
See :ref:`traversal` for notes on the **Visitor** design pattern.
.. autoclass:: PythonVisitor
:members:
.. autoclass:: FlatPythonDetails
:members:
.. autoclass:: FlatPythonVisitor
:members:
"""
from tigershark.X12.parse import StructureVisitor
from tigershark.X12.parse import StopDescent
class PythonVisitor( StructureVisitor ):
    """Builds a single, huge definition.  Not very practical except
    for TINY messages.  Or if you set :py:data:`skipElement`
    to True, the resulting structure might be useful.

    :ivar skipElement: normally False, you can set this to True
        to produce a Segment-level message summary.
    """
    def __init__( self, varName, *args, **kw ):
        """Create a Python Visitor.

        :param varName: the name of the Python variable we'll produce;
            if ``None``, a name is derived from the message name in
            :meth:`preMessage`.
        """
        super( PythonVisitor, self ).__init__( *args, **kw )
        # Accumulated source lines; joined by getSource().
        self.result= []
        self.varName= varName
        self.skipElement= False # change to True to prune tree, skipping elements
    def preMessage( self, msg, indent=0 ):
        """Emit the import line and open the ``Message(`` constructor."""
        self.result.append( "from tigershark.X12.parse import Message, Loop, Segment, Composite, Element, Properties" )
        if self.varName is None: self.varName= "x%s" % ( msg.name, )
        self.result.append( "%s = Message( %r, %r, " % ( self.varName, msg.name, msg.props ) )
    def postMessage( self, msg, indent=0 ):
        """Close the ``Message(`` constructor."""
        self.result.append( ")" )
    def preLoop( self, aLoop, indent=0 ):
        """Report this Loop.

        :param aLoop: a :class:`X12.parse.Loop` structure.
        :param indent: the indentation level.
        """
        self.result.append( indent*'    ' + "Loop( %r, %r, " % (aLoop.name, aLoop.props,) )
    def postLoop( self, aLoop, indent=0 ):
        """Close this Loop's constructor."""
        self.result.append( indent*'    ' + ")," )
    def preSegment( self, aSegment, indent=0 ):
        """Report this Segment."""
        self.result.append( indent*'    ' + "Segment( %r, %r," % (aSegment.name, aSegment.props,) )
    def postSegment( self, aSegment, indent=0 ):
        """Close this Segment's constructor."""
        self.result.append( indent*'    ' + ")," )
    def preComposite( self, aComposite, indent=0 ):
        """Report this Composite.  Configured by the :py:data:`skipElement` variable."""
        if self.skipElement:
            return
        self.result.append( indent*'    ' + "Composite( %r, %r," % (
            aComposite.name, aComposite.props ) )
    def postComposite( self, aComposite, indent=0 ):
        """Report this Composite.  Configured by the :py:data:`skipElement` variable."""
        if self.skipElement:
            return
        self.result.append( indent*'    ' + ")," )
    def postElement( self, anElement, indent=0 ):
        """Report this Element.  Configured by the :py:data:`skipElement` variable."""
        if self.skipElement:
            return
        elt= anElement
        self.result.append( indent*'    ' + "Element( %r, Properties(desc=%r, req_sit=%r, data_type=(%r,%r,%r), position=%d," % (
            elt.name, elt.desc, elt.req_sit, elt.type_name, elt.min_len, elt.max_len, elt.position ) )
        self.result.append( indent*'    ' + "    codes=%r ) )," % (
            elt.codes, ) )
    def getSource( self ):
        """Return the accumulated source lines as one newline-joined string."""
        return "\n".join( self.result )
class FlatPythonDetails( PythonVisitor ):
    """Emit Segment, Composite and Element definitions only.

    Message and Loop nodes are deliberately not expanded here: they are
    handled by the visitor that scans the "top" of the structure.  A Loop
    encountered inside a Message or another Loop is emitted as a reference
    to its previously generated standalone definition.
    """

    def preMessage(self, msg, indent=0):
        # Messages are handled by the outer (flat) visitor.
        pass

    def postMessage(self, msg, indent=0):
        pass

    def preLoop(self, loop, indent=0):
        # Reference the loop's standalone definition, then stop descending
        # so its body is not expanded inline.
        self.result.append(f"{self.varName}_{loop.name},")
        raise StopDescent

    def postLoop(self, loop, indent=0):
        pass
class FlatPythonVisitor( StructureVisitor ):
    """Emit every Loop as a separate top-level definition, plus one Message.

    Segments, Composites and Elements are delegated to
    :class:`FlatPythonDetails`; this visitor reacts only to Message and
    Loop nodes.  Loops are defined during post-processing, so a Loop's
    children may reference earlier Loop definitions.
    """

    def __init__(self, varName, *args, **kw):
        """:param varName: Python variable name for the generated Message."""
        super(FlatPythonVisitor, self).__init__(*args, **kw)
        self.result = []
        self.varName = varName

    def _append_children(self, structure):
        """Render each child through the detail visitor.

        For Segments/Composites/Elements this is a full pre-/post- visit;
        for Loops it emits a reference to a previous post- definition.
        """
        for child in structure:
            details = FlatPythonDetails(self.varName)
            child.visit(details)
            self.result.extend(details.result)

    def preMessage(self, msg, indent=0):
        self.result.append("from tigershark.X12.parse import Message, Loop, Segment, Composite, Element, Properties")

    def postMessage(self, msg, indent=0):
        if self.varName is None:
            self.varName = "x%s" % (msg.name,)
        self.result.append("%s = Message( %r, %r, " % (self.varName, msg.name, msg.props))
        self._append_children(msg.structure)
        self.result.append(")")

    def preLoop(self, aLoop, indent=0):
        pass

    def postLoop(self, aLoop, indent=0):
        """Define the Loop, referencing definitions emitted earlier."""
        self.result.append("%s_%s = Loop( %r, %r, " % (self.varName, aLoop.name, aLoop.name, aLoop.props))
        self._append_children(aLoop.structure)
        self.result.append(")")

    def getSource(self):
        """Return the accumulated source as one newline-joined string."""
        return "\n".join(self.result)
|
jdavisp3/TigerShark
|
tigershark/X12/map/source.py
|
Python
|
bsd-3-clause
| 6,021
|
[
"VisIt"
] |
a331071e6c07e809e165b3f8347ad60bce4533db95b1653f7bc1a2c0308b9e2e
|
"""
Actions manager for transcripts ajax calls.
+++++++++++++++++++++++++++++++++++++++++++
This module does not support rollback (pressing the "Cancel" button in Studio).
All user changes are saved immediately.
"""
import copy
import os
import logging
import json
import requests
from django.http import HttpResponse, Http404
from django.core.exceptions import PermissionDenied
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.utils.translation import ugettext as _
from opaque_keys import InvalidKeyError
from xmodule.contentstore.content import StaticContent
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.keys import UsageKey
from xmodule.contentstore.django import contentstore
from xmodule.modulestore.exceptions import ItemNotFoundError
from util.json_request import JsonResponse
from xmodule.video_module.transcripts_utils import (
generate_subs_from_source,
generate_srt_from_sjson, remove_subs_from_store,
download_youtube_subs, get_transcripts_from_youtube,
copy_or_rename_transcript,
manage_video_subtitles_save,
GetTranscriptsFromYouTubeException,
TranscriptsRequestValidationException,
youtube_video_transcript_name,
)
from student.auth import has_course_author_access
__all__ = [
'upload_transcripts',
'download_transcripts',
'check_transcripts',
'choose_transcripts',
'replace_transcripts',
'rename_transcripts',
'save_transcripts',
]
log = logging.getLogger(__name__)
def error_response(response, message, status_code=400):
    """Record *message* on *response* and return it as a JSON error reply.

    The message is stored under the response's 'status' key and logged at
    debug level; the dict is then wrapped in a JsonResponse with the given
    HTTP status code (400 Bad Request by default).
    """
    response['status'] = message
    log.debug(message)
    return JsonResponse(response, status_code)
@login_required
def upload_transcripts(request):
    """
    Upload transcripts for current module.

    Expects a multipart POST containing:
        locator: usage key of the video module,
        transcript-file: the subtitle file to upload,
        video_list: JSON list of {'video': <name>, ...} dicts.

    returns: response dict::

        status: 'Success' and HTTP 200 or 'Error' and HTTP 400.
        subs: Value of uploaded and saved html5 sub field in video item.
    """
    response = {
        'status': 'Unknown server error',
        'subs': '',
    }
    locator = request.POST.get('locator')
    if not locator:
        return error_response(response, 'POST data without "locator" form data.')
    try:
        item = _get_item(request, request.POST)
    except (InvalidKeyError, ItemNotFoundError):
        return error_response(response, "Can't find item by locator.")
    if 'transcript-file' not in request.FILES:
        return error_response(response, 'POST data without "file" form data.')
    video_list = request.POST.get('video_list')
    if not video_list:
        return error_response(response, 'POST data without video names.')
    try:
        video_list = json.loads(video_list)
    except ValueError:
        return error_response(response, 'Invalid video_list JSON.')
    # Used utf-8-sig encoding type instead of utf-8 to remove BOM(Byte Order Mark), e.g. U+FEFF
    source_subs_filedata = request.FILES['transcript-file'].read().decode('utf-8-sig')
    source_subs_filename = request.FILES['transcript-file'].name
    if '.' not in source_subs_filename:
        return error_response(response, "Undefined file extension.")
    basename = os.path.basename(source_subs_filename)
    source_subs_name = os.path.splitext(basename)[0]
    # Extension without the leading dot, e.g. 'srt'.
    source_subs_ext = os.path.splitext(basename)[1][1:]
    if item.category != 'video':
        return error_response(response, 'Transcripts are supported only for "video" modules.')
    # Allow upload only if any video link is presented
    if video_list:
        sub_attr = source_subs_name
        try:
            # Generate and save for 1.0 speed, will create subs_sub_attr.srt.sjson subtitles file in storage.
            generate_subs_from_source({1: sub_attr}, source_subs_ext, source_subs_filedata, item)
            for video_dict in video_list:
                video_name = video_dict['video']
                # We are creating transcripts for every video source, if in future some of video sources would be deleted.
                # Updates item.sub with `video_name` on success.
                copy_or_rename_transcript(video_name, sub_attr, item, user=request.user)
            response['subs'] = item.sub
            response['status'] = 'Success'
        except Exception as ex:
            # NOTE(review): `ex.message` is a Python 2 idiom; on Python 3 most
            # exceptions have no `.message` attribute -- confirm runtime.
            return error_response(response, ex.message)
    else:
        return error_response(response, 'Empty video sources.')
    return JsonResponse(response)
@login_required
def download_transcripts(request):
    """
    Passes to user requested transcripts file.

    Expects GET parameters 'locator' (video module usage key) and
    'subs_id' (id of the stored sjson transcript).  The stored sjson
    transcript is converted to SRT and returned as a file attachment.

    Raises Http404 if unsuccessful.
    """
    locator = request.GET.get('locator')
    if not locator:
        log.debug('GET data without "locator" property.')
        raise Http404
    try:
        item = _get_item(request, request.GET)
    except (InvalidKeyError, ItemNotFoundError):
        log.debug("Can't find item by locator.")
        raise Http404
    subs_id = request.GET.get('subs_id')
    if not subs_id:
        log.debug('GET data without "subs_id" property.')
        raise Http404
    if item.category != 'video':
        log.debug('transcripts are supported only for video" modules.')
        raise Http404
    # Transcripts are stored in contentstore as subs_<id>.srt.sjson.
    filename = 'subs_{0}.srt.sjson'.format(subs_id)
    content_location = StaticContent.compute_location(item.location.course_key, filename)
    try:
        sjson_transcripts = contentstore().find(content_location)
        log.debug("Downloading subs for %s id", subs_id)
        # Convert the stored sjson to SRT at normal (1.0) playback speed.
        str_subs = generate_srt_from_sjson(json.loads(sjson_transcripts.data), speed=1.0)
        if not str_subs:
            log.debug('generate_srt_from_sjson produces no subtitles')
            raise Http404
        response = HttpResponse(str_subs, content_type='application/x-subrip')
        response['Content-Disposition'] = 'attachment; filename="{0}.srt"'.format(subs_id)
        return response
    except NotFoundError:
        log.debug("Can't find content in storage for %s subs", subs_id)
        raise Http404
@login_required
def check_transcripts(request):
    """
    Check state of transcripts availability.

    request.GET['data'] has key `videos`, which can contain any of the following::

        [
            {u'type': u'youtube', u'video': u'OEoXaMPEzfM', u'mode': u'youtube'},
            {u'type': u'html5', u'video': u'video1', u'mode': u'mp4'}
            {u'type': u'html5', u'video': u'video2', u'mode': u'webm'}
        ]
        `type` is youtube or html5
        `video` is html5 or youtube video_id
        `mode` is youtube, mp4 or webm

    Returns transcripts_presence: dictionary containing the status of the video
    """
    # Default/fallback shape of the response; only used on validation error.
    response = {
        'html5_local': [],
        'html5_equal': False,
        'is_youtube_mode': False,
        'youtube_local': False,
        'youtube_server': False,
        'youtube_diff': True,
        'current_item_subs': None,
        'status': 'Success',
    }
    try:
        __, videos, item = _validate_transcripts_data(request)
    except TranscriptsRequestValidationException as e:
        return error_response(response, e.message)
    # The actual presence analysis is delegated to get_transcripts_presence.
    transcripts_presence = get_transcripts_presence(videos, item)
    return JsonResponse(transcripts_presence)
def get_transcripts_presence(videos, item):
    """ fills in the transcripts_presence dictionary after for a given component
    with its list of videos.

    Returns transcripts_presence dict:

        html5_local: list of html5 ids, if subtitles exist locally for them;
        is_youtube_mode: bool, if we have youtube_id, and as youtube mode is of higher priority, reflect this with flag;
        youtube_local: bool, if youtube transcripts exist locally;
        youtube_server: bool, if youtube transcripts exist on server;
        youtube_diff: bool, if youtube transcripts exist on youtube server, and are different from local youtube ones;
        current_item_subs: string, value of item.sub field;
        status: string, 'Error' or 'Success';
        subs: string, new value of item.sub field, that should be set in module;
        command: string, action to front-end what to do and what to show to user.
    """
    transcripts_presence = {
        'html5_local': [],
        'html5_equal': False,
        'is_youtube_mode': False,
        'youtube_local': False,
        'youtube_server': False,
        'youtube_diff': True,
        'current_item_subs': None,
        'status': 'Success',
    }

    # Check whether the item's own sub transcript exists in contentstore.
    filename = 'subs_{0}.srt.sjson'.format(item.sub)
    content_location = StaticContent.compute_location(item.location.course_key, filename)
    try:
        local_transcripts = contentstore().find(content_location).data
        transcripts_presence['current_item_subs'] = item.sub
    except NotFoundError:
        pass

    # Check for youtube transcripts presence
    youtube_id = videos.get('youtube', None)
    if youtube_id:
        transcripts_presence['is_youtube_mode'] = True

        # youtube local
        filename = 'subs_{0}.srt.sjson'.format(youtube_id)
        content_location = StaticContent.compute_location(item.location.course_key, filename)
        try:
            # Rebinds local_transcripts; the equality check further down is
            # guarded by youtube_local, so it always sees this value.
            local_transcripts = contentstore().find(content_location).data
            transcripts_presence['youtube_local'] = True
        except NotFoundError:
            log.debug("Can't find transcripts in storage for youtube id: %s", youtube_id)

        # youtube server
        youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])
        youtube_text_api['params']['v'] = youtube_id
        youtube_transcript_name = youtube_video_transcript_name(youtube_text_api)
        if youtube_transcript_name:
            youtube_text_api['params']['name'] = youtube_transcript_name
        youtube_response = requests.get('http://' + youtube_text_api['url'], params=youtube_text_api['params'])

        if youtube_response.status_code == 200 and youtube_response.text:
            transcripts_presence['youtube_server'] = True
        #check youtube local and server transcripts for equality
        if transcripts_presence['youtube_server'] and transcripts_presence['youtube_local']:
            try:
                youtube_server_subs = get_transcripts_from_youtube(
                    youtube_id,
                    settings,
                    item.runtime.service(item, "i18n")
                )
                if json.loads(local_transcripts) == youtube_server_subs:  # check transcripts for equality
                    transcripts_presence['youtube_diff'] = False
            except GetTranscriptsFromYouTubeException:
                pass

    # Check for html5 local transcripts presence
    html5_subs = []
    for html5_id in videos['html5']:
        filename = 'subs_{0}.srt.sjson'.format(html5_id)
        content_location = StaticContent.compute_location(item.location.course_key, filename)
        try:
            html5_subs.append(contentstore().find(content_location).data)
            transcripts_presence['html5_local'].append(html5_id)
        except NotFoundError:
            log.debug("Can't find transcripts in storage for non-youtube video_id: %s", html5_id)
        if len(html5_subs) == 2:  # check html5 transcripts for equality
            transcripts_presence['html5_equal'] = json.loads(html5_subs[0]) == json.loads(html5_subs[1])

    # Derive the front-end command and the new item.sub value from presence.
    command, subs_to_use = _transcripts_logic(transcripts_presence, videos)
    transcripts_presence.update({
        'command': command,
        'subs': subs_to_use,
    })
    return transcripts_presence
def _transcripts_logic(transcripts_presence, videos):
    """
    By `transcripts_presence` content, figure out what to show to the user.

    returns: tuple of (`command`, `subs`).

    `command`: string, action for the front-end: what to do and what to show to the user.
    `subs`: string, new value of item.sub field, that should be set in module.

    `command` is one of::

        replace: replace local youtube subtitles with the server's version
        found: subtitles are found
        import: import subtitles from youtube server
        choose: choose one of two html5 subtitles
        use_existing: keep the subtitles already attached to the item
        not_found: subtitles are not found
    """
    command = None
    subs = ''  # new value of item.sub field, that should be set in module

    # youtube transcripts are of higher priority than html5 by design
    if (
            transcripts_presence['youtube_diff'] and
            transcripts_presence['youtube_local'] and
            transcripts_presence['youtube_server']):  # youtube server and local exist, and differ
        command = 'replace'
        subs = videos['youtube']
    elif transcripts_presence['youtube_local']:  # only youtube local exists
        command = 'found'
        subs = videos['youtube']
    elif transcripts_presence['youtube_server']:  # only youtube server exists
        command = 'import'
    else:  # html5 part
        if transcripts_presence['html5_local']:  # can be 1 or 2 html5 videos
            # A single local transcript, or two identical ones, is usable
            # as-is; otherwise the user must pick one.  In both cases the
            # first local id becomes the candidate sub value.
            if len(transcripts_presence['html5_local']) == 1 or transcripts_presence['html5_equal']:
                command = 'found'
            else:
                command = 'choose'
            subs = transcripts_presence['html5_local'][0]
        else:  # html5 sources have no subtitles
            # check if the item's own sub field still has usable subtitles
            if transcripts_presence['current_item_subs'] and not transcripts_presence['is_youtube_mode']:
                log.debug("Command is use existing %s subs", transcripts_presence['current_item_subs'])
                command = 'use_existing'
            else:
                command = 'not_found'
    log.debug(
        "Resulted command: %s, current transcripts: %s, youtube mode: %s",
        command,
        transcripts_presence['current_item_subs'],
        transcripts_presence['is_youtube_mode']
    )
    return command, subs
@login_required
def choose_transcripts(request):
    """
    Replaces html5 subtitles, presented for both html5 sources, with chosen one.

    Code removes rejected html5 subtitles and updates sub attribute with chosen html5_id.
    It does nothing with youtube id's.

    Returns: status `Success` and resulted item.sub value or status `Error` and HTTP 400.
    """
    response = {
        'status': 'Error',
        'subs': '',
    }
    try:
        data, videos, item = _validate_transcripts_data(request)
    except TranscriptsRequestValidationException as e:
        return error_response(response, e.message)

    html5_id = data.get('html5_id')  # html5_id chosen by user

    # find rejected html5_id and remove appropriate subs from store
    html5_id_to_remove = [x for x in videos['html5'] if x != html5_id]
    if html5_id_to_remove:
        remove_subs_from_store(html5_id_to_remove, item)

    if item.sub != html5_id:  # update sub value
        item.sub = html5_id
        item.save_with_metadata(request.user)
    response = {
        'status': 'Success',
        'subs': item.sub,
    }
    return JsonResponse(response)
@login_required
def replace_transcripts(request):
    """
    Replaces all transcripts with youtube ones.

    Downloads subtitles from youtube and replaces all transcripts with downloaded ones.

    Returns: status `Success` and resulted item.sub value or status `Error` and HTTP 400.
    """
    response = {'status': 'Error', 'subs': ''}
    try:
        __, videos, item = _validate_transcripts_data(request)
    except TranscriptsRequestValidationException as e:
        return error_response(response, e.message)

    youtube_id = videos['youtube']
    if not youtube_id:
        return error_response(response, 'YouTube id {} is not presented in request data.'.format(youtube_id))

    try:
        # Fetches subtitles from YouTube and saves them into contentstore.
        download_youtube_subs(youtube_id, item, settings)
    except GetTranscriptsFromYouTubeException as e:
        return error_response(response, e.message)

    item.sub = youtube_id
    item.save_with_metadata(request.user)
    response = {
        'status': 'Success',
        'subs': item.sub,
    }
    return JsonResponse(response)
def _validate_transcripts_data(request):
    """
    Validates, that request contains all proper data for transcripts processing.

    Returns tuple of 3 elements::

        data: dict, loaded json from request,
        videos: parsed `data` to useful format,
        item: video item from storage

    Raises `TranscriptsRequestValidationException` if validation is unsuccessful
    or `PermissionDenied` if user has no access.
    """
    data = json.loads(request.GET.get('data', '{}'))
    if not data:
        raise TranscriptsRequestValidationException(_('Incoming video data is empty.'))
    try:
        item = _get_item(request, data)
    except (InvalidKeyError, ItemNotFoundError):
        raise TranscriptsRequestValidationException(_("Can't find item by locator."))

    if item.category != 'video':
        raise TranscriptsRequestValidationException(_('Transcripts are supported only for "video" modules.'))

    # parse data form request.GET.['data']['video'] to useful format
    # videos['html5'] maps html5 video name -> mode (mp4/webm).
    videos = {'youtube': '', 'html5': {}}
    for video_data in data.get('videos'):
        if video_data['type'] == 'youtube':
            videos['youtube'] = video_data['video']
        else:  # do not add same html5 videos
            # NOTE(review): this looks up the literal key 'video', which is
            # never set, so the condition is always true -- duplicates are
            # effectively deduped by the dict assignment below.  Presumably
            # `video_data['video'] not in videos['html5']` was intended;
            # confirm before changing, since the current behavior keeps the
            # *last* mode for a duplicated video name.
            if videos['html5'].get('video') != video_data['video']:
                videos['html5'][video_data['video']] = video_data['mode']
    return data, videos, item
@login_required
def rename_transcripts(request):
    """
    Create copies of existing subtitles with new names of HTML5 sources.

    Old subtitles are not deleted now, because we do not have rollback functionality.

    If succeed, Item.sub will be chosen randomly from html5 video sources provided by front-end.
    """
    response = {'status': 'Error', 'subs': ''}

    try:
        __, videos, item = _validate_transcripts_data(request)
    except TranscriptsRequestValidationException as e:
        return error_response(response, e.message)

    old_name = item.sub

    for new_name in videos['html5'].keys():  # copy subtitles for every HTML5 source
        try:
            # updates item.sub with new_name if it is successful.
            copy_or_rename_transcript(new_name, old_name, item, user=request.user)
        except NotFoundError:
            # subtitles file `item.sub` is not presented in the system. Nothing to copy or rename.
            # NOTE(review): the error_response return value is discarded and the
            # loop continues, so the function still reports 'Success' below --
            # confirm whether a `return` was intended here.
            error_response(response, "Can't find transcripts in storage for {}".format(old_name))

    response['status'] = 'Success'
    response['subs'] = item.sub  # item.sub has been changed, it is not equal to old_name.
    log.debug("Updated item.sub to %s", item.sub)
    return JsonResponse(response)
@login_required
def save_transcripts(request):
    """
    Saves video module with updated values of fields.

    Returns: status `Success` or status `Error` and HTTP 400.
    """
    response = {'status': 'Error'}

    # Incoming payload is JSON in the `data` GET parameter.
    data = json.loads(request.GET.get('data', '{}'))
    if not data:
        return error_response(response, 'Incoming video data is empty.')

    try:
        item = _get_item(request, data)
    except (InvalidKeyError, ItemNotFoundError):
        return error_response(response, "Can't find item by locator.")

    metadata = data.get('metadata')
    if metadata is not None:
        new_sub = metadata.get('sub')

        # Copy every metadata key from the request onto the video module.
        for metadata_key, value in metadata.items():
            setattr(item, metadata_key, value)

        item.save_with_metadata(request.user)  # item becomes updated with new values

        if new_sub:
            manage_video_subtitles_save(item, request.user)
        else:
            # If `new_sub` is empty, it means that user explicitly does not want to use
            # transcripts for current video ids and we remove all transcripts from storage.
            current_subs = data.get('current_subs')
            if current_subs is not None:
                for sub in current_subs:
                    remove_subs_from_store(sub, item)

    response['status'] = 'Success'
    return JsonResponse(response)
def _get_item(request, data):
    """
    Obtains from 'data' the locator for an item.

    Next, gets that item from the modulestore (allowing any errors to raise up).

    Finally, verifies that the user has access to the item.

    Returns the item.
    """
    usage_key = UsageKey.from_string(data.get('locator'))
    # This is placed before has_course_author_access() to validate the location,
    # because has_course_author_access() raises an error if the location is
    # invalid.  (NOTE(review): original comment was truncated to "raises r";
    # confirm the exact exception type against has_course_author_access.)
    item = modulestore().get_item(usage_key)

    # use the item's course_key, because the usage_key might not have the run
    if not has_course_author_access(request.user, item.location.course_key):
        raise PermissionDenied()

    return item
|
jbassen/edx-platform
|
cms/djangoapps/contentstore/views/transcripts_ajax.py
|
Python
|
agpl-3.0
| 20,955
|
[
"FEFF"
] |
65fcba1f9542e6cb25ecad70caa6e496c064789304b5afef6cea95ac1960c093
|
#!/usr/bin/python
import numpy as np
import scipy.interpolate as si
import mayavi.mlab as mylab
def calc_points(line):
    """Map integer grid indices to physical coordinates.

    Each (i, j, k) index in *line* is shifted by half a cell and scaled by
    the voxel dimensions (0.556, 0.556, 0.798), i.e. axis values become
    scale * (index - 0.5).

    Returns an (N, 3) float ndarray of coordinates.
    """
    indices = np.asarray(line, dtype=float)
    voxel_size = np.array([0.556, 0.556, 0.798])  # x, y, z cell dimensions
    return (indices - 0.5) * voxel_size
def bspline(cv, n=100, degree=3):
    """Evaluate a clamped B-spline defined by control points *cv*.

    Parameters
    ----------
    cv : array_like, shape (count, dims)
        Control points.
    n : int
        Number of points to sample along the curve.
    degree : int
        Requested spline degree; clipped to [1, count-1].

    Returns
    -------
    ndarray, shape (n, dims)
        Sampled curve points; the clamped knot vector makes the curve
        interpolate the first and last control points.
    """
    cv = np.asarray(cv)
    count = cv.shape[0]
    degree = np.clip(degree, 1, count - 1)  # max degree = count-1
    # Clamped (open-uniform) knot vector: end knots repeated `degree` times.
    # BUG FIX: Python 3 `range` is not a list, so the original
    # `[0]*degree + range(...)` concatenation raised TypeError; wrap in list().
    kv = np.array(
        [0] * degree + list(range(count - degree + 1)) + [count - degree] * degree,
        dtype='int',
    )
    u = np.linspace(0, (count - degree), num=n)
    points = np.zeros((len(u), cv.shape[1]))
    # BUG FIX: xrange does not exist in Python 3; range behaves identically.
    for dim in range(cv.shape[1]):
        points[:, dim] = si.splev(u, (kv, cv[:, dim], degree))
    return points
# save geometry lines
def save_poly(fname, lines):
fname += "_poly.txt"
f = open(fname, 'w')
print ' ', fname
for line in lines:
points = calc_points(line)
#spoints = bspline(points, n=points.shape[0], degree=20)
##m = len(points)
m = len(points)/2
if m<4: continue
kx = 3
##if(m>3): kx = 3
##else: kx = m-1
wx = np.ones(len(points))
wx[0] = wx[-1] = 100
tck,u=si.splprep(np.transpose(points),w=wx,k=kx,s=10)
##m /= 2
##if(m<4) : m=4
spoints = np.transpose([si.splev(np.linspace(0,1,m),tck)])
f.write("%2d " % m)
for spoint in spoints:
for vert in spoint:
f.write("%0.2f " % vert)
f.write('\n')
mylab.plot3d(points[:,0], points[:,1], points[:,2], color=(1,0,0))
mylab.plot3d(spoints[:,0], spoints[:,1], spoints[:,2], color=(0,1,0))
f.close()
mylab.show()
return
|
jrugis/cell_mesh
|
poly.py
|
Python
|
gpl-3.0
| 1,938
|
[
"Mayavi"
] |
a0e23534d1f78b2f02a5034322cf83912fe0857b26c95d1923aca652ac724e84
|
# -*- coding: utf-8 -*-
import numpy as np
import numpy.ma as ma
import itertools
import scipy.optimize
from pytmatrix.psd import GammaPSD
import csv
from datetime import datetime
import os
from netCDF4 import Dataset, num2date
from ..DropSizeDistribution import DropSizeDistribution
from ..io import common
def read_parsivel_arm_netcdf(filename):
    """
    Takes a filename pointing to an ARM Parsivel netcdf file and returns
    a drop size distribution object.

    Usage:
    dsd = read_parsivel_parsivel_netcdf(filename)

    Returns:
    DropSizeDistrometer object
    """
    reader = ARM_APU_reader(filename)
    if not reader:
        return None
    return DropSizeDistribution(reader)
class ARM_APU_reader(object):
    """
    This class reads and parses parsivel disdrometer data from ARM netcdf
    files. These conform to document (Need Document).

    Use the read_parsivel_arm_netcdf() function to interface with this.
    """

    def __init__(self, filename):
        """
        Handles setting up a APU Reader.

        Reads the netCDF file at *filename* and populates ``time`` (epoch
        time dict), bin geometry (``diameter``, ``spread``, ``bin_edges``),
        the ``fields`` dict (Nd, velocity, rain_rate, drop_spectrum) and
        ``spectrum_fall_velocity``.
        """
        self.fields = {}
        self.time = []  # replaced below by an epoch-time dictionary
        self.Nd = []

        self.filename = filename
        self.nc_dataset = Dataset(filename)

        time = np.ma.array(self.nc_dataset.variables["time"][:])
        # The time variable's units string encodes the base timestamp.
        base_time = datetime.strptime(
            self.nc_dataset["time"].units, "seconds since %Y-%m-%d %H:%M:%S 0:00"
        )
        # Return a common epoch time dictionary
        self.time = {
            "data": time + (base_time - datetime(1970, 1, 1)).total_seconds(),
            "units": common.EPOCH_UNITS,
            "standard_name": "Time",
            "long_name": "Time (UTC)",
        }
        # self.time = self._get_epoch_time(time, t_units)

        Nd = np.ma.array(self.nc_dataset.variables["number_density_drops"][:])
        velocity = np.ma.array(self.nc_dataset.variables["fall_velocity_calculated"][:])
        rain_rate = np.ma.array(self.nc_dataset.variables["precip_rate"][:])
        raw_spectrum = np.ma.array(self.nc_dataset.variables["raw_spectrum"][:])
        raw_spectrum_velocity = np.ma.array(
            self.nc_dataset.variables["raw_fall_velocity"][:]
        )

        self.diameter = common.var_to_dict(
            "diameter",
            self.nc_dataset.variables["particle_size"][:],
            "mm",
            "Particle diameter of bins",
        )
        self.spread = common.var_to_dict(
            "spread",
            self.nc_dataset.variables["class_size_width"][:],
            "mm",
            "Bin size spread of bins",
        )
        # Edges: 0, then each bin center plus half its width.
        self.bin_edges = common.var_to_dict(
            "bin_edges",
            np.hstack((0, self.diameter["data"] + np.array(self.spread["data"]) / 2)),
            "mm",
            "Boundaries of bin sizes",
        )

        self.fields["Nd"] = common.var_to_dict(
            "Nd", Nd, "m^-3 mm^-1", "Liquid water particle concentration"
        )
        self.fields["velocity"] = common.var_to_dict(
            "velocity", velocity, "m s^-1", "Terminal fall velocity for each bin"
        )
        self.fields["rain_rate"] = common.var_to_dict(
            "rain_rate", rain_rate, "mm h^-1", "Rain rate"
        )
        self.fields["drop_spectrum"] = common.var_to_dict(
            "drop_sectrum", raw_spectrum, "m^-3 mm^-1", "Droplet Spectrum"
        )
        self.spectrum_fall_velocity = common.var_to_dict(
            "raw_spectrum_velocity",
            raw_spectrum_velocity,
            "m^-3 mm^-1",
            "Spectrum Fall Velocity",
        )

    def _get_epoch_time(self, sample_times, t_units):
        """Convert time to epoch time and return a dictionary."""
        # BUG FIX: date2num was not in the module-level netCDF4 import, so
        # this helper raised NameError; import it locally.
        from netCDF4 import date2num
        # Convert the time array into a datetime instance
        dts = num2date(sample_times, t_units)
        # Now convert this datetime instance into a number of seconds since Epoch
        timesec = date2num(dts, common.EPOCH_UNITS)
        # Now once again convert this data into a datetime instance
        time_unaware = num2date(timesec, common.EPOCH_UNITS)

        eptime = {
            "data": time_unaware,
            "units": common.EPOCH_UNITS,
            "standard_name": "Time",
            "long_name": "Time (UTC)",
        }
        # BUG FIX: the dictionary was built but never returned, so callers
        # always received None.
        return eptime
|
josephhardinee/PyDisdrometer
|
pydsd/aux_readers/ARM_APU_reader.py
|
Python
|
lgpl-2.1
| 4,263
|
[
"NetCDF"
] |
3dbc51206da924baf838b8380da82882303be353327c3d14b032907538230829
|
import argparse
import sys
import skimage.io
import skimage.transform
import scipy.misc
from PIL import Image
def scale_image(input_file, output_file, scale, order=1):
    """Rescale an image file and save the result.

    Parameters
    ----------
    input_file : str
        Path of the image to read.
    output_file : str
        Path of the rescaled image to write.
    scale : str
        A float fraction ("0.5"), an integer percentage ("50"), or an
        output size written with outer delimiters, e.g. "(height,width)"
        (the first and last characters are stripped before splitting).
    order : int, optional
        Interpolation order: 0=nearest, 1=bilinear (default), 2=bicubic.

    Raises
    ------
    ValueError
        If *order* is not 0, 1 or 2.
    """
    # Allow very large images; PIL's decompression-bomb guard would
    # otherwise refuse to open them.
    Image.MAX_IMAGE_PIXELS = 50000 * 50000
    img_in = skimage.io.imread(input_file)

    # BUG FIX: the original if/elif chain left `interp` unbound for any
    # order outside {0, 1, 2}, crashing later with UnboundLocalError;
    # fail fast with a clear error instead.
    interp_names = {0: 'nearest', 1: 'bilinear', 2: 'bicubic'}
    if order not in interp_names:
        raise ValueError('order must be 0, 1 or 2, got %r' % (order,))
    interp = interp_names[order]

    if ',' in scale:
        # "(height,width)" -> [height, width]; outer delimiters stripped.
        scale = scale[1:-1].split(',')
        scale = [int(i) for i in scale]
    elif '.' in scale:
        scale = float(scale)
    else:
        scale = int(scale)

    # NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3;
    # consider skimage.transform.resize/rescale for newer environments.
    res = scipy.misc.imresize(img_in, scale, interp=interp)
    skimage.io.imsave(output_file, res)
if __name__ == "__main__":
    # CLI entry point: parse arguments and delegate to scale_image().
    parser = argparse.ArgumentParser()
    parser.add_argument('input_file', type=argparse.FileType('r'), default=sys.stdin, help='input file')
    parser.add_argument('out_file', type=argparse.FileType('w'), default=sys.stdin, help='out file (PNG)')
    parser.add_argument('scale', type=str, help='fraction scaling factor(float), percentage scaling factor(int), output size(tuple(height,width))') # integer option not implemented in galaxy wrapper
    parser.add_argument('order', type=int, default=1, help='interpolation method')
    args = parser.parse_args()

    # Only the file *names* are passed on; argparse opened the handles
    # merely to validate that the paths are accessible.
    scale_image(args.input_file.name, args.out_file.name, args.scale, args.order)
|
BMCV/galaxy-image-analysis
|
tools/scale_image/scale_image.py
|
Python
|
mit
| 1,364
|
[
"Galaxy"
] |
318d80f9e071a1972fc8a03db095a4666c218c560ef54b4eda0fd8e84a06daa9
|
from enum import Enum, unique
from functools import wraps
from django.contrib.auth.decorators import login_required
from django.http import (
HttpResponseRedirect,
HttpResponseForbidden,
HttpResponseBadRequest,
)
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from django.utils import timezone
from django.views.decorators.http import require_http_methods
from ..forms.comments import CommentAction
from ..forms.tavern import TavernPostForm, NoticeBoardForm, TavernTableAdminForm
from ..models import (
TavernTable,
TavernTableNoticeBoard,
TavernTableVisitor,
TavernBookmark,
TavernAccessRights,
)
from ..tavern import (
LIST_ALL,
LIST_FAVORITE,
SUPPORTED_LIST_STYLES_DISPLAY_NAME,
get_tavern_table_list,
bookmark_table,
unbook_table,
post_table_post,
)
from ..text import escape_user_input
@unique
class BookmarkActions(Enum):
    # Valid values of the `akce` GET parameter for table_bookmark()
    # (Czech: "oblibit" = bookmark, "neoblibit" = remove bookmark).
    BOOK = "oblibit"
    UNBOOK = "neoblibit"
def handle_table_visit(view_func):
    """
    Check if the tavern table is accessible:

    * If not, redirect to the tavern table list
    * If yes, update visit time and attach common variables

    On success the wrapped view receives ``request.tavern_table`` with
    ``visitor``, ``is_bookmarked`` and ``user_can_admin`` attached.
    The view must take a ``tavern_table_id`` keyword argument.
    """

    @wraps(view_func)
    def _wrapped_view_func(request, *args, **kwargs):
        if "tavern_table_id" in kwargs:
            # TODO: room for optimization, could be pre-selected with visitor
            request.tavern_table = table = get_object_or_404(
                TavernTable, pk=kwargs["tavern_table_id"]
            )

            if table.is_user_access_allowed(user_profile=request.ddcz_profile):
                # Record (or refresh) this user's visit to the table.
                (
                    request.tavern_table.visitor,
                    created,
                ) = TavernTableVisitor.objects.get_or_create(
                    tavern_table=table,
                    user_profile=request.ddcz_profile,
                    defaults={"unread": 0, "visit_time": timezone.now()},
                )
                if not created:
                    request.tavern_table.visitor.visit_time = timezone.now()
                    request.tavern_table.visitor.save()

                # This will be in the future just inferred from the visitor, but for now, the normative
                # data is in the TavernBookmark, unfortunately
                table.is_bookmarked = (
                    TavernBookmark.objects.filter(
                        tavern_table=table, user_profile=request.ddcz_profile
                    ).count()
                    == 1
                )
                table.user_can_admin = table.is_admin(request.ddcz_profile)

                # Call the actual view function
                response = view_func(request, *args, **kwargs)
                return response

        # No table id given, or access denied: back to the table list.
        return HttpResponseRedirect(reverse("ddcz:tavern-list"))

    return _wrapped_view_func
@login_required
@require_http_methods(["HEAD", "GET"])
# It would make sense to call it just `list`, but that would make it shadow the build-in list function
def list_tables(request):
    """
    Display list of Tavern Tables in a given style ("vypis") that user has access to.

    Supported styles:

    * Bookmarked tables ("oblibene"): Show only tables user has explicitly bookmarked
    TODO: * Active tables ("aktivni"): Show all tables except those in archive
    * All tables ("vsechny"): All tables
    TODO: * Search tables ("filter"): Show tables user has searched for
    """
    list_style = request.GET.get("vypis", None)

    if not list_style or list_style not in SUPPORTED_LIST_STYLES_DISPLAY_NAME:
        # No/unknown style requested: default to favorites when the user has
        # bookmarks, otherwise show all, and redirect for a canonical URL.
        bookmarks = request.ddcz_profile.tavern_bookmarks.count()
        if bookmarks > 0:
            default_style = LIST_FAVORITE
        else:
            default_style = LIST_ALL
        return HttpResponseRedirect(
            f"{reverse('ddcz:tavern-list')}?vypis={default_style}"
        )

    tavern_tables = get_tavern_table_list(
        user_profile=request.ddcz_profile, list_style=list_style
    )

    return render(
        request,
        "tavern/list.html",
        {
            "tavern_tables": tavern_tables,
            "supported_list_styles": SUPPORTED_LIST_STYLES_DISPLAY_NAME,
            "current_list_style": list_style,
        },
    )
@login_required
@require_http_methods(["HEAD", "GET", "POST"])
@handle_table_visit
def table_posts(request, tavern_table_id):
    """Show a tavern table's posts (GET) or add a new post (POST)."""
    table = request.tavern_table
    user_can_post = table.is_user_write_allowed(user_profile=request.ddcz_profile)
    posts_page = request.GET.get("z_s", 1)

    if request.method == "POST":
        # Create new Post
        # if request.POST["post_type"] == CommentAction.DELETE.value:
        #     try:
        #         Phorum.objects.get(
        #             id=request.POST["post_id"],
        #             nickname=request.user.profile.nick,
        #         ).delete()
        #     except Phorum.DoesNotExist as e:
        #         messages.error(request, "Zprávu se nepodařilo smazat.")
        #
        # NOTE(review): a POST whose action is not ADD (or from a user who
        # cannot post) leaves `post_form` unbound before render() — confirm
        # whether such requests can reach this view.
        if (
            request.POST.get("action", None) == CommentAction.ADD.value
            and user_can_post
        ):
            post_form = TavernPostForm(request.POST)
            if post_form.is_valid():
                post_table_post(
                    tavern_table=table,
                    author_profile=request.ddcz_profile,
                    text=escape_user_input(post_form.cleaned_data["text"]),
                )
            return HttpResponseRedirect(request.get_full_path())
    else:
        # First page of a GET view counts as "caught up": clear unread.
        if posts_page == 1:
            table.visitor.unread = 0
            table.visitor.save()
        post_form = TavernPostForm()

    return render(
        request,
        "tavern/posts.html",
        {
            "table": table,
            "posts_page": posts_page,
            "post_form": post_form,
            "user_can_post": user_can_post,
        },
    )
@login_required
@require_http_methods(["HEAD", "GET", "POST"])
@handle_table_visit
def notice_board(request, tavern_table_id):
    """Show (GET) or update (POST) the notice board of the current table.

    Updating requires the notice-board update permission; the board row is
    created on first save. Redirects back to itself after a successful save.
    """
    table = request.tavern_table
    user_can_update_notice_board = table.is_notice_board_update_allowed(
        user_profile=request.ddcz_profile
    )

    try:
        board = TavernTableNoticeBoard.objects.get(tavern_table=table)
    except TavernTableNoticeBoard.DoesNotExist:
        board = None

    if request.method == "POST":
        if not user_can_update_notice_board:
            return HttpResponseForbidden("Nemáte právo upravit nástěnku.")

        post_form = NoticeBoardForm(request.POST)
        if post_form.is_valid():
            if board:
                # BUG FIX: the update path stored raw user input while the
                # creation path below escaped it; sanitize consistently.
                board.text = escape_user_input(post_form.cleaned_data["text"])
                board.changed_at = timezone.now()
                board.change_author_nick = request.ddcz_profile.nick
                board.save()
            else:
                TavernTableNoticeBoard.objects.create(
                    tavern_table=table,
                    table_name=table.name,
                    text=escape_user_input(post_form.cleaned_data["text"]),
                    changed_at=timezone.now(),
                    change_author_nick=request.ddcz_profile.nick,
                )
            return HttpResponseRedirect(request.get_full_path())
    else:
        if board:
            post_form = NoticeBoardForm(initial={"text": board.text})
        else:
            post_form = NoticeBoardForm()

    return render(
        request,
        "tavern/notice-board.html",
        {
            "table": table,
            "notice_board": board,
            "post_form": post_form,
            "user_can_update_notice_board": user_can_update_notice_board,
        },
    )
@login_required
@require_http_methods(["HEAD", "GET"])
@handle_table_visit
def table_bookmark(request, tavern_table_id):
    """Bookmark or un-bookmark the current table per the `akce` GET
    parameter, then redirect to the table's posts."""
    table = request.tavern_table

    # TODO: The "Book" button should be a form and it should sent a POST request
    if "akce" not in request.GET:
        return HttpResponseBadRequest(
            "`akce` request parameter is mandatory for this endpoint"
        )

    try:
        requested_action = BookmarkActions(request.GET["akce"])
    except ValueError:
        return HttpResponseBadRequest(
            f"Invalid parameter for `akce`: {request.GET['akce']}"
        )

    # Dispatch to the matching helper; the enum has exactly these members.
    handlers = {
        BookmarkActions.BOOK: bookmark_table,
        BookmarkActions.UNBOOK: unbook_table,
    }
    handlers[requested_action](
        user_profile=request.ddcz_profile, tavern_table=table
    )

    return HttpResponseRedirect(
        reverse("ddcz:tavern-posts", kwargs={"tavern_table_id": table.pk})
    )
@login_required
@require_http_methods(["HEAD", "GET", "POST"])
@handle_table_visit
def table_administration(request, tavern_table_id):
    """Admin form for a tavern table: edit name/description/flags and the
    per-user access privilege lists. Admin-only."""
    table = request.tavern_table
    if not table.is_admin(request.ddcz_profile):
        return HttpResponseForbidden("Nemáte právo administrovat stůl.")

    if request.method == "POST":
        tavern_table_admin_form = TavernTableAdminForm(request.POST)
        if tavern_table_admin_form.is_valid():
            table.name = tavern_table_admin_form.cleaned_data["name"]
            table.description = tavern_table_admin_form.cleaned_data["description"]
            table.allow_rep = tavern_table_admin_form.cleaned_data["allow_rep"]
            # An empty allow-list means the table is public.
            table.public = (
                len(tavern_table_admin_form.cleaned_data["access_allowed"]) == 0
            )
            table.save()

            table.update_access_privileges(
                access_banned=tavern_table_admin_form.cleaned_data["access_banned"],
                access_allowed=tavern_table_admin_form.cleaned_data["access_allowed"],
                write_allowed=tavern_table_admin_form.cleaned_data["write_allowed"],
                assistant_admins=tavern_table_admin_form.cleaned_data[
                    "assistant_admins"
                ],
            )
            return HttpResponseRedirect(request.get_full_path())
    else:
        # Prefill the form from the table's current privilege map;
        # privilege lists are rendered as comma-separated nick lists.
        privileges = table.get_current_privileges_map()
        tavern_table_admin_form = TavernTableAdminForm(
            {
                "name": table.name,
                "description": table.description,
                "allow_rep": table.allow_rep,
                "assistant_admins": ", ".join(
                    privileges[TavernAccessRights.ASSISTANT_ADMIN]
                ),
                "write_allowed": ", ".join(
                    privileges[TavernAccessRights.WRITE_ALLOWED]
                ),
                "access_allowed": ", ".join(
                    privileges[TavernAccessRights.ACCESS_ALLOWED]
                ),
                "access_banned": ", ".join(
                    privileges[TavernAccessRights.ACCESS_BANNED]
                ),
            }
        )

    return render(
        request,
        "tavern/table-admin.html",
        {
            "table": table,
            "admin_form": tavern_table_admin_form,
        },
    )
|
dracidoupe/graveyard
|
ddcz/views/tavern.py
|
Python
|
mit
| 10,915
|
[
"VisIt"
] |
9fb943ab0c0fe6d3171779dbbf65efe03fa9c7418c1a4e42a7f77250cf978614
|
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Module for smallrnaseq configuration file. Used with command line app.
Created Jan 2017
Copyright (C) Damien Farrell
"""
from __future__ import absolute_import, print_function
import sys, os, string, time
import types, re, subprocess, glob, shutil
import pandas as pd
try:
import configparser
except:
import ConfigParser as configparser
path = os.path.dirname(os.path.abspath(__file__))
datadir = os.path.join(path, 'data')
from . import aligners
# Default settings for every configuration section: each key maps a section
# name to a list of (option, default value) pairs. Consumed by
# write_default_config() and check_options().
baseoptions = {'base': [('filenames',''),('path',''),('overwrite',0),
                    ('adapter',''),
                    ('index_path','indexes'),
                    ('libraries',''),
                    ('ref_fasta',''),('features',''),
                    ('output','results'),('add_labels',0),
                    ('aligner','bowtie'),
                    ('mirna',0),('species','hsa'),('pad5',3),('pad3',5),
                    ('verbose', 1),
                    ('cpus',1)],
               'aligner': [('default_params','-v 1 --best'),
                    ('mirna_params',aligners.BOWTIE_MIRBASE_PARAMS)],
               'novel': [('score_cutoff',.7), ('read_cutoff',100),
                    ('strict',0)],
               'de': [('count_file',''),('sample_labels',''),('sep',','),
                    ('sample_col',''),('factors_col',''),
                    ('conditions',''),('logfc_cutoff',1.5),
                    ('de_plot','point')]
               }
def write_default_config(conffile='default.conf', defaults={}):
    """Write a default config file.

    Creates *conffile* from *defaults* only when it does not already exist.

    Args:
        conffile: path of the config file to create.
        defaults: dict mapping section name -> list of (option, value) pairs.

    Returns:
        The config file path (whether or not it was created).
    """
    if not os.path.exists(conffile):
        cp = create_config_parser_from_dict(defaults, ['base','novel','aligner','de'])
        # BUG FIX: the file handle was previously opened and never closed;
        # a context manager flushes and closes it deterministically.
        with open(conffile, 'w') as f:
            cp.write(f)
        print ('wrote config file %s' %conffile)
    return conffile
def create_config_parser_from_dict(data, sections, **kwargs):
    """Helper method to create a ConfigParser from a dict and/or keywords.

    Args:
        data: dict mapping section name -> list of (option, value) pairs.
            Sections listed in *sections* but absent from *data* are still
            created, just left empty.
        sections: iterable of section names to add.
        **kwargs: overrides; any keyword matching an existing option name is
            set in every section that defines that option.

    Returns:
        A populated ``configparser.ConfigParser``.
    """
    cp = configparser.ConfigParser()
    for s in sections:
        cp.add_section(s)
        # BUG FIX: dict.has_key() only exists in Python 2; the `in`
        # operator works on both Python 2 and 3.
        if s not in data:
            continue
        for name, val in data[s]:
            cp.set(s, name, str(val))

    # use kwargs to create specific settings in the appropriate section
    for s in cp.sections():
        opts = cp.options(s)
        for k in kwargs:
            if k in opts:
                cp.set(s, k, kwargs[k])
    return cp
def parse_config(conffile=None):
    """Parse a configparser file.

    Returns the populated parser, or None when the file cannot be parsed.
    A missing file raises IOError/OSError — the explicit open below is what
    surfaces it, since ConfigParser.read silently skips missing files.
    """
    # BUG FIX: the original opened the file and only closed it on the
    # success path, leaking the handle when parsing failed; the context
    # manager closes it in all cases while keeping the existence check.
    with open(conffile, 'r'):
        pass
    cp = configparser.ConfigParser()
    try:
        cp.read(conffile)
    except Exception as e:
        print ('failed to read config file! check format')
        print ('Error returned:', e)
        return
    return cp
def get_options(cp):
    """Flatten every section of *cp* into a single OrderedDict, coercing
    boolean-like and integer-like values to real bools/ints.

    A successful integer parse takes precedence over a boolean parse,
    since the getters are attempted in that order.
    """
    from collections import OrderedDict
    options = OrderedDict()
    #options = cp._sections['base']
    for section in cp.sections():
        options.update(cp._sections[section])

    for key in options:
        for section in cp.sections():
            # Try the typed getters in order; failures leave the raw string.
            for getter in (cp.getboolean, cp.getint):
                try:
                    options[key] = getter(section, key)
                except Exception:
                    pass
    return options
def print_options(options):
    """Print option key/value pairs, one per line, followed by a blank line."""
    for key, value in options.items():
        print(key, ':', value)
    print()
def check_options(opts):
    """Check for missing default options in dict. Meant to handle
    incomplete config files: any option absent from *opts* is filled in
    from the package-level ``baseoptions`` defaults (first section that
    defines an option wins). Mutates and returns *opts*."""
    for section_defaults in baseoptions.values():
        for name, value in section_defaults:
            opts.setdefault(name, value)
    return opts
|
dmnfarrell/smallrnaseq
|
smallrnaseq/config.py
|
Python
|
gpl-3.0
| 4,475
|
[
"Bowtie"
] |
df4ad14acdbc00178fe9e190c8702880e2dc1ff98c539f6896efcc45993bcffd
|
# HTML fragment for one device card + its modal. Placeholders (TITLE,
# STATUS, MODID, COMMANDS) are substituted via str.replace() in the views.
DEVICE_TEMPLATE='''
<div class="col-md-3">
<br>
<center>
<button type="button" class="btn btn-primary" data-toggle="modal" data-target=".bs-example-modal-MODID" style="width:90%;height:45px;text-align: center; ">TITLE</button>
<div class="modal fade bs-example-modal-MODID" tabindex="-1" role="dialog" aria-labelledby="mySmallModalLabel">
<div class="modal-dialog modal-MODID">
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">×</span></button>
<h4 class="modal-title">TITLE : STATUS </h4>
</div>
<div class="row">
<br>
COMMANDS
</div>
</div>
</div>
</div>
</center>
</div>
'''

# Form fragment for a single device command button; placeholders DEVICE,
# COMMAND, VALUE and CTITLE are substituted via str.replace().
COMMAND_TEMPLATE='''
<div class="col-md-6">
<form style="text-align:center" class="form-inline" enctype='application/json' action=/API/translator method='POST'>
<input name='device' value='DEVICE' type='hidden'>
<input name='command' value='COMMAND' type='hidden'>
<input name='value' value='VALUE' type='hidden'>
<input class="btn btn-default" type="submit" value='CTITLE' >
</form>
</div>
'''

# Overall page shell; NAVBAR and PAGEBODY placeholders are substituted.
PAGE_TEMPLATE = '''
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<!-- Latest compiled and minified CSS -->
<link href="/static/css/bootstrap.min.css" rel="stylesheet">
<link href="/static/css/bootstrap-switch.css" rel="stylesheet">
<script src="/static/js/jquery.min.js"></script>
<script src="/static/js/bootstrap.min.js"></script>
<script src="/static/js/bootstrap-switch.js"></script>
</head>
<body>
NAVBAR
<div class=container>
<div class="row">
PAGEBODY
</div>
</div>
</body>
</html>
'''

# Navigation bar shell; the NAVLINKS placeholder is filled by formNavbar().
NAV_TEMPLATE='''
<nav class="navbar navbar-inverse">
<div class="container">
<!-- Brand and toggle get grouped for better mobile display -->
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1" aria-expanded="false">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand" href="#">Firefly</a>
</div>
<!-- Collect the nav links, forms, and other content for toggling -->
<div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1">
<ul class="nav navbar-nav">
NAVLINKS
</ul>
</div><!-- /.navbar-collapse -->
</div><!-- /.container-fluid -->
</nav>
'''
@app.route('/debugView')
@flask_login.login_required
def debugView():
    # Debug dashboard: render every device with its config commands.
    # NOTE(review): Python 2 code (print statements, iteritems).
    page_body = ""
    allDevices = getAllDevices().get('all_devices')
    # NOTE(review): formNavbar() takes (devices, groups, current); this
    # two-argument call would raise TypeError at runtime — confirm.
    navbar = formNavbar(allDevices, 'all')
    for name, options in collections.OrderedDict(sorted(allDevices.items())).iteritems():
        # Boolean entries in the device map are flags, not devices; skip them.
        if options is not True and options is not False:
            device_name = name
            device_title = options.get('title')
            device_config = ""
            device_body = ""
            device_status = options.get('value')
            print device_name
            print device_status
            if options.get('config'):
                # One command button per configured action.
                for c, v in options.get('config').iteritems():
                    print v.get('value')
                    device_config += str(COMMAND_TEMPLATE).replace('DEVICE',device_name).replace('CTITLE',c).replace('COMMAND',v.get('command')).replace('VALUE',v.get('value')).replace('ID',device_title.replace(' ', ''))
            device_body = DEVICE_TEMPLATE.replace('TITLE', device_title).replace('COMMANDS', device_config).replace('STATUS',device_status)
            # Color the panel by status: Active = red, Inactive = green.
            if device_status == 'Active':
                device_body = device_body.replace('panel-default','panel-danger')
            if device_status == 'Inactive':
                device_body = device_body.replace('panel-default','panel-success')
            page_body += device_body
    return PAGE_TEMPLATE.replace('PAGEBODY',page_body).replace('NAVBAR', navbar)
def generateDeviceView(dtype):
    # Render the dashboard for devices with capability *dtype* ('all' shows
    # everything).  NOTE(review): Python 2 code (print statement, iteritems).
    count = 0  # NOTE(review): unused
    page_body = ""
    allViews = getAllDevices()
    allDevices = allViews.get('all_devices')
    dashboarGroups = allViews.get('dashboard_groups')
    navbar = formNavbar(allDevices, dashboarGroups, dtype)
    # Drop boolean flag entries; keep only real device dicts.
    filtered_devices = {}
    for name, device in allDevices.iteritems():
        if not isinstance(allDevices[name], bool):
            filtered_devices[name] = device
    allDevices = filtered_devices
    for name, options in collections.OrderedDict(sorted(allDevices.items(), key=lambda elem: elem[1]['title'])).iteritems():
        if options is not True and options is not False:
            # NOTE(review): `and` binds tighter than `or`, so dtype == 'all'
            # bypasses the capabilities check entirely — presumably intended.
            if options.get('capabilities') is not None and dtype in options.get('capabilities') or dtype == 'all':
                device_name = name
                device_title = options.get('title')
                device_config = ""
                device_body = ""
                device_status = options.get('value')
                if options.get('config'):
                    for c, v in options.get('config').iteritems():
                        print v.get('value')
                        device_config += str(COMMAND_TEMPLATE).replace('DEVICE',device_name).replace('CTITLE',c).replace('COMMAND',v.get('command')).replace('VALUE',v.get('value'))
                else:
                    # Spacer markup keeps the card height consistent when the
                    # device exposes no commands.
                    device_config = '<div class="row"><div class="col-md-6" style="height:45px;"></div><div class="col-md-6"></div><div class="col-md-6" style="height:30px;"></div><div class="col-md-6"></div></div>'
                device_body = DEVICE_TEMPLATE.replace('TITLE', device_title).replace('COMMANDS', device_config).replace('STATUS',device_status).replace('MODID',device_name.replace(' ', ''))
                # Color the panel by status: Active = red, Inactive = green.
                if device_status == 'Active':
                    device_body = device_body.replace('panel-default','panel-danger')
                if device_status == 'Inactive':
                    device_body = device_body.replace('panel-default','panel-success')
                page_body += device_body
    return PAGE_TEMPLATE.replace('PAGEBODY',page_body).replace('NAVBAR', navbar)
def generateGroupView(groupName):
    # Render the dashboard for the devices belonging to *groupName*.
    # NOTE(review): Python 2 code (iteritems).
    page_body = ""
    allViews = getAllDevices()
    allDevices = allViews.get('all_devices')
    dashboarGroups = allViews.get('dashboard_groups')
    navbar = formNavbar(allDevices, dashboarGroups, groupName)
    # Drop boolean flag entries; keep only real device dicts.
    filtered_devices = {}
    for name, device in allDevices.iteritems():
        if not isinstance(allDevices[name], bool):
            filtered_devices[name] = device
    allDevices = filtered_devices
    for name, options in collections.OrderedDict(sorted(allDevices.items(), key=lambda elem: elem[1]['title'])).iteritems():
        if options is not True and options is not False and name in dashboarGroups.get(groupName):
            if options.get('capabilities') is not None:
                device_name = name
                device_title = options.get('title')
                device_config = ""
                device_body = ""
                device_status = options.get('value')
                if options.get('config'):
                    for c, v in options.get('config').iteritems():
                        device_config += str(COMMAND_TEMPLATE).replace('DEVICE',device_name).replace('CTITLE',c).replace('COMMAND',v.get('command')).replace('VALUE',v.get('value'))
                else:
                    # Spacer markup keeps the card height consistent when the
                    # device exposes no commands.
                    device_config = '<div class="row"><div class="col-md-6" style="height:45px;"></div><div class="col-md-6"></div><div class="col-md-6" style="height:30px;"></div><div class="col-md-6"></div></div>'
                device_body = DEVICE_TEMPLATE.replace('TITLE', device_title).replace('COMMANDS', device_config).replace('STATUS',device_status).replace('MODID',device_name.replace(' ', ''))
                # Color the panel by status: Active = red, Inactive = green.
                if device_status == 'Active':
                    device_body = device_body.replace('panel-default','panel-danger')
                if device_status == 'Inactive':
                    device_body = device_body.replace('panel-default','panel-success')
                page_body += device_body
    return PAGE_TEMPLATE.replace('PAGEBODY',page_body).replace('NAVBAR', navbar)
def formNavbar(devices, groups, current):
    # Build the navbar HTML: an "ALL" link, one link per dashboard group,
    # and a dropdown with one link per device capability. The entry whose
    # name equals `current` is rendered with the Bootstrap "active" class.
    def render_link(prefix, item):
        # One <li> entry pointing at /<prefix>/<item>.
        if item != current:
            return '<li><a href="/' + prefix + '/' + item + '">' + item.upper() + '</a></li>'
        return ('<li class="active"><a href="/' + prefix + '/' + item + '">'
                + item.upper() + ' <span class="sr-only">(current)</span></a></li>')

    capabilities = getDeviceCapabilities(devices)
    parts = [render_link('devices', 'all')]
    for group_name in sorted(groups):
        parts.append(render_link('groups', group_name))
    parts.append('''<li class="dropdown">
            <a href="#" class="dropdown-toggle" data-toggle="dropdown" role="button" aria-haspopup="true" aria-expanded="false">DEVICES <span class="caret"></span></a>
            <ul class="dropdown-menu">
    ''')
    for capability in sorted(capabilities):
        parts.append(render_link('devices', capability))
    parts.append('''
            </ul>
        </li>
    ''')
    return NAV_TEMPLATE.replace('NAVLINKS', ''.join(parts))
|
zpriddy/Firefly
|
Firefly/web_ui/templates/old_design.py
|
Python
|
apache-2.0
| 8,964
|
[
"Firefly"
] |
8197cc327c369ba40e410d7e29a90a86f37cc527d356d9b04e4fd0b4f46a23ba
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_sslkeyandcertificate
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of SSLKeyAndCertificate Avi RESTful Object
description:
- This module is used to configure SSLKeyAndCertificate object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
ca_certs:
description:
- Ca certificates in certificate chain.
certificate:
description:
- Sslcertificate settings for sslkeyandcertificate.
required: true
certificate_management_profile_ref:
description:
- It is a reference to an object of type certificatemanagementprofile.
created_by:
description:
- Creator name.
dynamic_params:
description:
- Dynamic parameters needed for certificate management profile.
enckey_base64:
description:
- Encrypted private key corresponding to the private key (e.g.
- Those generated by an hsm such as thales nshield).
enckey_name:
description:
- Name of the encrypted private key (e.g.
- Those generated by an hsm such as thales nshield).
hardwaresecuritymodulegroup_ref:
description:
- It is a reference to an object of type hardwaresecuritymodulegroup.
key:
description:
- Private key.
key_params:
description:
- Sslkeyparams settings for sslkeyandcertificate.
name:
description:
- Name of the object.
required: true
status:
description:
- Enum options - ssl_certificate_finished, ssl_certificate_pending.
- Default value when not specified in API or module is interpreted by Avi Controller as SSL_CERTIFICATE_FINISHED.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Enum options - ssl_certificate_type_virtualservice, ssl_certificate_type_system, ssl_certificate_type_ca.
- Default value when not specified in API or module is interpreted by Avi Controller as SSL_CERTIFICATE_TYPE_VIRTUALSERVICE.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a SSL Key and Certificate
avi_sslkeyandcertificate:
controller: 10.10.27.90
username: admin
password: AviNetworks123!
key: |
-----BEGIN PRIVATE KEY-----
....
-----END PRIVATE KEY-----
certificate:
self_signed: true
certificate: |
-----BEGIN CERTIFICATE-----
....
-----END CERTIFICATE-----
type: SSL_CERTIFICATE_TYPE_VIRTUALSERVICE
name: MyTestCert
'''
RETURN = '''
obj:
description: SSLKeyAndCertificate (api/sslkeyandcertificate) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Entry point: build the argument spec and drive the Avi REST API."""
    # One entry per attribute of the SSLKeyAndCertificate object; names
    # mirror the Avi API field names documented in DOCUMENTATION above.
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        ca_certs=dict(type='list',),
        certificate=dict(type='dict', required=True),
        certificate_management_profile_ref=dict(type='str',),
        created_by=dict(type='str',),
        dynamic_params=dict(type='list',),
        enckey_base64=dict(type='str',),
        enckey_name=dict(type='str',),
        hardwaresecuritymodulegroup_ref=dict(type='str',),
        key=dict(type='str',),
        key_params=dict(type='dict',),
        name=dict(type='str', required=True),
        status=dict(type='str',),
        tenant_ref=dict(type='str',),
        type=dict(type='str',),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    # Add the shared Avi connection options (controller, username, ...).
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    # Fail fast if the avisdk dependency was not importable at module load.
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # NOTE(review): set(['key']) presumably marks 'key' as a sensitive
    # field for avi_ansible_api — confirm against the avi module_utils.
    return avi_ansible_api(module, 'sslkeyandcertificate',
                           set(['key']))


if __name__ == '__main__':
    main()
|
dav1x/ansible
|
lib/ansible/modules/network/avi/avi_sslkeyandcertificate.py
|
Python
|
gpl-3.0
| 5,738
|
[
"VisIt"
] |
528e2fdd0caeaf821412e8269589e78173574a45e0440604d5cd7892d8db06b8
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2005 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2012 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
The core of the Gramps plugin system. This module provides capability to load
plugins from specified directories and provide information about the loaded
plugins.
Plugins are divided into several categories. These are: reports, tools,
importers, exporters, quick reports, and document generators.
"""
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
import os
import sys
import re
import logging
import importlib
# Module logger; propagate records to ancestor loggers' handlers.
LOG = logging.getLogger('._manager')
# Fix: the Logger attribute is `propagate`; the original assigned a
# misspelled `progagate`, creating an unused attribute with no effect.
LOG.propagate = True
from ..const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ..config import config
from . import PluginRegister, ImportPlugin, ExportPlugin, DocGenPlugin
from ..constfunc import win
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
_UNAVAILABLE = _("No description was provided")
#-------------------------------------------------------------------------
#
# BasePluginManager
#
#-------------------------------------------------------------------------
class BasePluginManager:
    """ unique singleton storage class for a :class:`.PluginManager`. """

    __instance = None

    @staticmethod
    def get_instance():
        """
        Use this function to get the instance of the :class:`.PluginManager`
        """
        if BasePluginManager.__instance is None:
            BasePluginManager.__instance = 1  # Set to 1 for __init__()
            BasePluginManager.__instance = BasePluginManager()
        return BasePluginManager.__instance

    def __init__(self):
        """ This function should only be run once by get_instance() """
        # Fix: the original used `is not 1`, an identity comparison with an
        # int literal that relies on CPython small-int caching and raises a
        # SyntaxWarning on Python >= 3.8; value equality is what is meant.
        if BasePluginManager.__instance != 1:
            raise Exception("This class is a singleton. "
                            "Use the get_instance() method")

        self.__import_plugins = []
        self.__export_plugins = []
        self.__docgen_plugins = []

        self.__attempt_list = []       # file names we tried to load
        self.__failmsg_list = []       # (filename, exc_info, pdata) failures
        self.__external_opt_dict = {}  # option class -> gui option class
        self.__success_list = []       # (filename, module, pdata) successes
        self.__docgen_names = []

        self.__mod2text = {}           # module name -> description text
        self.__modules = {}            # file name -> loaded module

        self.__pgr = PluginRegister.get_instance()
        self.__loaded_plugins = {}     # plugin id -> loaded module
        self.__scanned_dirs = []

    def reg_plugins(self, direct, dbstate=None, uistate=None,
                    load_on_reg=False, rescan=False):
        """
        Searches the specified directory, and registers python plugin that
        are being defined in gpr.py files.

        If a relationship calculator for env var LANG is present, it is
        immediately loaded so it is available for all.
        """
        if rescan:
            # Throw away all cached plugin state and force a fresh scan.
            self.__import_plugins = []
            self.__export_plugins = []
            self.__docgen_plugins = []
            self.__docgen_names = []
            self.__scanned_dirs = []
            self.__pgr._PluginRegister__plugindata = []
            self.__pgr._PluginRegister__id_to_pdata = {}
        # if we've already scanned this directory or if the directory does not
        # exist, we are done.  Should only happen in tests.

        # LOG.warning("\nPlugin manager registration: %s, load_on_reg=%s,"
        #             " been_here=%s, pahte exists:%s", direct, load_on_reg,
        #             direct in self.__scanned_dirs, os.path.isdir(direct))
        if os.path.isdir(direct) and direct not in self.__scanned_dirs:
            self.__scanned_dirs.append(direct)
            for (dirpath, dirnames, filenames) in os.walk(direct,
                                                          topdown=True):
                for dirname in dirnames[:]:
                    # Skip hidden and system directories:
                    if dirname.startswith(".") or dirname in ["po", "locale",
                                                              "__pycache__"]:
                        dirnames.remove(dirname)
                # LOG.warning("Plugin dir scanned: %s", dirpath)
                self.__pgr.scan_dir(dirpath, filenames, uistate=uistate)

        if load_on_reg:
            # Run plugins that request to be loaded on startup and
            # have a load_on_reg callable.
            # first, remove hidden
            plugins_to_load = []
            for plugin in self.__pgr.filter_load_on_reg():
                # LOG.warning("\nFound %s at registration", plugin.id)
                if plugin.id in config.get("plugin.hiddenplugins"):
                    continue
                plugins_to_load.append(plugin)
            # next, sort on dependencies
            # Probably a more efficient method to get dependency graph:
            plugins_sorted = []
            count = 0
            max_count = len(plugins_to_load)
            while plugins_to_load:
                for plugin in plugins_to_load[:]:  # copy of list
                    # LOG.warning("\nDependencies for %s at registration",
                    #             plugin.id)
                    delay = False
                    for depend in plugin.depends_on:
                        if depend not in [p.id for p in plugins_sorted]:
                            delay = True
                            break
                    if delay:
                        pass  # wait till next loop
                    else:
                        if plugin not in plugins_sorted:
                            plugins_sorted.append(plugin)
                        if plugin in plugins_to_load:
                            plugins_to_load.remove(plugin)
                count += 1
                if count > max_count:
                    # Cycle or missing dependency: report and give up.
                    print("Cannot resolve the following plugin dependencies:")
                    for plugin in plugins_to_load:
                        print("   Plugin '%s' requires: %s" % (
                            plugin.id, plugin.depends_on))
                    break
            # now load them:
            for plugin in plugins_sorted:
                # next line shouldn't be necessary, but this gets called a lot
                # of times during Travis test; so avoid multiple copies
                plugin.data = []
                mod = self.load_plugin(plugin)
                if hasattr(mod, "load_on_reg"):
                    # LOG.warning("\nRun %s at registration", plugin.id)
                    try:
                        results = mod.load_on_reg(dbstate, uistate, plugin)
                    except Exception:
                        # Best-effort: report the broken plugin, keep going.
                        import traceback
                        traceback.print_exc()
                        print("Plugin '%s' did not run; continuing..." % plugin.name)
                        continue
                    try:
                        iter(results)
                        plugin.data += results
                    except Exception:
                        # Non-iterable result: store it as-is.
                        plugin.data = results

        # Get the addon rules and import them and make them findable
        for plugin in self.__pgr.rule_plugins():
            mod = self.load_plugin(plugin)  # load the addon rule
            # get place in rule heirarchy to put the new rule
            obj_rules = importlib.import_module(
                'gramps.gen.filters.rules.' + plugin.namespace.lower())
            # get the new rule class object
            r_class = getattr(mod, plugin.ruleclass)
            # make the new rule findable via import statements
            setattr(obj_rules, plugin.ruleclass, r_class)
            # and add it to the correct fiter editor list
            obj_rules.editor_rule_list.append(r_class)

    def is_loaded(self, pdata_id):
        """
        return True if plugin is already loaded
        """
        return pdata_id in self.__loaded_plugins

    def load_plugin(self, pdata):
        """
        Load a :class:`.PluginData` object. This means import of the python
        module. Plugin directories are added to sys path, so files are found
        """
        if pdata.id in self.__loaded_plugins:
            return self.__loaded_plugins[pdata.id]
        need_reload = False
        filename = pdata.fname
        if filename in self.__modules:
            # filename is loaded already, a different plugin in this module
            _module = self.__modules[filename]
            self.__success_list.append((filename, _module, pdata))
            self.__loaded_plugins[pdata.id] = _module
            self.__mod2text[_module.__name__] += ' - ' + pdata.description
            return _module
        if filename in self.__attempt_list:
            # new load attempt after a fail, a reload needed
            need_reload = True
            # remove previous fail of the plugins in this file
            dellist = []
            for index, data in enumerate(self.__failmsg_list):
                if data[0] == filename:
                    dellist.append(index)
            dellist.reverse()
            for index in dellist:
                del self.__failmsg_list[index]
        else:
            self.__attempt_list.append(filename)
        try:
            _module = self.import_plugin(pdata)
            if need_reload:
                # For some strange reason second importing of a failed plugin
                # results in success. Then reload reveals the actual error.
                # Looks like a bug in Python.
                _module = self.reload(_module, pdata)
            if _module:
                self.__success_list.append((filename, _module, pdata))
                self.__modules[filename] = _module
                self.__loaded_plugins[pdata.id] = _module
                self.__mod2text[_module.__name__] = pdata.description
            return _module
        except Exception:
            import traceback
            traceback.print_exc()
            self.__failmsg_list.append((filename, sys.exc_info(), pdata))

        return None

    def import_plugin(self, pdata):
        """
        Rather than just __import__(id), this will add the pdata.fpath
        to sys.path first (if needed), import, and then reset path.
        """
        module = None
        if isinstance(pdata, str):
            pdata = self.get_plugin(pdata)
        if not pdata:
            return None
        if pdata.fpath not in sys.path:
            if pdata.mod_name:
                sys.path.insert(0, pdata.fpath)
                try:
                    module = __import__(pdata.mod_name)
                except ValueError as err:
                    # Python3 on Windows work with unicode in sys.path
                    # but they are mbcs encode for checking validity
                    if win():
                        # we don't want to load Gramps core plugin like this
                        # only 3rd party plugins
                        if "gramps" in pdata.fpath:
                            try:
                                sys.path.insert(0, ".")
                                oldwd = os.getcwd()
                                os.chdir(pdata.fpath)
                                module = __import__(pdata.mod_name)
                                os.chdir(oldwd)
                                sys.path.pop(0)
                            except ValueError as err:
                                LOG.warning("Plugin error (from '%s'): %s"
                                            % (pdata.mod_name, err))
                    else:
                        LOG.warning("Plugin error (from '%s'): %s"
                                    % (pdata.mod_name, err))
                except ImportError as err:
                    LOG.warning("Plugin error (from '%s'): %s"
                                % (pdata.mod_name, err))
                sys.path.pop(0)
            else:
                print("WARNING: module cannot be loaded")
        else:
            module = __import__(pdata.mod_name)
        return module

    def empty_managed_plugins(self):
        """ For some plugins, managed Plugin are used. These are only
        reobtained from the registry if this method is called
        """
        # TODO: do other lists need to be reset here, too?
        self.__import_plugins = []
        self.__export_plugins = []
        self.__docgen_plugins = []
        self.__docgen_names = []

    def reload_plugins(self):
        """ Reload previously loaded plugins """
        pymod = re.compile(r"^(.*)\.py$")

        oldfailmsg = self.__failmsg_list[:]
        self.__failmsg_list = []

        # attempt to reload all plugins that have succeeded in the past
        self.empty_managed_plugins()
        self.__loaded_plugins = {}

        oldmodules = self.__modules
        self.__modules = {}
        dellist = []
        # reload first modules that loaded successfully previously
        for (index, plugin) in enumerate(self.__success_list):
            filename = plugin[0]
            pdata = plugin[2]
            filename = filename.replace('pyc', 'py')
            filename = filename.replace('pyo', 'py')
            if filename in self.__modules:
                # module already reloaded, a second plugin in same module
                continue
            try:
                self.reload(plugin[1], pdata)
                self.__modules[filename] = plugin[1]
                self.__loaded_plugins[pdata.id] = plugin[1]
            except Exception:
                dellist.append(index)
                self.__failmsg_list.append((filename, sys.exc_info(), pdata))

        dellist.reverse()
        for index in dellist:
            del self.__success_list[index]

        # Remove previously good plugins that are now bad
        # from the registered lists
        self.__purge_failed()

        # attempt to load the plugins that have failed in the past
        for (filename, message, pdata) in oldfailmsg:
            self.load_plugin(pdata)

    def reload(self, module, pdata):
        """
        Reloads modules that might not be in the path.
        """
        try:
            # NOTE: `imp` is deprecated since Python 3.4; kept here to
            # preserve the exact fallback behaviour below.
            import imp
            fp, pathname, description = imp.find_module(pdata.mod_name, [pdata.fpath])
            try:
                module = imp.load_module(pdata.mod_name, fp, pathname, description)
            finally:
                if fp:
                    fp.close()
        except Exception:
            if pdata.mod_name in sys.modules:
                del sys.modules[pdata.mod_name]
            module = self.import_plugin(pdata)
        return module

    def get_fail_list(self):
        """ Return the list of failed plugins. """
        return self.__failmsg_list

    def get_success_list(self):
        """ Return the list of succeeded plugins. """
        return self.__success_list

    def get_plugin(self, id):
        """
        Returns a plugin object from :class:`.PluginRegister` by id.
        """
        return self.__pgr.get_plugin(id)

    def get_reg_reports(self, gui=True):
        """ Return list of registered reports
        :param gui: bool indicating if GUI reports or CLI reports must be
            returned
        """
        return self.__pgr.report_plugins(gui)

    def get_reg_tools(self, gui=True):
        """ Return list of registered tools
        :param gui: bool indicating if GUI reports or CLI reports must be
            returned
        """
        return self.__pgr.tool_plugins(gui)

    def get_reg_quick_reports(self):
        """ Return list of registered quick reports
        """
        return self.__pgr.quickreport_plugins()

    def get_reg_views(self):
        """ Return list of registered views
        """
        return self.__pgr.view_plugins()

    def get_reg_mapservices(self):
        """ Return list of registered mapservices
        """
        return self.__pgr.mapservice_plugins()

    def get_reg_bookitems(self):
        """ Return list of reports registered as bookitem
        """
        return self.__pgr.bookitem_plugins()

    def get_reg_gramplets(self):
        """ Return list of non hidden gramplets.
        """
        return self.__pgr.gramplet_plugins()

    def get_reg_sidebars(self):
        """ Return list of registered sidebars.
        """
        return self.__pgr.sidebar_plugins()

    def get_reg_databases(self):
        """ Return list of registered database backends
        """
        return self.__pgr.database_plugins()

    def get_external_opt_dict(self):
        """ Return the dictionary of external options. """
        return self.__external_opt_dict

    def get_module_description(self, module):
        """ Given a module name, return the module description. """
        return self.__mod2text.get(module, '')

    def get_reg_importers(self):
        """ Return list of registered importers
        """
        return self.__pgr.import_plugins()

    def get_reg_exporters(self):
        """ Return list of registered exporters
        """
        return self.__pgr.export_plugins()

    def get_reg_docgens(self):
        """ Return list of registered docgen
        """
        return self.__pgr.docgen_plugins()

    def get_reg_general(self, category=None):
        """ Return list of registered general libs
        """
        return self.__pgr.general_plugins(category)

    def load_plugin_category(self, category):
        """
        Make sure all plugins of a type are loaded.
        """
        for plugin in self.__pgr.general_plugins(category):
            # Fix: is_loaded() takes a plugin id; the original passed the
            # PluginData object, so the check was always False (load_plugin
            # deduplicated anyway, making this fix behavior-compatible).
            if not self.is_loaded(plugin.id):
                self.load_plugin(plugin)

    def get_plugin_data(self, category):
        """
        Gets all of the data from general plugins of type category.
        plugin.data may be a single item, an iterable, or a callable.

        >>> PLUGMAN.get_plugin_data('CSS')
        <a list of raw data items>
        """
        retval = []
        data = None
        for plugin in self.__pgr.general_plugins(category):
            data = plugin.data
            try:
                iter(data)
                retval.extend(data)
            except Exception:
                # Non-iterable data: collect the single item.
                retval.append(data)
        return retval

    def process_plugin_data(self, category):
        """
        Gathers all of the data from general plugins of type category,
        and pass it to a single process function from one of those
        plugins.

        >>> PLUGMAN.process_plugin_data('CSS')
        <a list of processed data items>
        """
        retval = []
        data = None
        process = None
        for plugin in self.__pgr.general_plugins(category):
            if plugin.process is not None:
                mod = self.load_plugin(plugin)
                if hasattr(mod, plugin.process):
                    process = getattr(mod, plugin.process)
            data = plugin.data
            if data:
                try:
                    iter(data)
                    retval.extend(data)
                except Exception:
                    retval.append(data)
        # LOG.warning("Process plugin data=%s, %s, items=%s",
        #             process is not None, category, len(retval))
        if process:
            return process(retval)
        return retval

    def get_import_plugins(self):
        """
        Get the list of import plugins.

        :return: :class:`.ImportPlugin` (a list of ImportPlugin instances)
        """
        ## TODO: would it not be better to remove ImportPlugin and use
        ##       only PluginData, loading from module when importfunction needed?
        if not self.__import_plugins:
            # The module still needs to be imported
            for pdata in self.get_reg_importers():
                if pdata.id in config.get("plugin.hiddenplugins"):
                    continue
                mod = self.load_plugin(pdata)
                if mod:
                    imp = ImportPlugin(name=pdata.name,
                                       description=pdata.description,
                                       import_function=getattr(mod, pdata.import_function),
                                       extension=pdata.extension)
                    self.__import_plugins.append(imp)

        return self.__import_plugins

    def get_export_plugins(self):
        """
        Get the list of export plugins.

        :return: :class:`.ExportPlugin` (a list of ExportPlugin instances)
        """
        ## TODO: would it not be better to remove ExportPlugin and use
        ##       only PluginData, loading from module when export/options needed?
        if not self.__export_plugins:
            # The modules still need to be imported
            for pdata in self.get_reg_exporters():
                if pdata.id in config.get("plugin.hiddenplugins"):
                    continue
                mod = self.load_plugin(pdata)
                if mod:
                    options = None
                    if (pdata.export_options and
                            hasattr(mod, pdata.export_options)):
                        options = getattr(mod, pdata.export_options)
                    exp = ExportPlugin(name=pdata.name_accell,
                                       description=pdata.description,
                                       export_function=getattr(mod, pdata.export_function),
                                       extension=pdata.extension,
                                       config=(pdata.export_options_title, options))
                    self.__export_plugins.append(exp)

        return self.__export_plugins

    def get_docgen_plugins(self):
        """
        Get the list of docgen plugins.

        :return: :class:`.DocGenPlugin` (a list of DocGenPlugin instances)
        """
        ## TODO: would it not be better to return list of plugindata, and only
        ##       import those docgen that will then actuallly be needed?
        ##       So, only do import when docgen.get_basedoc() is requested
        if not self.__docgen_plugins:
            # The modules still need to be imported
            hiddenplugins = config.get("plugin.hiddenplugins")
            for pdata in self.get_reg_docgens():
                if pdata.id in hiddenplugins:
                    continue
                mod = self.load_plugin(pdata)
                if mod:
                    oclass = None
                    if pdata.optionclass:
                        oclass = getattr(mod, pdata.optionclass)
                    dgp = DocGenPlugin(name=pdata.name,
                                       description=pdata.description,
                                       basedoc=getattr(mod, pdata.docclass),
                                       paper=pdata.paper,
                                       style=pdata.style,
                                       extension=pdata.extension,
                                       docoptclass=oclass,
                                       basedocname=pdata.docclass)
                    self.__docgen_plugins.append(dgp)

        return self.__docgen_plugins

    def get_docgen_names(self):
        """
        Get the list of docgen plugin names.

        :return: a list of :class:`.DocGenPlugin` names
        """
        if not self.__docgen_names:
            hiddenplugins = config.get("plugin.hiddenplugins")
            for pdata in self.get_reg_docgens():
                if pdata.id not in hiddenplugins:
                    self.__docgen_names.append(pdata.docclass)
        return self.__docgen_names

    def register_option(self, option, guioption):
        """
        Register an external option.

        Register a mapping from option to guioption for an option
        that is not native to Gramps but provided by the plugin writer.

        This should typically be called during initialisation of a
        :class:`.ReportOptions` class.

        :param option: the option class
        :type option: class that inherits from gen.plug.menu.Option
        :param guioption: the gui-option class
        :type guioption: class that inherits from Gtk.Widget.
        """
        self.__external_opt_dict[option] = guioption

    def __purge_failed(self):
        """
        Purge the failed plugins from the corresponding lists.
        """
        failed_module_names = [
            os.path.splitext(os.path.basename(filename))[0]
            for filename, msg, pdata in self.__failmsg_list
        ]

        self.__export_plugins[:] = [item for item in self.__export_plugins
                                    if item.get_module_name() not in failed_module_names][:]
        self.__import_plugins[:] = [item for item in self.__import_plugins
                                    if item.get_module_name() not in failed_module_names][:]
        self.__docgen_plugins[:] = [item for item in self.__docgen_plugins
                                    if item.get_module_name() not in failed_module_names][:]
|
sam-m888/gramps
|
gramps/gen/plug/_manager.py
|
Python
|
gpl-2.0
| 25,784
|
[
"Brian"
] |
1652b9b1e2717e9cb69ad507112f8c9df854910aa58d8d961640823376c697fe
|
# This plugin is adapted from the Python Console plugin and the IPython
# cookbook at:
# http://ipython.scipy.org/moin/Cookbook/EmbeddingInGTK
# Copyright (C) 2009-2010 Brian Parma
# Updated 2012 Brian Parma
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import logging
import sys
import site
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import GLib
from xl.nls import gettext as _
from xl import event
from xl import settings as xl_settings
from xl import providers
from xlgui.widgets import menu
from xlgui import guiutil
from . import ipconsoleprefs
from . import ipython_view as ip
# Default console font as a Pango font description string.
FONT = "Luxi Mono 10"
# Event name used for callbacks when an ipconsole option changes.
SETTINGS_STRING = 'plugin_ipconsole_option_set'
LOGGER = logging.getLogger(__name__)
class Quitter:
    """Exit helper mirroring Python 2.5's built-in quitter.

    IPython circumvents the builtin Quitter in a way that keeps Exaile
    from closing; this stand-in restores sane exit behaviour.
    """

    def __init__(self, exit_function, name):
        # Remember the shutdown hook and the display name used by repr().
        self.exit_function = exit_function
        self.name = name

    def __repr__(self):
        return f'Type {self.name}() to exit.'

    def __call__(self):
        # Run the supplied hook, restore the default site quitters, then
        # delegate to the (now restored) builtin exit.
        self.exit_function()
        site.setquit()
        exit()
class IPView(ip.IPythonView):
    '''Extend IPythonView to support closing with Ctrl+D'''

    # Cached style state. The *_str fields hold ready-to-use CSS fragments
    # and stay None until the corresponding option has been read once.
    __text_color = None
    __background_color = None
    __font = None
    __css_provider = None

    __text_color_str = None
    __background_color_str = None
    __font_str = None

    __iptheme = None

    def __init__(self, namespace):
        ip.IPythonView.__init__(self)
        # Re-style the view whenever an ipconsole preference changes.
        event.add_ui_callback(self.__on_option_set, SETTINGS_STRING)

        self.set_wrap_mode(Gtk.WrapMode.CHAR)

        self.updateNamespace(namespace)  # expose exaile (passed in)
        # prevent exit and quit - freezes window? does bad things
        self.updateNamespace({'exit': None, 'quit': None})

        style_context = self.get_style_context()
        self.__css_provider = Gtk.CssProvider()
        style_context.add_provider(
            self.__css_provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
        )

        # Trigger setup through options
        for option in ('text_color', 'background_color', 'font'):
            self.__on_option_set(
                None, xl_settings, 'plugin/ipconsole/{option}'.format(option=option)
            )

    def __on_option_set(self, _event, settings, option):
        # Translate a changed option into a CSS fragment and schedule a
        # CSS reload on the GLib main loop.
        if option == 'plugin/ipconsole/font':
            pango_font_str = settings.get_option(option, FONT)
            self.__font_str = guiutil.css_from_pango_font_description(pango_font_str)
            GLib.idle_add(self.__update_css)

        if option == 'plugin/ipconsole/text_color':
            rgba_str = settings.get_option(option, 'lavender')
            rgba = Gdk.RGBA()
            rgba.parse(rgba_str)
            self.__text_color_str = "color: " + guiutil.css_from_rgba_without_alpha(
                rgba
            )
            GLib.idle_add(self.__update_css)

        if option == 'plugin/ipconsole/background_color':
            rgba_str = settings.get_option(option, 'black')
            rgba = Gdk.RGBA()
            rgba.parse(rgba_str)
            self.__background_color_str = (
                "background-color: " + guiutil.css_from_rgba_without_alpha(rgba)
            )
            GLib.idle_add(self.__update_css)

    def __update_css(self):
        # Rebuild and apply the CSS once all three fragments exist.
        if (
            self.__text_color_str is None
            or self.__background_color_str is None
            or self.__font_str is None
        ):
            # early initialization state: not all properties have been initialized yet
            return False

        data_str = "text {%s; %s;} textview {%s;}" % (
            self.__background_color_str,
            self.__text_color_str,
            self.__font_str,
        )
        self.__css_provider.load_from_data(data_str.encode('utf-8'))
        # Returning False removes this idle callback after a single run.
        return False

    def onKeyPressExtend(self, key_event):
        # Give the base class first chance to consume the key press.
        if ip.IPythonView.onKeyPressExtend(self, key_event):
            return True

        if key_event.string == '\x04':  # ctrl+d
            self.destroy()
class IPythonConsoleWindow(Gtk.Window):
    """
    A Gtk Window with an embedded IPython Console.
    """

    # The embedded IPView console widget.
    __ipv = None

    def __init__(self, namespace):
        Gtk.Window.__init__(self)
        self.set_title(_("IPython Console - Exaile"))
        self.set_size_request(750, 550)
        self.set_resizable(True)

        self.__ipv = IPView(namespace)
        # Closing the console view (e.g. Ctrl+D) closes the window too.
        self.__ipv.connect('destroy', lambda *_widget: self.destroy())
        self.__ipv.updateNamespace({'self': self})  # Expose self to IPython

        # make it scrollable
        scrolled_window = Gtk.ScrolledWindow()
        scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        scrolled_window.add(self.__ipv)
        scrolled_window.show_all()
        self.add(scrolled_window)
        # React to preference changes (currently: window opacity).
        event.add_ui_callback(self.on_option_set, SETTINGS_STRING)

    def on_option_set(self, _event, settings, option):
        # Apply the opacity preference, converting percent (0-100) to 0..1.
        if option == 'plugin/ipconsole/opacity':
            if sys.platform.startswith("win32"):
                # Setting opacity on Windows crashes with segfault,
                # see https://bugzilla.gnome.org/show_bug.cgi?id=674449
                # Ignore this option.
                return
            value = settings.get_option(option, 80.0)
            value = value / 100
            if value > 1:
                value = 1
            self.set_opacity(value)
class IPConsolePlugin:
    """
    This class holds the IPConsole plugin itself
    """

    # The single console window, or None while it is closed.
    __console_window = None
    # Exaile application instance, captured in enable().
    __exaile = None

    def enable(self, exaile):
        """
        Called when plugin is enabled, or when exaile is loaded with the plugin
        on by default.
        """
        self.__exaile = exaile

    def on_gui_loaded(self):
        """
        Called when Exaile finished loading its GUI
        """
        # Trigger initial setup through options:
        if xl_settings.get_option('plugin/ipconsole/autostart', False):
            self.__show_console()

        # add menuitem to tools menu
        item = menu.simple_menu_item(
            'ipconsole',
            ['plugin-sep'],
            _('Show _IPython Console'),
            callback=lambda *_args: self.__show_console(),
        )
        providers.register('menubar-tools-menu', item)

    def teardown(self, _exaile):
        """
        Called when Exaile is shutting down
        """
        # if window is open, kill it
        if self.__console_window is not None:
            self.__console_window.destroy()

    def disable(self, exaile):
        """
        Called when the plugin is disabled
        """
        # Remove our menu entry, then shut the console down.
        for item in providers.get('menubar-tools-menu'):
            if item.name == 'ipconsole':
                providers.unregister('menubar-tools-menu', item)
                break
        self.teardown(exaile)

    def __show_console(self):
        """
        Display window when the menu item is clicked.
        """
        if self.__console_window is None:
            import xl
            import xlgui

            self.__console_window = IPythonConsoleWindow(
                {'exaile': self.__exaile, 'xl': xl, 'xlgui': xlgui}
            )
            self.__console_window.connect('destroy', self.__console_destroyed)

        self.__console_window.present()
        # Apply the current opacity setting to the (possibly new) window.
        self.__console_window.on_option_set(
            None, xl_settings, 'plugin/ipconsole/opacity'
        )

    def __console_destroyed(self, *_args):
        """
        Called when the window is closed.
        """
        self.__console_window = None

    def get_preferences_pane(self):
        """
        Called by Exaile when ipconsole preferences pane should be shown
        """
        return ipconsoleprefs


# NOTE(review): module-level name that Exaile's plugin loader appears to
# look up as the plugin entry point — confirm against the plugin API.
plugin_class = IPConsolePlugin
|
exaile/exaile
|
plugins/ipconsole/__init__.py
|
Python
|
gpl-2.0
| 8,594
|
[
"Brian"
] |
5f2e0dc5acca33ee5802b8ba00d180eae325eac8f8201cb696c38a34df4f7914
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RA4base(RPackage):
    """Automated Affymetrix Array Analysis."""

    homepage = "https://www.bioconductor.org/packages/a4Base/"
    # Bug fix: the URL string previously contained stray embedded quotes
    # ("'https://...'"), which would break any fetch attempt.
    url = "https://git.bioconductor.org/packages/a4Base"
    list_url = homepage

    version('1.24.0', git='https://git.bioconductor.org/packages/a4Base', commit='f674afe424a508df2c8ee6c87a06fbd4aa410ef6')

    depends_on('r@3.4.0:3.4.9', when='@1.24.0')
    depends_on('r-biobase', type=('build', 'run'))
    depends_on('r-annotationdbi', type=('build', 'run'))
    depends_on('r-annaffy', type=('build', 'run'))
    depends_on('r-mpm', type=('build', 'run'))
    depends_on('r-genefilter', type=('build', 'run'))
    depends_on('r-limma', type=('build', 'run'))
    depends_on('r-multtest', type=('build', 'run'))
    depends_on('r-glmnet', type=('build', 'run'))
    depends_on('r-a4preproc', type=('build', 'run'))
    depends_on('r-a4core', type=('build', 'run'))
    depends_on('r-gplots', type=('build', 'run'))
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/r-a4base/package.py
|
Python
|
lgpl-2.1
| 2,219
|
[
"Bioconductor"
] |
6106d1d50293d81abbf0755f9a4f2f39e641a934062868fd8ad04428bcd72966
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 25 14:02:59 2015
by LW March 2015
set of utility functions for beamline alingment and commissioning
v 0.0.1 (this version): might have created a typo in E-calibration!!!
added dcm_roll for calculating DCM Roll correction
"""
import datetime as dtt
import time
import numpy as np
from PIL import Image
# from databroker import db, get_fields, get_images, get_table
# NOTE(review): `db` is expected to be injected by the IPython profile
# namespace; these aliases fail at import time if it is absent — confirm.
get_fields = db.get_fields
get_images = db.get_images
get_table = db.get_table
# Bug fix: was "from matplotlib import pyplot as pltfrom" (typo); every
# function below refers to the module as `plt`.
from matplotlib import pyplot as plt
from lmfit import Model
from lmfit import minimize, Parameters, Parameter, report_fit
from scipy.special import erf
import itertools

# Marker/color pools for plot1D; repeated 100x so the cycles below are long
# enough for many successive curves.
markers = ['o', 'H', 'D', 'v', '^', '<', '>', 'p',
           's', 'h', '*', 'd',
           '$I$','$L$', '$O$','$V$','$E$',
           '$c$', '$h$','$x$','$b$','$e$','$a$','$m$','$l$','$i$','$n$', '$e$',
           '8', '1', '3', '2', '4', '+', 'x', '_', '|', ',', '1',]
markers = np.array( markers *100 )
# NOTE(review): 'darksage' below is not a valid matplotlib color name in
# recent matplotlib versions — verify before relying on the full cycle.
colors = np.array( ['darkorange', 'mediumturquoise', 'seashell', 'mediumaquamarine', 'darkblue',
                    'yellowgreen', 'mintcream', 'royalblue', 'springgreen', 'slategray',
                    'yellow', 'slateblue', 'darkslateblue', 'papayawhip', 'bisque', 'firebrick',
                    'burlywood', 'dodgerblue', 'dimgrey', 'chartreuse', 'deepskyblue', 'honeydew',
                    'orchid', 'teal', 'steelblue', 'limegreen', 'antiquewhite',
                    'linen', 'saddlebrown', 'grey', 'khaki', 'hotpink', 'darkslategray',
                    'forestgreen', 'lightsalmon', 'turquoise', 'navajowhite',
                    'darkgrey', 'darkkhaki', 'slategrey', 'indigo',
                    'darkolivegreen', 'aquamarine', 'moccasin', 'beige', 'ivory', 'olivedrab',
                    'whitesmoke', 'paleturquoise', 'blueviolet', 'tomato', 'aqua', 'palegoldenrod',
                    'cornsilk', 'navy', 'mediumvioletred', 'palevioletred', 'aliceblue', 'azure',
                    'orangered', 'lightgrey', 'lightpink', 'orange', 'wheat',
                    'darkorchid', 'mediumslateblue', 'lightslategray', 'green', 'lawngreen',
                    'mediumseagreen', 'darksalmon', 'pink', 'oldlace', 'sienna', 'dimgray', 'fuchsia',
                    'lemonchiffon', 'maroon', 'salmon', 'gainsboro', 'indianred', 'crimson',
                    'mistyrose', 'lightblue', 'darkgreen', 'lightgreen', 'deeppink',
                    'palegreen', 'thistle', 'lightcoral', 'lightgray', 'lightskyblue', 'mediumspringgreen',
                    'mediumblue', 'peru', 'lightgoldenrodyellow', 'darkseagreen', 'mediumorchid',
                    'coral', 'lightyellow', 'chocolate', 'lavenderblush', 'darkred', 'lightseagreen',
                    'darkviolet', 'lightcyan', 'cadetblue', 'blanchedalmond', 'midnightblue',
                    'darksage', 'lightsteelblue', 'darkcyan', 'floralwhite', 'darkgray',
                    'lavender', 'sandybrown', 'cornflowerblue', 'gray',
                    'mediumpurple', 'lightslategrey', 'seagreen',
                    'silver', 'darkmagenta', 'darkslategrey', 'darkgoldenrod', 'rosybrown',
                    'goldenrod', 'darkturquoise', 'plum',
                    'purple', 'olive', 'gold','powderblue', 'peachpuff','violet', 'lime', 'greenyellow', 'tan', 'skyblue',
                    'magenta', 'black', 'brown', 'green', 'cyan', 'red','blue'] *100 )
colors = colors[::-1]
colors_ = itertools.cycle( colors )
#colors_ = itertools.cycle(sorted_colors_ )
markers_ = itertools.cycle( markers )
def plot1D(y, x=None, yerr=None, ax=None, return_fig=False, ls='-',
           legend_size=None, lw=None, *argv, **kwargs):
    """a simple function to plot two-column data by using matplotlib.plot

    Recognized keyword arguments: legend, logx, logy, logxy, marker/m,
    color/c, xlim, ylim, xlabel, ylabel, title, save, path.

    Parameters
    ----------
    y: column-y
    x: column-x, by default x=None, the plot will use index of y as x-axis
    yerr: optional error bars for y (switches to ax.errorbar)
    ax: an existing matplotlib Axes to draw on; a new figure is created
        when None
    return_fig: if True, return the Figure object

    Returns
    -------
    the Figure when return_fig is True, otherwise None
    """
    if ax is None:
        fig, ax = plt.subplots()
    else:
        # Bug fix: `fig` used to be undefined here, so the save branch below
        # raised NameError whenever a pre-existing Axes was passed in.
        fig = ax.figure
    legend = kwargs.get('legend', ' ')
    logx = kwargs.get('logx', False)
    logy = kwargs.get('logy', False)
    logxy = kwargs.get('logxy', False)
    if logx and logy:
        logxy = True
    # marker/color fall back to the module-level cycles; only advance a cycle
    # when the caller did not specify a value (preserves cycle state).
    if 'marker' in kwargs:
        marker = kwargs['marker']
    elif 'm' in kwargs:
        marker = kwargs['m']
    else:
        marker = next(markers_)
    if 'color' in kwargs:
        color = kwargs['color']
    elif 'c' in kwargs:
        color = kwargs['c']
    else:
        color = next(colors_)
    if x is None:
        x = range(len(y))
    if yerr is None:
        ax.plot(x, y, marker=marker, color=color, ls=ls, label=legend, lw=lw)
    else:
        ax.errorbar(x, y, yerr, marker=marker, color=color, ls=ls,
                    label=legend, lw=lw)
    if logx:
        ax.set_xscale('log')
    if logy:
        ax.set_yscale('log')
    if logxy:
        ax.set_xscale('log')
        ax.set_yscale('log')
    if 'xlim' in kwargs:
        ax.set_xlim(kwargs['xlim'])
    if 'ylim' in kwargs:
        ax.set_ylim(kwargs['ylim'])
    if 'xlabel' in kwargs:
        ax.set_xlabel(kwargs['xlabel'])
    if 'ylabel' in kwargs:
        ax.set_ylabel(kwargs['ylabel'])
    title = kwargs.get('title', 'plot')
    ax.set_title(title)
    ax.legend(loc='best', fontsize=legend_size)
    if kwargs.get('save'):
        # 'path' is required when save=True
        fp = kwargs['path'] + '%s' % title + '.png'
        plt.savefig(fp, dpi=fig.dpi)
    if return_fig:
        return fig
def get_data(scan_id, field='ivu_gap', intensity_field='elm_sum_all', det=None, debug=False):
    """Get data from the scan stored in the table.
    from Maksim

    :param scan_id: scan id from bluesky.
    :param field: visualize the intensity vs. this field.
    :param intensity_field: the name of the intensity field.
    :param det: the name of the detector.
    :param debug: a debug flag.
    :return: a tuple of X, Y and timestamp values.
    """
    scan, timestamp = get_scan(scan_id)
    if det:
        last_image = get_images(scan, det)[-1]
        if debug:
            print(last_image)
    table = get_table(scan)
    fields = get_fields(scan)
    if debug:
        print(table)
        print(fields)
    return table[field], table[intensity_field], timestamp
def get_scan(scan_id, debug=False):
    """Get scan from databroker using provided scan id.
    from Maksim

    :param scan_id: scan id from bluesky.
    :param debug: a debug flag.
    :return: a tuple of scan and timestamp values.
    """
    header = db[scan_id]
    # timestamp formatting disabled: it conflicted with another macro
    timestamp = 'N.A. conflicting with other macro'
    if debug:
        print(header)
        print('Scan ID: {}  Timestamp: {}'.format(scan_id, timestamp))
    return header, timestamp
def ps(uid='-1', det='default', suffix='default', shift=.5, logplot='off'):
    '''
    function to determine statistic on line profile (assumes either peak or erf-profile)
    calling sequence: uid='-1',det='default',suffix='default',shift=.5)
    det='default' -> get detector from metadata, otherwise: specify, e.g. det='eiger4m_single'
    suffix='default' -> _stats1_total / _sum_all, otherwise: specify, e.g. suffix='_stats2_total'
    shift: scale for peak presence (0.5 -> peak has to be taller factor 2 above background)
    logplot: 'on' plots on a semilog-y scale

    Results are attached to the function object after the call:
    ps.peak, ps.com, ps.cen, ps.fwhm
    '''
    if uid == '-1':
        uid = -1
    # Work out which data column holds the intensity signal:
    if det == 'default':
        if db[uid].start.detectors[0] == 'elm' and suffix == 'default':
            intensity_field = 'elm_sum_all'
        elif db[uid].start.detectors[0] == 'elm':
            intensity_field = 'elm' + suffix
        elif suffix == 'default':
            intensity_field = db[uid].start.detectors[0] + '_stats1_total'
        else:
            intensity_field = db[uid].start.detectors[0] + suffix
    else:
        if det == 'elm' and suffix == 'default':
            intensity_field = 'elm_sum_all'
        elif det == 'elm':
            intensity_field = 'elm' + suffix
        elif suffix == 'default':
            intensity_field = det + '_stats1_total'
        else:
            intensity_field = det + suffix
    field = db[uid].start.motors[0]
    #field='dcm_b';intensity_field='elm_sum_all'
    [x, y, t] = get_data(uid, field=field, intensity_field=intensity_field, det=None, debug=False)  # need to re-write way to get data
    x = np.array(x)
    y = np.array(y)

    PEAK = x[np.argmax(y)]
    PEAK_y = np.max(y)
    COM = np.sum(x * y) / np.sum(y)

    ### from Maksim: assume this is a peak profile:
    def is_positive(num):
        return num > 0

    # Normalize and shift so that zero crossings of ym mark the FWHM points:
    ym = (y - np.min(y)) / (np.max(y) - np.min(y)) - shift  # roots are at Y=0
    positive = is_positive(ym[0])
    list_of_roots = []
    for i in range(len(y)):
        current_positive = is_positive(ym[i])
        if current_positive != positive:
            # linear interpolation between the two samples bracketing the root
            list_of_roots.append(x[i - 1] + (x[i] - x[i - 1]) / (abs(ym[i]) + abs(ym[i - 1])) * abs(ym[i - 1]))
            positive = not positive
    if len(list_of_roots) >= 2:
        FWHM = abs(list_of_roots[-1] - list_of_roots[0])
        CEN = list_of_roots[0] + 0.5 * (list_of_roots[1] - list_of_roots[0])
        ps.fwhm = FWHM
        ps.cen = CEN
    else:  # ok, maybe it's a step function..
        print('no peak...trying step function...')
        ym = ym + shift

        def err_func(x, x0, k=2, A=1, base=0):  #### erf fit from Yugang
            return base - A * erf(k * (x - x0))
        mod = Model(err_func)
        ### estimate starting values:
        x0 = np.mean(x)
        #k=0.1*(np.max(x)-np.min(x))
        pars = mod.make_params(x0=x0, k=2, A=1., base=0.)
        result = mod.fit(ym, pars, x=x)
        CEN = result.best_values['x0']
        # NOTE(review): this stores the erf slope parameter 'k', not a true
        # FWHM — kept for backward compatibility; confirm downstream usage.
        FWHM = result.best_values['k']
        ps.cen = CEN
        ps.fwhm = FWHM

    ### re-plot results.
    # Bug fix: plt.hold() was removed in matplotlib 3.0 and raised
    # AttributeError here; successive plot calls accumulate by default, so the
    # calls were simply dropped. The log/linear branches are also deduplicated.
    plt.close(999)
    plt.figure(999)
    plot = plt.semilogy if logplot == 'on' else plt.plot
    plot([PEAK, PEAK], [np.min(y), np.max(y)], 'k--', label='PEAK')
    plot([CEN, CEN], [np.min(y), np.max(y)], 'r-.', label='CEN')
    plot([COM, COM], [np.min(y), np.max(y)], 'g.-.', label='COM')
    plot(x, y, 'bo-')
    plt.xlabel(field)
    plt.ylabel(intensity_field)
    plt.legend()
    plt.title('uid: '+str(uid)+' @ '+str(t)+'\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9)
    plt.show()

    ### assign values of interest as function attributes:
    ps.peak = PEAK
    ps.com = COM
def fit_gisaxs_height_scan_profile(uid='-1', x0=0, k=2, A=1, base=0,
                                   motor='diff_yh', det='eiger4m_single_stats1_total'):
    '''Fit a GiSAXS scan (diff.yh scan) by an error function.

    The scan data is first normalized as (y - y.min()) / (y.max() - y.min()),
    then fit with base - A * erf(k*(x-x0)), where erf is
    2/sqrt(pi)*integral(exp(-t**2), t=0..z) from scipy.special.

    Parameters:
        x0: the fit center, by default 0
        k: the stretch factor, by default 2
        A: amplitude of the scan, default 1
        base: baseline of the scan, default 0
        uid: the uid of the scan, by default -1, i.e. the last scan
        motor: the scan motor, by default 'diff_yh'
        det: detector, by default 'eiger4m_single_stats1_total'
    return:
        the plot of scan and fitted curve
        the fitted x0
    '''
    from lmfit import Model
    from scipy.special import erf

    # The first parameter must be named `x`: lmfit takes the independent
    # variable name from the function signature.
    def err_func(x, x0, k=2, A=1, base=0):
        return base - A * erf(k * (x - x0))

    model = Model(err_func)
    params = model.make_params(x0=x0, k=k, A=A, base=base)
    scan_id = -1 if uid == '-1' else uid
    xdata = np.array(get_table(db[scan_id], fields=[motor], )[motor])
    ydata = np.array(get_table(db[scan_id], fields=[det], )[det])
    ynorm = (ydata - ydata.min()) / (ydata.max() - ydata.min())
    result = model.fit(ynorm, params, x=xdata)
    fig, ax = plt.subplots()
    plot1D(x=xdata, y=ynorm, m='o', c='k', ls='', legend='scan', ax=ax,)
    plot1D(x=xdata, y=result.best_fit, m='', c='r', ls='-', legend='fit-x0=%s'%result.best_values['x0'], ax=ax,)
    return result.best_values['x0']
def trans_data_to_pd(data, label=None, dtype='array'):
    '''
    convert data into pandas.DataFrame
    Input:
        data: list or np.array
        label: the column labels of the data; defaults to data0, data1, ...
        dtype: 'list' (data is a list of equal-length columns, will be
               transposed) or 'array' (data is already row-major)
    Output:
        a pandas.DataFrame
    Raises:
        ValueError if dtype is neither 'list' nor 'array'
    '''
    # lists a [ list1, list2...]; all the lists have the same length
    from numpy import arange, array
    import pandas as pd
    if dtype == 'list':
        data = array(data).T
    elif dtype == 'array':
        data = array(data)
    else:
        # Previously only printed a warning and then crashed later on
        # data.shape with a confusing error; fail fast instead.
        raise ValueError("Wrong data type! Only 'list' and 'array' are supported")
    N, M = data.shape
    index = arange(N)
    if label is None:
        label = ['data%s' % i for i in range(M)]
    return pd.DataFrame(data, index=index, columns=label)
def export_scan_scalar(uid, x='dcm_b', y=['xray_eye1_stats1_total'],
                       path='/XF11ID/analysis/2016_3/commissioning/Results/exported/'):
    '''export uid data to a txt file
    uid: unique scan id
    x: the x-col
    y: the y-cols
    path: save path
    Example:
    data = export_scan_scalar( uid, x='dcm_b', y= ['xray_eye1_stats1_total'],
                     path='/XF11ID/analysis/2016_3/commissioning/Results/exported/' )
    A plot for the data:
    d.plot(x='dcm_b', y = 'xray_eye1_stats1_total', marker='o', ls='-', color='r')
    '''
    from databroker import DataBroker as db, get_images, get_table, get_events, get_fields
    import numpy as np
    hdr = db[uid]
    print(get_fields(hdr))
    table = get_table(db[uid])
    xcol = table[x]
    # Assemble [x, y1, y2, ...] as columns of one array, then label it.
    exported = np.zeros([len(xcol), len(y) + 1])
    exported[:, 0] = xcol
    for col, yname in enumerate(y, start=1):
        exported[:, col] = table[yname]
    exported = trans_data_to_pd(exported, label=[x] + list(y))
    fp = path + 'uid=%s.csv'%uid
    exported.to_csv(fp)
    print('The data was saved in %s'%fp)
    return exported
import bluesky.plans as bp
############
##################
####
def plot_reflectivity(db_si, db_rh):
    """
    by LW 10/04/2016
    plot measured reflectivity R_Si / R_Rh against theoretical curve for 0.18deg incident angle
    calling sequence: plot_reflectivity(db_si,db_rh)
    db_si: data brooker object for reflectivity scan (E_scan) from Si layer; db_rh: same for Rh layer
    Notes: 1) assumes E_scan was used to obtain the data
    2) same scan range and number of data points for both scans (does not interpolate to common x-grid)
    3) use Ti foil in BPM for scan on elm detector
    """
    si_dat = get_table(db_si)
    rh_dat = get_table(db_rh)
    en_r = xf.get_EBragg('Si111cryo', -si_dat.dcm_b)
    plt.figure(19)
    plt.semilogy(en_r, si_dat.elm_sum_all/rh_dat.elm_sum_all, label='measured')
    # Bug fix: plt.hold(True) was removed in matplotlib 3.0 (it raised
    # AttributeError); repeated plot calls accumulate by default.
    # Theoretical reflectivities for 0.18 deg incidence (tabulated files):
    r_eng = np.array(np.loadtxt("/home/xf11id/Downloads/R_Rh_0p180.txt"))[:, 0]/1e3
    rsi_0p18 = np.array(np.loadtxt("/home/xf11id/Downloads/R_Si_0p180.txt"))[:, 1]
    rrh_0p18 = np.array(np.loadtxt("/home/xf11id/Downloads/R_Rh_0p180.txt"))[:, 1]
    plt.semilogy(r_eng, rsi_0p18/rrh_0p18, 'r--', label="calc 0.18 deg")
    plt.xlabel('E [keV]')
    plt.ylabel('R_Si / R_Rh')
    plt.grid()
    plt.legend()
def E_calibration(file, Edge='Cu', xtal='Si111cryo', B_off=0):
    """
    by LW 3/25/2015
    function to read energy scan file and determine offset correction
    calling sequence: E_calibration(file,Edge='Cu',xtal='Si111cryo',B_off=0)
    file: path/filename of experimental data; 'ia' opens interactive dialog; file can be databrooker object, e.g. file=db[-1] t process data from last scan
    Edge: elment used for calibration
    xtal: monochromator crystal under calibration
    B_off (optional): apply offset to Bragg angle data
    currently there is no check on input parameters!
    """
    # read the data file
    import csv
    import numpy as np
    import matplotlib.pyplot as plt
    # Bug fix: databroker_object was previously only set in the databroker
    # branch, so the `if databroker_object != 1` test below raised NameError
    # for file-path input.
    databroker_object = 0
    if file == 'ia':
        # Interactive file dialog is unavailable (Tkinter is missing from the
        # current environment) -> fail loudly instead of crashing later on an
        # undefined file_path.
        raise CHX_utilities_Exception(
            'error: interactive file dialog ("ia") is currently unavailable; '
            'pass a file path or a databroker header instead')
    elif isinstance(file, str):
        file_path = file
        description = file_path   # bug fix: was misspelled "descritpion"
    elif 'start' in file.keys():  # databroker header object
        databroker_object = 1
        description = 'scan # ', file.start['scan_id'], ' uid: ', file.start['uid'][:10]
    plt.close("all")
    # Absorption-edge energies [keV] available for calibration:
    Edge_data = {'Cu': 8.979, 'Ti': 4.966}
    if databroker_object != 1:
        Bragg = []
        Gap = []
        Intensity = []
        # Bug fix for Python 3: text-mode open + next() (was 'rb' and .next())
        with open(file_path, 'r', newline='') as csvfile:
            filereader = csv.reader(csvfile, delimiter=' ')
            next(filereader)   # skip header lines
            next(filereader)
            next(filereader)
            for row in filereader:   # read data
                try: Bragg.append(float(row[2]))
                except: print('could not convert: ',row[2])
                try: Gap.append(float(row[5]))
                except: print('could not convert: ',row[5])
                try: Intensity.append(float(row[7]))
                except: print('could not convert: ',row[7])  # bug fix: reported row[8]
    else:
        data = get_table(file)
        Bragg = data.dcm_b[1:]   # first data point is often "wrong", so don't use
        #Gap = data.SR:C11-ID:G1{IVU20:1_readback[1:] name is messed up in databroker -> currently don't use gap
        Intensity = data.elm_sum_all[1:]
    # Convert to CHX coordinates and apply the optional Bragg offset:
    B = np.array(Bragg)*-1.0+B_off
    #G=np.array(Gap[0:len(B)]) # not currently used, but converted for future use
    Int = np.array(Intensity[0:len(B)])
    # normalize and remove background:
    Int = Int-min(Int)
    Int = Int/max(Int)
    plt.figure(1)
    plt.plot(B,Int,'ko-',label='experimental data')
    plt.plot([xf.get_Bragg(xtal,Edge_data[Edge])[0],xf.get_Bragg(xtal,Edge_data[Edge])[0]],[0,1],'r--',label='Edge for: '+Edge)
    plt.legend(loc='best')
    plt.xlabel(r'$\theta_B$ [deg.]')
    plt.ylabel('intensity')
    plt.title(['Energy Calibration using: ',description])
    plt.grid()
    plt.figure(2)
    Eexp = xf.get_EBragg(xtal,B)
    plt.plot(Eexp,Int,'ko-',label='experimental data')
    plt.plot([Edge_data[Edge],Edge_data[Edge]],[0,1],'r--',label='Edge for: '+Edge)
    plt.legend(loc='best')
    plt.xlabel('E [keV.]')
    plt.ylabel('intensity')
    plt.title(['Energy Calibration using: ',description])
    plt.grid()
    # calculate derivative and analyze:
    Bragg_Edge = xf.get_Bragg(xtal,Edge_data[Edge])[0]
    plt.figure(3)
    diffdat = np.diff(Int)
    plt.plot(B[0:len(diffdat)],diffdat,'ko-',label='diff experimental data')
    plt.plot([Bragg_Edge,Bragg_Edge],[min(diffdat),max(diffdat)],'r--',label='Edge for: '+Edge)
    plt.legend(loc='best')
    plt.xlabel(r'$\theta_B$ [deg.]')
    plt.ylabel('diff(int)')
    plt.title(['Energy Calibration using: ',description])
    plt.grid()
    plt.figure(4)
    plt.plot(xf.get_EBragg(xtal,B[0:len(diffdat)]),diffdat,'ko-',label='diff experimental data')
    plt.plot([Edge_data[Edge],Edge_data[Edge]],[min(diffdat),max(diffdat)],'r--',label='Edge for: '+Edge)
    plt.legend(loc='best')
    plt.xlabel('E [keV.]')
    plt.ylabel('diff(int)')
    plt.title(['Energy Calibration using: ',description])
    plt.grid()
    # Steepest slope of the edge (max of derivative) = inflection point:
    edge_index = np.argmax(diffdat)
    B_edge = xf.get_Bragg(xtal,Edge_data[Edge])[0]
    print('')
    print('Energy calibration for: ',description)
    print('Edge used for calibration: ',Edge)
    print('Crystal used for calibration: ',xtal)
    print('Bragg angle offset: ', B_edge-B[edge_index],'deg. (CHX coordinate system: ',-(B_edge-B[edge_index]),'deg.)')
    print('=> move Bragg to ',-B[edge_index],'deg. and set value to ',-Bragg_Edge,'deg.')
    print( 'Energy offset: ',Eexp[edge_index]-Edge_data[Edge],' keV')
def dcm_roll(Bragg, offset, distance, offmode='mm', pixsize=5.0):
    """
    by LW 03/27/2015
    function to calculate Roll correction on the DCM
    calling sequence: dcm_roll(Bragg,offset,distance,offmode='mm',pixsize=5.0)
    Bragg: set of Bragg angles
    offset: set of corresponding offsets
    offmode: units of offsets = mm or pixel (default:'mm')
    pixsize: pixel size for offset conversion to mm, if offsets are given in pixels
    default is 5um (pixsize is ignored, if offmode is 'mm')
    distance: DCM center of 1st xtal to diagnostic/slit [mm]
    preset distances available: 'dcm_bpm',dcm_mbs', 'dcm-bds', 'dcm_sample'
    """
    import numpy as np
    from scipy import optimize
    from matplotlib import pyplot as plt
    Bragg = np.array(Bragg)
    if offmode == 'mm':
        offset = np.array(offset)
    elif offmode == 'pixel':
        offset = np.array(offset)*pixsize/1000.0   # convert pixels -> mm
    else: raise CHX_utilities_Exception('Eror: offmode must be either "mm" or "pixel"')
    if distance == 'dcm_bpm':
        d = 3000.0   # distance dcm-bpm in mm
    elif distance == 'dcm_mbs':
        d = 2697.6   # distance dcm-mbs in mm
    elif distance == 'dcm_sample':
        d = 16200    # distance dcm-sample in mm
    elif distance == 'dcm_bds':
        d = 15500    # distance dcm-bds in mm
    else:
        try:
            d = float(distance)
        except:
            raise CHX_utilities_Exception('Eror: distance must be a recognized string or numerical value')
    # data fitting: offset(theta) = x0 + 2*d*dPhi*sin(theta)
    fitfunc = lambda p, x: p[0]+2*d*p[1]*np.sin(x/180.*np.pi)   # Target function
    # Bug fix: errfunc previously evaluated fitfunc(p, Bragg), silently
    # ignoring its x argument; it now uses x as intended.
    errfunc = lambda p, x, y: fitfunc(p, x) - y   # Distance to the target function
    p0 = [np.mean(offset), -.5]   # Initial guess for the parameters
    p1, success = optimize.leastsq(errfunc, p0[:], args=(Bragg, offset))
    # plotting the result:
    plt.close(1)
    plt.figure(1)
    B = np.linspace(Bragg.min(), Bragg.max(), 100)
    plt.plot(Bragg,offset,'ro',label='measured offset')
    plt.plot(B,fitfunc(p1,B),'k-',label=r'$x_o$+2*D*$\Delta$$\Phi$*sin($\theta_B$)')
    plt.legend(loc='best')
    plt.ylabel('beam offset [mm]')
    plt.xlabel('Bragg angle [deg.]')
    print('x_0= ',p1[0],'mm')
    print('\Delta \Phi= ',p1[1]*180.0/np.pi,'deg')
def get_ID_calibration_dan(gapstart,gapstop,gapstep=.2,gapoff=0):
    """
    by LW 04/20/2015
    function to automatically take a ID calibration curve_fit
    calling sequence: get_ID_calibration(gapstart,gapstop,gapstep=.2,gapoff=0)
    gapstart: minimum gap used in calibration (if <5.2, value will be set to 5.2)
    gapstop: maximum gap used in calibration
    gapstep: size of steps between two gap points
    gapoff: offset applied to calculation gap vs. energy from xfuncs.get_Es(gap-gapoff)
    thermal management of Bragg motor is automatic, waiting for cooling <80C between Bragg scans
    writes outputfile with fitted value for the center of the Bragg scan to: '/home/xf11id/Repos/chxtools/chxtools/X-ray_database/
    changes 03/18/2016: made compatible with python V3 and latest versio of bluesky (working on it!!!)

    NOTE(review): this variant is a bluesky plan (uses ``yield from``) and must
    be run through the RunEngine. It relies on beamline globals (xf, detselect,
    xray_eye1, hdm, foil_y, dcm, ivu_gap, ascan, db, get_table, gauss, bp)
    defined elsewhere in the profile — confirm they are in scope before use.
    """
    import numpy as np
    #import xfuncs as xf
    #from dataportal import DataBroker as db, StepScan as ss, DataMuxer as dm
    import time
    from epics import caput, caget
    from matplotlib import pyplot as plt
    from scipy.optimize import curve_fit
    gaps = np.arange(gapstart, gapstop, gapstep) - gapoff # not sure this should be '+' or '-' ...
    print('ID calibration will contain the following gaps [mm]: ',gaps)
    xtal_map = {1: 'Si111cryo', 2: 'Si220cryo'}
    pos_sts_pv = 'XF:11IDA-OP{Mono:DCM-Ax:X}Pos-Sts'
    try:
        xtal = xtal_map[caget(pos_sts_pv)]
    except KeyError:
        raise CHX_utilities_Exception('error: trying to do ID gap calibration with no crystal in the beam')
    print('using', xtal, 'for ID gap calibration')
    # create file for writing calibration data:
    fn='id_CHX_IVU20_'+str(time.strftime("%m"))+str(time.strftime("%d"))+str(time.strftime("%Y"))+'.dat'
    fpath='/tmp/'
    # fpath='/home/xf11id/Repos/chxtools/chxtools/X-ray_database/'
    try:
        outFile = open(fpath+fn, 'w')
        outFile.write('% data from measurements '+str(time.strftime("%D"))+'\n')
        outFile.write('% K colkumn is a placeholder! \n')
        outFile.write('% ID gap [mm] K E_1 [keV] \n')
        outFile.close()
        print('successfully created outputfile: ',fpath+fn)
    except:
        raise CHX_utilities_Exception('error: could not create output file')
    ### do the scanning and data fitting, file writing,....
    t_adjust=0
    center=[]
    E1=[]
    realgap=[]
    detselect(xray_eye1)
    print(gaps)
    MIN_GAP = 5.2
    for i in gaps:
        # clamp the gap at the mechanical minimum and guess the Bragg angle
        # for the first harmonic at this gap:
        if i >= MIN_GAP:
            B_guess=-1.0*xf.get_Bragg(xtal,xf.get_Es(i+gapoff,5)[1])[0]
        else:
            i = MIN_GAP
            B_guess=-1.0*xf.get_Bragg(xtal,xf.get_Es(i,5)[1])[0]
        if i > 8 and t_adjust == 0:    # adjust acquistion time once while opening the gap (could write something more intelligent in the long run...)
            exptime=caget('XF:11IDA-BI{Bpm:1-Cam:1}cam1:AcquireTime')
            caput('XF:11IDA-BI{Bpm:1-Cam:1}cam1:AcquireTime',2*exptime)
            t_adjust = 1
        print('initial guess: Bragg= ',B_guess,' deg. ID gap = ',i,' mm')
        es = xf.get_Es(i, 5)[1]
        mirror_stripe_pos = round(caget('XF:11IDA-OP{Mir:HDM-Ax:Y}Mtr.VAL'),1)
        SI_STRIPE = -7.5
        RH_STRIPE = 7.5
        # pick the HDM mirror stripe according to the photon energy:
        if es < 9.5:
            stripe = SI_STRIPE
        elif es >= 9.5:
            stripe = RH_STRIPE
        yield from bp.abs_set(hdm.y, stripe)
        yield from bp.abs_set(foil_y, 0)  # Put YAG in beam.
        print('moving DCM Bragg angle to:', B_guess ,'deg and ID gap to', i, 'mm')
        yield from bp.abs_set(dcm.b, B_guess)
        yield from bp.abs_set(ivu_gap,i)
        print('hurray, made it up to here!')
        print('about to collect data')
        yield from ascan(dcm.b, float(B_guess-.4), float(B_guess+.4), 60,
                         md={'plan_name': 'ID_calibration',
                             'mirror_stripe': stripe})
        header = db[-1]    #retrive the data (first data point is often "wrong", so don't use
        data = get_table(header)
        B = data.dcm_b[2:]
        intdat = data.xray_eye1_stats1_total[2:]
        B=np.array(B)
        intdat=np.array(intdat)
        A=np.max(intdat)    # initial parameter guess and fitting
        xc=B[np.argmax(intdat)]
        w=.2
        yo=np.mean(intdat)
        p0=[yo,A,xc,w]
        print('initial guess for fitting: ',p0)
        try:
            # NOTE(review): `gauss` is expected to be defined elsewhere in the
            # profile — confirm before running.
            coeff,var_matrix = curve_fit(gauss,B,intdat,p0=p0)
            center.append(coeff[2])
            E1.append(xf.get_EBragg(xtal,-coeff[2])/5.0)
            realgap.append(caget('SR:C11-ID:G1{IVU20:1-LEnc}Gap'))
            #   # append data file by i, 1 & xf.get_EBragg(xtal,-coeff[2]/5.0):
            with open(fpath+fn, "a") as myfile:
                myfile.write(str(caget('SR:C11-ID:G1{IVU20:1-LEnc}Gap'))+' 1.0 '+str(float(xf.get_EBragg(xtal,-coeff[2])/5.0))+'\n')
            print('added data point: ',caget('SR:C11-ID:G1{IVU20:1-LEnc}Gap'),' ',1.0,' ',str(float(xf.get_EBragg(xtal,-coeff[2])/5.0)))
        except: print('could not evaluate data point for ID gap = ',i,' mm...data point skipped!')
        # wait for the Bragg axis to cool below 80 C before the next scan:
        while caget('XF:11IDA-OP{Mono:DCM-Ax:Bragg}T-I') > 80:
            time.sleep(30)
            print('DCM Bragg axis too hot (>80C)...waiting...')
    plt.close(234)
    plt.figure(234)
    plt.plot(E1,realgap,'ro-')
    plt.xlabel('E_1 [keV]')
    plt.ylabel('ID gap [mm]')
    plt.title('ID gap calibration in file: '+fpath+fn,size=12)
    plt.grid()
def get_ID_calibration(gapstart,gapstop,gapstep=.2,gapoff=0):
    """
    by LW 04/20/2015
    function to automatically take a ID calibration curve_fit
    calling sequence: get_ID_calibration(gapstart,gapstop,gapstep=.2,gapoff=0)
    gapstart: minimum gap used in calibration (if <5.2, value will be set to 5.2)
    gapstop: maximum gap used in calibration
    gapstep: size of steps between two gap points
    gapoff: offset applied to calculation gap vs. energy from xfuncs.get_Es(gap-gapoff)
    thermal management of Bragg motor is automatic, waiting for cooling <80C between Bragg scans
    writes outputfile with fitted value for the center of the Bragg scan to: '/home/xf11id/Repos/chxtools/chxtools/X-ray_database/
    changes 03/18/2016: made compatible with python V3 and latest versio of bluesky (working on it!!!)

    NOTE(review): blocking variant of get_ID_calibration_dan — uses mov()/RE()
    directly instead of yielding plan messages. Relies on beamline globals
    (xf, detselect, xray_eye1, hdm, foil_y, dcm, ivu_gap, mov, RE, ascan, db,
    get_table, gauss) defined elsewhere in the profile.
    """
    import numpy as np
    #import xfuncs as xf
    #from dataportal import DataBroker as db, StepScan as ss, DataMuxer as dm
    import time
    from epics import caput, caget
    from matplotlib import pyplot as plt
    from scipy.optimize import curve_fit
    gaps = np.arange(gapstart, gapstop, gapstep) - gapoff # not sure this should be '+' or '-' ...
    print('ID calibration will contain the following gaps [mm]: ',gaps)
    xtal_map = {1: 'Si111cryo', 2: 'Si220cryo'}
    pos_sts_pv = 'XF:11IDA-OP{Mono:DCM-Ax:X}Pos-Sts'
    try:
        xtal = xtal_map[caget(pos_sts_pv)]
    except KeyError:
        raise CHX_utilities_Exception('error: trying to do ID gap calibration with no crystal in the beam')
    print('using', xtal, 'for ID gap calibration')
    # create file for writing calibration data:
    fn='id_CHX_IVU20_'+str(time.strftime("%m"))+str(time.strftime("%d"))+str(time.strftime("%Y"))+'.dat'
    #fpath='/tmp/'
    fpath='/home/xf11id/Repos/chxtools/chxtools/X-ray_database/'
    try:
        outFile = open(fpath+fn, 'w')
        outFile.write('% data from measurements '+str(time.strftime("%D"))+'\n')
        outFile.write('% K colkumn is a placeholder! \n')
        outFile.write('% ID gap [mm] K E_1 [keV] \n')
        outFile.close()
        print('successfully created outputfile: ',fpath+fn)
    except:
        raise CHX_utilities_Exception('error: could not create output file')
    ### do the scanning and data fitting, file writing,....
    t_adjust=0
    center=[]
    E1=[]
    realgap=[]
    detselect(xray_eye1)
    print(gaps)
    MIN_GAP = 5.2
    for i in gaps:
        # clamp the gap at the mechanical minimum and guess the Bragg angle
        # for the first harmonic at this gap:
        if i >= MIN_GAP:
            B_guess=-1.0*xf.get_Bragg(xtal,xf.get_Es(i+gapoff,5)[1])[0]
        else:
            i = MIN_GAP
            B_guess=-1.0*xf.get_Bragg(xtal,xf.get_Es(i,5)[1])[0]
        if i > 8 and t_adjust == 0:    # adjust acquistion time once while opening the gap (could write something more intelligent in the long run...)
            exptime=caget('XF:11IDA-BI{Bpm:1-Cam:1}cam1:AcquireTime')
            caput('XF:11IDA-BI{Bpm:1-Cam:1}cam1:AcquireTime',2*exptime)
            t_adjust = 1
        print('initial guess: Bragg= ',B_guess,' deg. ID gap = ',i,' mm')
        es = xf.get_Es(i, 5)[1]
        mirror_stripe_pos = round(caget('XF:11IDA-OP{Mir:HDM-Ax:Y}Mtr.VAL'),1)
        SI_STRIPE = -7.5
        RH_STRIPE = 7.5
        # pick the HDM mirror stripe according to the photon energy:
        if es < 9.5:
            stripe = SI_STRIPE
        elif es >= 9.5:
            stripe = RH_STRIPE
        mov(hdm.y, stripe)
        mov(foil_y, 0)  # Put YAG in beam.
        print('moving DCM Bragg angle to:', B_guess ,'deg and ID gap to', i, 'mm')
        #RE(bp.abs_set(dcm.b, B_guess))
        mov(dcm.b, B_guess)
        #RE(bp.abs_set(ivu_gap,i))
        mov(ivu_gap,i)
        print('hurray, made it up to here!')
        print('about to collect data')
        RE(ascan(dcm.b, float(B_guess-.4), float(B_guess+.4), 60))
        header = db[-1]    #retrive the data (first data point is often "wrong", so don't use
        data = get_table(header)
        B = data.dcm_b[2:]
        intdat = data.xray_eye1_stats1_total[2:]
        B=np.array(B)
        intdat=np.array(intdat)
        A=np.max(intdat)    # initial parameter guess and fitting
        xc=B[np.argmax(intdat)]
        w=.2
        yo=np.mean(intdat)
        p0=[yo,A,xc,w]
        print('initial guess for fitting: ',p0)
        pss = 0
        try:
            # the Gaussian fit is only used as a sanity check here; the peak
            # position actually written out comes from ps() below.
            # NOTE(review): `gauss` is expected to be defined elsewhere in the
            # profile — confirm before running.
            coeff,var_matrix = curve_fit(gauss,B,intdat,p0=p0)
            #center.append(coeff)
            #E1.append(xf.get_EBragg(xtal,-coeff)/5.0)
            realgap.append(caget('SR:C11-ID:G1{IVU20:1-LEnc}Gap'))
            #   # append data file by i, 1 & xf.get_EBragg(xtal,-coeff/5.0):
            print('passed the Gaussian trial fit, will use ps now to write data')
            ps()   #this should always work
            Bvalue = ps.cen
            E1.append(xf.get_EBragg(xtal,-Bvalue)/5.0)
            center.append(Bvalue)
            with open(fpath+fn, "a") as myfile:
                myfile.write(str(caget('SR:C11-ID:G1{IVU20:1-LEnc}Gap'))+' 1.0 '+str(float(xf.get_EBragg(xtal,-Bvalue)/5.0))+'\n')
            print('added data point: ',caget('SR:C11-ID:G1{IVU20:1-LEnc}Gap'),' ',1.0,' ',str(float(xf.get_EBragg(xtal,-Bvalue)/5.0)))
        except: print('could not evaluate data point for ID gap = ',i,' mm...data point skipped!')
        # wait for the Bragg axis to cool below 80 C before the next scan:
        while caget('XF:11IDA-OP{Mono:DCM-Ax:Bragg}T-I') > 80:
            time.sleep(30)
            print('DCM Bragg axis too hot (>80C)...waiting...')
    plt.close(234)
    plt.figure(234)
    plt.plot(E1,realgap,'ro-')
    plt.xlabel('E_1 [keV]')
    plt.ylabel('ID gap [mm]')
    plt.title('ID gap calibration in file: '+fpath+fn,size=12)
    plt.grid()
class CHX_utilities_Exception(Exception):
    """Exception type for CHX beamline utility functions.

    by LW 03/19/2015
    Raised by utility routines (e.g. output-file creation in the ID gap
    calibration) to signal a utilities-specific failure.
    """
    # NOTE(review): the original placed this string *after* ``pass``, where it
    # was a dead statement rather than the class docstring; moved it up so it
    # is exposed via ``__doc__``.
    pass
def retrieve_latest_scan(uid='-1',det='default',suffix='default'):
    '''
    (From Lutz 95-utilities.py)
    Retrieve the x/y data of a scan from the databroker.

    Parameters
    ----------
    uid : str or int
        Scan identifier; the string '-1' (default) is translated to the
        integer -1, i.e. the most recent scan.
    det : str
        Detector name; 'default' means "use the first detector recorded
        in the scan's start document".
    suffix : str
        Field suffix appended to the detector name; 'default' selects
        '_stats1_total' (or 'elm_sum_all' for the electrometer 'elm').

    Returns
    -------
    x, y : np.ndarray
        Motor positions and detector intensities of the scan.
    '''
    # get the scan information:
    if uid == '-1':
        uid=-1
    # Resolve the intensity field name: the electrometer ('elm') uses the
    # special 'elm_sum_all' field; other detectors default to their
    # '<name>_stats1_total' statistics field unless a suffix is given.
    if det == 'default':
        if db[uid].start.detectors[0] == 'elm' and suffix=='default':
            intensity_field='elm_sum_all'
        elif db[uid].start.detectors[0] == 'elm':
            intensity_field='elm'+suffix
        elif suffix == 'default':
            intensity_field= db[uid].start.detectors[0]+'_stats1_total'
        else:
            intensity_field= db[uid].start.detectors[0]+suffix
    else:
        if det=='elm' and suffix == 'default':
            intensity_field='elm_sum_all'
        elif det=='elm':
            intensity_field = 'elm'+suffix
        elif suffix == 'default':
            intensity_field=det+'_stats1_total'
        else:
            intensity_field=det+suffix
    # The scanned motor is the first motor recorded in the start document.
    field = db[uid].start.motors[0]
    #field='dcm_b';intensity_field='elm_sum_all'
    [x,y,t]=get_data(uid,field=field, intensity_field=intensity_field, det=None, debug=False) #need to re-write way to get data
    x=np.array(x)
    y=np.array(y)
    return x, y
|
NSLS-II-CHX/ipython_ophyd
|
startup/95-utilities.py
|
Python
|
bsd-2-clause
| 38,175
|
[
"CRYSTAL",
"Gaussian"
] |
6947761c7c758e9bbee5c1d4db0a48d0cdc14e285758c04c508468b088b18d63
|
import numpy as np
import regreg.api as rr
from selection.randomized.glm import pairs_bootstrap_glm, bootstrap_cov
from ..sampling.langevin import projected_langevin
from ..distributions.api import discrete_family
from ..distributions.api import discrete_family, intervals_from_sample
class M_estimator(object):
    def __init__(self, lam, loss, penalty, solve_args={'min_its':50, 'tol':1.e-10}):
        r"""
        Container for a randomized group-lasso M-estimation problem.

        Stores the loss, the group-lasso penalty and the solver arguments;
        the actual fit happens in ``solve``, which computes the restricted
        M-estimator :math:`\bar{\beta}_E` (i.e. subject to the constraint
        :math:`\beta_{-E}=0`) for the selected active set.

        Parameters
        ----------
        lam : float
            Penalty (regularization) parameter.
        loss : regreg smooth loss
            Smooth part of the objective (e.g. a GLM log-likelihood).
        penalty : regreg group-lasso penalty
            Structured penalty defining the groups and their weights.
        solve_args : dict
            Arguments passed to the regreg solver.
            NOTE(review): a mutable default dict is shared across calls; it
            is never mutated in the visible code, but callers should not
            modify it in place.
        """
        (self.loss,
         self.penalty,
         self.solve_args) = (loss,
                             penalty,
                             solve_args)
        self.lam = lam
def solve(self, scaling=1, solve_args={'min_its':20, 'tol':1.e-10}):
(loss,
penalty,
solve_args) = (self.loss,
self.penalty,
self.solve_args)
# initial solution
problem = rr.simple_problem(loss, penalty)
self.initial_soln = problem.solve(**solve_args)
# find the active groups and their direction vectors
# as well as unpenalized groups
groups = np.unique(penalty.groups)
active_groups = np.zeros(len(groups), np.bool)
unpenalized_groups = np.zeros(len(groups), np.bool)
active_directions = []
active = np.zeros(loss.shape, np.bool)
unpenalized = np.zeros(loss.shape, np.bool)
initial_scalings = []
active_directions_list = []
for i, g in enumerate(groups):
group = penalty.groups == g
active_groups[i] = (np.linalg.norm(self.initial_soln[group]) > 1.e-6 * penalty.weights[g]) and (penalty.weights[g] > 0)
unpenalized_groups[i] = (penalty.weights[g] == 0)
if active_groups[i]:
active[group] = True
z = np.zeros(active.shape, np.float)
z[group] = self.initial_soln[group] / np.linalg.norm(self.initial_soln[group])
active_directions.append(z)
initial_scalings.append(np.linalg.norm(self.initial_soln[group]))
active_directions_list.append(z[group])
if unpenalized_groups[i]:
unpenalized[group] = True
# solve the restricted problem
from scipy.linalg import block_diag
self._active_directions_mat = block_diag(*active_directions_list)
self._overall = active + unpenalized
self._inactive = ~self._overall
self._unpenalized = unpenalized
self._active_directions = np.array(active_directions).T
self._active_groups = np.array(active_groups, np.bool)
self._unpenalized_groups = np.array(unpenalized_groups, np.bool)
self.selection_variable = {'groups':self._active_groups,
'variables':self._overall,
'directions':self._active_directions}
# initial state for opt variables
initial_subgrad = -self.loss.smooth_objective(self.initial_soln, 'grad')
# the quadratic of a smooth_atom is not included in computing the smooth_objective
# print("initial sub", initial_subgrad)
X, y = loss.data
# print(np.dot(X.T, y-X.dot(self.initial_soln)))
initial_subgrad = initial_subgrad[self._inactive]
initial_unpenalized = self.initial_soln[self._unpenalized]
self.observed_opt_state = np.concatenate([initial_scalings,
initial_unpenalized,
initial_subgrad], axis=0)
# set the _solved bit
self._solved = True
# Now setup the pieces for linear decomposition
(loss,
penalty,
initial_soln,
overall,
inactive,
unpenalized,
active_groups,
active_directions) = (self.loss,
self.penalty,
self.initial_soln,
self._overall,
self._inactive,
self._unpenalized,
self._active_groups,
self._active_directions)
# scaling should be chosen to be Lipschitz constant for gradient of Gaussian part
# we are implicitly assuming that
# loss is a pairs model
_sqrt_scaling = np.sqrt(scaling)
_beta_unpenalized = restricted_Mest(loss, overall, solve_args=solve_args)
beta_full = np.zeros(overall.shape)
beta_full[overall] = _beta_unpenalized
_hessian = loss.hessian(beta_full)
self._beta_full = beta_full
# observed state for score
self.observed_score_state = np.hstack([_beta_unpenalized * _sqrt_scaling,
-loss.smooth_objective(beta_full, 'grad')[inactive] / _sqrt_scaling])
#print('observed score', self.observed_score_state)
#print("obs score", self.observed_score_state[])
# print()
#print(self.observed_score_state.shape)
# form linear part
self.num_opt_var = p = loss.shape[0] # shorthand for p
self.p = p
# (\bar{\beta}_{E \cup U}, N_{-E}, c_E, \beta_U, z_{-E})
# E for active
# U for unpenalized
# -E for inactive
_opt_linear_term = np.zeros((p, self._active_groups.sum() + unpenalized.sum() + inactive.sum()))
_score_linear_term = np.zeros((p, p))
# \bar{\beta}_{E \cup U} piece -- the unpenalized M estimator
Mest_slice = slice(0, overall.sum())
_Mest_hessian = _hessian[:,overall]
_score_linear_term[:,Mest_slice] = -_Mest_hessian / _sqrt_scaling
# N_{-(E \cup U)} piece -- inactive coordinates of score of M estimator at unpenalized solution
null_idx = range(overall.sum(), p)
inactive_idx = np.nonzero(inactive)[0]
for _i, _n in zip(inactive_idx, null_idx):
_score_linear_term[_i,_n] = -_sqrt_scaling
#print("score mat", _score_linear_term)
# c_E piece
scaling_slice = slice(0, active_groups.sum())
if len(active_directions)==0:
_opt_hessian=0
else:
epsilon = 0
_opt_hessian = (_hessian+ epsilon * np.identity(p)).dot(active_directions)
_opt_linear_term[:,scaling_slice] = _opt_hessian / _sqrt_scaling
self.observed_opt_state[scaling_slice] *= _sqrt_scaling
# beta_U piece
unpenalized_slice = slice(active_groups.sum(), active_groups.sum() + unpenalized.sum())
unpenalized_directions = np.identity(p)[:,unpenalized]
if unpenalized.sum():
epsilon = 0
_opt_linear_term[:,unpenalized_slice] = (_hessian + epsilon * np.identity(p)).dot(unpenalized_directions) / _sqrt_scaling
self.observed_opt_state[unpenalized_slice] *= _sqrt_scaling
# subgrad piece
subgrad_idx = range(active_groups.sum() + unpenalized.sum(), active_groups.sum() + inactive.sum() + unpenalized.sum())
subgrad_slice = slice(active_groups.sum() + unpenalized.sum(), active_groups.sum() + inactive.sum() + unpenalized.sum())
for _i, _s in zip(inactive_idx, subgrad_idx):
_opt_linear_term[_i,_s] = _sqrt_scaling
self.observed_opt_state[subgrad_slice] /= _sqrt_scaling
# form affine part
_opt_affine_term = np.zeros(p)
idx = 0
groups = np.unique(penalty.groups)
for i, g in enumerate(groups):
if active_groups[i]:
group = penalty.groups == g
_opt_affine_term[group] = active_directions[:,idx][group] * penalty.weights[g]
idx += 1
# two transforms that encode score and optimization
# variable roles
# later, we will modify `score_transform`
# in `linear_decomposition`
self.opt_transform = (_opt_linear_term, _opt_affine_term)
self.score_transform = (_score_linear_term, np.zeros(_score_linear_term.shape[0]))
# now store everything needed for the projections
# the projection acts only on the optimization
# variables
self.scaling_slice = scaling_slice
# weights are scaled here because the linear terms scales them by scaling
new_groups = penalty.groups[inactive]
new_weights = dict([(g, penalty.weights[g] / _sqrt_scaling) for g in penalty.weights.keys() if g in np.unique(new_groups)])
# we form a dual group lasso object
# to do the projection
self.group_lasso_dual = rr.group_lasso_dual(new_groups, weights=new_weights, bound=1.)
self.subgrad_slice = subgrad_slice
self._setup = True
#_opt_affine_term_modified = np.dot(np.linalg.inv(_score_linear_term), _opt_affine_term)
#_opt_linear_term_modified =
#self.opt_transform_modified = (_opt_affine_term_modified, _opt_linear_term_modified)
self.score_mat = -_score_linear_term
self.score_mat_inv = np.linalg.inv(self.score_mat)
def projection(self, opt_state):
"""
Full projection for Langevin.
The state here will be only the state of the optimization variables.
"""
if not self._setup:
raise ValueError('setup_sampler should be called before using this function')
new_state = opt_state.copy() # not really necessary to copy
new_state[self.scaling_slice] = np.maximum(opt_state[self.scaling_slice], 0)
new_state[self.subgrad_slice] = self.group_lasso_dual.bound_prox(opt_state[self.subgrad_slice])
return new_state
def normal_data_gradient(self, data_vector):
#return -np.dot(self.total_cov_inv, data_vector-self.reference)
return -np.dot(self.score_cov_inv, data_vector-self.reference)
    def gradient(self, opt_state):
        """
        Gradient of the (Gaussian) randomization log-density with respect
        to the optimization variables.

        The optimization state is pushed through the affine opt transform,
        mapped back to the data scale via the inverse score matrix, the
        Gaussian gradient is evaluated there, and the scaling block is
        rotated by the active-direction matrix (chain rule for the group
        scalings).
        """
        opt_linear, opt_offset = self.opt_transform
        opt_piece = opt_linear.dot(opt_state) + opt_offset
        #data_derivative = self.normal_data_gradient(opt_piece)
        # chain rule for optimization part
        # opt_grad = opt_linear.T.dot(data_derivative)
        opt_piece_modified = self.score_mat_inv.dot(opt_piece)
        opt_grad = self.normal_data_gradient(opt_piece_modified)
        # chain rule through the active directions for the scaling block
        opt_grad[self.scaling_slice] = self._active_directions_mat.T.dot(opt_grad[self.scaling_slice])
        return opt_grad #- self.grad_log_jacobian(opt_state)
    def setup_sampler(self, score_mean,
                      scaling=1, solve_args={'min_its':20, 'tol':1.e-10}):
        """
        Prepare the sampler: bootstrap the score covariance and set the
        Gaussian reference (mean) used by ``normal_data_gradient``.

        Parameters
        ----------
        score_mean : np.float
            Reference (mean) vector of the Gaussian approximation.
        scaling : float
            NOTE(review): accepted but unused in this implementation.
        solve_args : dict
            NOTE(review): accepted but unused in this implementation.
        """
        X, _ = self.loss.data
        n, p = X.shape
        # Pairs-bootstrap functional of the score at the restricted fit.
        bootstrap_score = pairs_bootstrap_glm(self.loss,
                                              self._overall,
                                              beta_full=self._beta_full,
                                              inactive=~self._overall)[0]
        # Nonparametric bootstrap estimate of the score covariance.
        score_cov = bootstrap_cov(lambda: np.random.choice(n, size=(n,), replace=True), bootstrap_score)
        #score_cov = np.zeros((p,p))
        #X_E = X[:, self._active_groups]
        #X_minusE = X[:, ~self._active_groups]
        #score_cov[:self._active_groups.sum(), :self._active_groups.sum()] = np.linalg.inv(np.dot(X_E.T, X_E))
        #residual_mat = np.identity(n)-np.dot(X_E, np.linalg.pinv(X_E))
        #score_cov[self._active_groups.sum():, self._active_groups.sum():] = np.dot(X_minusE.T, np.dot(residual_mat, X_minusE))
        self.score_cov = score_cov
        self.score_cov_inv = np.linalg.inv(self.score_cov)
        #self.score_mat = -self.score_transform[0]
        #self.score_mat_inv = np.linalg.inv(self.score_mat)
        #self.total_cov = np.dot(self.score_mat, self.score_cov).dot(self.score_mat.T)
        #self.total_cov_inv = np.linalg.inv(self.total_cov)
        self.reference = score_mean
        #print(self.reference)
def reconstruction_map(self, opt_state):
if not self._setup:
raise ValueError('setup_sampler should be called before using this function')
# reconstruction of randoimzation omega
#opt_state = np.atleast_2d(opt_state)
opt_linear, opt_offset = self.opt_transform
opt_piece = opt_linear.dot(opt_state.T) + opt_offset
return self.score_mat_inv.dot(opt_piece)
    def sample(self, ndraw, burnin, stepsize):
        '''
        Sample from the selective density using a projected Langevin
        sampler with gradient map `self.gradient` and projection map
        `self.projection`.

        Parameters
        ----------
        ndraw : int
            Number of samples to return (after burn-in).
        burnin : int
            Number of initial samples to discard.
        stepsize : float
            Stepsize for the Langevin sampler.

        Returns
        -------
        samples : np.ndarray
            Reconstructed (data-scale) chain states after burn-in.
        '''
        #if stepsize is None:
        #    stepsize = 1. / self.crude_lipschitz()
        langevin = projected_langevin(self.observed_opt_state.copy(),
                                      self.gradient,
                                      self.projection,
                                      stepsize)
        samples = []
        for i in range(ndraw + burnin):
            # NOTE(review): `.next()` is Python-2 style; assumes
            # projected_langevin exposes a `next` method -- confirm on Py3.
            langevin.next()
            if (i >= burnin):
                samples.append(self.reconstruction_map(langevin.state.copy()))
        return np.asarray(samples)
    def hypothesis_test(self,
                        test_stat,
                        observed_value,
                        ndraw=10000,
                        burnin=2000,
                        stepsize=None,
                        sample=None,
                        parameter=None,
                        alternative='twosided'):
        '''
        Selective p-value for `test_stat`, based on a sample from the
        selective density (projected Langevin with gradient map
        `self.gradient` and projection map `self.projection`).

        Parameters
        ----------
        test_stat : callable
            Test statistic evaluated on each sampled state.
        observed_value : float
            Observed value of the test statistic; used in the p-value
            calculation.
        ndraw : int
            Chain length (ignored if `sample` is given).
        burnin : int
            Number of initial samples to discard.
        stepsize : float
            Langevin stepsize; defaults to 1/p.
        sample : np.array (optional)
            Pre-computed sample from the selective distribution, allowing
            reuse for intervals, tests, etc.  If given, `ndraw, burnin,
            stepsize` are ignored.
        parameter : np.float (optional)
            Defaults to `self.reference`.
            NOTE(review): currently assigned but never used in the p-value
            computation -- the Gaussian-tilting reweighting described in
            earlier versions is not implemented here.
        alternative : ['greater', 'less', 'twosided']
            Alternative hypothesis.

        Returns
        -------
        pval : float
            Selective p-value under the requested alternative.
        '''
        if alternative not in ['greater', 'less', 'twosided']:
            raise ValueError("alternative should be one of ['greater', 'less', 'twosided']")
        if stepsize is None:
            stepsize = 1./self.p
        if sample is None:
            sample = self.sample(ndraw, burnin, stepsize=stepsize)
        if parameter is None:
            parameter = self.reference
        # Empirical distribution of the test statistic over the sample,
        # with uniform weights.
        sample_test_stat = np.squeeze(np.array([test_stat(x) for x in sample]))
        family = discrete_family(sample_test_stat, np.ones_like(sample_test_stat))
        pval = family.cdf(0, observed_value)
        if alternative == 'greater':
            return 1 - pval
        elif alternative == 'less':
            return pval
        else:
            return 2 * min(pval, 1 - pval)
def confidence_intervals(self,
observed,
ndraw=10000,
burnin=2000,
stepsize=None,
sample=None,
level=0.9):
if stepsize is None:
stepsize = 1./self.p
if sample is None:
sample = self.sample(ndraw, burnin, stepsize=stepsize)
print(sample.shape)
#nactive = observed.shape[0]
self.target_cov = self.score_cov[:self._overall.sum(),:self._overall.sum()]
intervals_instance = intervals_from_sample(self.reference[:self._overall.sum()],
sample[:, :self._overall.sum()],
observed[:self._overall.sum()],
self.target_cov)
return intervals_instance.confidence_intervals_all(level=level)
def coefficient_pvalues(self,
observed,
parameter=None,
ndraw=10000,
burnin=2000,
stepsize=None,
sample=None,
alternative='twosided'):
if stepsize is None:
stepsize = 1./self.p
if alternative not in ['greater', 'less', 'twosided']:
raise ValueError("alternative should be one of ['greater', 'less', 'twosided']")
if sample is None:
sample = self.sample(ndraw, burnin, stepsize=stepsize)
if parameter is None:
parameter = np.zeros(self._overall.sum())
#nactive = observed.shape[0]
intervals_instance = intervals_from_sample(self.reference[:self._overall.sum()],
sample[:, :self._overall.sum()],
observed[:self._overall.sum()],
self.target_cov)
pval = intervals_instance.pivots_all(parameter)
if alternative == 'greater':
return 1 - pval
elif alternative == 'less':
return pval
else:
return 2 * np.minimum(pval, 1 - pval)
def restricted_Mest(Mest_loss, active, solve_args={'min_its':50, 'tol':1.e-10}):
    """Fit the M-estimator restricted to the coordinates in ``active``.

    Parameters
    ----------
    Mest_loss : regreg loss
        Loss whose ``.data`` is ``(X, Y)``; ``X`` must be an ndarray or
        scipy.sparse matrix (general transforms are not supported).
    active : np.bool
        Boolean mask selecting the active columns of ``X``.
    solve_args : dict
        Arguments forwarded to the regreg solver.

    Returns
    -------
    beta_E : np.float
        Unpenalized solution on the restricted design ``X[:, active]``.
    """
    X, Y = Mest_loss.data
    if Mest_loss._is_transform:
        raise NotImplementedError('to fit restricted model, X must be an ndarray or scipy.sparse; general transforms not implemented')
    X_restricted = X[:,active]
    loss_restricted = rr.affine_smooth(Mest_loss.saturated_loss, X_restricted)
    beta_E = loss_restricted.solve(**solve_args)
    return beta_E
|
selective-inference/selective-inference
|
selectinf/randomized/sandbox/M_estimator_group_lasso.py
|
Python
|
bsd-3-clause
| 19,747
|
[
"Gaussian"
] |
883841cc8c1cc8d90261625e71d9dd35d0cc534ee437248ba81b13a7fdaa566f
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
# Root URL configuration: home page, Django admin, then user-uploaded media
# served by Django itself (static() is a no-op when DEBUG is False).
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
    # Django Admin, use {% raw %}{% url 'admin:index' %}{% endraw %}
    url(settings.ADMIN_URL, include(admin.site.urls)),
    # Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),
    ]
    # django-debug-toolbar routes, mounted only when the app is installed.
    if 'debug_toolbar' in settings.INSTALLED_APPS:
        import debug_toolbar
        urlpatterns += [
            url(r'^__debug__/', include(debug_toolbar.urls)),
        ]
|
valerymelou/cookiecutter-django-gulp
|
{{cookiecutter.project_slug}}/config/urls.py
|
Python
|
bsd-3-clause
| 1,394
|
[
"VisIt"
] |
c06277cda98f629b44c3147e5e45fc37f9394bccc751597b35eb339455908e39
|
from __future__ import absolute_import, division, print_function
import contextlib
import logging
import multiprocessing
import threading
import time
import traceback
import warnings
from collections import Mapping, OrderedDict
import numpy as np
from ..conventions import cf_encoder
from ..core import indexing
from ..core.pycompat import dask_array_type, iteritems
from ..core.utils import FrozenOrderedDict, NdimSizeLenMixin
# Import default lock
try:
from dask.utils import SerializableLock
HDF5_LOCK = SerializableLock()
except ImportError:
HDF5_LOCK = threading.Lock()
# Create a logger object, but don't add any handlers. Leave that to user code.
logger = logging.getLogger(__name__)
NONE_VAR_NAME = '__values__'
def get_scheduler(get=None, collection=None):
    """ Determine the dask scheduler that is being used.

    Returns one of 'distributed', 'multiprocessing' or 'threaded';
    None is returned if no dask scheduler is active (dask not importable).

    See also
    --------
    dask.utils.effective_get
    """
    try:
        from dask.utils import effective_get
        actual_get = effective_get(get, collection)
        try:
            # A distributed client exposes its get as a bound method of
            # Client; check for that first.
            from dask.distributed import Client
            if isinstance(actual_get.__self__, Client):
                return 'distributed'
        except (ImportError, AttributeError):
            try:
                import dask.multiprocessing
                if actual_get == dask.multiprocessing.get:
                    return 'multiprocessing'
                else:
                    return 'threaded'
            except ImportError:
                # dask.multiprocessing unavailable -> threaded scheduler.
                return 'threaded'
    except ImportError:
        # dask itself is not installed; no scheduler active.
        return None
def get_scheduler_lock(scheduler, path_or_file=None):
    """Return the lock appropriate for the active dask scheduler.

    The distributed scheduler gets a named ``dask.distributed.Lock``, the
    multiprocessing scheduler a process-safe lock, the threaded scheduler
    a pickle-friendly ``SerializableLock``, and anything else a plain
    ``threading.Lock``.

    See Also
    --------
    dask.utils.get_scheduler_lock
    """
    if scheduler == 'distributed':
        from dask.distributed import Lock
        return Lock(path_or_file)
    if scheduler == 'multiprocessing':
        return multiprocessing.Lock()
    if scheduler == 'threaded':
        from dask.utils import SerializableLock
        return SerializableLock()
    return threading.Lock()
def _encode_variable_name(name):
    """Replace the ``None`` variable name with its storage sentinel."""
    return NONE_VAR_NAME if name is None else name
def _decode_variable_name(name):
    """Inverse of ``_encode_variable_name``: sentinel back to ``None``."""
    return None if name == NONE_VAR_NAME else name
def find_root(ds):
    """
    Walk ``parent`` links up to the root of a netcdf or h5netcdf dataset.
    """
    node = ds
    while node.parent is not None:
        node = node.parent
    return node
def robust_getitem(array, key, catch=Exception, max_retries=6,
                   initial_delay=500):
    """
    Index ``array[key]`` with retry logic and exponential backoff.

    Any exception in ``catch`` triggers a retry; the delay before retry
    ``n`` is ``initial_delay * 2**n`` ms plus a random jitter of the same
    magnitude, so with the defaults the maximum delay lands in the
    32-64 second range.  The final failure is re-raised.
    """
    assert max_retries >= 0
    attempt = 0
    while True:
        try:
            return array[key]
        except catch:
            if attempt == max_retries:
                raise
            base_delay = initial_delay * 2 ** attempt
            next_delay = base_delay + np.random.randint(base_delay)
            logger.debug('getitem failed, waiting %s ms before trying again '
                         '(%s tries remaining). Full traceback: %s' %
                         (next_delay, max_retries - attempt, traceback.format_exc()))
            time.sleep(1e-3 * next_delay)
            attempt += 1
class CombinedLock(object):
    """A lock that aggregates several underlying locks.

    Like a door with several bolts: the combined lock counts as locked
    whenever any constituent lock is locked.  Acquire/release and the
    context-manager protocol fan out to every member.
    """

    def __init__(self, locks):
        # Duplicates are dropped so each member is handled exactly once.
        self.locks = tuple(set(locks))

    def acquire(self, *args):
        """Acquire every member lock; True only if all acquisitions did."""
        return all(member.acquire(*args) for member in self.locks)

    def release(self, *args):
        """Release every member lock."""
        for member in self.locks:
            member.release(*args)

    def __enter__(self):
        for member in self.locks:
            member.__enter__()

    def __exit__(self, *args):
        for member in self.locks:
            member.__exit__(*args)

    @property
    def locked(self):
        """True if any member lock reports itself locked."""
        return any(member.locked for member in self.locks)

    def __repr__(self):
        return "CombinedLock(%r)" % list(self.locks)
class BackendArray(NdimSizeLenMixin, indexing.ExplicitlyIndexed):
    """Base class for lazily indexed backend arrays.

    Subclasses implement ``__getitem__``; ``__array__`` realizes the full
    array by indexing with a full-slice ``BasicIndexer``.
    """
    def __array__(self, dtype=None):
        key = indexing.BasicIndexer((slice(None),) * self.ndim)
        return np.asarray(self[key], dtype=dtype)
class AbstractDataStore(Mapping):
_autoclose = None
_ds = None
_isopen = False
def __iter__(self):
return iter(self.variables)
def __getitem__(self, key):
return self.variables[key]
def __len__(self):
return len(self.variables)
def get_dimensions(self): # pragma: no cover
raise NotImplementedError
def get_attrs(self): # pragma: no cover
raise NotImplementedError
def get_variables(self): # pragma: no cover
raise NotImplementedError
def get_encoding(self):
return {}
def load(self):
"""
This loads the variables and attributes simultaneously.
A centralized loading function makes it easier to create
data stores that do automatic encoding/decoding.
For example::
class SuffixAppendingDataStore(AbstractDataStore):
def load(self):
variables, attributes = AbstractDataStore.load(self)
variables = {'%s_suffix' % k: v
for k, v in iteritems(variables)}
attributes = {'%s_suffix' % k: v
for k, v in iteritems(attributes)}
return variables, attributes
This function will be called anytime variables or attributes
are requested, so care should be taken to make sure its fast.
"""
variables = FrozenOrderedDict((_decode_variable_name(k), v)
for k, v in self.get_variables().items())
attributes = FrozenOrderedDict(self.get_attrs())
return variables, attributes
@property
def variables(self):
warnings.warn('The ``variables`` property has been deprecated and '
'will be removed in xarray v0.11.',
FutureWarning, stacklevel=2)
variables, _ = self.load()
return variables
@property
def attrs(self):
warnings.warn('The ``attrs`` property has been deprecated and '
'will be removed in xarray v0.11.',
FutureWarning, stacklevel=2)
_, attrs = self.load()
return attrs
@property
def dimensions(self):
warnings.warn('The ``dimensions`` property has been deprecated and '
'will be removed in xarray v0.11.',
FutureWarning, stacklevel=2)
return self.get_dimensions()
def close(self):
pass
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.close()
class ArrayWriter(object):
    """Collects (source, target) writes and flushes dask arrays lazily.

    Non-dask sources are written immediately; dask arrays are queued and
    stored in a single ``da.store`` call under ``lock`` when ``sync``
    runs.
    """
    def __init__(self, lock=HDF5_LOCK):
        self.sources = []
        self.targets = []
        self.lock = lock
    def add(self, source, target):
        # Queue dask arrays for a deferred, locked store; write anything
        # else eagerly.
        if isinstance(source, dask_array_type):
            self.sources.append(source)
            self.targets.append(target)
        else:
            target[...] = source
    def sync(self, compute=True):
        # Flush all queued dask writes in one store call; returns the
        # delayed store object when compute is False.
        if self.sources:
            import dask.array as da
            delayed_store = da.store(self.sources, self.targets,
                                     lock=self.lock, compute=compute,
                                     flush=True)
            self.sources = []
            self.targets = []
            return delayed_store
class AbstractWritableDataStore(AbstractDataStore):
def __init__(self, writer=None, lock=HDF5_LOCK):
if writer is None:
writer = ArrayWriter(lock=lock)
self.writer = writer
self.delayed_store = None
def encode(self, variables, attributes):
"""
Encode the variables and attributes in this store
Parameters
----------
variables : dict-like
Dictionary of key/value (variable name / xr.Variable) pairs
attributes : dict-like
Dictionary of key/value (attribute name / attribute) pairs
Returns
-------
variables : dict-like
attributes : dict-like
"""
variables = OrderedDict([(k, self.encode_variable(v))
for k, v in variables.items()])
attributes = OrderedDict([(k, self.encode_attribute(v))
for k, v in attributes.items()])
return variables, attributes
def encode_variable(self, v):
"""encode one variable"""
return v
def encode_attribute(self, a):
"""encode one attribute"""
return a
def set_dimension(self, d, l): # pragma: no cover
raise NotImplementedError
def set_attribute(self, k, v): # pragma: no cover
raise NotImplementedError
def set_variable(self, k, v): # pragma: no cover
raise NotImplementedError
def sync(self, compute=True):
if self._isopen and self._autoclose:
# datastore will be reopened during write
self.close()
self.delayed_store = self.writer.sync(compute=compute)
def store_dataset(self, dataset):
"""
in stores, variables are all variables AND coordinates
in xarray.Dataset variables are variables NOT coordinates,
so here we pass the whole dataset in instead of doing
dataset.variables
"""
self.store(dataset, dataset.attrs)
def store(self, variables, attributes, check_encoding_set=frozenset(),
unlimited_dims=None):
"""
Top level method for putting data on this store, this method:
- encodes variables/attributes
- sets dimensions
- sets variables
Parameters
----------
variables : dict-like
Dictionary of key/value (variable name / xr.Variable) pairs
attributes : dict-like
Dictionary of key/value (attribute name / attribute) pairs
check_encoding_set : list-like
List of variables that should be checked for invalid encoding
values
unlimited_dims : list-like
List of dimension names that should be treated as unlimited
dimensions.
"""
variables, attributes = self.encode(variables, attributes)
self.set_attributes(attributes)
self.set_dimensions(variables, unlimited_dims=unlimited_dims)
self.set_variables(variables, check_encoding_set,
unlimited_dims=unlimited_dims)
def set_attributes(self, attributes):
"""
This provides a centralized method to set the dataset attributes on the
data store.
Parameters
----------
attributes : dict-like
Dictionary of key/value (attribute name / attribute) pairs
"""
for k, v in iteritems(attributes):
self.set_attribute(k, v)
def set_variables(self, variables, check_encoding_set,
unlimited_dims=None):
"""
This provides a centralized method to set the variables on the data
store.
Parameters
----------
variables : dict-like
Dictionary of key/value (variable name / xr.Variable) pairs
check_encoding_set : list-like
List of variables that should be checked for invalid encoding
values
unlimited_dims : list-like
List of dimension names that should be treated as unlimited
dimensions.
"""
for vn, v in iteritems(variables):
name = _encode_variable_name(vn)
check = vn in check_encoding_set
target, source = self.prepare_variable(
name, v, check, unlimited_dims=unlimited_dims)
self.writer.add(source, target)
def set_dimensions(self, variables, unlimited_dims=None):
"""
This provides a centralized method to set the dimensions on the data
store.
Parameters
----------
variables : dict-like
Dictionary of key/value (variable name / xr.Variable) pairs
unlimited_dims : list-like
List of dimension names that should be treated as unlimited
dimensions.
"""
if unlimited_dims is None:
unlimited_dims = set()
existing_dims = self.get_dimensions()
dims = OrderedDict()
for v in unlimited_dims: # put unlimited_dims first
dims[v] = None
for v in variables.values():
dims.update(dict(zip(v.dims, v.shape)))
for dim, length in dims.items():
if dim in existing_dims and length != existing_dims[dim]:
raise ValueError(
"Unable to update size for existing dimension"
"%r (%d != %d)" % (dim, length, existing_dims[dim]))
elif dim not in existing_dims:
is_unlimited = dim in unlimited_dims
self.set_dimension(dim, length, is_unlimited)
class WritableCFDataStore(AbstractWritableDataStore):
    """Writable store that applies CF conventions encoding before writing."""
    def encode(self, variables, attributes):
        # All NetCDF files get CF encoded by default, without this attempting
        # to write times, for example, would fail.
        variables, attributes = cf_encoder(variables, attributes)
        variables = OrderedDict([(k, self.encode_variable(v))
                                 for k, v in variables.items()])
        attributes = OrderedDict([(k, self.encode_attribute(v))
                                  for k, v in attributes.items()])
        return variables, attributes
class DataStorePickleMixin(object):
"""Subclasses must define `ds`, `_opener` and `_mode` attributes.
Do not subclass this class: it is not part of xarray's external API.
"""
def __getstate__(self):
state = self.__dict__.copy()
del state['_ds']
del state['_isopen']
if self._mode == 'w':
# file has already been created, don't override when restoring
state['_mode'] = 'a'
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._ds = None
self._isopen = False
@property
def ds(self):
if self._ds is not None and self._isopen:
return self._ds
ds = self._opener(mode=self._mode)
self._isopen = True
return ds
@contextlib.contextmanager
def ensure_open(self, autoclose=None):
"""
Helper function to make sure datasets are closed and opened
at appropriate times to avoid too many open file errors.
Use requires `autoclose=True` argument to `open_mfdataset`.
"""
if autoclose is None:
autoclose = self._autoclose
if not self._isopen:
try:
self._ds = self._opener()
self._isopen = True
yield
finally:
if autoclose:
self.close()
else:
yield
def assert_open(self):
if not self._isopen:
raise AssertionError('internal failure: file must be open '
'if `autoclose=True` is used.')
|
jcmgray/xarray
|
xarray/backends/common.py
|
Python
|
apache-2.0
| 15,762
|
[
"NetCDF"
] |
4f7a438679bc5da90fba5d11328d1b6590850920922bf84d6a93c624cb47c4fd
|
import sys
from collections import namedtuple
from ua_parser import user_agent_parser
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str
else:
string_types = basestring
MOBILE_DEVICE_FAMILIES = (
'iPhone',
'iPod',
'Generic Smartphone',
'Generic Feature Phone',
'PlayStation Vita',
)
PC_OS_FAMILIES = (
'Windows 95',
'Windows 98',
'Windows ME',
'Solaris',
)
MOBILE_OS_FAMILIES = (
'Windows Phone',
'Windows Phone OS', # Earlier versions of ua-parser returns Windows Phone OS
'Symbian OS',
'Bada',
'Windows CE',
'Windows Mobile',
)
MOBILE_BROWSER_FAMILIES = (
'Opera Mobile',
'Opera Mini',
)
TABLET_DEVICE_FAMILIES = (
'iPad',
'BlackBerry Playbook',
'Blackberry Playbook', # Earlier versions of ua-parser returns "Blackberry" instead of "BlackBerry"
'Kindle',
'Kindle Fire',
'Kindle Fire HD',
'Galaxy Tab',
'Xoom',
'Dell Streak',
)
TOUCH_CAPABLE_OS_FAMILIES = (
'iOS',
'Android',
'Windows Phone',
'Windows Phone OS',
'Windows RT',
'Windows CE',
'Windows Mobile',
)
TOUCH_CAPABLE_DEVICE_FAMILIES = (
'BlackBerry Playbook',
'Blackberry Playbook',
'Kindle Fire',
)
def parse_version(major=None, minor=None, patch=None, patch_minor=None):
    """Build a version tuple from ua-parser's version components.

    Digit-only string components are converted to ints; any other value is
    passed through unchanged. The returned tuple keeps components up to and
    including the last truthy one, so trailing falsy components (None, 0,
    '') are dropped.
    """
    def coerce(part):
        # Only digit-only strings become ints; 'beta', None, ints, etc.
        # pass through untouched.
        if part is not None and isinstance(part, string_types):
            return int(part) if part.isdigit() else part
        return part

    parts = [coerce(p) for p in (major, minor, patch, patch_minor)]

    # Index just past the last truthy component (0 when none are truthy).
    length = 0
    for index, value in enumerate(parts):
        if value:
            length = index + 1
    return tuple(parts[:length])
Browser = namedtuple('Browser', ['family', 'version', 'version_string'])


def parse_browser(family, major=None, minor=None, patch=None, patch_minor=None):
    """Build a Browser namedtuple from ua-parser's ``user_agent`` dict.

    ``patch_minor`` is accepted for signature compatibility with the parsed
    dict but, as in the original implementation, is not folded into the
    version tuple.
    """
    version = parse_version(major, minor, patch)
    return Browser(family, version,
                   '.'.join(str(component) for component in version))
OperatingSystem = namedtuple('OperatingSystem', ['family', 'version', 'version_string'])


def parse_operating_system(family, major=None, minor=None, patch=None, patch_minor=None):
    """Build an OperatingSystem namedtuple from ua-parser's ``os`` dict."""
    version = parse_version(major, minor, patch)
    return OperatingSystem(family, version,
                           '.'.join(str(component) for component in version))
Device = namedtuple('Device', ['family'])


def parse_device(family, brand=None, model=None):
    """Build a Device namedtuple for the given device family.

    ``brand`` and ``model`` are accepted (and ignored) because newer
    versions of ua-parser include those keys in the parsed device dict;
    without these parameters ``parse_device(**ua_dict['device'])`` raises
    TypeError. Accepting them with defaults is backward-compatible.
    """
    return Device(family)
class UserAgent(object):
    """Wraps a ua-parser result and exposes device-class heuristics.

    Attributes: ``ua_string`` (raw UA string), ``os``/``browser``/``device``
    (namedtuples built from ua-parser's output).
    """

    def __init__(self, user_agent_string):
        ua_dict = user_agent_parser.Parse(user_agent_string)
        self.ua_string = user_agent_string
        self.os = parse_operating_system(**ua_dict['os'])
        self.browser = parse_browser(**ua_dict['user_agent'])
        self.device = parse_device(**ua_dict['device'])

    def __str__(self):
        # `cond and a or b` is the historical pre-ternary idiom; safe here
        # because "PC" is truthy.
        device = self.is_pc and "PC" or self.device.family
        os = ("%s %s" % (self.os.family, self.os.version_string)).strip()
        browser = ("%s %s" % (self.browser.family, self.browser.version_string)).strip()
        return " / ".join([device, os, browser])

    def __unicode__(self):
        # Python 2 only; ignored on Python 3.
        return unicode(str(self))

    def _is_android_tablet(self):
        # Newer Android tablets don't have "Mobile" in their user agent string,
        # older ones like Galaxy Tab still have "Mobile" though they're not
        if ('Mobile Safari' not in self.ua_string and
                self.browser.family != "Firefox Mobile"):
            return True
        return False

    def _is_blackberry_touch_capable_device(self):
        # A helper to determine whether a BB phone has touch capabilities
        # Blackberry Bold Touch series begins with 99XX
        if 'Blackberry 99' in self.device.family:
            return True
        if 'Blackberry 95' in self.device.family:  # BB Storm devices
            return True
        # NOTE(review): duplicate of the previous check — the comment says
        # "Torch" (9800 series), so '95' here looks like a typo for '98';
        # left unchanged to preserve behavior, but worth confirming upstream.
        if 'Blackberry 95' in self.device.family:  # BB Torch devices
            return True
        return False

    @property
    def is_tablet(self):
        """True when the UA looks like a tablet (iPad, Kindle, Android w/o
        "Mobile", Windows RT, ...)."""
        if self.device.family in TABLET_DEVICE_FAMILIES:
            return True
        if (self.os.family == 'Android' and self._is_android_tablet()):
            return True
        if self.os.family.startswith('Windows RT'):
            return True
        return False

    @property
    def is_mobile(self):
        """True when the UA looks like a phone (not a tablet)."""
        # First check for mobile device and mobile browser families
        if self.device.family in MOBILE_DEVICE_FAMILIES:
            return True
        if self.browser.family in MOBILE_BROWSER_FAMILIES:
            return True
        # Device is considered Mobile OS is Android and not tablet
        # This is not fool proof but would have to suffice for now
        if self.os.family == 'Android' and not self.is_tablet:
            return True
        if self.os.family == 'BlackBerry OS' and self.device.family != 'Blackberry Playbook':
            return True
        if self.os.family in MOBILE_OS_FAMILIES:
            return True
        # TODO: remove after https://github.com/tobie/ua-parser/issues/126 is closed
        if 'J2ME' in self.ua_string or 'MIDP' in self.ua_string:
            return True
        # This is here mainly to detect Google's Mobile Spider
        if 'iPhone;' in self.ua_string:
            return True
        # Mobile Spiders should be identified as mobile
        if self.device.family == 'Spider' and 'Mobile' in self.browser.family:
            return True
        return False

    @property
    def is_touch_capable(self):
        """True when the device is believed to have a touch screen."""
        # TODO: detect touch capable Nokia devices
        if self.os.family in TOUCH_CAPABLE_OS_FAMILIES:
            return True
        if self.device.family in TOUCH_CAPABLE_DEVICE_FAMILIES:
            return True
        if self.os.family.startswith('Windows 8') and 'Touch' in self.ua_string:
            return True
        if 'BlackBerry' in self.os.family and self._is_blackberry_touch_capable_device():
            return True
        return False

    @property
    def is_pc(self):
        """True for desktop/laptop devices (Windows, Mac, Linux)."""
        # Returns True for "PC" devices (Windows, Mac and Linux)
        if 'Windows NT' in self.ua_string or self.os.family in PC_OS_FAMILIES:
            return True
        # TODO: remove after https://github.com/tobie/ua-parser/issues/127 is closed
        if self.os.family == 'Mac OS X' and 'Silk' not in self.ua_string:
            return True
        if 'Linux' in self.ua_string and 'X11' in self.ua_string:
            return True
        return False

    @property
    def is_bot(self):
        """True when ua-parser classified the device as a crawler/spider."""
        return True if self.device.family == 'Spider' else False
def parse(user_agent_string):
    """Parse a raw user-agent string into a :class:`UserAgent` instance."""
    return UserAgent(user_agent_string)
|
zpzgone/python-user-agents
|
user_agents/parsers.py
|
Python
|
mit
| 7,147
|
[
"Galaxy"
] |
e0b165c03267b7a6d00d564b0d6101404994bb5b17e9b364563d1291dcdc4b53
|
# $Id$
#
# Copyright (C) 2002-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for the EState atom typing
validation values are from the paper (JCICS _35_ 1039-1045 (1995))
"""
from __future__ import print_function
import unittest
from rdkit import Chem
from rdkit.Chem import EState
from rdkit.Chem.EState import AtomTypes
class TestCase(unittest.TestCase):

    def setUp(self):
        pass

    def _validate(self, vals, tol=1e-2, show=0):
        # Check each (SMILES, expected-type-list) pair against the atom
        # types computed by AtomTypes.TypeAtoms.
        for smi, expected in vals:
            mol = Chem.MolFromSmiles(smi)
            types = AtomTypes.TypeAtoms(mol)
            if show:
                print(types)
            assert len(expected) == len(types), 'bad type len for smiles: %s' % (smi)
            counts = [len(entry) for entry in types]
            assert max(counts) == 1, 'atom matched multiple types for smiles: %s' % (smi)
            flattened = [entry[0] for entry in types]
            for want, got in zip(expected, flattened):
                assert want == got, 'bad type for SMILES: %s' % (smi)

    def test1(self):
        """ simple molecules
        """
        data = [
            ('CC', ['sCH3', 'sCH3']),
            ('CCC', ['sCH3', 'ssCH2', 'sCH3']),
            ('CCOC', ['sCH3', 'ssCH2', 'ssO', 'sCH3']),
            ('c1ccccc1[NH3+]',
             ['aaCH', 'aaCH', 'aaCH', 'aaCH', 'aaCH', 'aasC', 'sNH3']),
            ('c1ccccc1N',
             ['aaCH', 'aaCH', 'aaCH', 'aaCH', 'aaCH', 'aasC', 'sNH2']),
            ('C#C', ['tCH', 'tCH']),
            ('C=C=C', ['dCH2', 'ddC', 'dCH2']),
        ]
        self._validate(data, show=0)

    def test2(self):
        """ more complex molecules
        """
        data = [
            ('c1[nH]cnc1CC(N)C(O)=O',
             ['aaCH', 'aaNH', 'aaCH', 'aaN', 'aasC', 'ssCH2', 'sssCH',
              'sNH2', 'dssC', 'sOH', 'dO']),
            ('c1nc[n-]c1CC(N)C(O)=O',
             ['aaCH', 'aaN', 'aaCH', 'aaN', 'aasC', 'ssCH2', 'sssCH',
              'sNH2', 'dssC', 'sOH', 'dO']),
            ('c1cccc2c1cccc2',
             ['aaCH', 'aaCH', 'aaCH', 'aaCH', 'aaaC', 'aaaC', 'aaCH',
              'aaCH', 'aaCH', 'aaCH']),
        ]
        self._validate(data, show=0)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
adalke/rdkit
|
rdkit/Chem/EState/UnitTestTypes.py
|
Python
|
bsd-3-clause
| 2,103
|
[
"RDKit"
] |
7869770c3c1c539f6ba8191f5b73b0b137c7dcaa6aa52b8b0100df8b28ba2a80
|
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
from functools import reduce
from operator import mul
from pathlib import Path
from subprocess import DEVNULL, PIPE, run
from time import time as seq_time
import os
import cgen as c
from sympy import S
from devito.ir.iet import (ExpressionBundle, List, TimedList, Section,
Iteration, FindNodes, Transformer)
from devito.ir.support import IntervalGroup
from devito.logger import warning, error
from devito.mpi import MPI
from devito.mpi.routines import MPICall, MPIList, RemainderCall
from devito.parameters import configuration
from devito.passes.iet import BusyWait
from devito.symbolics import subs_op_args
from devito.tools import DefaultOrderedDict, flatten
__all__ = ['create_profile']


# Static per-Section metadata gathered at compile time.
SectionData = namedtuple('SectionData', 'ops sops points traffic itermaps')
# (section name, MPI rank) key into a PerformanceSummary.
PerfKey = namedtuple('PerfKey', 'name rank')
# Raw per-rank measurements a summary entry is derived from.
PerfInput = namedtuple('PerfInput', 'time ops points traffic sops itershapes')
# Derived performance metrics exposed to the user.
PerfEntry = namedtuple('PerfEntry', 'time gflopss gpointss oi ops itershapes')
class Profiler(object):
    """Base profiler: records per-Section runtimes (no flop/traffic data)."""

    # Extra headers/libraries a backend compiler must pull in for this
    # profiler (overridden by AdvisorProfiler).
    _default_includes = []
    _default_libs = []
    _ext_calls = []

    """Metadata for a profiled code section."""

    def __init__(self, name):
        self.name = name

        # Operation reductions observed in sections
        self._ops = []

        # C-level code sections
        self._sections = OrderedDict()
        self._subsections = OrderedDict()

        # Python-level timers
        self.py_timers = OrderedDict()

        self.initialized = True

    def analyze(self, iet):
        """
        Analyze the Sections in the given IET. This populates `self._sections`.
        """
        sections = FindNodes(Section).visit(iet)
        for s in sections:
            if s.name in self._sections:
                continue

            bundles = FindNodes(ExpressionBundle).visit(s)

            # Total operation count
            ops = sum(i.ops*i.ispace.size for i in bundles)

            # Operation count at each section iteration
            sops = sum(i.ops for i in bundles)

            # Total memory traffic
            mapper = {}
            for i in bundles:
                for k, v in i.traffic.items():
                    mapper.setdefault(k, []).append(v)
            traffic = 0
            for i in mapper.values():
                try:
                    traffic += IntervalGroup.generate('union', *i).size
                except ValueError:
                    # Over different iteration spaces
                    traffic += sum(j.size for j in i)

            # Each ExpressionBundle lives in its own iteration space
            itermaps = [i.ispace.dimension_map for i in bundles]

            # Track how many grid points are written within `s`
            points = set()
            for i in bundles:
                if any(e.write.is_TimeFunction for e in i.exprs):
                    points.add(i.size)
            points = sum(points, S.Zero)

            self._sections[s.name] = SectionData(ops, sops, points, traffic, itermaps)

    def track_subsection(self, sname, name):
        # Register a named subsection under section `sname`; only its
        # runtime is tracked (all static metadata is zero).
        v = self._subsections.setdefault(sname, OrderedDict())
        v[name] = SectionData(S.Zero, S.Zero, S.Zero, S.Zero, [])

    def instrument(self, iet, timer):
        """
        Instrument the given IET for C-level performance profiling.
        """
        sections = FindNodes(Section).visit(iet)
        if sections:
            mapper = {}
            for i in sections:
                n = i.name
                assert n in timer.fields
                # Wrap each Section body so its runtime lands in `timer.<name>`
                mapper[i] = i._rebuild(body=TimedList(timer=timer, lname=n, body=i.body))
            return Transformer(mapper, nested=True).visit(iet)
        else:
            return iet

    @contextmanager
    def timer_on(self, name, comm=None):
        """
        Measure the execution time of a Python-level code region.

        Parameters
        ----------
        name : str
            A representative string for the timed region.
        comm : MPI communicator, optional
            If provided, the global execution time is derived by a single MPI
            rank, with timers started and stopped right after an MPI barrier.
        """
        if comm and comm is not MPI.COMM_NULL:
            comm.Barrier()
            tic = MPI.Wtime()
            yield
            comm.Barrier()
            toc = MPI.Wtime()
        else:
            tic = seq_time()
            yield
            toc = seq_time()
        self.py_timers[name] = toc - tic

    def record_ops_variation(self, initial, final):
        """
        Record the variation in operation count experienced by a section due to
        a flop-reducing transformation.
        """
        self._ops.append((initial, final))

    @property
    def all_sections(self):
        # Section names plus all registered subsection names.
        return list(self._sections) + flatten(self._subsections.values())

    @property
    def trackable_subsections(self):
        # IET node types whose runtime should be tracked as subsections;
        # the base profiler tracks none (see the Verbose subclasses).
        return ()

    def summary(self, args, dtype, reduce_over=None):
        """
        Return a PerformanceSummary of the profiled sections.

        Parameters
        ----------
        args : dict
            A mapper from argument names to run-time values from which the Profiler
            infers iteration space and execution times of a run.
        dtype : data-type
            The data type of the objects in the profiled sections. Used to compute
            the operational intensity.
        """
        comm = args.comm

        summary = PerformanceSummary()
        for name, data in self._sections.items():
            # Time to run the section; clamped to 1us (10e-7 == 1e-6) to
            # avoid division by zero in derived metrics
            time = max(getattr(args[self.name]._obj, name), 10e-7)

            # Add performance data
            if comm is not MPI.COMM_NULL:
                # With MPI enabled, we add one entry per section per rank
                times = comm.allgather(time)
                assert comm.size == len(times)
                for rank in range(comm.size):
                    summary.add(name, rank, times[rank])
            else:
                summary.add(name, None, time)

        return summary
class ProfilerVerbose1(Profiler):
    """Basic profiler that also times high-level MPI calls and busy-waits."""

    @property
    def trackable_subsections(self):
        return (MPIList, RemainderCall, BusyWait)
class ProfilerVerbose2(Profiler):
    """Basic profiler that also times all MPI calls and busy-waits."""

    @property
    def trackable_subsections(self):
        return (MPICall, BusyWait)
class AdvancedProfiler(Profiler):
    """Profiler that also derives flops, grid points and memory traffic."""

    # Override basic summary so that arguments other than runtime are computed.
    def summary(self, args, dtype, reduce_over=None):
        grid = args.grid
        comm = args.comm

        # Produce sections summary
        summary = PerformanceSummary()
        for name, data in self._sections.items():
            # Time to run the section (clamped to 1us; 10e-7 == 1e-6)
            time = max(getattr(args[self.name]._obj, name), 10e-7)

            # Number of FLOPs performed
            ops = int(subs_op_args(data.ops, args))

            # Number of grid points computed
            points = int(subs_op_args(data.points, args))

            # Compulsory traffic
            traffic = float(subs_op_args(data.traffic, args)*dtype().itemsize)

            # Runtime itermaps/itershapes
            itermaps = [OrderedDict([(k, int(subs_op_args(v, args)))
                                     for k, v in i.items()])
                        for i in data.itermaps]
            itershapes = tuple(tuple(i.values()) for i in itermaps)

            # Add local performance data
            if comm is not MPI.COMM_NULL:
                # With MPI enabled, we add one entry per section per rank
                times = comm.allgather(time)
                assert comm.size == len(times)
                opss = comm.allgather(ops)
                pointss = comm.allgather(points)
                traffics = comm.allgather(traffic)
                sops = [data.sops]*comm.size
                itershapess = comm.allgather(itershapes)
                items = list(zip(times, opss, pointss, traffics, sops, itershapess))
                for rank in range(comm.size):
                    summary.add(name, rank, *items[rank])
            else:
                summary.add(name, None, time, ops, points, traffic, data.sops, itershapes)

        # Enrich summary with subsections data
        for sname, v in self._subsections.items():
            for name, data in v.items():
                # Time to run the section
                time = max(getattr(args[self.name]._obj, name), 10e-7)

                # Add local performance data
                if comm is not MPI.COMM_NULL:
                    # With MPI enabled, we add one entry per section per rank
                    times = comm.allgather(time)
                    assert comm.size == len(times)
                    # NOTE(review): unlike the section loop above, this adds
                    # the local `time` for every rank rather than
                    # `times[rank]` — confirm whether that is intended.
                    for rank in range(comm.size):
                        summary.add_subsection(sname, name, rank, time)
                else:
                    summary.add_subsection(sname, name, None, time)

        # Add global performance data
        if reduce_over is not None:
            # Vanilla metrics
            summary.add_glb_vanilla(self.py_timers[reduce_over])

            # Typical finite difference benchmark metrics
            if grid is not None:
                dimensions = (grid.time_dim,) + grid.dimensions
                if all(d.max_name in args for d in dimensions):
                    max_t = args[grid.time_dim.max_name] or 0
                    min_t = args[grid.time_dim.min_name] or 0
                    nt = max_t - min_t + 1
                    points = reduce(mul, (nt,) + grid.shape)
                    summary.add_glb_fdlike(points, self.py_timers[reduce_over])

        return summary
class AdvancedProfilerVerbose1(AdvancedProfiler):
    """Advanced profiler that also times high-level MPI calls and busy-waits."""

    @property
    def trackable_subsections(self):
        return (MPIList, RemainderCall, BusyWait)
class AdvancedProfilerVerbose2(AdvancedProfiler):
    """Advanced profiler that also times all MPI calls and busy-waits."""

    @property
    def trackable_subsections(self):
        return (MPICall, BusyWait)
class AdvisorProfiler(AdvancedProfiler):

    """
    Rely on Intel Advisor ``v >= 2020`` for performance profiling.
    Tested versions of Intel Advisor:
    - As contained in Intel Parallel Studio 2020 v 2020 Update 2
    - As contained in Intel oneAPI 2021 beta08
    """

    # Advisor Collection Control API entry points emitted into the
    # generated C code around the time loop.
    _api_resume = '__itt_resume'
    _api_pause = '__itt_pause'

    _default_includes = ['ittnotify.h']
    _default_libs = ['ittnotify']
    _ext_calls = [_api_resume, _api_pause]

    def __init__(self, name):
        self.path = locate_intel_advisor()
        if self.path is None:
            # Advisor not found: flag as uninitialized so create_profile
            # can fall back to another profiler.
            self.initialized = False
        else:
            super(AdvisorProfiler, self).__init__(name)
            # Make sure future compilations will get the proper header and
            # shared object files
            compiler = configuration['compiler']
            compiler.add_include_dirs(self.path.joinpath('include').as_posix())
            compiler.add_libraries(self._default_libs)

            libdir = self.path.joinpath('lib64').as_posix()
            compiler.add_library_dirs(libdir)
            compiler.add_ldflags('-Wl,-rpath,%s' % libdir)

    def analyze(self, iet):
        # No static analysis needed: Advisor collects its own metrics.
        return

    def instrument(self, iet, timer):
        # Look for the presence of a time loop within the IET of the Operator
        mapper = {}
        for i in FindNodes(Iteration).visit(iet):
            if i.dim.is_Time:
                # The calls to Advisor's Collection Control API are only for Operators
                # with a time loop; instrumentation stops at the first one found
                mapper[i] = List(header=c.Statement('%s()' % self._api_resume),
                                 body=i,
                                 footer=c.Statement('%s()' % self._api_pause))
                return Transformer(mapper).visit(iet)

        # Return the IET intact if no time loop is found
        return iet
class PerformanceSummary(OrderedDict):
    """Maps PerfKey -> PerfEntry, plus per-subsection timings, the raw
    per-rank inputs, and globally-reduced metrics."""

    def __init__(self, *args, **kwargs):
        super(PerformanceSummary, self).__init__(*args, **kwargs)
        self.subsections = DefaultOrderedDict(lambda: OrderedDict())
        self.input = OrderedDict()
        self.globals = {}

    def add(self, name, rank, time,
            ops=None, points=None, traffic=None, sops=None, itershapes=None):
        """
        Add performance data for a given code section. With MPI enabled, the
        performance data is local, that is "per-rank".
        """
        # Do not show unexecuted Sections (i.e., loop trip count was 0)
        if ops == 0 or traffic == 0:
            return

        k = PerfKey(name, rank)

        if ops is None:
            # Runtime-only entry (basic profiler)
            self[k] = PerfEntry(time, 0.0, 0.0, 0.0, 0, [])
        else:
            gflops = float(ops)/10**9
            gpoints = float(points)/10**9
            gflopss = gflops/time
            gpointss = gpoints/time
            oi = float(ops/traffic)

            self[k] = PerfEntry(time, gflopss, gpointss, oi, sops, itershapes)

        self.input[k] = PerfInput(time, ops, points, traffic, sops, itershapes)

    def add_subsection(self, sname, name, rank, time):
        # Subsection timings hang off an already-registered section entry.
        k0 = PerfKey(sname, rank)
        assert k0 in self

        self.subsections[sname][name] = time

    def add_glb_vanilla(self, time):
        """
        Reduce the following performance data:

            * ops
            * traffic

        over a given global timing.
        """
        if not self.input:
            return

        ops = sum(v.ops for v in self.input.values())
        traffic = sum(v.traffic for v in self.input.values())

        gflops = float(ops)/10**9
        gflopss = gflops/time
        oi = float(ops/traffic)

        self.globals['vanilla'] = PerfEntry(time, gflopss, None, oi, None, None)

    def add_glb_fdlike(self, points, time):
        """
        Add the typical GPoints/s finite-difference metric.
        """
        gpoints = float(points)/10**9
        gpointss = gpoints/time

        self.globals['fdlike'] = PerfEntry(time, None, gpointss, None, None, None)

    @property
    def gflopss(self):
        return OrderedDict([(k, v.gflopss) for k, v in self.items()])

    @property
    def oi(self):
        return OrderedDict([(k, v.oi) for k, v in self.items()])

    @property
    def timings(self):
        return OrderedDict([(k, v.time) for k, v in self.items()])
def create_profile(name):
    """Create a new Profiler according to `configuration['profiling']`.

    In DEBUG/PERF log mode the `basic` level is promoted to `advanced`. If
    the requested profiler fails to initialize (only possible for `advisor`,
    which needs a local Intel Advisor install), fall back to `advanced`.
    """
    if configuration['log-level'] in ['DEBUG', 'PERF'] and \
            configuration['profiling'] == 'basic':
        # Enforce performance profiling in DEBUG mode
        level = 'advanced'
    else:
        level = configuration['profiling']
    profiler = profiler_registry[level](name)

    if profiler.initialized:
        return profiler
    else:
        warning("Couldn't set up `%s` profiler; reverting to `advanced`" % level)
        # Bug fix: this previously instantiated the `basic` profiler,
        # contradicting both the warning message above and the comment below.
        profiler = profiler_registry['advanced'](name)
        # We expect the `advanced` profiler to always initialize successfully
        assert profiler.initialized
        return profiler
# Map user-facing `profiling` level names to Profiler implementations.
profiler_registry = {
    'basic': Profiler,
    'basic1': ProfilerVerbose1,
    'basic2': ProfilerVerbose2,
    'advanced': AdvancedProfiler,
    'advanced1': AdvancedProfilerVerbose1,
    'advanced2': AdvancedProfilerVerbose2,
    'advisor': AdvisorProfiler
}
"""Profiling levels."""
def locate_intel_advisor():
    """
    Detect if Intel Advisor is installed on the machine and return
    its location if it is.

    Returns
    -------
    pathlib.Path or None
        The Advisor install root (containing ``bin64/advixe-cl``), or None
        if it cannot be located.
    """
    path = None

    try:
        # Check if the directory to Intel Advisor is specified
        path = Path(os.environ['DEVITO_ADVISOR_DIR'])
    except KeyError:
        # Otherwise, 'sniff' the location of Advisor's directory
        error_msg = 'Intel Advisor cannot be found on your system, consider if you'\
                    ' have sourced its environment variables correctly. Information can'\
                    ' be found at https://software.intel.com/content/www/us/en/develop/'\
                    'documentation/advisor-user-guide/top/launch-the-intel-advisor/'\
                    'intel-advisor-cli/setting-and-using-intel-advisor-environment'\
                    '-variables.html'
        try:
            res = run(["advixe-cl", "--version"], stdout=PIPE, stderr=DEVNULL)
            ver = res.stdout.decode("utf-8")
            if not ver:
                error(error_msg)
                return None
        except (UnicodeDecodeError, FileNotFoundError):
            error(error_msg)
            return None

        env_path = os.environ["PATH"]
        env_path_dirs = env_path.split(":")

        for env_path_dir in env_path_dirs:
            # intel/advisor is the advisor directory for Intel Parallel Studio,
            # intel/oneapi/advisor is the directory for Intel oneAPI
            if "intel/advisor" in env_path_dir or "intel/oneapi/advisor" in env_path_dir:
                path = Path(env_path_dir)
                if path.name.startswith('bin'):
                    path = path.parent

        if not path:
            error(error_msg)
            return None

    if path.joinpath('bin64').joinpath('advixe-cl').is_file():
        return path
    else:
        # Bug fix: the two adjacent literals previously concatenated to
        # "...executablein advisor directory" (missing separating space).
        warning("Requested `advisor` profiler, but couldn't locate executable "
                "in advisor directory")
        return None
|
opesci/devito
|
devito/operator/profiling.py
|
Python
|
mit
| 17,233
|
[
"VisIt"
] |
4725347a8a3b308f11f396ce1d2d0cc05590779f22a4d21129a776f99bfaba8e
|
"""
A collection of functions to find the weights and abscissas for
Gaussian Quadrature.
These calculations are done by finding the eigenvalues of a
tridiagonal matrix whose entries are dependent on the coefficients
in the recursion formula for the orthogonal polynomials with the
corresponding weighting function over the interval.
Many recursion relations for orthogonal polynomials are given:
.. math::
a1n f_{n+1} (x) = (a2n + a3n x ) f_n (x) - a4n f_{n-1} (x)
The recursion relation of interest is
.. math::
P_{n+1} (x) = (x - A_n) P_n (x) - B_n P_{n-1} (x)
where :math:`P` has a different normalization than :math:`f`.
The coefficients can be found as:
.. math::
A_n = -a2n / a3n
\\qquad
B_n = ( a4n / a3n \\sqrt{h_n-1 / h_n})^2
where
.. math::
h_n = \\int_a^b w(x) f_n(x)^2
assume:
.. math::
P_0 (x) = 1
\\qquad
P_{-1} (x) == 0
For the mathematical background, see [golub.welsch-1969-mathcomp]_ and
[abramowitz.stegun-1965]_.
Functions::
gen_roots_and_weights -- Generic roots and weights.
j_roots -- Jacobi
js_roots -- Shifted Jacobi
la_roots -- Generalized Laguerre
h_roots -- Hermite
he_roots -- Hermite (unit-variance)
cg_roots -- Ultraspherical (Gegenbauer)
t_roots -- Chebyshev of the first kind
u_roots -- Chebyshev of the second kind
c_roots -- Chebyshev of the first kind ([-2,2] interval)
s_roots -- Chebyshev of the second kind ([-2,2] interval)
ts_roots -- Shifted Chebyshev of the first kind.
us_roots -- Shifted Chebyshev of the second kind.
p_roots -- Legendre
ps_roots -- Shifted Legendre
l_roots -- Laguerre
.. [golub.welsch-1969-mathcomp]
Golub, Gene H, and John H Welsch. 1969. Calculation of Gauss
Quadrature Rules. *Mathematics of Computation* 23, 221-230+s1--s10.
.. [abramowitz.stegun-1965]
Abramowitz, Milton, and Irene A Stegun. (1965) *Handbook of
Mathematical Functions: with Formulas, Graphs, and Mathematical
Tables*. Gaithersburg, MD: National Bureau of Standards.
http://www.math.sfu.ca/~cbm/aands/
"""
#
# Author: Travis Oliphant 2000
# Updated Sep. 2003 (fixed bugs --- tested to be accurate)
from __future__ import division, print_function, absolute_import
# Scipy imports.
import numpy as np
from numpy import all, any, exp, inf, pi, sqrt
from scipy import linalg
# Local imports.
from . import _ufuncs as cephes
_gam = cephes.gamma
__all__ = ['legendre', 'chebyt', 'chebyu', 'chebyc', 'chebys',
'jacobi', 'laguerre', 'genlaguerre', 'hermite', 'hermitenorm',
'gegenbauer', 'sh_legendre', 'sh_chebyt', 'sh_chebyu', 'sh_jacobi',
'p_roots', 'ps_roots', 'j_roots', 'js_roots', 'l_roots', 'la_roots',
'he_roots', 'ts_roots', 'us_roots', 's_roots', 't_roots', 'u_roots',
'c_roots', 'cg_roots', 'h_roots',
'eval_legendre', 'eval_chebyt', 'eval_chebyu', 'eval_chebyc',
'eval_chebys', 'eval_jacobi', 'eval_laguerre', 'eval_genlaguerre',
'eval_hermite', 'eval_hermitenorm', 'eval_gegenbauer',
'eval_sh_legendre', 'eval_sh_chebyt', 'eval_sh_chebyu',
'eval_sh_jacobi', 'poch', 'binom']
# For backward compatibility
poch = cephes.poch
class orthopoly1d(np.poly1d):
    """A poly1d subclass carrying quadrature weights, the weight function,
    the orthogonality interval and an optional fast evaluation function."""

    def __init__(self, roots, weights=None, hn=1.0, kn=1.0, wfunc=None,
                 limits=None, monic=False, eval_func=None):
        np.poly1d.__init__(self, roots, r=1)
        # Quadrature weights re-expressed without the weight function factor.
        equiv_weights = [weights[k] / wfunc(roots[k]) for
                         k in range(len(roots))]
        # Attributes are written through __dict__ deliberately: np.poly1d
        # overrides __setattr__, so plain assignment would be intercepted.
        self.__dict__['weights'] = np.array(list(zip(roots,
                                                     weights, equiv_weights)))
        self.__dict__['weight_func'] = wfunc
        self.__dict__['limits'] = limits
        mu = sqrt(hn)
        if monic:
            # Rescale so the leading coefficient is 1, adjusting the
            # normalization constant and eval_func accordingly.
            evf = eval_func
            if evf:
                eval_func = lambda x: evf(x) / kn
            mu = mu / abs(kn)
            kn = 1.0
        self.__dict__['normcoef'] = mu
        self.__dict__['coeffs'] *= kn

        # Note: eval_func will be discarded on arithmetic
        self.__dict__['_eval_func'] = eval_func

    def __call__(self, v):
        # Prefer the fast evaluation function for scalar/array arguments;
        # fall back to polynomial evaluation (e.g. for poly1d composition).
        if self._eval_func and not isinstance(v, np.poly1d):
            return self._eval_func(v)
        else:
            return np.poly1d.__call__(self, v)

    def _scale(self, p):
        # Multiply the polynomial (and its normalization) by scalar `p`.
        if p == 1.0:
            return
        self.__dict__['coeffs'] *= p
        evf = self.__dict__['_eval_func']
        if evf:
            self.__dict__['_eval_func'] = lambda x: evf(x) * p
        self.__dict__['normcoef'] *= p
def _gen_roots_and_weights(n, mu0, an_func, bn_func, f, df, symmetrize, mu):
    """[x,w] = gen_roots_and_weights(n,an_func,sqrt_bn_func,mu)

    Returns the roots (x) of an nth order orthogonal polynomial,
    and weights (w) to use in appropriate Gaussian quadrature with that
    orthogonal polynomial.

    The polynomials have the recurrence relation
          P_n+1(x) = (x - A_n) P_n(x) - B_n P_n-1(x)

    an_func(n)          should return A_n
    sqrt_bn_func(n)     should return sqrt(B_n)
    mu ( = h_0 )        is the integral of the weight over the orthogonal
                        interval
    """
    k = np.arange(n, dtype='d')
    c = np.zeros((2, n))
    # Golub-Welsch: roots are the eigenvalues of the symmetric tridiagonal
    # Jacobi matrix (off-diagonal sqrt(B_n), diagonal A_n), stored in
    # banded form.
    c[0,1:] = bn_func(k[1:])
    c[1,:] = an_func(k)
    x = linalg.eigvals_banded(c, overwrite_a_band=True)

    # improve roots by one application of Newton's method
    y = f(n, x)
    dy = df(n, x)
    x -= y/dy

    # Weights per Golub-Welsch, computed from P_{n-1} and P'_n; both are
    # normalized by their max to avoid overflow before the final rescale.
    fm = f(n-1, x)
    fm /= np.abs(fm).max()
    dy /= np.abs(dy).max()
    w = 1.0 / (fm * dy)

    if symmetrize:
        # Enforce the exact symmetry of roots/weights about the origin for
        # even weight functions.
        w = (w + w[::-1]) / 2
        x = (x - x[::-1]) / 2

    # Rescale so the weights sum to the weight-function integral mu0.
    w *= mu0 / w.sum()

    if mu:
        return x, w, mu0
    else:
        return x, w
# Jacobi Polynomials 1 P^(alpha,beta)_n(x)
def j_roots(n, alpha, beta, mu=False):
    """Gauss-Jacobi quadrature

    Computes the sample points and weights for Gauss-Jacobi quadrature. The
    sample points are the roots of the `n`th degree Jacobi polynomial,
    :math:`P^{\\alpha, \\beta}_n(x)`.  These sample points and weights
    correctly integrate polynomials of degree :math:`2*n - 1` or less over the
    interval :math:`[-1, 1]` with weight function
    :math:`f(x) = (1 - x)^{\\alpha} (1 + x)^{\\beta}`.

    Parameters
    ----------
    n : int
        quadrature order
    alpha : float
        alpha must be > -1
    beta : float
        beta must be > -1
    mu : bool, optional
        If True, return the sum of the weights, optional.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError("n must be a positive integer.")
    if alpha <= -1 or beta <= -1:
        raise ValueError("alpha and beta must be greater than -1.")

    # Delegate to specialized rules when they apply (cheaper/stabler).
    if alpha == 0.0 and beta == 0.0:
        return p_roots(m, mu)
    if alpha == beta:
        return cg_roots(m, alpha+0.5, mu)

    # Integral of the weight over [-1, 1].
    mu0 = 2.0**(alpha+beta+1)*cephes.beta(alpha+1, beta+1)
    a = alpha
    b = beta
    # Recurrence coefficients of the Jacobi polynomials for Golub-Welsch;
    # the a + b == 0 case avoids a 0/0 at k == 0.
    if a + b == 0.0:
        an_func = lambda k: np.where(k == 0, (b-a)/(2+a+b), 0.0)
    else:
        an_func = lambda k: np.where(k == 0, (b-a)/(2+a+b),
                                     (b*b - a*a) / ((2.0*k+a+b)*(2.0*k+a+b+2)))

    bn_func = lambda k: 2.0 / (2.0*k+a+b)*np.sqrt((k+a)*(k+b) / (2*k+a+b+1)) \
        * np.where(k == 1, 1.0, np.sqrt(k*(k+a+b) / (2.0*k+a+b-1)))

    f = lambda n, x: cephes.eval_jacobi(n, a, b, x)
    df = lambda n, x: 0.5 * (n + a + b + 1) \
        * cephes.eval_jacobi(n-1, a+1, b+1, x)
    return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu)
def jacobi(n, alpha, beta, monic=False):
    """Returns the nth order Jacobi polynomial, P^(alpha,beta)_n(x)
    orthogonal over [-1,1] with weighting function
    (1-x)**alpha (1+x)**beta with alpha,beta > -1.
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")

    wfunc = lambda x: (1 - x)**alpha * (1 + x)**beta
    if n == 0:
        # Degree 0: the constant polynomial 1; no quadrature data needed.
        return orthopoly1d([], [], 1.0, 1.0, wfunc, (-1, 1), monic,
                           eval_func=np.ones_like)
    x, w, mu = j_roots(n, alpha, beta, mu=True)
    ab1 = alpha + beta + 1.0
    # hn: squared norm of P_n under the Jacobi weight.
    hn = 2**ab1 / (2 * n + ab1) * _gam(n + alpha + 1)
    hn *= _gam(n + beta + 1.0) / _gam(n + 1) / _gam(n + ab1)
    kn = _gam(2 * n + ab1) / 2.0**n / _gam(n + 1) / _gam(n + ab1)
    # here kn = coefficient on x^n term
    p = orthopoly1d(x, w, hn, kn, wfunc, (-1, 1), monic,
                    lambda x: eval_jacobi(n, alpha, beta, x))
    return p
# Jacobi Polynomials shifted G_n(p,q,x)
def js_roots(n, p1, q1, mu=False):
    """Gauss-Jacobi (shifted) quadrature

    Computes the sample points and weights for Gauss-Jacobi (shifted)
    quadrature. The sample points are the roots of the `n`th degree shifted
    Jacobi polynomial, :math:`G^{p,q}_n(x)`. These sample points and weights
    correctly integrate polynomials of degree :math:`2*n - 1` or less over
    the interval :math:`[0, 1]` with weight function
    :math:`f(x) = (1 - x)^{p-q} x^{q-1}`.

    Parameters
    ----------
    n : int
        quadrature order
    p1 : float
        (p1 - q1) must be > -1
    q1 : float
        q1 must be > 0
    mu : bool, optional
        If True, return the sum of the weights, optional.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    """
    if (p1 - q1) <= -1 or q1 <= 0:
        raise ValueError("(p - q) must be greater than -1, and q must be greater than 0.")
    # The shifted rule is the standard Gauss-Jacobi rule with parameters
    # (p1-q1, q1-1), with roots mapped from [-1, 1] onto [0, 1]; weights
    # (and mu, when requested) are returned unchanged.
    results = j_roots(n, p1 - q1, q1 - 1, mu)
    shifted_roots = (results[0] + 1) / 2
    return (shifted_roots,) + results[1:]
def sh_jacobi(n, p, q, monic=False):
    """Returns the nth order Jacobi polynomial, G_n(p,q,x)
    orthogonal over [0,1] with weighting function
    (1-x)**(p-q) (x)**(q-1) with p>q-1 and q > 0.
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")

    wfunc = lambda x: (1.0 - x)**(p - q) * (x)**(q - 1.)
    if n == 0:
        # Degree 0: the constant polynomial 1; no quadrature data needed.
        return orthopoly1d([], [], 1.0, 1.0, wfunc, (-1, 1), monic,
                           eval_func=np.ones_like)
    n1 = n
    x, w, mu0 = js_roots(n1, p, q, mu=True)
    # hn: squared norm of G_n under the shifted Jacobi weight.
    hn = _gam(n + 1) * _gam(n + q) * _gam(n + p) * _gam(n + p - q + 1)
    hn /= (2 * n + p) * (_gam(2 * n + p)**2)
    # kn = 1.0 in standard form so monic is redundant.  Kept for compatibility.
    kn = 1.0
    pp = orthopoly1d(x, w, hn, kn, wfunc=wfunc, limits=(0, 1), monic=monic,
                     eval_func=lambda x: eval_sh_jacobi(n, p, q, x))
    return pp
# Generalized Laguerre L^(alpha)_n(x)
def la_roots(n, alpha, mu=False):
    """Gauss-generalized Laguerre quadrature

    Computes the sample points and weights for Gauss-generalized Laguerre
    quadrature. The sample points are the roots of the `n`th degree
    generalized Laguerre polynomial, :math:`L^{\\alpha}_n(x)`. These sample
    points and weights correctly integrate polynomials of degree
    :math:`2*n - 1` or less over the interval :math:`[0, inf]` with weight
    function :math:`f(x) = x^{\\alpha} e^{-x}`.

    Parameters
    ----------
    n : int
        quadrature order
    alpha : float
        alpha must be > -1
    mu : bool, optional
        If True, return the sum of the weights, optional.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError("n must be a positive integer.")
    # Bug fix: the weight x**alpha * exp(-x) is integrable on [0, inf) only
    # for alpha > -1.  The previous check (`alpha < -1`) let alpha == -1
    # through, for which mu0 = gamma(0) is infinite and the rule is
    # meaningless; the message already required "greater than -1".
    if alpha <= -1:
        raise ValueError("alpha must be greater than -1.")
    mu0 = cephes.gamma(alpha + 1)

    # n == 1: the single root of L^(alpha)_1(x) = 1 + alpha - x is known in
    # closed form; the lone weight is the full weight-function integral.
    if m == 1:
        x = np.array([alpha+1.0], 'd')
        w = np.array([mu0], 'd')
        if mu:
            return x, w, mu0
        else:
            return x, w

    # Recurrence coefficients of the generalized Laguerre polynomials,
    # consumed by the Golub-Welsch eigenvalue method.
    an_func = lambda k: 2 * k + alpha + 1
    bn_func = lambda k: -np.sqrt(k * (k + alpha))
    f = lambda n, x: cephes.eval_genlaguerre(n, alpha, x)
    df = lambda n, x: (n*cephes.eval_genlaguerre(n, alpha, x)
                       - (n + alpha)*cephes.eval_genlaguerre(n-1, alpha, x))/x
    return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu)
def genlaguerre(n, alpha, monic=False):
    """Returns the nth order generalized (associated) Laguerre polynomial,
    L^(alpha)_n(x), orthogonal over [0,inf) with weighting function
    exp(-x) x**alpha with alpha > -1
    """
    # `any` is numpy's (imported above), so this also works for array alpha.
    if any(alpha <= -1):
        raise ValueError("alpha must be > -1")
    if n < 0:
        raise ValueError("n must be nonnegative.")
    # For n == 0 compute degree-1 quadrature data (la_roots requires n >= 1)
    # and discard it below; only the norm/leading-coefficient matter.
    if n == 0:
        n1 = n + 1
    else:
        n1 = n
    x, w, mu0 = la_roots(n1, alpha, mu=True)
    wfunc = lambda x: exp(-x) * x**alpha
    if n == 0:
        x, w = [], []
    # hn: squared norm of L^(alpha)_n; kn: leading coefficient.
    hn = _gam(n + alpha + 1) / _gam(n + 1)
    kn = (-1)**n / _gam(n + 1)
    p = orthopoly1d(x, w, hn, kn, wfunc, (0, inf), monic,
                    lambda x: eval_genlaguerre(n, alpha, x))
    return p
# Laguerre L_n(x)
def l_roots(n, mu=False):
    """Gauss-Laguerre quadrature.

    Return the nodes ``x`` (roots of the degree-`n` Laguerre polynomial
    L_n) and weights ``w`` that exactly integrate polynomials of degree
    at most ``2*n - 1`` over ``[0, inf]`` against ``exp(-x)``.  When
    ``mu`` is True the sum of the weights is returned as a third value.

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    numpy.polynomial.laguerre.laggauss
    """
    # L_n is the alpha = 0 special case of the generalized Laguerre family.
    return la_roots(n, 0.0, mu=mu)
def laguerre(n, monic=False):
    """Return the nth order Laguerre polynomial, L_n(x), orthogonal over
    [0,inf) with weighting function exp(-x)
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")
    # l_roots needs a strictly positive order; for n == 0 request one node
    # and throw it away below.
    if n == 0:
        n1 = n + 1
    else:
        n1 = n
    x, w, mu0 = l_roots(n1, mu=True)
    if n == 0:
        x, w = [], []
    # hn: squared norm (1 for Laguerre); kn: leading coefficient.
    hn = 1.0
    kn = (-1)**n / _gam(n + 1)
    p = orthopoly1d(x, w, hn, kn, lambda x: exp(-x), (0, inf), monic,
                    lambda x: eval_laguerre(n, x))
    return p
# Hermite 1 H_n(x)
def h_roots(n, mu=False):
    """Gauss-Hermite (physicist's) quadrature
    Computes the sample points and weights for Gauss-Hermite quadrature.
    The sample points are the roots of the `n`th degree Hermite polynomial,
    :math:`H_n(x)`. These sample points and weights correctly integrate
    polynomials of degree :math:`2*n - 1` or less over the interval
    :math:`[-inf, inf]` with weight function :math:`f(x) = e^{-x^2}`.
    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, return the sum of the weights, optional.
    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights
    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    numpy.polynomial.hermite.hermgauss
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError("n must be a positive integer.")
    # Zeroth moment of exp(-x**2) over the real line is sqrt(pi).
    mu0 = np.sqrt(np.pi)
    # Recurrence coefficients (a_k is identically zero), the polynomial
    # evaluator, and its derivative H'_n = 2 n H_{n-1}.
    an_func = lambda k: 0.0*k
    bn_func = lambda k: np.sqrt(k/2.0)
    f = cephes.eval_hermite
    df = lambda n, x: 2.0 * n * cephes.eval_hermite(n-1, x)
    return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
def hermite(n, monic=False):
    """Return the nth order Hermite polynomial, H_n(x), orthogonal over
    (-inf,inf) with weighting function exp(-x**2)
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")
    # h_roots requires a strictly positive order; the degree-0 polynomial
    # is built from empty node/weight lists below.
    order = n if n > 0 else 1
    x, w, mu0 = h_roots(order, mu=True)
    weight = lambda x: exp(-x * x)
    if n == 0:
        x, w = [], []
    # hn: squared norm; kn: leading coefficient of H_n.
    hn = 2**n * _gam(n + 1) * sqrt(pi)
    kn = 2**n
    return orthopoly1d(x, w, hn, kn, weight, (-inf, inf), monic,
                       lambda x: eval_hermite(n, x))
# Hermite 2 He_n(x)
def he_roots(n, mu=False):
    """Gauss-Hermite (statistician's) quadrature
    Computes the sample points and weights for Gauss-Hermite quadrature.
    The sample points are the roots of the `n`th degree Hermite polynomial,
    :math:`He_n(x)`. These sample points and weights correctly integrate
    polynomials of degree :math:`2*n - 1` or less over the interval
    :math:`[-inf, inf]` with weight function :math:`f(x) = e^{-(x/2)^2}`.
    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, return the sum of the weights, optional.
    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights
    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    numpy.polynomial.hermite_e.hermegauss
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError("n must be a positive integer.")
    # Zeroth moment used by the shared root/weight helper.
    mu0 = np.sqrt(np.pi/2.0)
    # Recurrence coefficients (a_k identically zero), evaluator, and the
    # derivative He'_n = n He_{n-1}.
    an_func = lambda k: 0.0*k
    bn_func = lambda k: np.sqrt(k)
    f = cephes.eval_hermitenorm
    df = lambda n, x: n * cephes.eval_hermitenorm(n-1, x)
    return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
def hermitenorm(n, monic=False):
    """Return the nth order normalized Hermite polynomial, He_n(x), orthogonal
    over (-inf,inf) with weighting function exp(-(x/2)**2)
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")
    # he_roots requires a strictly positive order; the degree-0 polynomial
    # is built from empty node/weight lists below.
    order = n if n > 0 else 1
    x, w, mu0 = he_roots(order, mu=True)
    weight = lambda x: exp(-x * x / 4.0)
    if n == 0:
        x, w = [], []
    # hn: squared norm; kn: leading coefficient (He_n is monic by convention).
    hn = sqrt(2 * pi) * _gam(n + 1)
    kn = 1.0
    return orthopoly1d(x, w, hn, kn, wfunc=weight, limits=(-inf, inf),
                       monic=monic,
                       eval_func=lambda x: eval_hermitenorm(n, x))
# The remainder of the polynomials can be derived from the ones above.
# Ultraspherical (Gegenbauer) C^(alpha)_n(x)
def cg_roots(n, alpha, mu=False):
    """Gauss-Gegenbauer quadrature
    Computes the sample points and weights for Gauss-Gegenbauer quadrature.
    The sample points are the roots of the `n`th degree Gegenbauer polynomial,
    :math:`C^{\\alpha}_n(x)`. These sample points and weights correctly
    integrate polynomials of degree :math:`2*n - 1` or less over the interval
    :math:`[-1, 1]` with weight function :math:`f(x) = (1-x^2)^{\\alpha-1/2}`.
    Parameters
    ----------
    n : int
        quadrature order
    alpha : float
        alpha must be > -0.5
    mu : bool, optional
        If True, return the sum of the weights, optional.
    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights
    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError("n must be a positive integer.")
    if alpha < -0.5:
        raise ValueError("alpha must be greater than -0.5.")
    elif alpha == 0.0:
        # C(n,0,x) == 0 uniformly, however, as alpha->0, C(n,alpha,x)->T(n,x)
        # strictly, we should just error out here, since the roots are not
        # really defined, but we used to return something useful, so let's
        # keep doing so.
        return t_roots(n, mu)
    # Zeroth moment of (1 - x**2)**(alpha - 1/2) over [-1, 1].
    mu0 = np.sqrt(np.pi) * cephes.gamma(alpha + 0.5) / cephes.gamma(alpha + 1)
    # Recurrence coefficients (a_k identically zero by symmetry), the
    # polynomial evaluator, and its derivative.
    an_func = lambda k: 0.0 * k
    bn_func = lambda k: np.sqrt(k * (k + 2 * alpha - 1)
                        / (4 * (k + alpha) * (k + alpha - 1)))
    f = lambda n, x: cephes.eval_gegenbauer(n, alpha, x)
    df = lambda n, x: (-n*x*cephes.eval_gegenbauer(n, alpha, x)
                       + (n + 2*alpha - 1)*cephes.eval_gegenbauer(n-1, alpha, x))/(1-x**2)
    return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
def gegenbauer(n, alpha, monic=False):
    """Return the nth order Gegenbauer (ultraspherical) polynomial,
    C^(alpha)_n(x), orthogonal over [-1,1] with weighting function
    (1-x**2)**(alpha-1/2) with alpha > -1/2
    """
    # C^(alpha)_n is a rescaled Jacobi polynomial P^(alpha-1/2, alpha-1/2)_n.
    base = jacobi(n, alpha - 0.5, alpha - 0.5, monic=monic)
    if monic:
        return base
    # Abramowitz and Stegun 22.5.20
    factor = (_gam(2*alpha + n) * _gam(alpha + 0.5) /
              _gam(2*alpha) / _gam(alpha + 0.5 + n))
    base._scale(factor)
    # Replace the Jacobi evaluator with the native Gegenbauer one.
    base.__dict__['_eval_func'] = lambda x: eval_gegenbauer(float(n), alpha, x)
    return base
# Chebyshev of the first kind: T_n(x) =
# n! sqrt(pi) / _gam(n+1./2)* P^(-1/2,-1/2)_n(x)
# Computed anew.
def t_roots(n, mu=False):
    """Gauss-Chebyshev (first kind) quadrature.

    Return the nodes ``x`` (the `n` roots of the Chebyshev polynomial
    T_n, in ascending order) and weights ``w`` that exactly integrate
    polynomials of degree at most ``2*n - 1`` over ``[-1, 1]`` against
    the weight ``1/sqrt(1 - x**2)``.  When ``mu`` is True the sum of the
    weights (``pi``) is returned as a third value.

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    numpy.polynomial.chebyshev.chebgauss
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError('n must be a positive integer.')
    # Closed form: x_k = cos((2k - 1) pi / (2 m)) for k = m, ..., 1, with
    # the constant weight pi / m for every node.
    x = np.cos(np.arange(2 * m - 1, 0, -2) * pi / (2 * m))
    w = np.full_like(x, pi / m)
    if mu:
        return x, w, pi
    else:
        return x, w
def chebyt(n, monic=False):
    """Return nth order Chebyshev polynomial of first kind, Tn(x). Orthogonal
    over [-1,1] with weight function (1-x**2)**(-1/2).
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")
    weight = lambda x: 1.0 / sqrt(1 - x * x)
    if n == 0:
        # Degree zero has no roots; build the constant polynomial directly.
        return orthopoly1d([], [], pi, 1.0, weight, (-1, 1), monic,
                           lambda x: eval_chebyt(n, x))
    x, w, mu = t_roots(n, mu=True)
    # hn: squared norm; kn: leading coefficient of T_n.
    hn = pi / 2
    kn = 2**(n - 1)
    return orthopoly1d(x, w, hn, kn, weight, (-1, 1), monic,
                       lambda x: eval_chebyt(n, x))
# Chebyshev of the second kind
# U_n(x) = (n+1)! sqrt(pi) / (2*_gam(n+3./2)) * P^(1/2,1/2)_n(x)
def u_roots(n, mu=False):
    """Gauss-Chebyshev (second kind) quadrature.

    Return the nodes ``x`` (the `n` roots of the Chebyshev polynomial
    U_n, in ascending order) and weights ``w`` that exactly integrate
    polynomials of degree at most ``2*n - 1`` over ``[-1, 1]`` against
    the weight ``sqrt(1 - x**2)``.  When ``mu`` is True the sum of the
    weights (``pi / 2``) is returned as a third value.

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError('n must be a positive integer.')
    # Closed form: nodes are cos(k pi / (m + 1)) with weights
    # pi * sin(k pi / (m + 1))**2 / (m + 1), taken for k = m, ..., 1 so
    # the nodes come out ascending.
    angles = np.arange(m, 0, -1) * pi / (m + 1)
    x = np.cos(angles)
    w = pi * np.sin(angles)**2 / (m + 1)
    if mu:
        return x, w, pi / 2
    else:
        return x, w
def chebyu(n, monic=False):
    """Return nth order Chebyshev polynomial of second kind, Un(x). Orthogonal
    over [-1,1] with weight function (1-x**2)**(1/2).
    """
    # U_n is the Jacobi polynomial P^(1/2, 1/2)_n up to a normalization
    # factor; the monic version needs no rescaling.
    p = jacobi(n, 0.5, 0.5, monic=monic)
    if monic:
        return p
    p._scale(sqrt(pi) / 2.0 * _gam(n + 2) / _gam(n + 1.5))
    return p
# Chebyshev of the first kind C_n(x)
def c_roots(n, mu=False):
    """Gauss-Chebyshev (first kind) quadrature on ``[-2, 2]``.

    Return the nodes ``x`` (roots of the degree-`n` Chebyshev polynomial
    C_n) and weights ``w`` that exactly integrate polynomials of degree
    at most ``2*n - 1`` over ``[-2, 2]`` against ``1/sqrt(1 - (x/2)**2)``.
    When ``mu`` is True the sum of the weights is returned as well.

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    """
    # C_n(x) = 2 * T_n(x / 2): reuse the T_n nodes, stretched by 2; the
    # weights (and weight sum, when requested) are unchanged.
    res = t_roots(n, mu)
    return (2 * res[0],) + res[1:]
def chebyc(n, monic=False):
    """Return nth order Chebyshev polynomial of first kind, Cn(x). Orthogonal
    over [-2,2] with weight function (1-(x/2)**2)**(-1/2).
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")
    # c_roots needs a strictly positive order; for n == 0 request one node
    # and discard it below.
    if n == 0:
        n1 = n + 1
    else:
        n1 = n
    x, w, mu0 = c_roots(n1, mu=True)
    if n == 0:
        x, w = [], []
    hn = 4 * pi * ((n == 0) + 1)
    kn = 1.0
    p = orthopoly1d(x, w, hn, kn,
                    wfunc=lambda x: 1.0 / sqrt(1 - x * x / 4.0),
                    limits=(-2, 2), monic=monic)
    if not monic:
        # Rescale so that p(2) == 2, then install the native evaluator.
        p._scale(2.0 / p(2))
        p.__dict__['_eval_func'] = lambda x: eval_chebyc(n, x)
    return p
# Chebyshev of the second kind S_n(x)
def s_roots(n, mu=False):
    """Gauss-Chebyshev (second kind) quadrature on ``[-2, 2]``.

    Return the nodes ``x`` (roots of the degree-`n` Chebyshev polynomial
    S_n) and weights ``w`` that exactly integrate polynomials of degree
    at most ``2*n - 1`` over ``[-2, 2]`` against ``sqrt(1 - (x/2)**2)``.
    When ``mu`` is True the sum of the weights is returned as well.

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    """
    # S_n(x) = U_n(x / 2): reuse the U_n nodes, stretched by 2; the
    # weights (and weight sum, when requested) are unchanged.
    res = u_roots(n, mu)
    return (2 * res[0],) + res[1:]
def chebys(n, monic=False):
    """Return nth order Chebyshev polynomial of second kind, Sn(x). Orthogonal
    over [-2,2] with weight function (1-(x/2)**2)**(1/2).
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")
    # s_roots needs a strictly positive order; for n == 0 request one node
    # and discard it below.
    if n == 0:
        n1 = n + 1
    else:
        n1 = n
    x, w, mu0 = s_roots(n1, mu=True)
    if n == 0:
        x, w = [], []
    hn = pi
    kn = 1.0
    p = orthopoly1d(x, w, hn, kn,
                    wfunc=lambda x: sqrt(1 - x * x / 4.0),
                    limits=(-2, 2), monic=monic)
    if not monic:
        # Rescale so that p(2) == n + 1, then install the native evaluator.
        factor = (n + 1.0) / p(2)
        p._scale(factor)
        p.__dict__['_eval_func'] = lambda x: eval_chebys(n, x)
    return p
# Shifted Chebyshev of the first kind T^*_n(x)
def ts_roots(n, mu=False):
    """Gauss-Chebyshev (first kind, shifted) quadrature.

    Return the nodes ``x`` (roots of the degree-`n` shifted Chebyshev
    polynomial of the first kind) and weights ``w`` that exactly
    integrate polynomials of degree at most ``2*n - 1`` over ``[0, 1]``
    against ``1/sqrt(x - x**2)``.  When ``mu`` is True the sum of the
    weights is returned as well.

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    """
    # Map the standard T_n nodes from [-1, 1] onto [0, 1]; weights (and
    # the weight sum, when requested) are unchanged.
    res = t_roots(n, mu)
    shifted = (res[0] + 1) / 2
    return (shifted,) + res[1:]
def sh_chebyt(n, monic=False):
    """Return nth order shifted Chebyshev polynomial of first kind, Tn(x).
    Orthogonal over [0,1] with weight function (x-x**2)**(-1/2).
    """
    # T*_n is the shifted Jacobi polynomial G_n(0, 1/2, x) up to a
    # leading-coefficient normalization.
    p = sh_jacobi(n, 0.0, 0.5, monic=monic)
    if monic:
        return p
    # Leading coefficient of T*_n is 4**n / 2 for n > 0 and 1 for n == 0.
    p._scale(4**n / 2.0 if n > 0 else 1.0)
    return p
# Shifted Chebyshev of the second kind U^*_n(x)
def us_roots(n, mu=False):
    """Gauss-Chebyshev (second kind, shifted) quadrature.

    Return the nodes ``x`` (roots of the degree-`n` shifted Chebyshev
    polynomial of the second kind) and weights ``w`` that exactly
    integrate polynomials of degree at most ``2*n - 1`` over ``[0, 1]``
    against ``sqrt(x - x**2)``.  When ``mu`` is True the sum of the
    weights is returned as well.

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    """
    # Map the standard U_n nodes from [-1, 1] onto [0, 1]; weights (and
    # the weight sum, when requested) are unchanged.
    res = u_roots(n, mu)
    shifted = (res[0] + 1) / 2
    return (shifted,) + res[1:]
def sh_chebyu(n, monic=False):
    """Return nth order shifted Chebyshev polynomial of second kind, Un(x).
    Orthogonal over [0,1] with weight function (x-x**2)**(1/2).
    """
    # U*_n is the shifted Jacobi polynomial G_n(2, 3/2, x) scaled so the
    # leading coefficient is 4**n; the monic version needs no rescaling.
    p = sh_jacobi(n, 2.0, 1.5, monic=monic)
    if monic:
        return p
    p._scale(4**n)
    return p
# Legendre
def p_roots(n, mu=False):
    """Gauss-Legendre quadrature
    Computes the sample points and weights for Gauss-Legendre quadrature.
    The sample points are the roots of the `n`th degree Legendre polynomial
    :math:`P_n(x)`. These sample points and weights correctly integrate
    polynomials of degree :math:`2*n - 1` or less over the interval
    :math:`[-1, 1]` with weight function :math:`f(x) = 1.0`.
    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, return the sum of the weights, optional.
    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights
    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    numpy.polynomial.legendre.leggauss
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError("n must be a positive integer.")
    # Zeroth moment of the unit weight over [-1, 1].
    mu0 = 2.0
    # Recurrence coefficients (a_k identically zero by symmetry), the
    # polynomial evaluator, and its derivative.
    an_func = lambda k: 0.0 * k
    bn_func = lambda k: k * np.sqrt(1.0 / (4 * k * k - 1))
    f = cephes.eval_legendre
    df = lambda n, x: (-n*x*cephes.eval_legendre(n, x)
                       + n*cephes.eval_legendre(n-1, x))/(1-x**2)
    return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
def legendre(n, monic=False):
    """
    Legendre polynomial coefficients
    Returns the nth-order Legendre polynomial, P_n(x), orthogonal over
    [-1, 1] with weight function 1.
    Parameters
    ----------
    n
        Order of the polynomial
    monic : bool, optional
        If True, output is a monic polynomial (normalized so the leading
        coefficient is 1). Default is False.
    Returns
    -------
    P : orthopoly1d
        The Legendre polynomial object
    Examples
    --------
    Generate the 3rd-order Legendre polynomial 1/2*(5x^3 + 0x^2 - 3x + 0):
    >>> legendre(3)
    poly1d([ 2.5,  0. , -1.5, -0. ])
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")
    # p_roots requires a strictly positive order; the degree-0 polynomial
    # is built from empty node/weight lists below.
    order = n if n > 0 else 1
    x, w, mu0 = p_roots(order, mu=True)
    if n == 0:
        x, w = [], []
    # hn: squared norm of P_n; kn: its leading coefficient.
    hn = 2.0 / (2 * n + 1)
    kn = _gam(2 * n + 1) / _gam(n + 1)**2 / 2.0**n
    return orthopoly1d(x, w, hn, kn, wfunc=lambda x: 1.0, limits=(-1, 1),
                       monic=monic, eval_func=lambda x: eval_legendre(n, x))
# Shifted Legendre P^*_n(x)
def ps_roots(n, mu=False):
    """Gauss-Legendre (shifted) quadrature.

    Return the nodes ``x`` (roots of the degree-`n` shifted Legendre
    polynomial P*_n) and weights ``w`` that exactly integrate
    polynomials of degree at most ``2*n - 1`` over ``[0, 1]`` with unit
    weight.  When ``mu`` is True the sum of the weights is returned as
    well.

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    """
    # Map the standard P_n nodes from [-1, 1] onto [0, 1]; weights (and
    # the weight sum, when requested) are unchanged.
    res = p_roots(n, mu)
    shifted = (res[0] + 1) / 2
    return (shifted,) + res[1:]
def sh_legendre(n, monic=False):
    """Returns the nth order shifted Legendre polynomial, P^*_n(x), orthogonal
    over [0,1] with weighting function 1.
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")
    unit_weight = lambda x: 0.0 * x + 1.0
    if n == 0:
        # Degree zero has no roots; build the constant polynomial directly.
        return orthopoly1d([], [], 1.0, 1.0, unit_weight, (0, 1), monic,
                           lambda x: eval_sh_legendre(n, x))
    x, w, mu0 = ps_roots(n, mu=True)
    # hn: squared norm of P*_n; kn: its leading coefficient.
    hn = 1.0 / (2 * n + 1.0)
    kn = _gam(2 * n + 1) / _gam(n + 1)**2
    return orthopoly1d(x, w, hn, kn, unit_weight, limits=(0, 1), monic=monic,
                       eval_func=lambda x: eval_sh_legendre(n, x))
# -----------------------------------------------------------------------------
# Vectorized functions for evaluation
# -----------------------------------------------------------------------------
from ._ufuncs import (binom, eval_jacobi, eval_sh_jacobi, eval_gegenbauer,
eval_chebyt, eval_chebyu, eval_chebys, eval_chebyc,
eval_sh_chebyt, eval_sh_chebyu, eval_legendre,
eval_sh_legendre, eval_genlaguerre, eval_laguerre,
eval_hermite, eval_hermitenorm)
|
nvoron23/scipy
|
scipy/special/orthogonal.py
|
Python
|
bsd-3-clause
| 34,897
|
[
"Gaussian"
] |
dea1c27161e265a28f68705c91469e1dedf0b7bb4ae5d1f60dc9c92db6d2abf5
|
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
import module_utils
import vtk
class manualTransform(ScriptedConfigModuleMixin, ModuleBase):
    # DeVIDE module exposing a vtkTransform whose scale, orientation and
    # translation are edited through the scripted-config GUI.
    def __init__(self, module_manager):
        """Set up config defaults, the vtkTransform, and the config GUI."""
        ModuleBase.__init__(self, module_manager)
        self._config.scale = (1.0, 1.0, 1.0)
        self._config.orientation = (0.0, 0.0, 0.0)
        self._config.translation = (0.0, 0.0, 0.0)
        configList = [
            ('Scaling:', 'scale', 'tuple:float,3', 'tupleText',
             'Scale factor in the x, y and z directions in world units.'),
            ('Orientation:', 'orientation', 'tuple:float,3', 'tupleText',
             'Rotation, in order, around the x, the new y and the new z axes '
             'in degrees.'),
            ('Translation:', 'translation', 'tuple:float,3', 'tupleText',
             'Translation in the x,y,z directions.')]
        self._transform = vtk.vtkTransform()
        # we want changes here to happen AFTER the transformations
        # represented by the input
        self._transform.PostMultiply()
        # has no progress!
        ScriptedConfigModuleMixin.__init__(
            self, configList,
            {'Module (self)' : self,
             'vtkTransform' : self._transform})
        self.sync_module_logic_with_config()
    def close(self):
        """Disconnect inputs, tear down the GUI, and release the transform."""
        # we play it safe... (the graph_editor/module_manager should have
        # disconnected us by now)
        for input_idx in range(len(self.get_input_descriptions())):
            self.set_input(input_idx, None)
        # this will take care of all display thingies
        ScriptedConfigModuleMixin.close(self)
        # get rid of our reference
        del self._transform
    def execute_module(self):
        # Recompute the transform from its current settings.
        self._transform.Update()
    def get_input_descriptions(self):
        # This module takes no inputs.
        return ()
    def set_input(self, idx, inputStream):
        # No inputs are accepted; any attempt to connect one is an error.
        raise Exception
    def get_output_descriptions(self):
        return ('VTK Transform',)
    def get_output(self, idx):
        # Single output: the shared vtkTransform instance.
        return self._transform
    def logic_to_config(self):
        """Copy the transform's current state back into the config."""
        self._config.scale = self._transform.GetScale()
        self._config.orientation = self._transform.GetOrientation()
        self._config.translation = self._transform.GetPosition()
    def config_to_logic(self):
        """Rebuild the transform from the config values."""
        # we have to reset the transform first
        self._transform.Identity()
        self._transform.Scale(self._config.scale)
        self._transform.RotateX(self._config.orientation[0])
        self._transform.RotateY(self._config.orientation[1])
        self._transform.RotateZ(self._config.orientation[2])
        self._transform.Translate(self._config.translation)
|
nagyistoce/devide
|
modules/misc/manualTransform.py
|
Python
|
bsd-3-clause
| 2,750
|
[
"VTK"
] |
b7bd546c6a3265983a0e1da71e5e644166aa3c6a1c264ea98439609de9c936d4
|
#!/usr/bin/env python3
"""
Copyright 2020 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
from http import cookies
import dbSession
import dbShared
import ghShared
import cgi
import pymysql
import ghNames
#
# Get current url
try:
	url = os.environ['SCRIPT_NAME']
except KeyError:
	url = ''
form = cgi.FieldStorage()
# Get Cookies
errorstr = ''
C = cookies.SimpleCookie()
try:
	C.load(os.environ['HTTP_COOKIE'])
except KeyError:
	errorstr = 'no cookies\n'
if errorstr == '':
	try:
		currentUser = C['userID'].value
	except KeyError:
		currentUser = ''
	try:
		loginResult = C['loginAttempt'].value
	except KeyError:
		loginResult = 'success'
	try:
		sid = C['gh_sid'].value
	except KeyError:
		sid = form.getfirst('gh_sid', '')
else:
	currentUser = ''
	loginResult = 'success'
	sid = form.getfirst('gh_sid', '')
# CGI parameters identifying the spawn and the requested availability change.
spawnName = form.getfirst('spawn', '')
galaxy = form.getfirst('galaxy', '')
planets = form.getfirst('planets', '')
spawnID = form.getfirst('spawnID', '')
availability = form.getfirst('availability', '')
# escape input to prevent sql injection
# NOTE(review): all queries below are still built by string concatenation and
# rely entirely on dbShared.dbInsertSafe for escaping; parameterized queries
# would be safer.
sid = dbShared.dbInsertSafe(sid)
spawnName = dbShared.dbInsertSafe(spawnName)
galaxy = dbShared.dbInsertSafe(galaxy)
planets = dbShared.dbInsertSafe(planets)
spawnID = dbShared.dbInsertSafe(spawnID)
availability = dbShared.dbInsertSafe(availability)
# Get a session
logged_state = 0
linkappend = ''
sess = dbSession.getSession(sid)
if (sess != ''):
	logged_state = 1
	currentUser = sess
	linkappend = 'gh_sid=' + sid
# Main program
print('Content-type: text/html\n')
if (logged_state > 0):
	try:
		conn = dbShared.ghConn()
		cursor = conn.cursor()
	except Exception:
		# NOTE(review): if ghConn() itself raises, `cursor` is never bound and
		# the `if (cursor)` test below raises NameError — confirm intent.
		result = "Error: could not connect to database"
	if (cursor):
		row = None
		enteredBy = ''
		# lookup spawn id if it was not passed
		if (spawnID == ''):
			if galaxy != '':
				cursor.execute('SELECT spawnID, resourceType, CR, CD, DR, FL, HR, MA, PE, OQ, SR, UT, ER, enteredBy FROM tResources WHERE galaxy=' + galaxy + ' AND spawnName="' + spawnName + '";')
				row = cursor.fetchone()
				if (row != None):
					spawnID = str(row[0])
					enteredBy = row[13]
		# lookup galaxy if it was not passed
		if (galaxy == ''):
			cursor.execute('SELECT galaxy FROM tResources WHERE spawnID=' + spawnID + ';')
			row = cursor.fetchone()
			if (row != None):
				galaxy = str(row[0])
		# lookup spawn name if it was not passed
		if (spawnName == ''):
			cursor.execute('SELECT spawnName, enteredBy FROM tResources WHERE spawnID=' + spawnID + ';')
			row = cursor.fetchone()
			if (row != None):
				spawnName = row[0]
				enteredBy = row[1]
		sqlStr = ""
		if (spawnID != '' and galaxy != ''):
			if (dbShared.galaxyState(galaxy) == 1):
				if (availability == "0"):
					# mark unavailable on planet(s)
					if (planets == "all"):
						sqlStr = "UPDATE tResourcePlanet SET unavailable=NOW(), unavailableBy='" + currentUser + "' WHERE spawnID=" + spawnID + ";"
						result = "Marked unavailable on all planets."
					else:
						sqlStr = "UPDATE tResourcePlanet SET unavailable=NOW(), unavailableBy='" + currentUser + "' WHERE spawnID=" + spawnID + " AND planetID=" + dbShared.dbInsertSafe(planets) + ";"
						result = "Marked unavailable on " + ghNames.getPlanetName(planets)
				elif (availability == "1"):
					# mark (re)available on planet
					cursor.execute("SELECT enteredBy, unavailable FROM tResourcePlanet WHERE spawnID=" + str(spawnID) + " AND planetID=" + str(planets) + ";")
					row = cursor.fetchone()
					if row == None:
						# insert spawn planet record
						sqlStr = "INSERT INTO tResourcePlanet (spawnID, planetID, entered, enteredBy) VALUES (" + str(spawnID) + "," + str(planets) + ",NOW(),'" + currentUser + "');"
						result = "Marked available on " + ghNames.getPlanetName(planets)
					else:
						sqlStr = "UPDATE tResourcePlanet SET unavailable = NULL WHERE spawnID=" + str(spawnID) + " AND planetID=" + str(planets) + ";"
						result = "Marked re-available on " + ghNames.getPlanetName(planets)
						availability = -1
				# Only allow update if user has positive reputation
				stats = dbShared.getUserStats(currentUser, galaxy).split(",")
				admin = dbShared.getUserAdmin(conn, currentUser, galaxy)
				if (int(stats[2]) < ghShared.MIN_REP_VALS['REMOVE_RESOURCE'] and enteredBy != currentUser) and availability == "0" and not admin:
					result = "Error: You must earn a little reputation on the site before you can edit resources.  Try adding or verifying some first. \r\n"
				else:
					# NOTE(review): if availability is neither "0" nor "1",
					# sqlStr is still "" here — confirm the driver tolerates it.
					cursor.execute(sqlStr)
					if (availability == "0"):
						# add cleanup event
						if not planets.isdigit():
							planets = 0
						dbShared.logEvent("INSERT INTO tResourceEvents (galaxy, spawnID, userID, eventTime, eventType, planetID) VALUES (" + str(galaxy) + "," + str(spawnID) + ",'" + currentUser + "',NOW(),'r'," + str(planets) + ");", 'r', currentUser, galaxy, str(spawnID))
					elif (availability == "1"):
						# add resource planet add event
						dbShared.logEvent("INSERT INTO tResourceEvents (galaxy, spawnID, userID, eventTime, eventType, planetID) VALUES (" + str(galaxy) + "," + str(spawnID) + ",'" + currentUser + "',NOW(),'p'," + str(planets) + ");", 'p', currentUser, galaxy, str(spawnID))
					result = spawnName
			else:
				result = "Error: That galaxy is currently inactive."
		else:
			result = "Error: You must provide a spawn ID or a spawn Name and galaxy ID"
		cursor.close()
	else:
		result = "Error: No existing resource data"
	conn.close()
else:
	result = "Error: You must be logged in to change resource availability."
print(result)
# Exit status mirrors an HTTP-like code: 500 on error text, 200 otherwise.
if (result.find("Error:") > -1):
	sys.exit(500)
else:
	sys.exit(200)
|
pwillworth/galaxyharvester
|
html/changeAvailability.py
|
Python
|
gpl-3.0
| 6,314
|
[
"Galaxy"
] |
d5508fd3931798107ca10693bc0cd4bff596e15788519e68a66f40bd66f6a936
|
import Bio
import os
import numpy as np
import random as r
import time
import sys
#print sys.path
#sys.path.append("C:\\Users\\Andrey\\SkyDrive\\Guttlab\\Python")
import pydna
import genbankUtils as gbU
import fileUtils as filU
import bioUtils as biU
import stochastickinetics as stoc
def makeOverhangs(seqList,length,digest='5prime',cutoff=0,concentration = 100):
    """makes a list of overhangs produced by gibson assembly
    from a list of dsDNA sequences. Cutoff is for 5' restriction
    overhangs!"""
    # Returns (overhang array, per-end polymer lists, polymer dict, and the
    # per-fragment concentration list X).  Python 2 code.
    overarry = None
    endlist = [[] for a in range(len(seqList)*2)]
    polymerlist = {}
    X = []
    for sequenceNum in range(len(seqList)):
        sequence = seqList[sequenceNum][1]
        #print "sequence now is {}".format(sequence)
        if(cutoff > 0):
            sequence = sequence[cutoff:-cutoff]
        odelseq = sequence[length:-length]
        # NOTE(review): `concentration` is rebound each iteration, so every
        # fragment's draw uses the previous draw as its new mean — confirm
        # this drift is intended rather than drawing around the original value.
        concentration = int(stoc.random.normalvariate(concentration,concentration*.05))
        #calculate a random concentration right here!!
        X+=[concentration,concentration] #both fragments at the same concentration!
        over1 = {'oseq':sequence[:length], \
            'midseq':odelseq,\
            'attheend':0,\
            'sequencenumber':sequenceNum}
        over2 = {'oseq':sequence[-length:],\
            'midseq':odelseq,\
            'attheend':1,\
            'sequencenumber':sequenceNum}
        polymername = "{:03}".format(sequenceNum)
        polymerlist[polymername]=(concentration,sequenceNum*2,sequenceNum*2+1)
        endlist[sequenceNum*2].append((polymername,0))
        endlist[sequenceNum*2+1].append((polymername,1))
        if(digest == '5prime'):
            over1['oseq'] = biU.rc(over1['oseq'])
        else:
            over2['oseq'] = biU.rc(over2['oseq'])
        #print "over1 is {}".format(over1['oseq'])
        #print "over2 is {}".format(over2['oseq'])
        # NOTE(review): `overarry == None` element-wise-compares once the
        # array exists; `overarry is None` is the safe identity test.
        if(overarry == None):
            #print "newarray"
            overarry = np.array([over1,over2])
        else:
            #print "append"
            overarry = np.append(overarry,[over1,over2])
    #print overarry
    # Write a FASTA "database" of all overhang ends (Windows-style path).
    enddatabase = open(gbU.guttDNA+'\\temp\\testdb.fas','wb')
    for end in overarry:
        enddatabase.write(">{}_{}\r\n{}\r\n".format(end['sequencenumber'],end['attheend'],end['oseq']))
    enddatabase.close()
    return overarry,endlist,polymerlist,X
def readBlast(infile,threshold = 25):
    # Parse a CSV-format (outfmt 10) BLAST result file into a list of
    # (end-index, end-index, score) tuples; only hits with more than
    # `threshold` matching bases are kept.
    # NOTE(review): the file handle is never closed — consider `with open`.
    blastfile = open(infile,'rb')
    matchlist = []
    for line in blastfile:
        #print "the line is {}".format(line)
        if( len(line) < 5):
            continue;
        lspl = line.split(',')
        # Query/subject ids are "<sequence>_<end>"; flatten to a single
        # end index: sequence * 2 + end.
        query = lspl[0].split("_")
        subject = lspl[1].split("_")
        reactant1 = int(query[0])*2+int(query[1])#"{:03}".format(int(query[0])*2+int(query[1]))
        reactant2 = int(subject[0])*2+int(subject[1])#"{:03}".format(int(subject[0])*2+int(subject[1]))
        aligned = lspl[3]
        mismatched = lspl[4]
        matched = int(aligned)-int(mismatched)
        # Score scales linearly with match count, 0.5 at the threshold.
        score = float(matched)/float(threshold)*0.5
        if(matched > threshold):
            matchlist += [(reactant1,reactant2,score)]
    return matchlist
def makeBLASTDatabase(dbfilename,makeblastdb = "",outfilename="outdb"):
"""creates a BLAST database using the makeblastdb function"""
if(makeblastdb==""):
makeblastdb = "makeblastdb -dbtype {} -out \"{}\" -title {} -in \"{}\""
os.system(makeblastdb.format('nucl',gbU.guttDNA+"temp\\"+outfilename,outfilename,dbfilename))
print "made db!"
def blastSearch(querypath,outpath=gbU.guttDNA+"temp\\searchout.txt",dbpath=gbU.guttDNA+"temp\\outdb",blastdbcommand = ""):
    """searches a given database with a given query file"""
    # NOTE(review): the default paths are evaluated once at import time from
    # gbU.guttDNA, and the command is executed via os.system (shell string).
    if(blastdbcommand == ""):
        blastdbcommand = "blastn -query \"{}\" -db \"{}\" -evalue {} -task {} -strand plus -outfmt 10 -out \"{}\""
    os.system(blastdbcommand.format(querypath,dbpath,0.01,'blastn-short',outpath))
    print "search done!"
def partD(X,R,P,endlist,time = 1.0,makeMovie = True,frames=300,movieout = "C:\\Users\\Andrey\\Desktop\\movie"):
    # Run the stochastic assembly simulation (stoc.timestep) until the
    # simulated clock reaches `time`, optionally saving one matplotlib frame
    # per `frametime` into `movieout`.  Returns (time points, final polymer
    # size histogram, final polymer dict).
    # NOTE(review): the `time` parameter shadows any imported time module
    # inside this function; `count` and `mult` are computed but unused.
    #time = 1.0 #1 second
    frametime = time/frames
    #X = [1,1,100]
    #X[2] = int(random.random()*20+90)
    accX = [biU.dc(X)]
    accT = [0]
    worktime = 0.0
    sizelist = [0]*(len(X))
    stoc.plt.ion()
    fig = stoc.plt.figure(figsize=(5,5))
    ax = fig.add_subplot(111)
    line, = ax.plot(sizelist)
    stoc.plt.xlim([1,len(X)/2])
    stoc.plt.ylim([0,X[0]/2])
    count = 0
    lastframe = 0
    while worktime < time:
        count+=1
        #print 'iteration!'
        t,newX,newendlist,newP = stoc.timestep(X,R,P,endlist)
        #print t
        #accX=newP
        ''
        klist = newP.keys()
        sizelist = [0]*(len(X))
        ''
        mult = 400
        # Emit a movie frame whenever the simulation clock crosses into a
        # new frame interval.
        if(int(worktime/frametime)>lastframe and makeMovie):
            nc = int(worktime/frametime)
            for polymer in klist:
                sizelist[len(polymer.split('_'))]+=newP[polymer][0]
            accX+=[biU.dc(sizelist)]
            #stoc.plt.clear()
            line.set_ydata(sizelist)
            stoc.plt.draw()
            fname = movieout+'\\_tmp%06d.png'%nc
            #print 'Saving frame', fname
            fig.savefig(fname)
            lastframe = int(worktime/frametime)
            #files.append(fname)
        #stoc.plt.show()
        #'''
        X = newX
        endlist = newendlist
        P = newP
        worktime+=t
        accT+=[worktime]
        print worktime/time
    #actual
    #print accX
    #print levelslist
    return accT,sizelist,newP
def run140716():
    # Driver for the 2014-07-16 experiment: build overhangs for the malat1
    # assembly, BLAST the ends against each other, run the stochastic
    # simulation, and print the five most abundant polymers.
    desktopPath = "C:\\Users\\Andrey\\Desktop\\"
    filePath = gbU.guttDNA
    print 'reading seqs'
    inseqs = filU.readOligoSeqs(filePath+'Assemblies\\malat1\\malat1.fas')
    print 'making overhangs'
    overarry, endlist, polymerlist,X = makeOverhangs(inseqs,50,concentration=2000)
    print 'endlist {} '.format(endlist[3])
    print 'polymerlist {} '.format(polymerlist)
    # Write the reverse-complemented ends as the BLAST query set.
    endsquery = open(filePath+'temp\\testquery.fas','wb')
    for end in overarry:
        endsquery.write("\n>{}_{}\n{}".format(end['sequencenumber'],end['attheend'],biU.rc(end['oseq'])))
    endsquery.close()
    print "making blast database"
    makeBLASTDatabase(filePath+"temp\\testdb.fas")
    print "performing a query"
    blastSearch(filePath+"temp\\testquery.fas")
    print 'matching ends'
    matchlist = readBlast(filePath+"temp\\searchout.txt")
    times,answer,polymers = partD(X,matchlist,polymerlist,endlist,frames=300,time = .5,movieout=desktopPath+"movie5")
    #print polymers
    # Report the five highest-count polymers.
    keypol = polymers.keys()
    pollist = []
    for key in keypol:
        pollist += [(polymers[key][0],key)]
    pollist = sorted(pollist)
    print pollist[-5:]
    #print len(answer)
    #print len(answer[0])
    #stoc.plt.plot(answer)#[times,answer])#imshow(answer[:200])
    #stoc.plt.show()
    #print matchlist
#print matchlist
run140716()
'''
C:\Program Files\NCBI\blast-2.2.29+\bin>makeblastdb.exe -dbtype nucl -out "C:\Users\Andrey\SkyDrive\Guttlab\DNA\temp\newDB" -title mydatabase -in "C:\Users\Andrey\SkyDrive\Guttlab\DNA\temp\WTassy.fas"
'''
|
dr3y/gibsonsimulator
|
gibson_simulator.py
|
Python
|
mit
| 7,516
|
[
"BLAST"
] |
eb271de90604c846849a35aa306bd270c21fe210bfaa5f3e8fbd606d1b93762b
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''Method collection to obtain optical system information
This module contains a method collection to obtain information, and analize
optical systems
'''
from pyoptools.raytrace.ray import Ray
from pyoptools.misc.pmisc import cross
from pyoptools.raytrace.system import System
from pyoptools.raytrace.component import Component
from pyoptools.raytrace.comp_lib import CCD
from pyoptools.raytrace.surface import Spherical
#from gui.plot_frame import PlotFrame
from pyoptools.raytrace.shape import Circular
from numpy import inf, sqrt, square, pi, dot, array, arctan2, alltrue, isnan,\
nan, mgrid,where
from scipy.optimize.minpack import fsolve
from numpy.random import normal
import multiprocessing as mp
#******Logger definition *******#
#import logging
#log= logging.getLogger("ray_trace.calc")
def intersection(r1,r2):
    '''
    Return the point of intersection between the rays r1 and r2.

    **Arguments:**

    == ===================================
    r1 First Ray to test for intersection
    r2 Second Ray to test for intersection
    == ===================================

    **Return Value:**

    Tuple (ip, rv) where ip is the intersection point (array of nan's when
    the rays are parallel or do not intersect) and rv is True when the
    intersection lies forward of both ray origins (real image) and False
    when it is behind either (virtual image).
    '''
    d1=r1.dir
    d2=r2.dir
    p1=r1.pos
    p2=r2.pos

    d1xd2=cross(d1,d2)
    # Parallel rays: the cross product of the directions is the zero vector.
    if dot(d1xd2,d1xd2)==0. :
        return array((nan,nan,nan)),False

    # Solve p1 + a*d1 == p2 + b*d2 componentwise; components where d1xd2 is
    # zero produce nan's and are discarded below.
    p2p1xv2=cross(p2-p1,d2)
    p2p1xv1=cross(p2-p1,d1)
    a=p2p1xv2/d1xd2
    b=p2p1xv1/d1xd2

    # Remove the nan entries from both parameter vectors.
    keep=~isnan(a)
    an=a[keep]
    keep=~isnan(b)
    bn=b[keep]
    ip=array((nan,nan,nan))
    rv=False
    # NOTE(review): numpy.alltrue was removed in NumPy 2.0; numpy.all is the
    # modern spelling.
    if len(an)>0:
        # Skew rays give inconsistent parameters; only accept when all the
        # remaining components agree on the same 'a'.
        if alltrue(an==an[0]) :
            ip=p1+an[0]*d1
            # Real intersection only when it is in front of both origins.
            if alltrue(an>=0) and alltrue(bn>=0):
                rv=True
    return ip,rv
def nearest_points(ray1, ray2):
    '''
    Return the pair of mutually closest points on two rays.

    Aberrated systems rarely produce rays that truly intersect; this routine
    finds the point on each ray closest to the other ray instead.  When the
    rays do intersect, both returned points coincide with the intersection.

    Formulas after:
    http://homepage.univie.ac.at/Franz.Vesely/notes/hard_sticks/hst/hst.html

    **Arguments:**

    == ===================================
    r1 First Ray to test for intersection
    r2 Second Ray to test for intersection
    == ===================================

    **Return Value**

    Tuple (p1, p2, d, rv): p1 lies on ray1 closest to ray2, p2 lies on ray2
    closest to ray1, d is the distance between them, and rv is True when
    both closest points are forward of their ray origins (real), False
    otherwise (virtual).
    '''
    o1, u1 = ray1.pos, ray1.dir
    o2, u2 = ray2.pos, ray2.dir
    sep = o2 - o1

    # Parametric positions of the closest points along each ray.
    c = dot(u1, u2)
    denom = 1 - c**2
    s1 = (dot(sep, u1) - dot(sep, u2)*c)/denom
    s2 = -(dot(sep, u2) - dot(sep, u1)*c)/denom

    q1 = o1 + s1*u1
    q2 = o2 + s2*u2

    # Real only when both closest points lie ahead of the ray origins.
    real = bool((s1 >= 0) and (s2 >= 0))
    return q1, q2, sqrt(dot(q1 - q2, q1 - q2)), real
def chief_ray_search(opsys,ccds,o=(0.,0.,0.),rt=(0.,0.,0.),er=0.1,w=pi/2.,maxiter=1000,wavelength=.58929):
    '''
    Random-search algorithm to find the chief ray of an optical system for a
    given object point.

    A test ray from object point `o`, rotated by `rt`, is propagated and its
    hit distance from the optical axis is measured on the detector `ccds`
    (which must be placed at the aperture plane).  New candidate rotations
    are drawn from a gaussian of width `w` around the current best; whenever
    a candidate hits closer to the axis it becomes the new best.  The search
    stops when the distance drops below `er` or after `maxiter` draws.

    ========== ======================================================
    opsys      Optical system that will be used to find the chief ray
    ccds       Detector placed in the aperture plane, centred on axis
    o          Object point coordinates (tuple/list/array)
    rt         Initial rotations applied to a +Z ray to get the first
               test ray (third component is unused)
    er         Maximum acceptable distance from the aperture centre
    w          Gaussian search width in radians (halved on stagnation)
    maxiter    Maximum number of random candidates to try
    wavelength Ray wavelength in micrometers (.58929 by default)
    ========== ======================================================

    **Return Value:** the chief ray found (Ray instance).

    .. todo::
        Implement a function similar to this one, using a minimization
        algorithm
    '''
    test_ray=Ray(wavelength=wavelength)
    opsys.clear_ray_list()

    btx,bty,btz=rt #btz is not used
    ntry=0          # total candidates tried
    nt=0            # candidates since last improvement (stagnation counter)
    # Evaluate the initial test ray.
    retray=test_ray.ch_coord_sys_inv(o,(btx,bty,0))
    opsys.clear_ray_list()
    opsys.reset()
    opsys.ray_add(retray)
    opsys.propagate()
    try:
        x,y,z=ccds.hit_list[0][0]
        dist=sqrt(square(x)+square(y))
    except:
        # Ray missed the detector entirely.
        dist=inf

    p_dist=dist     # best (smallest) axis distance so far

    while (p_dist> er)and (ntry<maxiter):
        ntry=ntry+1
        nt=nt+1
        # Draw a new candidate rotation around the current best.
        rx=normal(btx,w)
        ry=normal(bty,w)
        tray=test_ray.ch_coord_sys_inv(o,(rx,ry,0))

        opsys.clear_ray_list()
        opsys.reset()
        opsys.ray_add(tray)
        opsys.propagate()

        try:
            x,y,z=ccds.hit_list[0][0]
            dist=sqrt(square(x)+square(y))
        except:
            dist=inf

        if p_dist>dist:
            # Improvement: adopt this candidate as the new generator ray.
            btx=rx
            bty=ry
            p_dist=dist
            nt=0
            retray=tray
        if (nt>10)and p_dist<inf:
            # Stagnated for 10 draws while on-detector: narrow the search.
            nt=0
            w=w/2
            # Limit the minimum value of w.
            if w<.0000001: w=.0000001
    return retray
def pupil_location(opsys,ccds,opaxis):
'''
Function to find the optical system pupils position
.. note:
For this function to operate, the system should have a rotational
symmetry around the optical axis.
**Parameters:**
opsys Optical system to use.
opaxis Ray representing the optical axis
ccds Surface that represents a detector in the aperture plane
**Return Value**
(enpl,expl)
enpl tuple (xen,yen,zen) containing the entrance pupil coordinates
expl tuple (xex,yex,zex) containing the exit pupil coordinates
'''
#log.info("Propagate Optical axis ray")
opsys.clear_ray_list()
opsys.reset()
#opsys.ray_add(cray)
opsys.ray_add(opaxis)
opsys.propagate()
if (len(ccds.hit_list)==0):
raise Exception, "The optical axis did not intersect the aperture"
if(len(ccds.hit_list)>1):
raise Exception, "The optical axis intersected the aperture more than "
"once"
aip=ccds.hit_list[0][0]
air=ccds.hit_list[0][1]
#log.info("Optical Axis Intersection point= "+str(aip))
#log.info("Intersection Ray= "+str(air))
#Getting Intersection point in global coordinates
if(len(air.childs)!=1):
raise Exception, "The intersected ray can only have one child"
ip=air.childs[0].pos
d=air.childs[0].dir
#log.info("Intersection point in world coordinates= "+str(ip))
#log.info("Direction of the optical axis at the intersection point"+str(d))
#Todo: Check if the optical axis and the aperture are perpendicular
# Calculate vectors perpendicular to the optical axis and to the XYZ axes
pv1= cross(d,(0,0,1))
pv2= cross(d,(0,1,0))
pv3= cross(d,(1,0,0))
pv=[pv1,pv2,pv3]
# Search for the longest pv
pvn=array((dot(pv1,pv1),dot(pv2,pv2),dot(pv3,pv3)))
pvm=pv[pvn.argmax()]
#log.info("Displacement vector found: "+str(pvm))
# Create ray to calculate the exit pupil
expuray=air.childs[0].copy()
expuray.dir=expuray.dir+pvm*.0001
# Create the ray to calculate the entrance pupil
enpuray=expuray.reverse()
opsys.clear_ray_list()
opsys.reset()
opsys.ray_add(enpuray)
opsys.ray_add(expuray)
opsys.propagate()
enp=enpuray.get_final_rays(inc_zeros = False)
exp=expuray.get_final_rays(inc_zeros = False)
oax=opaxis.get_final_rays(inc_zeros = False)
#log.info("enp="+str(enp))
#log.info("exp="+str(exp))
#log.info("oax="+str(oax))
if len(enp)!=1 or len(exp)!=1 or len(oax)!=1:
raise Exception, "The principal ray or the optical axis ray have more"
" than one final ray"
#log.info("Calculating entrance pupil location")
# Find the nearest points between the rays.
# Some times because of numerical errors, or some aberrations in the optical
# system, the rays do not trully intersect.
# Use instead the nearest points and issue a warning when the rays do not trully
# intersect.
enpl=intersection(opaxis,enp[0])[0]
if (isnan(enpl)).all():
p1, p2, d, rv =nearest_points(opaxis,enp[0])
print"Warning: The optical axis does not intersect the principal ray at the entrance"
print "pupil. The minimum distance is:", d
enpl=(p1+p2)/2
#log.info("Calculating exit pupil location")
expl=intersection(oax[0],exp[0])[0]
if (isnan(expl)).all():
p1, p2, d, rv =nearest_points(oax[0],exp[0])
print"Warning: The optical axis does not intersect the principal ray at the exit"
print "pupil. The minimum distance is:", d
expl=(p1+p2)/2
return enpl,expl
def paraxial_location(opsys, opaxis):
    """Function to find the paraxial image location.

    Finds the paraxial image location of a point located on the optical
    axis, returning (image_location, real_virtual) where real_virtual is a
    boolean indicating a real (True) or virtual (False) image.  The origin
    of the opaxis location is taken as the object location.

    Parameters:

    *opsys*
        Optical system to use.
    *opaxis*
        Ray representing the optical axis.

    For this function to operate, the system should have a rotational
    symmetry around the optical axis.

    Raises Exception when the paraxial or axis ray splits into multiple
    final rays.
    """
    # Propagate the optical axis ray.
    opsys.clear_ray_list()
    opsys.reset()
    opsys.ray_add(opaxis)
    opsys.propagate()

    # Calculate vectors perpendicular to the optical axis and to the XYZ
    # axes, keeping the longest as a stable perturbation direction.
    d=opaxis.dir
    pv1= cross(d,(0,0,1))
    pv2= cross(d,(0,1,0))
    pv3= cross(d,(1,0,0))
    pv=[pv1,pv2,pv3]
    pvn=array((dot(pv1,pv1),dot(pv2,pv2),dot(pv3,pv3)))
    pvm=pv[pvn.argmax()]

    # Create a slightly tilted paraxial ray and trace it.
    par_ray=opaxis.copy()
    par_ray.dir=par_ray.dir+pvm*.001

    opsys.clear_ray_list()
    opsys.reset()
    opsys.ray_add(par_ray)
    opsys.propagate()

    par=par_ray.get_final_rays(inc_zeros = False)
    oax=opaxis.get_final_rays(inc_zeros = False)

    # BUGFIX: the original Python 2 statement form split the message over
    # two lines, leaving the second string literal as a dead statement and
    # truncating the message; call form concatenates correctly.
    if len(par)!=1 or len(oax)!=1:
        raise Exception("The paraxial ray or the optical axis ray have more "
                        "than one final ray")

    # The image is where the final paraxial ray meets the final axis ray.
    expl=intersection(oax[0],par[0])
    return expl
def find_apperture(ccd, size=(50,50)):
    '''Function to find a mask representing the apperture.

    Builds an array of 1's and 0's approximating the aperture shape from
    the hit_list recorded on *ccd*: every grid cell strictly closer to the
    centre than the farthest recorded hit is set to 1.

    Attributes:

    *ccd*
        CCD object that will be used to get the shape information
    *size*
        Shape of the returned array

    Note: Right now only works for round appertures.
    '''
    width, height = ccd.size
    nx, ny = size
    step_x, step_y = width/(nx-1), height/(ny-1)

    # Radial distance of every sample point on a grid spanning the CCD.
    grid = mgrid[float(-width/2.):float(width/2.+step_x):float(step_x),
                 float(-height/2.):float(height/2.+step_y):float(step_y)]
    radii = sqrt(grid[0]**2 + grid[1]**2)

    # Largest radial distance among the recorded hits.
    max_hit_r = 0.
    for point, _ray in ccd.hit_list:
        hx, hy, hz = point
        max_hit_r = max(max_hit_r, sqrt(hx*hx + hy*hy))

    return where(radii < max_hit_r, 1., 0.)
def find_ppp(opsys, opaxis):
    """Function to find the primary principal plane location of a lens or an
    optical component.

    Arguments:

    opsys
        Optical system or optical component whose principal planes are to be
        found
    opaxis
        Ray defining the optical axis of the system

    For this function to operate, the system should have a rotational
    symmetry around the optical axis.

    Note:
        This function returns the intersection point of the optical axis and
        the principal plane.
    """
    # Wrap a bare component in a throwaway system so it can be traced.
    if isinstance(opsys,(Component)):
        c=opsys
        opsys=System(complist=[(c,(0,0,0),(0,0,0)),
                               ],n=1)

    # To create a ray parallel to the optical axis, find a displacement
    # vector perpendicular to the optical axis and to the XYZ axes, keeping
    # the longest one for numerical stability.
    d=opaxis.dir
    pv1= cross(d,(0,0,1))
    pv2= cross(d,(0,1,0))
    pv3= cross(d,(1,0,0))
    pv=[pv1,pv2,pv3]
    pvn=array((dot(pv1,pv1),dot(pv2,pv2),dot(pv3,pv3)))
    pvm=pv[pvn.argmax()]

    # Parallel ray: same direction, laterally displaced by a small amount.
    par_ray=opaxis.copy()
    par_ray.pos=par_ray.pos+pvm*.0001

    opsys.clear_ray_list()
    opsys.ray_add([opaxis, par_ray])
    opsys.propagate()

    par_ray_end=par_ray.get_final_rays(inc_zeros = False)

    if len(par_ray_end)!=1:
        raise Exception, "The paraxial ray has more than one final ray"

    # The principal plane is where the incoming parallel ray meets its own
    # outgoing final ray; undo the lateral displacement to land on the axis.
    pppl=intersection(par_ray,par_ray_end[0])
    ppp=pppl[0]-pvm*.0001
    return ppp #, pppl[1])
def get_optical_path_ep(opsys, opaxis, raylist, stop=None, r=None):
    """Returns the optical path traveled by a ray up to the exit pupil.

    The optical path is measured from the ray origin until it crosses the
    exit pupil of the system.  If a stop (aperture) is not given, the
    measurement is made up to the primary principal plane.

    Arguments:

    opsys
        Optical system under analysis
    opaxis
        Ray indicating the optical axis; its origin must be the object
        position used in the image formation (needed to calculate the
        radius of the reference sphere)
    raylist
        List of rays that will be used to sample the optical path
    stop
        Aperture stop of the system (must belong to opsys).  If not given,
        the exit pupil is assumed to be at the primary principal plane.
    r
        If None, measure up to the exit pupil plane; otherwise use a
        spherical reference surface of radius r whose vertex coincides with
        the pupil position.

    Return Value (hcl, opl, pc):

    hcl
        List of hit coordinates in the pupil coordinate system
    opl
        List of measured optical paths (hcl[i] corresponds to opl[i])
    pc
        Intersection point between the optical axis and the pupil plane

    Note: This method only works if the optical axis coincides with the Z
    axis.  This must be corrected.
    """
    # Locate the exit pupil (or fall back to the principal plane).
    if stop != None:
        enp,exp=pupil_location(opsys,stop,opaxis)
    else:
        exp= find_ppp(opsys, opaxis)

    # Reset the system and trace the sampling rays.
    opsys.clear_ray_list()
    opsys.reset()
    opsys.ray_add(raylist)
    opsys.propagate()

    rl=[]
    l=[]

    # For each sampling ray, take its final segment, reverse it, and stash
    # the accumulated optical path in the label so it survives the second
    # (reverse) trace below.
    for i in raylist:
        a=i.get_final_rays()
        if a[0].intensity!=0:
            nray=a[0].reverse()
            rl.append(nray)
            #TODO: This should not be done using the label
            nray.label=str(a[0].optical_path_parent())

    # Build a dummy system holding only the pupil-plane detector (flat CCD,
    # or a spherical reference surface when r is given).
    # NOTE(review): reconstructed as executing once, after the loop above --
    # confirm against upstream pyoptools source.
    if r==None:
        #TODO: This ccd should be infinitely big. Have to see how this can be done
        ccd=CCD(size=(1000,1000))
    else:
        ccds=Spherical(shape=Circular(radius=0.9*r), curvature=1./r)
        ccd=Component(surflist=[(ccds, (0, 0, 0), (0, 0, 0)), ])

    dummy=System(complist=[(ccd,exp,(0,0,0)),
                           ],n=1.)

    # Trace the reversed rays back to the pupil plane.
    dummy.ray_add(rl)
    dummy.propagate()

    hcl=[]
    opl=[]
    for ip,r in ccd.hit_list:
        x,y,z= ip
        #TODO: This should not be done using the label
        # Path to the pupil = path to the last element minus the path of the
        # reversed segment.
        d= float(r.label)-r.optical_path()
        hcl.append((x, y, z))
        opl.append(d)
    return (hcl, opl, exp)
#rv=bisplrep(X,Y,Z)
#data=bisplev(array(range(-20,20)),array(range(-20,20)),rv)
#data=(data-data.mean())
#print "Gaussian reference sphere radius =",sqrt(dot(impos-exp,impos-exp))
def find_reference_sphere_radius(ip, pl):
    """Find the radius of the reference sphere that best fits the input data.

    The optical axis is assumed to coincide with the z axis, so the sphere
    centre has coordinates (0, 0, r).

    Attributes:

    ip
        Points where the optical path is measured.  Each point is an
        (x, y, z) tuple; an (n, 3) array is also accepted.
    pl
        Path lengths; pl[i] corresponds to the point ip[i].

    Returns the fitted radius as produced by scipy's fsolve.
    """
    points = array(ip)
    paths = array(pl)
    npts = points.shape[0]

    # Locate the sample closest to the aperture centre; the optical path is
    # re-referenced so it is zero there.
    idx_min = 0
    best = sqrt(dot(points[0], points[0]))
    for k in range(npts):
        rk = sqrt(dot(points[k], points[k]))
        if rk < best:
            best = rk
            idx_min = k
    paths = paths - paths[idx_min]

    # Residual between the measured paths and the sag of a sphere of
    # radius z centred at (0, 0, z); fsolve drives it to zero.
    def residual(z):
        sag = sqrt(points[:, 0]**2 + points[:, 1]**2 + (points[:, 2]-z)**2) - z
        diff = paths - sag
        return sqrt((diff**2).sum())

    return fsolve(residual, -10.)
def aux_paral_f(x):
    """Worker helper for parallel_propagate.

    Unpacks the (optical system, ray chunk) pair *x*, adds the chunk to the
    system, propagates it, and returns the system so the parent process can
    merge the results.
    """
    system, chunk = x
    system.ray_add(chunk)
    system.propagate()
    return system
def parallel_propagate(os,r , np=None):
    """Perform a propagation of the rays in the system using all cores
    present on a computer.

    os gets reset before beginning the propagation, so the only rays
    used in the simulation are the rays given in r.

    Parameters

    == ============================================================
    os Optical system used in the simulation
    r  List containing the rays to propagate
    np Number of processes used in the simulation. If not given use
       one process per cpu
    == ============================================================

    Returns *os* with the results of every worker merged in.
    """
    if np==None:
        cpus=mp.cpu_count()
    else:
        cpus=np
    pool=mp.Pool(cpus)
    os.reset()

    # Split the ray list into one chunk per worker.
    # BUGFIX 1: use floor division so the slice bounds stay ints under
    # Python 3 true division (identical result under Python 2).
    # BUGFIX 2: with a single cpu the old code built two chunks covering
    # the whole list (r[:nr] and r[0:]), propagating and merging every ray
    # twice; handle that case separately.
    nr=len(r)
    if cpus<2:
        r_list=[(os,r)]
    else:
        chunk=nr//cpus
        r_list=[(os,r[:chunk])]
        for i in range(2,cpus):
            r_list.append((os,r[chunk*(i-1):chunk*i]))
        r_list.append((os,r[chunk*(cpus-1):]))

    osi=pool.map(aux_paral_f,r_list)

    pool.close()
    pool.join()

    # Fold each worker's traced system back into the caller's system.
    for osp in osi:
        os.merge(osp)
    return os
def aux_paral_f_ns(x):
    """Worker helper for parallel_propagate_ns.

    Unpacks *x* as (optical system, guide ray, destination-surface path,
    ray chunk), adds the chunk to the system, runs the non-sequential
    propagation, and returns the system for merging by the parent process.
    """
    system, guide, dest, chunk = x
    system.ray_add(chunk)
    system.propagate_ray_ns(guide, dest)
    return system
def parallel_propagate_ns(os,rg, dp, r, np=None):
    """Perform a non-sequential propagation of the rays in the system using
    all cores present on a computer.

    os gets reset before beginning the propagation, so the only rays
    used in the simulation are the rays given in r.

    Parameters

    == ============================================================
    os Optical system used in the simulation
    rg Guide ray
    dp Destination path (key of the destination surface)
    r  List containing the rays to propagate
    np Number of processes used in the simulation. If not given use
       one process per cpu
    == ============================================================

    Returns *os* with the results of every worker merged in.
    """
    if np==None:
        cpus=mp.cpu_count()
    else:
        cpus=np
    pool=mp.Pool(cpus)
    os.reset()

    # Split the ray list into one chunk per worker.
    # BUGFIX 1: floor division keeps the slice bounds ints under Python 3
    # true division (identical result under Python 2).
    # BUGFIX 2: with a single cpu the old code built two chunks covering
    # the whole list, propagating and merging every ray twice.
    nr=len(r)
    if cpus<2:
        r_list=[(os,rg,dp,r)]
    else:
        chunk=nr//cpus
        r_list=[(os,rg,dp,r[:chunk])]
        for i in range(2,cpus):
            r_list.append((os,rg,dp,r[chunk*(i-1):chunk*i]))
        r_list.append((os,rg,dp,r[chunk*(cpus-1):]))

    osi=pool.map(aux_paral_f_ns,r_list)

    pool.close()
    pool.join()

    # Fold each worker's traced system back into the caller's system.
    for osp in osi:
        os.merge(osp)
    return os
def ray_paths(r):
    '''
    Return lists with all the possible paths traveled by the ray r.

    r must have been previously propagated in an optical system.  When the
    system contains beam splitters a ray forks, so more than one path can
    exist; each returned path is a list of rays starting with r itself and
    ending at a childless (final) ray.
    '''
    def _descend(node):
        # Collect every child-to-leaf chain below *node*.
        branches = []
        for child in node.childs:
            tails = _descend(child)
            if not tails:
                # Leaf child: the chain is just the child itself.
                branches.append([child])
            else:
                for tail in tails:
                    branches.append([child] + tail)
        return branches

    return [[r] + tail for tail in _descend(r)]
|
coupdair/pyoptools
|
pyoptools/raytrace/calc/calc.py
|
Python
|
bsd-3-clause
| 24,577
|
[
"Gaussian"
] |
c7b7bb3941933e22a3d84161873efb22200ed3378a0482f0b49e84ead62862fb
|
# moosehandler.py ---
#
# Filename: moosehandler.py
# Description:
# Author: subhasis ray
# Maintainer:
# Created: Thu Jan 28 15:08:29 2010 (+0530)
# Version:
# Last-Updated: Fri Feb 25 11:18:27 2011 (+0530)
# By: Subhasis Ray
# Update #: 868
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
from __future__ import with_statement
import os
import sys
import random
from collections import defaultdict
import re
import xml.sax as sax
import xml.sax.handler as saxhandler
import xml.sax.xmlreader as saxreader
import xml.sax.saxutils as saxutils
from PyQt4 import QtCore
import moose
import config
from glclient import GLClient
## for python neuroml import
from moose import neuroml
class MooseXMLHandler(saxhandler.ContentHandler):
    """SAX content handler that sniffs the dialect of an XML model file.

    As soon as a recognising root tag is parsed, ``model_type`` is set to
    ``MooseHandler.type_sbml`` or ``MooseHandler.type_neuroml``; it stays
    ``None`` for documents whose tags are not recognised.
    """
    def __init__(self):
        saxhandler.ContentHandler.__init__(self)
        # Filled in by startElement once a known root tag is seen.
        self.model_type = None

    def startElement(self, name, attrs):
        """Record the model dialect implied by the first recognised tag."""
        if name == 'sbml':
            self.model_type = MooseHandler.type_sbml
        elif name in ('neuroml', 'networkml'):
            self.model_type = MooseHandler.type_neuroml
class MooseHandler(QtCore.QObject):
"""Access to MOOSE functionalities"""
# A list keys for known filetypes Note that type_genesis includes
# kkit (both have same extension and we separate them only after
# looking for 'include kkit' statement inside the file. Similarly,
# both type_neuroml and type_sbml are of type_xml. We recognise
# the exact type only after looking inside the file.
type_genesis = 'GENESIS'
type_kkit = 'KKIT'
type_xml = 'XML'
type_neuroml = 'NEUROML'
type_sbml = 'SBML'
type_python = 'PYTHON'
# Map between file extension and known broad filetypes.
fileExtensionMap = {
'Genesis Script(*.g)': type_genesis,
'neuroML/SBML(*.xml *.nml *.bz2 *.zip *.gz)': type_xml,
'Python script(*.py)': type_python
}
DEFAULT_SIMDT = 2.5e-4
DEFAULT_PLOTDT = 2e-3
#DEFAULT_GLDT = 50e-3
DEFAULT_RUNTIME = 1.0
DEFAULT_PLOTUPDATE_DT = 1e-1
DEFAULT_SIMDT_KKIT = 0.1
DEFAULT_RUNTIME_KKIT = 100.0
DEFAULT_PLOTDT_KKIT = 1.0
DEFAULT_PLOTUPDATE_DT_KKIT = 5.0
DEFAULT_GLDT_KKIT = 5.0
simdt = DEFAULT_SIMDT
plotdt = DEFAULT_PLOTDT
#gldt = DEFAULT_GLDT
runtime = DEFAULT_RUNTIME
plotupdate_dt = DEFAULT_PLOTUPDATE_DT
    def __init__(self):
        """Create the MOOSE access object.

        Sets up the PyMOOSE context, the standard container elements
        (/library, /proto, /data, /gl), an XML SAX parser for model-type
        sniffing, and the bookkeeping maps used for plotting tables,
        pending connections, and 3D-visualization clients.
        """
        QtCore.QObject.__init__(self)
        self._context = moose.PyMooseBase.getContext()
        # Standard container elements used by the GUI.
        self._lib = moose.Neutral('/library')
        self._proto = moose.Neutral('/proto')
        self._data = moose.Neutral('/data')
        self._gl = moose.Neutral('/gl')
        self._current_element = moose.Neutral('/')
        # SAX parser used by loadXMLModel to detect neuroML vs SBML.
        self._xmlreader = sax.make_parser()
        self._saxhandler = MooseXMLHandler()
        self._xmlreader.setContentHandler(self._saxhandler)
        # Map from full field path -> moose.Table recording that field.
        self.fieldTableMap = {}
        self._tableIndex = 0
        self._tableSuffix = random.randint(1, 999)
        # Endpoints of a connection being assembled via setConnSrc/Dest.
        self._connSrcObj = None
        self._connDestObj = None
        self._connSrcMsg = None
        self._connDestMsg = None
        # The following maps are for managing 3D visualization objects.
        self._portClientMap = {}
        self._portPathMap = {}
        self._pathPortMap = defaultdict(set)
        self._portServerMap = {}
    def getCurrentTime(self):
        """Return the current simulation time read from the /sched/cj clock job."""
        clock = moose.ClockJob('/sched/cj')
        return clock.currentTime
    def getCurrentElement(self):
        """Return the currently selected MOOSE element (root by default)."""
        return self._current_element
    def runGenesisCommand(self, cmd):
        """Runs a GENESIS command and returns the output string"""
        self._context.runG(cmd)
        # runG does not expose the command's output; return a fixed notice.
        return 'In current PyMOOSE implementation running a GENESIS command does not return anything.'
    def loadModel(self, filename, filetype, target='/'):
        """Load a model from file.

        Changes the working directory to the file's directory, registers it
        on the MOOSE SIMPATH, and dispatches to the loader matching
        *filetype* (one of the MooseHandler.type_* constants).  Returns
        whatever the dispatched loader returns; returns None for an
        unrecognised filetype.
        """
        directory = os.path.dirname(filename)
        os.chdir(directory)
        # Pass a bare filename: the neuroML reader segfaults on absolute
        # paths (upstream bug), so the path is supplied via SIMPATH instead.
        filename = os.path.basename(filename)
        moose.Property.addSimPath(directory)
        if filetype == MooseHandler.type_genesis:
            return self.loadGenesisModel(filename, target)
        elif filetype == MooseHandler.type_xml:
            return self.loadXMLModel(filename, target)
        elif filetype == MooseHandler.type_python:
            # Make the script importable before executing it.
            sys.path.append(directory)
            return self.loadPythonScript(filename)
    def loadGenesisModel(self, filename, target):
        """Load a model specified in a GENESIS Script.

        If the file is a kinetikit model (criterion: an 'include kkit'
        statement somewhere in the file, outside comments), returns
        MooseHandler.type_kkit; returns MooseHandler.type_genesis otherwise.
        The script is loaded with the MOOSE working element set to *target*.

        NOTE(review): the original indentation was lost in extraction; the
        comment/continuation scanning structure below is reconstructed from
        the control-flow logic -- verify against upstream.
        """
        filetype = MooseHandler.type_genesis
        kkit_pattern = re.compile('include\s+kkit')
        in_comment = False
        with open(filename, 'r') as infile:
            while True:
                # Assemble one logical "sentence", joining backslash
                # continuations and skipping comments, then test for kkit.
                sentence = ''
                in_sentence = False
                line = infile.readline()
                if not line:
                    break
                line = line.strip()
                if line.find('//') == 0: # skip c++ style comments
                    continue
                comment_start = line.find('/*')
                if comment_start >= 0:
                    # Enter a block comment; keep only the text before it.
                    in_comment = True
                    line = line[:comment_start]
                in_sentence = line.endswith('\\')
                while in_comment and line:
                    # Scan forward until the block comment closes.
                    comment_end = line.find('*/')
                    if comment_end >= 0:
                        in_comment = False
                        sentence = line[comment_end+2:] # rest of the line after the comment
                    line = infile.readline()
                    line = line.strip()
                while line and in_sentence:
                    # Join backslash-continued lines (dropping the '\').
                    sentence += line[:-1]
                    line = infile.readline()
                    if line:
                        line = line.strip()
                        in_sentence = line.endswith('\\')
                if line:
                    sentence += line
                iskkit = re.search(kkit_pattern, sentence)
                if iskkit:
                    filetype = MooseHandler.type_kkit
                    break
        # Load the script with the working element set to target, then
        # restore the previous working element.
        current = self._context.getCwe()
        self._context.setCwe(target)
        self._context.loadG(filename)
        self._context.setCwe(current)
        return filetype
    def loadXMLModel(self, filename, target):
        """Load a model in some XML format.

        Feeds the file through a SAX parser until MooseXMLHandler
        recognises a neuroML or SBML root tag, then calls the matching
        loader.  Returns the detected type constant (or None when neither
        dialect was recognised).  Currently only SBML and neuroML are
        supported; 9ml support may be added once the spec stabilises.
        """
        with open(filename, 'r') as xmlfile:
            for line in xmlfile:
                self._xmlreader.feed(line)
                # Stop as soon as the dialect has been identified.
                if self._saxhandler.model_type is not None:
                    break
        ret = self._saxhandler.model_type
        # Reset the sniffer so the next load starts clean.
        self._saxhandler.model_type = None
        self._xmlreader.reset()
        if ret == MooseHandler.type_neuroml:
            # Python neuroML reader; loads under '/' by default.
            # TODO: allow arbitrary targets (currently ignores `target`).
            nmlReader = neuroml.NeuroML()
            nmlReader.readNeuroMLFromFile(filename)
        elif ret == MooseHandler.type_sbml:
            self._context.readSBML(filename, target)
        return ret
def loadPythonScript(self, filename):
"""Evaluate a python script."""
extension_start = filename.rfind('.py')
script = filename[:extension_start]
exec 'import %s' % (script)
    def addFieldTable(self, full_field_path):
        """
        Add a field to the list of fields to be plotted.

        full_field_path -- complete path to the field, e.g.
        '/model/comp/Vm' (last component is the field name).

        Returns the moose.Table recording that field, creating and
        connecting a new one on first request and caching it in
        fieldTableMap for subsequent calls.
        """
        try:
            # Reuse an existing table for this field if one was made before.
            table = self.fieldTableMap[full_field_path]
        except KeyError:
            # Split the path into the owning object and the field name.
            fstart = full_field_path.rfind('/')
            fieldName = full_field_path[fstart+1:]
            objPath = full_field_path[:fstart]
            # Table name: the full path with '/' flattened to '_'.
            tableName = full_field_path[1:].replace('/', '_')
            table = moose.Table(tableName, self._data)
            self.fieldTableMap[full_field_path] = table
            # stepMode 3: record the requested value on every clock tick.
            table.stepMode = 3
            target = moose.Neutral(objPath)
            connected = table.connect('inputRequest', target, fieldName)
            config.LOGGER.info('Connected %s to %s/%s' % (table.path, target.path, fieldName))
            self._tableIndex += 1
        return table
#def doReset(self, simdt, plotdt, gldt, plotupdate_dt):
    def doReset(self, simdt, plotdt, plotupdate_dt):
        """Reset moose.

        simdt -- dt for simulation (step size for numerical methods);
        clock ticks 0, 1 and 2 get this dt.

        plotdt -- time interval for recording data; all Table objects under
        /data run on clock tick 3 with this dt.

        plotupdate_dt -- wall-model interval at which doRun emits plot
        updates; stored on the class for later use.

        The chosen values are mirrored into the MooseHandler class
        attributes before the context is reset.
        """
        self._context.setClock(0, simdt)
        self._context.setClock(1, simdt)
        self._context.setClock(2, simdt)
        self._context.setClock(3, plotdt)
        # All recording tables under /data tick with the plot clock.
        self._context.useClock(3, self._data.path + '/##[TYPE=Table]')
        MooseHandler.simdt = simdt
        MooseHandler.plotdt = plotdt
        MooseHandler.plotupdate_dt = plotupdate_dt
        self._context.reset()
    def doRun(self, time):
        """Just runs the simulation.

        If time is float, it is absolute time in seconds.
        If an integer, it is the number of time steps.

        The run is chopped into plotupdate_dt-sized chunks and the
        'updatePlots(float)' Qt signal is emitted after each chunk so the
        GUI can refresh its plots while the simulation advances.
        """
        MooseHandler.runtime = time
        next_stop = MooseHandler.plotupdate_dt
        # Step in plot-update increments, signalling the GUI each time.
        while next_stop <= MooseHandler.runtime:
            self._context.step(MooseHandler.plotupdate_dt)
            next_stop = next_stop + MooseHandler.plotupdate_dt
            self.emit(QtCore.SIGNAL('updatePlots(float)'), self._context.getCurrentTime())
        # Run whatever remains after the last whole chunk.
        time_left = MooseHandler.runtime + MooseHandler.plotupdate_dt - next_stop
        if MooseHandler.runtime < MooseHandler.plotupdate_dt:
            time_left = MooseHandler.runtime
        self._context.step(time_left)
        self.emit(QtCore.SIGNAL('updatePlots(float)'), self._context.getCurrentTime())
    #def doResetAndRun(self, runtime, simdt=None, plotdt=None, gldt=None, plotupdate_dt=None):
    def doResetAndRun(self, runtime, simdt=None, plotdt=None, plotupdate_dt=None):
        """Reset and run the simulation.

        This is to replace separate reset and run methods as two
        separate steps to run a simulation is awkward for the
        end-user.

        Float arguments override the corresponding MooseHandler class
        attributes before the clocks are reconfigured; then the context
        is reset and stepped in plotupdate_dt chunks, emitting
        updatePlots(float) after each chunk (same scheme as doRun).
        """
        if simdt is not None and isinstance(simdt, float):
            MooseHandler.simdt = simdt
        if plotdt is not None and isinstance(plotdt, float):
            # NOTE(review): the plotdt argument is stored into
            # MooseHandler.plotdt_err, but clock 3 below is configured
            # from MooseHandler.plotdt -- so a caller-supplied plotdt
            # never reaches the clock.  Looks like a typo for
            # MooseHandler.plotdt; confirm against the class attributes.
            MooseHandler.plotdt_err = plotdt
        #if gldt is not None and isinstance(gldt, float):
        #    MooseHandler.gldt = gldt
        if plotupdate_dt is not None and isinstance(plotupdate_dt, float):
            MooseHandler.plotupdate_dt = plotupdate_dt
        if runtime is not None and isinstance(runtime, float):
            MooseHandler.runtime = runtime
        # Clocks 0-2 tick at the simulation dt; clock 3 drives the
        # recording Tables at the plotting dt.
        self._context.setClock(0, MooseHandler.simdt)
        self._context.setClock(1, MooseHandler.simdt)
        self._context.setClock(2, MooseHandler.simdt)
        self._context.setClock(3, MooseHandler.plotdt)
        #self._context.setClock(4, MooseHandler.gldt)
        self._context.useClock(3, self._data.path + '/##[TYPE=Table]')
        if self._context.exists('/graphs'):
            self._context.useClock(3, '/graphs/##[TYPE=Table]')
        if self._context.exists('/moregraphs'):
            self._context.useClock(3, '/moregraphs/##[TYPE=Table]')
        #self._context.useClock(4, self._gl.path + '/##[TYPE=GLcell]')
        #self._context.useClock(4, self._gl.path + '/##[TYPE=GLview]')
        self._context.reset()
        MooseHandler.runtime = runtime
        # Chunked stepping, identical to doRun.
        next_stop = MooseHandler.plotupdate_dt
        while next_stop <= MooseHandler.runtime:
            self._context.step(MooseHandler.plotupdate_dt)
            next_stop = next_stop + MooseHandler.plotupdate_dt
            self.emit(QtCore.SIGNAL('updatePlots(float)'), self._context.getCurrentTime())
        time_left = MooseHandler.runtime + MooseHandler.plotupdate_dt - next_stop
        if MooseHandler.runtime < MooseHandler.plotupdate_dt:
            time_left = MooseHandler.runtime
        self._context.step(time_left)
        self.emit(QtCore.SIGNAL('updatePlots(float)'), self._context.getCurrentTime())
def doConnect(self):
ret = False
if self._connSrcObj and self._connDestObj and self._connSrcMsg and self._connDestMsg:
ret = self._connSrcObj.connect(self._connSrcMsg, self._connDestObj, self._connDestMsg)
# print 'Connected %s/%s to %s/%s: ' % (self._connSrcObj.path, self._connSrcMsg, self._connDestObj.path, self._connDestMsg), ret
self._connSrcObj = None
self._connDestObj = None
self._connSrcMsg = None
self._connDestMsg = None
return ret
def setConnSrc(self, fieldPath):
pos = fieldPath.rfind('/')
moosePath = fieldPath[:pos]
field = fieldPath[pos+1:]
self._connSrcObj = moose.Neutral(moosePath)
self._connSrcMsg = field
def setConnDest(self, fieldPath):
pos = fieldPath.rfind('/')
moosePath = fieldPath[:pos]
field = fieldPath[pos+1:]
self._connDestObj = moose.Neutral(moosePath)
self._connDestMsg = field
def getSrcFields(self, mooseObj):
srcFields = self._context.getFieldList(mooseObj.id, moose.FTYPE_SOURCE)
sharedFields = self._context.getFieldList(mooseObj.id, moose.FTYPE_SHARED)
ret = []
for field in srcFields:
ret.append(field)
for field in sharedFields:
ret.append(field)
return ret
def getDestFields(self, mooseObj):
destFields = self._context.getFieldList(mooseObj.id, moose.FTYPE_DEST)
sharedFields = self._context.getFieldList(mooseObj.id, moose.FTYPE_SHARED)
ret = []
for field in destFields:
ret.append(field)
for field in sharedFields:
ret.append(field)
return ret
def makeGLCell(self, mooseObjPath, port, field=None, threshold=None, lowValue=None, highValue=None, vscale=None, bgColor=None, sync=None):
"""Make a GLcell instance.
mooseObjPath -- path of the moose object to be monitored
port -- string representation of the port number for the client.
field -- name of the field to be observed. Vm by default.
threshold -- the % change in the field value that will be
taken up for visualization. 1% by default.
highValue -- value represented by the last line of the
colourmap file. Any value of the field above this will be
represented by the colour corresponding to this value.
lowValue -- value represented by the first line of the
colourmap file. Any value of the field below this will be
represented by the colour corresponding to this value.
vscale -- Scaling of thickness for visualization of very thin
compartments.
bgColor -- background colour of the visualization window.
sync -- Run simulation in sync with the visualization. If on,
it may slowdown the simulation.
"""
print 'Parameter types:', 'port:', type(port), 'field:', type(field), 'threshold:', type(threshold), 'highValue:', type(highValue), highValue, 'lowValue:', type(lowValue), 'vscale:', type(vscale), 'bgColor:', type(bgColor), 'sync:', type(sync)
print 'Background colour:', bgColor
if not self._context.exists(mooseObjPath):
return None
glCellPath = mooseObjPath.replace('/', '_') + str(random.randint(0,999))
glCell = moose.GLcell(glCellPath, self._gl)
glCell.useClock(4)
glCell.vizpath = mooseObjPath
glCell.port = port
self._portPathMap[port] = mooseObjPath
self._pathPortMap[mooseObjPath].add(port)
self._portServerMap[port] = glCell
if field is not None:
glCell.attribute = field
if threshold is not None and isinstance(threshold, float):
glCell.threhold = threshold
if highValue is not None and isinstance(highValue, float):
glCell.highvalue = highValue
if lowValue is not None and isinstance(lowValue, float):
glCell.lowvalue = lowValue
if vscale is not None and isinstance(vscale, float):
glCell.vscale = vscale
if bgColor is not None:
glCell.bgcolor = bgColor
if sync is not None:
glCell.sync = sync
print 'Created GLCell for object', mooseObjPath, ' on port', port
return glCell
    def makeGLView(self, mooseObjPath, wildcard, port, fieldList, minValueList, maxValueList, colorFieldIndex, morphFieldIndex=None, grid=None, bgColor=None, sync=None):
        """
        Make a GLview object to visualize some field of a bunch of
        moose elements.

        mooseObjPath -- GENESIS-style path for elements to be
        observed.

        wildcard -- for selecting sub elements to be viewed

        port -- port to use for communicating with the client.

        fieldList -- list of fields to be observed (at most five are
        used; see below).

        minValueList -- minimum value for fields in fieldList.

        maxValueList -- maximum value for fields in fieldList.

        colorFieldIndex -- index of the field to be represented by the
        colour of the 3-D shapes in visualization.

        morphFieldIndex -- index of the field to be represented by the
        size of the 3D shape.

        grid -- whether to put the 3D shapes in a grid or to use the
        x, y, z coordinates in the objects for positioning them in
        space.

        bgColor -- background colour of visualization window.

        sync -- synchronize simulation with visualization.

        Returns the created GLview element, or None when mooseObjPath
        does not exist.
        """
        if not self._context.exists(mooseObjPath):
            return None
        # Random suffix keeps the GLview element name unique.
        glViewPath = mooseObjPath.replace('/', '_') + str(random.randint(0,999))
        print 'Created GLView object', glViewPath
        glView = moose.GLview(glViewPath, self._gl)
        glView.useClock(4)
        # Book-keeping so the server/client pair can be torn down later.
        self._portPathMap[port] = mooseObjPath
        self._pathPortMap[mooseObjPath].add(port)
        self._portServerMap[port] = glView
        glView.vizpath = mooseObjPath
        if wildcard:
            glView.vizpath = glView.vizpath + '/' + wildcard
        print 'Set vizpath to', mooseObjPath
        print 'client Port: ', port
        glView.port = port
        # The GLview element exposes five observable slots: value1..value5.
        if len(fieldList) > 5:
            fieldList = fieldList[:5]
        for ii in range(len(fieldList)):
            visField = 'value%d' % (ii+1)
            # Skip empty/blank field names without consuming a slot's
            # min/max entries for them.
            if (not fieldList[ii]) or (len(fieldList[ii].strip()) == 0):
                continue
            setattr(glView, visField, fieldList[ii])
            try:
                if isinstance(minValueList[ii], float):
                    setattr(glView, 'value%dmin' % (ii+1), minValueList[ii])
                if isinstance(maxValueList[ii], float):
                    setattr(glView, 'value%dmax' % (ii+1), maxValueList[ii])
            except IndexError:
                # min/max lists may be shorter than fieldList; stop there.
                break
        glView.color_val = int(colorFieldIndex)
        if morphFieldIndex is not None:
            glView.morph_val = int(morphFieldIndex)
        if grid and (grid != 'off'):
            glView.grid = 'on'
        if bgColor is not None:
            glView.bgcolor = bgColor
        if sync and (sync != 'off'):
            glView.sync = 'on'
        print 'Created GLView', glView.path, 'for object', mooseObjPath, ' on port', port
        return glView
    def startGLClient(self, executable, port, mode, colormap):
        """Start the glclient subprocess.

        executable -- path to the client program.

        port -- network port used to communicate with the server.

        mode -- The kind of moose GL-element to interact with (glcell or
        glview)

        colormap -- path to colormap file for 3D rendering.

        Returns the GLClient wrapper on success, or None when the child
        process exited immediately after launch.
        """
        client = GLClient(executable, port, mode, colormap)
        # poll() returns None while the child is still running.
        ret = client.child.poll()
        if ret is not None: # The child terminated immediately
            print 'Child process exited with return code:', ret
            return None
        self._portClientMap[port] = client
        print 'Created GLclient on port', port
        return client
def stopGLClientOnPort(self, port):
"""Stop the glclient process listening to the specified
port."""
try:
client = self._portClientMap.pop(port)
client.stop()
except KeyError:
config.LOGGER.error('%s: port not used by any client' % (port))
def stopGLClientsOnObject(self, mooseObject):
"""Stop the glclient processes listening to glcell/glview
objects on the specified moose object."""
path = mooseObject.path
try:
portSet = self._pathPortMap.pop(path)
for port in portSet:
self.stopGLClientOnPort(port)
except KeyError:
config.LOGGER.error('%s: no 3D visualization clients for this object.' % (path))
def stopGL(self):
"""Make the dt of the clock on GLview and GLcell objects very
large. Kill all the GLClient processes"""
self._context.setClock(4, 1e10)
for port, client in self._portClientMap.items():
client.stop()
def getKKitGraphs(self):
tableList = []
for container in moose.Neutral('/graphs').children():
for child in moose.Neutral(container).children():
if moose.Neutral(child).className == 'Table':
tableList.append(moose.Table(child))
return tableList
def getKKitMoreGraphs(self):
tableList = []
for container in moose.Neutral('/moregraphs').children():
for child in moose.Neutral(container).children():
if moose.Neutral(child).className == 'Table':
tableList.append(moose.Table(child))
return tableList
def getDataTables(self):
tableList = []
for table in self._data.children():
if moose.Neutral(table).className == 'Table':
tableList.append(moose.Table(table))
return tableList
#
# moosehandler.py ends here
|
BhallaLab/moose-thalamocortical
|
pymoose/gui/qt/moosehandler.py
|
Python
|
lgpl-2.1
| 24,389
|
[
"MOOSE"
] |
deb77c56d448070b64cd93b1804dfc13ea5dfb129ad702d050ffaf60db2a9c7d
|
"""Fork of urllib2.
When reading this, don't assume that all code in here is reachable. Code in
the rest of mechanize may be used instead.
Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Python
Software Foundation; All Rights Reserved
Copyright 2002-2009 John J Lee <jjl@pobox.com>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
# XXX issues:
# If an authentication error handler that tries to perform
# authentication for some reason but fails, how should the error be
# signalled? The client needs to know the HTTP error code. But if
# the handler knows that the problem was, e.g., that it didn't know
# that hash algo that requested in the challenge, it would be good to
# pass that information along to the client, too.
# ftp errors aren't handled cleanly
# check digest against correct (i.e. non-apache) implementation
# Possible extensions:
# complex proxies XXX not sure what exactly was meant by this
# abstract factory for opener
import copy
import base64
import httplib
import mimetools
import logging
import os
import posixpath
import random
import re
import socket
import sys
import time
import urllib
import urlparse
import bisect
# Compatibility shims for a range of Python 2.x releases follow.
try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO
# hashlib superseded the md5/sha modules (the fallback targets 2.4, per
# the comment below); expose uniform sha1_digest/md5_digest either way.
try:
    import hashlib
except ImportError:
    # python 2.4
    import md5
    import sha
    def sha1_digest(bytes):
        return sha.new(bytes).hexdigest()
    def md5_digest(bytes):
        return md5.new(bytes).hexdigest()
else:
    def sha1_digest(bytes):
        return hashlib.sha1(bytes).hexdigest()
    def md5_digest(bytes):
        return hashlib.md5(bytes).hexdigest()
# Probe whether socket._fileobject accepts a 'close' keyword and define
# create_readline_wrapper accordingly.
try:
    socket._fileobject("fake socket", close=True)
except TypeError:
    # python <= 2.4
    create_readline_wrapper = socket._fileobject
else:
    def create_readline_wrapper(fh):
        return socket._fileobject(fh, close=True)
# python 2.4 splithost has a bug in empty path component case
_hostprog = None
def splithost(url):
    """splithost('//host[:port]/path') --> 'host[:port]', '/path'.

    Returns (None, url) when *url* does not start with '//'.
    """
    global _hostprog
    if _hostprog is None:
        # Compile the pattern lazily, on first call.
        import re
        _hostprog = re.compile('^//([^/?]*)(.*)$')

    m = _hostprog.match(url)
    if m is None:
        return None, url
    return m.group(1), m.group(2)
from urllib import (unwrap, unquote, splittype, quote,
addinfourl, splitport,
splitattr, ftpwrapper, splituser, splitpasswd, splitvalue)
# support for FileHandler, proxies via environment variables
from urllib import localhost, url2pathname, getproxies
from urllib2 import HTTPError, URLError
import _request
import _rfc3986
import _sockettimeout
from _clientcookie import CookieJar
from _response import closeable_response
# used in User-Agent header sent
__version__ = sys.version[:3]
# Module-global opener used by urlopen(); created lazily.
_opener = None
def urlopen(url, data=None, timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
    """Open *url* via the module-global OpenerDirector, building a
    default opener (build_opener()) on first use."""
    global _opener
    if _opener is None:
        _opener = build_opener()
    return _opener.open(url, data, timeout)
def install_opener(opener):
    """Install *opener* as the module-global opener used by urlopen()."""
    global _opener
    _opener = opener
# copied from cookielib.py
_cut_port_re = re.compile(r":\d+$")
def request_host(request):
    """Return request-host, as defined by RFC 2965.

    Variation from RFC: returned value is lowercased, for convenient
    comparison.
    """
    url = request.get_full_url()
    host = urlparse.urlparse(url)[1]
    # Fall back to the Host header when the URL itself has no netloc.
    if host == "":
        host = request.get_header("Host", "")
    # remove port, if present
    host = _cut_port_re.sub("", host, 1)
    return host.lower()
class Request:
    """One URL request: the URL, optional POST data, headers, plus the
    origin-host / unverifiability state used by cookie and redirect
    handling.  Scheme and host are split out lazily and cached."""

    def __init__(self, url, data=None, headers={},
                 origin_req_host=None, unverifiable=False):
        # unwrap('<URL:type://host/path>') --> 'type://host/path'
        self.__original = unwrap(url)
        self.type = None
        # self.__r_type is what's left after doing the splittype
        self.host = None
        self.port = None
        self._tunnel_host = None
        self.data = data
        self.headers = {}
        for key, value in headers.items():
            self.add_header(key, value)
        self.unredirected_hdrs = {}
        if origin_req_host is None:
            origin_req_host = request_host(self)
        self.origin_req_host = origin_req_host
        self.unverifiable = unverifiable

    def __getattr__(self, attr):
        # XXX this is a fallback mechanism to guard against these
        # methods getting called in a non-standard order.  this may be
        # too complicated and/or unnecessary.
        # XXX should the __r_XXX attributes be public?
        if attr[:12] == '_Request__r_':
            name = attr[12:]
            if hasattr(Request, 'get_' + name):
                getattr(self, 'get_' + name)()
                return getattr(self, attr)
        # Python 2 style raise; this module targets Python 2.
        raise AttributeError, attr

    def get_method(self):
        # The HTTP method is implied by the presence of data.
        if self.has_data():
            return "POST"
        else:
            return "GET"

    # XXX these helper methods are lame

    def add_data(self, data):
        self.data = data

    def has_data(self):
        return self.data is not None

    def get_data(self):
        return self.data

    def get_full_url(self):
        return self.__original

    def get_type(self):
        # Split the scheme off the original URL once and cache it.
        if self.type is None:
            self.type, self.__r_type = splittype(self.__original)
            if self.type is None:
                raise ValueError, "unknown url type: %s" % self.__original
        return self.type

    def get_host(self):
        # Split the (unquoted) host off once and cache it.
        if self.host is None:
            self.host, self.__r_host = splithost(self.__r_type)
            if self.host:
                self.host = unquote(self.host)
        return self.host

    def get_selector(self):
        scheme, authority, path, query, fragment = _rfc3986.urlsplit(
            self.__r_host)
        if path == "":
            path = "/"  # RFC 2616, section 3.2.2
        fragment = None  # RFC 3986, section 3.5
        return _rfc3986.urlunsplit([scheme, authority, path, query, fragment])

    def set_proxy(self, host, type):
        # For https, record the original host for tunnelling instead of
        # rewriting the request target.
        orig_host = self.get_host()
        if self.get_type() == 'https' and not self._tunnel_host:
            self._tunnel_host = orig_host
        else:
            self.type = type
            self.__r_host = self.__original

        self.host = host

    def has_proxy(self):
        """Private method."""
        # has non-HTTPS proxy
        return self.__r_host == self.__original

    def get_origin_req_host(self):
        return self.origin_req_host

    def is_unverifiable(self):
        return self.unverifiable

    def add_header(self, key, val):
        # useful for something like authentication
        self.headers[key.capitalize()] = val

    def add_unredirected_header(self, key, val):
        # will not be added to a redirected request
        self.unredirected_hdrs[key.capitalize()] = val

    def has_header(self, header_name):
        return (header_name in self.headers or
                header_name in self.unredirected_hdrs)

    def get_header(self, header_name, default=None):
        return self.headers.get(
            header_name,
            self.unredirected_hdrs.get(header_name, default))

    def header_items(self):
        # Normal headers take precedence over unredirected ones.
        hdrs = self.unredirected_hdrs.copy()
        hdrs.update(self.headers)
        return hdrs.items()
class OpenerDirector:
    """Dispatches URL requests through a chain of registered handlers.

    Handlers are classified by method-name convention --
    <proto>_open, <proto>_request, <proto>_response and
    <proto>_error_<code> -- and stored in per-protocol lookup tables,
    ordered by each handler's handler_order attribute."""

    def __init__(self):
        client_version = "Python-urllib/%s" % __version__
        self.addheaders = [('User-agent', client_version)]
        # manage the individual handlers
        self.handlers = []
        self.handle_open = {}
        self.handle_error = {}
        self.process_response = {}
        self.process_request = {}

    def add_handler(self, handler):
        """Register *handler*, slotting each of its protocol methods
        into the appropriate lookup table."""
        if not hasattr(handler, "add_parent"):
            raise TypeError("expected BaseHandler instance, got %r" %
                            type(handler))

        added = False
        for meth in dir(handler):
            if meth in ["redirect_request", "do_open", "proxy_open"]:
                # oops, coincidental match
                continue

            # Split '<protocol>_<condition>' on the first underscore.
            i = meth.find("_")
            protocol = meth[:i]
            condition = meth[i+1:]

            if condition.startswith("error"):
                j = condition.find("_") + i + 1
                kind = meth[j+1:]
                try:
                    kind = int(kind)
                except ValueError:
                    pass
                lookup = self.handle_error.get(protocol, {})
                self.handle_error[protocol] = lookup
            elif condition == "open":
                kind = protocol
                lookup = self.handle_open
            elif condition == "response":
                kind = protocol
                lookup = self.process_response
            elif condition == "request":
                kind = protocol
                lookup = self.process_request
            else:
                continue

            # Keep each handler list sorted by handler_order.
            handlers = lookup.setdefault(kind, [])
            if handlers:
                bisect.insort(handlers, handler)
            else:
                handlers.append(handler)
            added = True

        if added:
            # the handlers must work in an specific order, the order
            # is specified in a Handler attribute
            bisect.insort(self.handlers, handler)
            handler.add_parent(self)

    def close(self):
        # Only exists for backwards compatibility.
        pass

    def _call_chain(self, chain, kind, meth_name, *args):
        # Handlers raise an exception if no one else should try to handle
        # the request, or return None if they can't but another handler
        # could.  Otherwise, they return the response.
        handlers = chain.get(kind, ())
        for handler in handlers:
            func = getattr(handler, meth_name)

            result = func(*args)
            if result is not None:
                return result

    def _open(self, req, data=None):
        # Try default handlers, then protocol-specific ones, then the
        # 'unknown' fallbacks.
        result = self._call_chain(self.handle_open, 'default',
                                  'default_open', req)
        if result:
            return result

        protocol = req.get_type()
        result = self._call_chain(self.handle_open, protocol, protocol +
                                  '_open', req)
        if result:
            return result

        return self._call_chain(self.handle_open, 'unknown',
                                'unknown_open', req)

    def error(self, proto, *args):
        """Route an error through the <proto>_error_<code> handlers,
        falling back to http_error_default for HTTP(S)."""
        if proto in ('http', 'https'):
            # XXX http[s] protocols are special-cased
            dict = self.handle_error['http'] # https is not different than http
            proto = args[2]  # YUCK!
            meth_name = 'http_error_%s' % proto
            http_err = 1
            orig_args = args
        else:
            dict = self.handle_error
            meth_name = proto + '_error'
            http_err = 0
        args = (dict, proto, meth_name) + args
        result = self._call_chain(*args)
        if result:
            return result

        if http_err:
            args = (dict, 'default', 'http_error_default') + orig_args
            return self._call_chain(*args)
# XXX probably also want an abstract factory that knows when it makes
# sense to skip a superclass in favor of a subclass and when it might
# make sense to include both
def build_opener(*handlers):
    """Create an opener object from a list of handlers.

    The opener will use several default handlers, including support
    for HTTP, FTP and when applicable, HTTPS.

    If any of the handlers passed as arguments are subclasses of the
    default handlers, the default handlers will not be used.
    """
    import types
    def isclass(obj):
        return isinstance(obj, (types.ClassType, type))

    opener = OpenerDirector()
    default_classes = [ProxyHandler, UnknownHandler, HTTPHandler,
                       HTTPDefaultErrorHandler, HTTPRedirectHandler,
                       FTPHandler, FileHandler, HTTPErrorProcessor]
    # Only offer HTTPS support when httplib was built with SSL.
    if hasattr(httplib, 'HTTPS'):
        default_classes.append(HTTPSHandler)
    # Drop any default whose role is covered by a caller-supplied
    # handler (instance of, or subclass of, the default class).
    skip = set()
    for klass in default_classes:
        for check in handlers:
            if isclass(check):
                if issubclass(check, klass):
                    skip.add(klass)
            elif isinstance(check, klass):
                skip.add(klass)
    for klass in skip:
        default_classes.remove(klass)

    for klass in default_classes:
        opener.add_handler(klass())

    # Caller-supplied handlers may be classes (instantiated here) or
    # ready-made instances.
    for h in handlers:
        if isclass(h):
            h = h()
        opener.add_handler(h)
    return opener
class BaseHandler:
    """Common base class for handlers registered with OpenerDirector.

    Handlers sort by their ``handler_order`` attribute; smaller values
    run earlier in the chain.
    """

    handler_order = 500

    def add_parent(self, parent):
        """Remember the OpenerDirector this handler belongs to."""
        self.parent = parent

    def close(self):
        """No-op; retained only for backwards compatibility."""
        pass

    def __lt__(self, other):
        """Order handlers by handler_order; objects without one sort
        after this handler (legacy behaviour for custom classes that
        are unaware of handler_order)."""
        try:
            other_order = other.handler_order
        except AttributeError:
            return True
        return self.handler_order < other_order
class HTTPErrorProcessor(BaseHandler):
    """Process HTTP error responses.

    The purpose of this handler is to allow other response processors a
    look-in by removing the call to parent.error() from
    AbstractHTTPHandler.

    For non-2xx error codes, this just passes the job on to the
    Handler.<proto>_error_<code> methods, via the OpenerDirector.error
    method.  Eventually, HTTPDefaultErrorHandler will raise an
    HTTPError if no other handler handles the error.

    """
    handler_order = 1000  # after all other processors

    def http_response(self, request, response):
        code, msg, hdrs = response.code, response.msg, response.info()

        # According to RFC 2616, "2xx" code indicates that the client's
        # request was successfully received, understood, and accepted.
        if not (200 <= code < 300):
            # hardcoded http is NOT a bug
            response = self.parent.error(
                'http', request, response, code, msg, hdrs)

        return response

    https_response = http_response
class HTTPDefaultErrorHandler(BaseHandler):
    """Last-resort error handler: raise an HTTPError for any HTTP
    error response no other handler dealt with."""

    def http_error_default(self, req, fp, code, msg, hdrs):
        # why these error methods took the code, msg, headers args in the first
        # place rather than a response object, I don't know, but to avoid
        # multiple wrapping, we're discarding them

        if isinstance(fp, HTTPError):
            response = fp
        else:
            response = HTTPError(
                req.get_full_url(), code, msg, hdrs, fp)
        assert code == response.code
        assert msg == response.msg
        assert hdrs == response.hdrs

        raise response
class HTTPRedirectHandler(BaseHandler):
    """Follow 3xx (and meta-refresh) redirects, with loop detection."""

    # maximum number of redirections to any single URL
    # this is needed because of the state that cookies introduce
    max_repeats = 4
    # maximum total number of redirections (regardless of URL) before
    # assuming we're in a loop
    max_redirections = 10

    # Implementation notes:

    # To avoid the server sending us into an infinite loop, the request
    # object needs to track what URLs we have already seen.  Do this by
    # adding a handler-specific attribute to the Request object.  The value
    # of the dict is used to count the number of times the same URL has
    # been visited.  This is needed because visiting the same URL twice
    # does not necessarily imply a loop, thanks to state introduced by
    # cookies.

    # Always unhandled redirection codes:
    # 300 Multiple Choices: should not handle this here.
    # 304 Not Modified: no need to handle here: only of interest to caches
    #     that do conditional GETs
    # 305 Use Proxy: probably not worth dealing with here
    # 306 Unused: what was this for in the previous versions of protocol??

    def redirect_request(self, req, fp, code, msg, headers, newurl):
        """Return a Request or None in response to a redirect.

        This is called by the http_error_30x methods when a
        redirection response is received.  If a redirection should
        take place, return a new Request to allow http_error_30x to
        perform the redirect.  Otherwise, raise HTTPError if no-one
        else should try to handle this url.  Return None if you can't
        but another Handler might.
        """
        m = req.get_method()
        if (code in (301, 302, 303, 307, "refresh") and m in ("GET", "HEAD")
            or code in (301, 302, 303, "refresh") and m == "POST"):
            # Strictly (according to RFC 2616), 301 or 302 in response
            # to a POST MUST NOT cause a redirection without confirmation
            # from the user (of urllib2, in this case).  In practice,
            # essentially all clients do redirect in this case, so we do
            # the same.
            # TODO: really refresh redirections should be visiting; tricky to fix
            new = _request.Request(
                newurl,
                headers=req.headers,
                origin_req_host=req.get_origin_req_host(),
                unverifiable=True,
                visit=False,
                timeout=req.timeout)
            new._origin_req = getattr(req, "_origin_req", req)
            return new
        else:
            raise HTTPError(req.get_full_url(), code, msg, headers, fp)

    def http_error_302(self, req, fp, code, msg, headers):
        # Some servers (incorrectly) return multiple Location headers
        # (so probably same goes for URI).  Use first header.
        if 'location' in headers:
            newurl = headers.getheaders('location')[0]
        elif 'uri' in headers:
            newurl = headers.getheaders('uri')[0]
        else:
            return

        newurl = _rfc3986.clean_url(newurl, "latin-1")
        # Resolve a possibly-relative Location against the request URL.
        newurl = _rfc3986.urljoin(req.get_full_url(), newurl)

        # XXX Probably want to forget about the state of the current
        # request, although that might interact poorly with other
        # handlers that also use handler-specific request attributes
        new = self.redirect_request(req, fp, code, msg, headers, newurl)
        if new is None:
            return

        # loop detection
        # .redirect_dict has a key url if url was previously visited.
        if hasattr(req, 'redirect_dict'):
            visited = new.redirect_dict = req.redirect_dict
            if (visited.get(newurl, 0) >= self.max_repeats or
                len(visited) >= self.max_redirections):
                raise HTTPError(req.get_full_url(), code,
                                self.inf_msg + msg, headers, fp)
        else:
            visited = new.redirect_dict = req.redirect_dict = {}
        visited[newurl] = visited.get(newurl, 0) + 1

        # Don't close the fp until we are sure that we won't use it
        # with HTTPError.
        fp.read()
        fp.close()

        return self.parent.open(new)

    http_error_301 = http_error_303 = http_error_307 = http_error_302
    http_error_refresh = http_error_302

    inf_msg = "The HTTP server returned a redirect error that would " \
              "lead to an infinite loop.\n" \
              "The last 30x error message was:\n"
def _parse_proxy(proxy):
    """Return (scheme, user, password, host/port) given a URL or an authority.

    If a URL is supplied, it must have an authority (host:port) component.
    According to RFC 3986, having an authority component means the URL must
    have two slashes after the scheme:

    >>> _parse_proxy('file:/ftp.example.com/')
    Traceback (most recent call last):
    ValueError: proxy URL with no authority: 'file:/ftp.example.com/'

    The first three items of the returned tuple may be None.

    Examples of authority parsing:

    >>> _parse_proxy('proxy.example.com')
    (None, None, None, 'proxy.example.com')
    >>> _parse_proxy('proxy.example.com:3128')
    (None, None, None, 'proxy.example.com:3128')

    The authority component may optionally include userinfo (assumed to be
    username:password):

    >>> _parse_proxy('joe:password@proxy.example.com')
    (None, 'joe', 'password', 'proxy.example.com')
    >>> _parse_proxy('joe:password@proxy.example.com:3128')
    (None, 'joe', 'password', 'proxy.example.com:3128')

    Same examples, but with URLs instead:

    >>> _parse_proxy('http://proxy.example.com/')
    ('http', None, None, 'proxy.example.com')
    >>> _parse_proxy('http://proxy.example.com:3128/')
    ('http', None, None, 'proxy.example.com:3128')
    >>> _parse_proxy('http://joe:password@proxy.example.com/')
    ('http', 'joe', 'password', 'proxy.example.com')
    >>> _parse_proxy('http://joe:password@proxy.example.com:3128')
    ('http', 'joe', 'password', 'proxy.example.com:3128')

    Everything after the authority is ignored:

    >>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128')
    ('ftp', 'joe', 'password', 'proxy.example.com')

    Test for no trailing '/' case:

    >>> _parse_proxy('http://joe:password@proxy.example.com')
    ('http', 'joe', 'password', 'proxy.example.com')

    """
    scheme, r_scheme = splittype(proxy)
    if not r_scheme.startswith("/"):
        # authority
        scheme = None
        authority = proxy
    else:
        # URL
        if not r_scheme.startswith("//"):
            raise ValueError("proxy URL with no authority: %r" % proxy)
        # We have an authority, so for RFC 3986-compliant URLs (by ss 3.
        # and 3.3.), path is empty or starts with '/'
        end = r_scheme.find("/", 2)
        if end == -1:
            end = None
        # Authority is everything between '//' and the next '/' (if any).
        authority = r_scheme[2:end]
    userinfo, hostport = splituser(authority)
    if userinfo is not None:
        user, password = splitpasswd(userinfo)
    else:
        user = password = None
    return scheme, user, password, hostport
class ProxyHandler(BaseHandler):
    """Rewrite requests to go through a proxy, per-scheme.

    A <scheme>_open method is synthesised for each entry in the proxies
    mapping, so this handler only intercepts schemes it has a proxy
    for."""

    # Proxies must be in front
    handler_order = 100

    def __init__(self, proxies=None, proxy_bypass=None):
        # Default to the environment-derived proxy map.
        if proxies is None:
            proxies = getproxies()
        assert hasattr(proxies, 'has_key'), "proxies must be a mapping"
        self.proxies = proxies
        # Synthesise one <scheme>_open method per configured proxy; the
        # lambda defaults bind the current url/type at definition time.
        for type, url in proxies.items():
            setattr(self, '%s_open' % type,
                    lambda r, proxy=url, type=type, meth=self.proxy_open: \
                    meth(r, proxy, type))
        if proxy_bypass is None:
            proxy_bypass = urllib.proxy_bypass
        self._proxy_bypass = proxy_bypass

    def proxy_open(self, req, proxy, type):
        orig_type = req.get_type()
        proxy_type, user, password, hostport = _parse_proxy(proxy)
        if proxy_type is None:
            proxy_type = orig_type

        # Honour the bypass predicate (e.g. no_proxy hosts).
        if req.get_host() and self._proxy_bypass(req.get_host()):
            return None

        if user and password:
            user_pass = '%s:%s' % (unquote(user), unquote(password))
            creds = base64.b64encode(user_pass).strip()
            req.add_header('Proxy-authorization', 'Basic ' + creds)
        hostport = unquote(hostport)
        req.set_proxy(hostport, proxy_type)
        if orig_type == proxy_type or orig_type == 'https':
            # let other handlers take care of it
            return None
        else:
            # need to start over, because the other handlers don't
            # grok the proxy's URL type
            # e.g. if we have a constructor arg proxies like so:
            # {'http': 'ftp://proxy.example.com'}, we may end up turning
            # a request for http://acme.example.com/a into one for
            # ftp://proxy.example.com/a
            return self.parent.open(req)
class HTTPPasswordMgr:
    """Store (user, password) pairs keyed by realm and reduced URI, and
    look them up by realm + request URI prefix matching."""

    def __init__(self):
        # {realm: {(reduced_uri, ...): (user, password)}}
        self.passwd = {}

    def add_password(self, realm, uri, user, passwd):
        # uri could be a single URI or a sequence
        if isinstance(uri, basestring):
            uri = [uri]
        if not realm in self.passwd:
            self.passwd[realm] = {}
        # Store the URIs reduced both with and without the default port
        # so later lookups match either spelling.
        for default_port in True, False:
            reduced_uri = tuple(
                [self.reduce_uri(u, default_port) for u in uri])
            self.passwd[realm][reduced_uri] = (user, passwd)

    def find_user_password(self, realm, authuri):
        # Return (user, password) for the first stored URI that is a
        # prefix of authuri within this realm, else (None, None).
        domains = self.passwd.get(realm, {})
        for default_port in True, False:
            reduced_authuri = self.reduce_uri(authuri, default_port)
            for uris, authinfo in domains.iteritems():
                for uri in uris:
                    if self.is_suburi(uri, reduced_authuri):
                        return authinfo
        return None, None

    def reduce_uri(self, uri, default_port=True):
        """Accept authority or URI and extract only the authority and path."""
        # note HTTP URLs do not have a userinfo component
        parts = urlparse.urlsplit(uri)
        if parts[1]:
            # URI
            scheme = parts[0]
            authority = parts[1]
            path = parts[2] or '/'
        else:
            # host or host:port
            scheme = None
            authority = uri
            path = '/'
        host, port = splitport(authority)
        # Optionally normalise a missing port to the scheme default.
        if default_port and port is None and scheme is not None:
            dport = {"http": 80,
                     "https": 443,
                     }.get(scheme)
            if dport is not None:
                authority = "%s:%d" % (host, dport)
        return authority, path

    def is_suburi(self, base, test):
        """Check if test is below base in a URI tree

        Both args must be URIs in reduced form.
        """
        if base == test:
            return True
        if base[0] != test[0]:
            return False
        common = posixpath.commonprefix((base[1], test[1]))
        if len(common) == len(base[1]):
            return True
        return False
class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr):
    """Password manager that retries the lookup under the wildcard
    realm (None) when no realm-specific entry matches."""

    def find_user_password(self, realm, authuri):
        credentials = HTTPPasswordMgr.find_user_password(self, realm,
                                                         authuri)
        if credentials[0] is not None:
            return credentials
        # Fall back to credentials registered with realm=None.
        return HTTPPasswordMgr.find_user_password(self, None, authuri)
class AbstractBasicAuthHandler:
    """Shared machinery for HTTP Basic auth against servers (401) and
    proxies (407); subclasses supply auth_header and the error hook."""

    # XXX this allows for multiple auth-schemes, but will stupidly pick
    # the last one with a realm specified.

    # allow for double- and single-quoted realm values
    # (single quotes are a violation of the RFC, but appear in the wild)
    rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+'
                    'realm=(["\'])(.*?)\\2', re.I)

    # XXX could pre-emptively send auth info already accepted (RFC 2617,
    # end of section 2, and section 1.2 immediately after "credentials"
    # production).

    def __init__(self, password_mgr=None):
        if password_mgr is None:
            password_mgr = HTTPPasswordMgr()
        self.passwd = password_mgr
        self.add_password = self.passwd.add_password

    def http_error_auth_reqed(self, authreq, host, req, headers):
        # host may be an authority (without userinfo) or a URL with an
        # authority
        # XXX could be multiple headers
        authreq = headers.get(authreq, None)
        if authreq:
            mo = AbstractBasicAuthHandler.rx.search(authreq)
            if mo:
                scheme, quote, realm = mo.groups()
                if scheme.lower() == 'basic':
                    return self.retry_http_basic_auth(host, req, realm)

    def retry_http_basic_auth(self, host, req, realm):
        # Look up credentials for this realm/host and re-issue the
        # request with an Authorization (or Proxy-authorization) header.
        user, pw = self.passwd.find_user_password(realm, host)
        if pw is not None:
            raw = "%s:%s" % (user, pw)
            auth = 'Basic %s' % base64.b64encode(raw).strip()
            # Avoid looping: if the failing request already carried
            # exactly these credentials, give up.
            if req.headers.get(self.auth_header, None) == auth:
                return None
            newreq = copy.copy(req)
            newreq.add_header(self.auth_header, auth)
            newreq.visit = False
            return self.parent.open(newreq)
        else:
            return None
class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
    """Answer 401 responses by retrying with Basic credentials."""

    auth_header = 'Authorization'

    def http_error_401(self, req, fp, code, msg, headers):
        url = req.get_full_url()
        return self.http_error_auth_reqed('www-authenticate',
                                          url, req, headers)
class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
    """Answer 407 responses by retrying with Basic proxy credentials."""

    auth_header = 'Proxy-authorization'

    def http_error_407(self, req, fp, code, msg, headers):
        # http_error_auth_reqed requires that there is no userinfo component in
        # authority.  Assume there isn't one, since urllib2 does not (and
        # should not, RFC 3986 s. 3.2.1) support requests for URLs containing
        # userinfo.
        authority = req.get_host()
        return self.http_error_auth_reqed('proxy-authenticate',
                                          authority, req, headers)
def randombytes(n):
    """Return n random bytes for nonce generation.

    Prefers the OS cryptographic RNG via os.urandom(), which is
    available on all mainstream platforms (not only those exposing
    /dev/urandom, as the previous hand-rolled implementation assumed).
    Falls back to the non-cryptographic random module only where
    os.urandom is unavailable.
    """
    try:
        return os.urandom(n)
    except (AttributeError, NotImplementedError):
        # os.urandom missing or unsupported on this platform; fall back
        # to the (weaker) PRNG from the random module.
        return "".join(chr(random.randrange(0, 256)) for i in range(n))
class AbstractDigestAuthHandler:
    """Shared machinery for HTTP Digest authentication (RFC 2617):
    parse the challenge, build the Authorization header value, and
    retry the request.  Subclasses provide .auth_header and hook the
    relevant http_error_* method.
    """
    # Digest authentication is specified in RFC 2617.
    # XXX The client does not inspect the Authentication-Info header
    # in a successful response.
    # XXX It should be possible to test this implementation against
    # a mock server that just generates a static set of challenges.
    # XXX qop="auth-int" supports is shaky
    def __init__(self, passwd=None):
        if passwd is None:
            passwd = HTTPPasswordMgr()
        self.passwd = passwd
        self.add_password = self.passwd.add_password
        # Retry/nonce bookkeeping (see http_error_auth_reqed and
        # get_authorization).
        self.retried = 0
        self.nonce_count = 0
        self.last_nonce = None
    def reset_retry_count(self):
        """Reset the failed-attempt counter (called after each cycle)."""
        self.retried = 0
    def http_error_auth_reqed(self, auth_header, host, req, headers):
        """Retry *req* with Digest credentials if the challenge in
        *auth_header* uses the Digest scheme; give up after 5 retries.
        """
        authreq = headers.get(auth_header, None)
        if self.retried > 5:
            # Don't fail endlessly - if we failed once, we'll probably
            # fail a second time. Hm. Unless the Password Manager is
            # prompting for the information. Crap. This isn't great
            # but it's better than the current 'repeat until recursion
            # depth exceeded' approach <wink>
            raise HTTPError(req.get_full_url(), 401, "digest auth failed",
                            headers, None)
        else:
            self.retried += 1
        if authreq:
            scheme = authreq.split()[0]
            if scheme.lower() == 'digest':
                return self.retry_http_digest_auth(req, authreq)
    def retry_http_digest_auth(self, req, auth):
        """Parse the Digest challenge in *auth* and re-issue *req* with
        an Authorization header.  Returns None when no credentials are
        available or the same header was already sent.
        """
        token, challenge = auth.split(' ', 1)
        chal = parse_keqv_list(parse_http_list(challenge))
        auth = self.get_authorization(req, chal)
        if auth:
            auth_val = 'Digest %s' % auth
            if req.headers.get(self.auth_header, None) == auth_val:
                return None
            newreq = copy.copy(req)
            newreq.add_unredirected_header(self.auth_header, auth_val)
            newreq.visit = False
            return self.parent.open(newreq)
    def get_cnonce(self, nonce):
        """Return a 16-character client nonce derived from the current
        nonce count, server nonce, time and random bytes."""
        # The cnonce-value is an opaque
        # quoted string value provided by the client and used by both client
        # and server to avoid chosen plaintext attacks, to provide mutual
        # authentication, and to provide some message integrity protection.
        # This isn't a fabulous effort, but it's probably Good Enough.
        dig = sha1_digest("%s:%s:%s:%s" % (self.nonce_count, nonce,
                                           time.ctime(), randombytes(8)))
        return dig[:16]
    def get_authorization(self, req, chal):
        """Build the value of the Digest Authorization header from the
        parsed challenge *chal*, or return None when the challenge is
        malformed, the algorithm/qop is unsupported, or no credentials
        are known for the realm.
        """
        try:
            realm = chal['realm']
            nonce = chal['nonce']
            qop = chal.get('qop')
            algorithm = chal.get('algorithm', 'MD5')
            # mod_digest doesn't send an opaque, even though it isn't
            # supposed to be optional
            opaque = chal.get('opaque', None)
        except KeyError:
            return None
        H, KD = self.get_algorithm_impls(algorithm)
        if H is None:
            return None
        user, pw = self.passwd.find_user_password(realm, req.get_full_url())
        if user is None:
            return None
        # XXX not implemented yet
        if req.has_data():
            entdig = self.get_entity_digest(req.get_data(), chal)
        else:
            entdig = None
        A1 = "%s:%s:%s" % (user, realm, pw)
        A2 = "%s:%s" % (req.get_method(),
                        # XXX selector: what about proxies and full urls
                        req.get_selector())
        if qop == 'auth':
            # nc (nonce count) must increase for repeated use of the
            # same server nonce, and restart at 1 for a fresh nonce.
            if nonce == self.last_nonce:
                self.nonce_count += 1
            else:
                self.nonce_count = 1
                self.last_nonce = nonce
            ncvalue = '%08x' % self.nonce_count
            cnonce = self.get_cnonce(nonce)
            noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, H(A2))
            respdig = KD(H(A1), noncebit)
        elif qop is None:
            # Legacy RFC 2069 style response (no qop).
            respdig = KD(H(A1), "%s:%s" % (nonce, H(A2)))
        else:
            # XXX handle auth-int.
            logger = logging.getLogger("mechanize.auth")
            logger.info("digest auth auth-int qop is not supported, not "
                        "handling digest authentication")
            return None
        # XXX should the partial digests be encoded too?
        base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
               'response="%s"' % (user, realm, nonce, req.get_selector(),
                                  respdig)
        if opaque:
            base += ', opaque="%s"' % opaque
        if entdig:
            base += ', digest="%s"' % entdig
        base += ', algorithm="%s"' % algorithm
        if qop:
            base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce)
        return base
    def get_algorithm_impls(self, algorithm):
        """Return (H, KD) digest primitives for *algorithm*.

        H hashes a single string; KD(secret, data) hashes
        "secret:data".  For unsupported algorithms H is None, and
        callers must check for that before using KD.
        """
        # algorithm should be case-insensitive according to RFC2617
        algorithm = algorithm.upper()
        if algorithm == 'MD5':
            H = md5_digest
        elif algorithm == 'SHA':
            H = sha1_digest
        else:
            # XXX MD5-sess
            # BUG FIX: previously an unknown algorithm left H unbound,
            # so "return H, KD" raised UnboundLocalError even though
            # get_authorization() explicitly handles H is None.  Bind
            # None so unsupported challenges are rejected gracefully.
            H = None
        KD = lambda s, d: H("%s:%s" % (s, d))
        return H, KD
    def get_entity_digest(self, data, chal):
        # XXX not implemented yet (needed for qop="auth-int").
        return None
class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
    """An authentication protocol defined by RFC 2069

    Digest authentication improves on basic authentication because it
    does not transmit passwords in the clear.
    """
    auth_header = 'Authorization'
    handler_order = 490  # before Basic auth

    def http_error_401(self, req, fp, code, msg, headers):
        # Authenticate against the authority (netloc) part of the URL.
        authority = urlparse.urlparse(req.get_full_url())[1]
        response = self.http_error_auth_reqed(
            'www-authenticate', authority, req, headers)
        self.reset_retry_count()
        return response
class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
    """Respond to 407 challenges with Digest proxy credentials."""
    auth_header = 'Proxy-Authorization'
    handler_order = 490  # before Basic auth

    def http_error_407(self, req, fp, code, msg, headers):
        authority = req.get_host()
        response = self.http_error_auth_reqed(
            'proxy-authenticate', authority, req, headers)
        self.reset_retry_count()
        return response
class AbstractHTTPHandler(BaseHandler):
    """Shared request preprocessing and connection handling for the
    HTTP and HTTPS handlers."""
    def __init__(self, debuglevel=0):
        self._debuglevel = debuglevel
    def set_http_debuglevel(self, level):
        # Forwarded to httplib's connection object in do_open().
        self._debuglevel = level
    def do_request_(self, request):
        """Fill in default headers before the request is sent:
        Content-type/Content-length for POST bodies, Host, and any
        parent-level addheaders not already present.  Returns the
        (mutated) request.
        """
        host = request.get_host()
        if not host:
            raise URLError('no host given')
        if request.has_data():  # POST
            data = request.get_data()
            if not request.has_header('Content-type'):
                request.add_unredirected_header(
                    'Content-type',
                    'application/x-www-form-urlencoded')
            if not request.has_header('Content-length'):
                request.add_unredirected_header(
                    'Content-length', '%d' % len(data))
        sel_host = host
        if request.has_proxy():
            # When proxied, the Host header must name the origin server
            # taken from the request selector, not the proxy.
            scheme, sel = splittype(request.get_selector())
            sel_host, sel_path = splithost(sel)
        if not request.has_header('Host'):
            request.add_unredirected_header('Host', sel_host)
        for name, value in self.parent.addheaders:
            name = name.capitalize()
            if not request.has_header(name):
                request.add_unredirected_header(name, value)
        return request
    def do_open(self, http_class, req):
        """Return an addinfourl object for the request, using http_class.

        http_class must implement the HTTPConnection API from httplib.
        The addinfourl return value is a file-like object.  It also
        has methods and attributes including:

        - info(): return a mimetools.Message object for the headers
        - geturl(): return the original request URL
        - code: HTTP status code
        """
        host_port = req.get_host()
        if not host_port:
            raise URLError('no host given')
        try:
            h = http_class(host_port, timeout=req.timeout)
        except TypeError:
            # Python < 2.6, no per-connection timeout support
            h = http_class(host_port)
        h.set_debuglevel(self._debuglevel)
        headers = dict(req.headers)
        headers.update(req.unredirected_hdrs)
        # We want to make an HTTP/1.1 request, but the addinfourl
        # class isn't prepared to deal with a persistent connection.
        # It will try to read all remaining data from the socket,
        # which will block while the server waits for the next request.
        # So make sure the connection gets closed after the (only)
        # request.
        headers["Connection"] = "close"
        # Title-case header names so later duplicates collapse
        # predictably into one dict key.
        headers = dict(
            (name.title(), val) for name, val in headers.items())
        if req._tunnel_host:
            # HTTPS through a proxy requires CONNECT tunnelling support
            # in httplib (set_tunnel; spelt _set_tunnel on Python 2.6).
            if not hasattr(h, "set_tunnel"):
                if not hasattr(h, "_set_tunnel"):
                    raise URLError("HTTPS through proxy not supported "
                                   "(Python >= 2.6.4 required)")
                else:
                    # python 2.6
                    set_tunnel = h._set_tunnel
            else:
                set_tunnel = h.set_tunnel
            set_tunnel(req._tunnel_host)
        try:
            h.request(req.get_method(), req.get_selector(), req.data, headers)
            r = h.getresponse()
        except socket.error, err: # XXX what error?
            raise URLError(err)
        # Pick apart the HTTPResponse object to get the addinfourl
        # object initialized properly.
        # Wrap the HTTPResponse object in socket's file object adapter
        # for Windows. That adapter calls recv(), so delegate recv()
        # to read(). This weird wrapping allows the returned object to
        # have readline() and readlines() methods.
        # XXX It might be better to extract the read buffering code
        # out of socket._fileobject() and into a base class.
        r.recv = r.read
        fp = create_readline_wrapper(r)
        resp = closeable_response(fp, r.msg, req.get_full_url(),
                                  r.status, r.reason)
        return resp
class HTTPHandler(AbstractHTTPHandler):
    """Open http: URLs via httplib.HTTPConnection."""
    def http_open(self, req):
        return self.do_open(httplib.HTTPConnection, req)
    # Request preprocessing (default headers, Host, proxy selector
    # handling) is inherited from AbstractHTTPHandler.
    http_request = AbstractHTTPHandler.do_request_
# HTTPS support is only defined when httplib was built with SSL.
if hasattr(httplib, 'HTTPS'):
    class HTTPSConnectionFactory:
        """Callable that builds HTTPSConnections carrying a fixed
        client key/certificate pair."""
        def __init__(self, key_file, cert_file):
            self._key_file = key_file
            self._cert_file = cert_file
        def __call__(self, hostport):
            return httplib.HTTPSConnection(
                hostport,
                key_file=self._key_file, cert_file=self._cert_file)
    class HTTPSHandler(AbstractHTTPHandler):
        """Open https: URLs, optionally with client certificates."""
        def __init__(self, client_cert_manager=None):
            AbstractHTTPHandler.__init__(self)
            # Object providing find_key_cert(url) -> (key_file,
            # cert_file); None disables client certificates.
            self.client_cert_manager = client_cert_manager
        def https_open(self, req):
            if self.client_cert_manager is not None:
                key_file, cert_file = self.client_cert_manager.find_key_cert(
                    req.get_full_url())
                conn_factory = HTTPSConnectionFactory(key_file, cert_file)
            else:
                conn_factory = httplib.HTTPSConnection
            return self.do_open(conn_factory, req)
        https_request = AbstractHTTPHandler.do_request_
class HTTPCookieProcessor(BaseHandler):
    """Attach cookies to outgoing requests and harvest them from
    responses.

    Public attributes:

    cookiejar: CookieJar instance
    """
    def __init__(self, cookiejar=None):
        # Fall back to a fresh jar so the processor works standalone.
        self.cookiejar = CookieJar() if cookiejar is None else cookiejar

    def http_request(self, request):
        self.cookiejar.add_cookie_header(request)
        return request

    def http_response(self, request, response):
        self.cookiejar.extract_cookies(response, request)
        return response

    # Cookie handling is identical for http and https.
    https_request = http_request
    https_response = http_response
class UnknownHandler(BaseHandler):
    """Last-resort handler: reject URL schemes no other handler claimed."""
    def unknown_open(self, req):
        raise URLError('unknown url type: %s' % req.get_type())
def parse_keqv_list(l):
    """Parse list of key=value strings where keys are not duplicated.

    Values wrapped in double quotes have the quotes stripped.  A value
    shorter than two characters cannot be a quoted string, so it is
    kept as-is; this also fixes an IndexError the previous code raised
    on empty values such as "key=".
    """
    parsed = {}
    for elt in l:
        k, v = elt.split('=', 1)
        if len(v) > 1 and v[0] == '"' and v[-1] == '"':
            v = v[1:-1]
        parsed[k] = v
    return parsed
def parse_http_list(s):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings.  A quoted-string could
    contain a comma.  A non-quoted string could have quotes in the
    middle.  Neither commas nor quotes count if they are escaped.

    Only double-quotes count, not single-quotes.
    """
    items = []
    buf = []                 # characters of the element being built
    in_quotes = False
    pending_escape = False   # previous char was a backslash in quotes
    for ch in s:
        if pending_escape:
            # The backslash itself was dropped; keep the escaped char.
            buf.append(ch)
            pending_escape = False
        elif in_quotes:
            if ch == '\\':
                pending_escape = True
            else:
                if ch == '"':
                    in_quotes = False
                buf.append(ch)
        elif ch == ',':
            items.append(''.join(buf))
            buf = []
        else:
            if ch == '"':
                in_quotes = True
            buf.append(ch)
    # Append the trailing element, if any.
    if buf:
        items.append(''.join(buf))
    return [item.strip() for item in items]
class FileHandler(BaseHandler):
    """Open file: URLs, delegating //host/ forms to FTP."""
    # Use local file or FTP depending on form of URL
    def file_open(self, req):
        url = req.get_selector()
        if url[:2] == '//' and url[2:3] != '/':
            # file://host/path with a non-empty host: re-dispatch as ftp.
            req.type = 'ftp'
            return self.parent.open(req)
        else:
            return self.open_local_file(req)
    # names for the localhost
    names = None
    def get_names(self):
        """Return (and cache class-wide) the IP addresses that count as
        the local host."""
        if FileHandler.names is None:
            try:
                FileHandler.names = (socket.gethostbyname('localhost'),
                                    socket.gethostbyname(socket.gethostname()))
            except socket.gaierror:
                FileHandler.names = (socket.gethostbyname('localhost'),)
        return FileHandler.names
    # not entirely sure what the rules are here
    def open_local_file(self, req):
        """Open a local file, synthesising Content-type/length/
        Last-modified headers; raises URLError for non-local hosts or
        OS-level failures."""
        try:
            import email.utils as emailutils
        except ImportError:
            # python 2.4
            import email.Utils as emailutils
        import mimetypes
        host = req.get_host()
        file = req.get_selector()
        localfile = url2pathname(file)
        try:
            stats = os.stat(localfile)
            size = stats.st_size
            modified = emailutils.formatdate(stats.st_mtime, usegmt=True)
            mtype = mimetypes.guess_type(file)[0]
            headers = mimetools.Message(StringIO(
                'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' %
                (mtype or 'text/plain', size, modified)))
            if host:
                host, port = splitport(host)
            # Only serve when no host was given, or the host (with no
            # explicit port) resolves to one of this machine's names.
            if not host or \
                (not port and socket.gethostbyname(host) in self.get_names()):
                return addinfourl(open(localfile, 'rb'),
                                  headers, 'file:'+file)
        except OSError, msg:
            # urllib2 users shouldn't expect OSErrors coming from urlopen()
            raise URLError(msg)
        raise URLError('file not on local host')
class FTPHandler(BaseHandler):
    """Open ftp: URLs via ftplib, returning an addinfourl wrapping the
    retrieved file or directory listing with synthesised headers."""
    def ftp_open(self, req):
        import ftplib
        import mimetypes
        host = req.get_host()
        if not host:
            raise URLError('ftp error: no host given')
        host, port = splitport(host)
        if port is None:
            port = ftplib.FTP_PORT
        else:
            port = int(port)
        # username/password handling
        user, host = splituser(host)
        if user:
            user, passwd = splitpasswd(user)
        else:
            passwd = None
        host = unquote(host)
        user = unquote(user or '')
        passwd = unquote(passwd or '')
        try:
            host = socket.gethostbyname(host)
        except socket.error, msg:
            raise URLError(msg)
        path, attrs = splitattr(req.get_selector())
        dirs = path.split('/')
        dirs = map(unquote, dirs)
        dirs, file = dirs[:-1], dirs[-1]
        if dirs and not dirs[0]:
            dirs = dirs[1:]
        try:
            fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout)
            # 'I'(mage) = binary transfer for files, 'D' = directory
            # listing; a ;type= attribute on the URL may override this.
            type = file and 'I' or 'D'
            for attr in attrs:
                attr, value = splitvalue(attr)
                if attr.lower() == 'type' and \
                   value in ('a', 'A', 'i', 'I', 'd', 'D'):
                    type = value.upper()
            fp, retrlen = fw.retrfile(file, type)
            headers = ""
            mtype = mimetypes.guess_type(req.get_full_url())[0]
            if mtype:
                headers += "Content-type: %s\n" % mtype
            if retrlen is not None and retrlen >= 0:
                headers += "Content-length: %d\n" % retrlen
            sf = StringIO(headers)
            headers = mimetools.Message(sf)
            return addinfourl(fp, headers, req.get_full_url())
        except ftplib.all_errors, msg:
            # Re-raise as URLError, preserving the original traceback.
            raise URLError, ('ftp error: %s' % msg), sys.exc_info()[2]
    def connect_ftp(self, user, passwd, host, port, dirs, timeout):
        """Create an ftpwrapper connection; overridden by
        CacheFTPHandler to reuse connections."""
        try:
            fw = ftpwrapper(user, passwd, host, port, dirs, timeout)
        except TypeError:
            # Python < 2.6, no per-connection timeout support
            fw = ftpwrapper(user, passwd, host, port, dirs)
##        fw.ftp.set_debuglevel(1)
        return fw
class CacheFTPHandler(FTPHandler):
    """FTPHandler keeping a small cache of live FTP connections, keyed
    by (user, host, port, path, timeout), expiring entries after a
    delay and capping the number of simultaneous connections.

    XXX would be nice to have pluggable cache strategies
    XXX this stuff is definitely not thread safe
    """
    def __init__(self):
        self.cache = {}    # key -> ftpwrapper
        self.timeout = {}  # key -> absolute expiry time (epoch seconds)
        self.soonest = 0   # earliest expiry among cached entries
        self.delay = 60    # seconds an idle connection stays cached
        self.max_conns = 16
    def setTimeout(self, t):
        self.delay = t
    def setMaxConns(self, m):
        self.max_conns = m
    def connect_ftp(self, user, passwd, host, port, dirs, timeout):
        """Return a cached connection for this endpoint, creating and
        caching a new one on a miss."""
        key = user, host, port, '/'.join(dirs), timeout
        if key in self.cache:
            # Cache hit: just refresh the expiry time.
            self.timeout[key] = time.time() + self.delay
        else:
            self.cache[key] = ftpwrapper(user, passwd, host, port, dirs, timeout)
            self.timeout[key] = time.time() + self.delay
        self.check_cache()
        return self.cache[key]
    def check_cache(self):
        """Close and drop expired connections, then enforce max_conns."""
        # first check for old ones
        t = time.time()
        if self.soonest <= t:
            # Iterate over a snapshot: entries are deleted in the loop
            # (mutating a dict while iterating .items() breaks on py3).
            for k, v in list(self.timeout.items()):
                if v < t:
                    self.cache[k].close()
                    del self.cache[k]
                    del self.timeout[k]
            # BUG FIX: min() on an empty sequence raises ValueError once
            # every entry has expired; fall back to 0 (the initial value).
            self.soonest = min(self.timeout.values()) if self.timeout else 0
        # then check the size (>= rather than == so the limit is still
        # enforced if max_conns was lowered at runtime)
        if len(self.cache) >= self.max_conns:
            for k, v in list(self.timeout.items()):
                if v == self.soonest:
                    del self.cache[k]
                    del self.timeout[k]
                    break
            self.soonest = min(self.timeout.values()) if self.timeout else 0
|
odicraig/kodi2odi
|
addons/plugin.video.roggerstream-4.0.0/mechanize/_urllib2_fork.py
|
Python
|
gpl-3.0
| 50,481
|
[
"VisIt"
] |
29f2553d12fb8b76d11b7beef7fdcf18f24761e9feca133068fa56b06ee07cdd
|
##############################################################################
# State estimation in LGSS and SV models using Kalman and particle filters
#
# Johan Dahlin <liu (at) johandahlin.com.nospam>
# Documentation at https://github.com/compops/pmh-tutorial
# Published under GNU General Public License
##############################################################################
from __future__ import print_function, division
import numpy as np
from numpy.random import randn, choice
from scipy.stats import norm
##############################################################################
# Kalman filter for the linear Gaussian SSM
##############################################################################
def kalmanFilter(observations, parameters, initialState, initialStateCov):
    """Kalman filter for the scalar linear Gaussian state-space model.

    Args:
        observations: sequence of observations y.
        parameters: (A, sigma_v, sigma_e) -- state transition
            coefficient, state noise std and measurement noise std.
        initialState: mean of the initial state.
        initialStateCov: variance of the initial state.

    Returns:
        (noObservations, 1) array of filtered state estimates.
    """
    noObservations = len(observations)
    stateTransition = parameters[0]            # A
    processNoiseVar = parameters[1] ** 2       # Q = sigma_v^2
    measurementNoiseVar = parameters[2] ** 2   # R = sigma_e^2
    observationCoeff = 1                       # C

    covPredicted = initialStateCov
    statePredicted = initialState * np.ones((noObservations + 1, 1))
    stateFiltered = initialState * np.ones((noObservations, 1))

    for t in range(noObservations):
        # Correction step
        innovationVar = observationCoeff * covPredicted * observationCoeff + measurementNoiseVar
        gain = covPredicted * observationCoeff / innovationVar
        covFiltered = covPredicted - gain * innovationVar * gain
        obsPredicted = observationCoeff * statePredicted[t]
        # NOTE(review): at t == 0 this reads observations[-1] (the last
        # sample, wrap-around) -- looks like an off-by-one; confirm
        # against the tutorial's data indexing convention.
        stateFiltered[t] = statePredicted[t] + gain * (observations[t - 1] - obsPredicted)
        # Prediction step
        statePredicted[t + 1] = stateTransition * stateFiltered[t]
        covPredicted = stateTransition * covFiltered * stateTransition + processNoiseVar

    return stateFiltered
##############################################################################
# Fully-adapted particle filter for the linear Gaussian SSM
##############################################################################
def particleFilter(observations, parameters, noParticles, initialState):
    """Fully-adapted particle filter for the scalar linear Gaussian SSM.

    Args:
        observations: sequence of observations y.
        parameters: (phi, sigmav, sigmae) -- AR(1) coefficient, state
            noise std and measurement noise std.
        noParticles: number of particles.
        initialState: known initial state x0.

    Returns:
        (xHatFiltered, logLikelihood): filtered state estimates of
        shape (len(observations) - 1, 1), and the log-likelihood
        estimate.
    """
    noObservations = len(observations) - 1
    phi = parameters[0]
    sigmav = parameters[1]
    sigmae = parameters[2]
    particles = np.zeros((noParticles, noObservations))
    ancestorIndices = np.zeros((noParticles, noObservations))
    weights = np.zeros((noParticles, noObservations))
    normalisedWeights = np.zeros((noParticles, noObservations))
    xHatFiltered = np.zeros((noObservations, 1))
    # Set the initial state and weights
    ancestorIndices[: , 0] = range(noParticles)
    particles[:, 0] = initialState
    xHatFiltered[0] = initialState
    normalisedWeights[:, 0] = 1.0 / noParticles
    logLikelihood = 0
    for t in range(1, noObservations):
        # Resample (multinomial)
        newAncestors = choice(noParticles, noParticles, p=normalisedWeights[:, t - 1], replace=True)
        # NOTE(review): at t == 1 the slice 1:t-1 is empty, so the
        # genealogy before t is only rewritten for t >= 3; confirm the
        # intended slice (possibly 0:t) against the tutorial.
        ancestorIndices[:, 1:t - 1] = ancestorIndices[newAncestors, 1:t - 1]
        ancestorIndices[:, t] = newAncestors
        # Propagate: presumably the fully-adapted (optimal) proposal,
        # which mixes the prior mean with the observation at time t --
        # confirm the one-step offsets against the tutorial.
        part1 = (sigmav**(-2) + sigmae**(-2))**(-1)
        part2 = sigmae**(-2) * observations[t]
        part2 = part2 + sigmav**(-2) * phi * particles[newAncestors, t - 1]
        particles[:, t] = part1 * part2 + np.sqrt(part1) * randn(1, noParticles)
        # Compute weights (log-domain; subtract the max before
        # exponentiating for numerical stability)
        yhatMean = phi * particles[:, t]
        yhatVariance = np.sqrt(sigmav**2 + sigmae**2)
        weights[:, t] = norm.logpdf(observations[t + 1], yhatMean, yhatVariance)
        maxWeight = np.max(weights[:, t])
        weights[:, t] = np.exp(weights[:, t] - maxWeight)
        sumWeights = np.sum(weights[:, t])
        normalisedWeights[:, t] = weights[:, t] / sumWeights
        # Estimate the state (self-normalised importance sampling mean)
        xHatFiltered[t] = np.sum(normalisedWeights[:, t] * particles[:, t])
        # Estimate log-likelihood (re-add the subtracted max)
        predictiveLikelihood = maxWeight + np.log(sumWeights) - np.log(noParticles)
        logLikelihood += predictiveLikelihood
    return xHatFiltered, logLikelihood
##############################################################################
# Bootstrap particle filter for the stochastic volatility model
##############################################################################
def particleFilterSVmodel(observations, parameters, noParticles):
    """Bootstrap particle filter for the stochastic volatility model.

    Args:
        observations: sequence of observations (log-returns) y.
        parameters: (mu, phi, sigmav) -- mean, persistence and noise
            std of the AR(1) log-volatility process.
        noParticles: number of particles.

    Returns:
        (stateTrajectory, logLikelihood): the particle row selected by
        a weighted draw at the final time step, and the log-likelihood
        estimate.
    """
    noObservations = len(observations)
    mu = parameters[0]
    phi = parameters[1]
    sigmav = parameters[2]
    particles = np.zeros((noParticles, noObservations))
    ancestorIndices = np.zeros((noParticles, noObservations))
    weights = np.zeros((noParticles, noObservations))
    normalisedWeights = np.zeros((noParticles, noObservations))
    xHatFiltered = np.zeros((noObservations, 1))
    # Set the initial state and weights: draw x0 from the stationary
    # distribution of the AR(1) log-volatility process.
    particles[:, 0] = mu + sigmav / np.sqrt(1.0 - phi**2) * randn(1, noParticles)
    normalisedWeights[:, 0] = 1.0 / noParticles
    weights[:, 0] = 1.0
    logLikelihood = 0
    for t in range(1, noObservations):
        # Resample particles
        newAncestors = choice(noParticles, noParticles, p=normalisedWeights[:, t - 1], replace=True)
        # NOTE(review): at t == 1 the slice 1:t-1 is empty; confirm the
        # intended genealogy bookkeeping (possibly 0:t).
        ancestorIndices[:, 1:t - 1] = ancestorIndices[newAncestors, 1:t - 1]
        ancestorIndices[:, t] = newAncestors
        # Propagate particles through the AR(1) state dynamics
        particles[:, t] = mu + phi * (particles[newAncestors, t - 1] - mu) + sigmav * randn(1, noParticles)
        # Weight particles (log-domain, max-subtracted for stability).
        # NOTE(review): the weight at time t uses observations[t - 1];
        # confirm this one-step offset matches the intended indexing.
        weights[:, t] = norm.logpdf(observations[t - 1], 0, np.exp(particles[:, t] / 2))
        maxWeight = np.max(weights[:, t])
        weights[:, t] = np.exp(weights[:, t] - maxWeight)
        sumWeights = np.sum(weights[:, t])
        normalisedWeights[:, t] = weights[:, t] / sumWeights
        # Estimate the filtered state
        xHatFiltered[t] = np.sum(normalisedWeights[:, t] * particles[:, t])
        # Estimate log-likelihood (re-add the subtracted max)
        predictiveLikelihood = maxWeight + np.log(sumWeights) - np.log(noParticles)
        logLikelihood += predictiveLikelihood
    # Sample the state estimate using the weights at t=T
    ancestorIndex = choice(noParticles, 1, p=normalisedWeights[:, noObservations - 1])
    stateTrajectory = particles[ancestorIndices[ancestorIndex, noObservations - 1].astype(int), :]
    return stateTrajectory, logLikelihood
|
compops/pmh-tutorial
|
python/helpers/stateEstimation.py
|
Python
|
gpl-2.0
| 6,082
|
[
"Gaussian"
] |
c410be9c600af79b3d6767d7b964537f8ecf385ef0c075cdb7454f4acca5b6ba
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-install-web-portal
# Author : Ricardo Graciani
########################################################################
"""
Do the initial installation of a DIRAC Web portal
"""
__RCSID__ = "$Id$"
#
from DIRAC.Core.Utilities import InstallTools
#
InstallTools.exitOnError = True
#
from DIRAC.Core.Base import Script
Script.disableCS()
Script.setUsageMessage('\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ...' % Script.scriptName,
'Arguments:',] ) )
Script.parseCommandLine()
InstallTools.installPortal()
|
avedaee/DIRAC
|
Core/scripts/dirac-install-web-portal.py
|
Python
|
gpl-3.0
| 766
|
[
"DIRAC"
] |
37fab7ff851ee3a6ed662253457984a3c87563d3ec72ff929d0251f209bfb3d1
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAlsace(RPackage):
    """ALS for the Automatic Chemical Exploration of mixtures

    Alternating Least Squares (or Multivariate Curve Resolution) for
    analytical chemical data, in particular hyphenated data where the first
    direction is a retention time axis, and the second a spectral axis.
    Package builds on the basic als function from the ALS package and adds
    functionality for high-throughput analysis, including definition of time
    windows, clustering of profiles, retention time correction, etcetera."""
    homepage = "https://bioconductor.org/packages/alsace"
    git = "https://git.bioconductor.org/packages/alsace.git"
    # Each version is pinned to a commit on the Bioconductor git mirror
    # rather than a release tarball checksum.
    version('1.26.0', commit='40a76404acb1466723a78a55d87c67eec3e6f306')
    version('1.20.0', commit='47f1cf8daafc864e5e3418009f349ce85d6b0389')
    version('1.18.0', commit='c9fc43c7b441de43b14ef1be69926c4c4a566191')
    version('1.16.0', commit='5a51a19aeccbba0123222201cb7a228559f29653')
    version('1.14.0', commit='aebb13b00eb850f9569391c4c92183b55b70ae89')
    version('1.12.0', commit='1364c65bbff05786d05c02799fd44fd57748fae3')
    # Requires R itself plus the ALS and ptw (>= 1.0.6) R packages at
    # both build and run time.
    depends_on('r@2.10:', type=('build', 'run'))
    depends_on('r-als', type=('build', 'run'))
    depends_on('r-ptw@1.0.6:', type=('build', 'run'))
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-alsace/package.py
|
Python
|
lgpl-2.1
| 1,487
|
[
"Bioconductor"
] |
263e30dbccd2b0d2202f26a0ff6612afb5919a7a8fa64abe1b3a778ceaa4d10e
|
"""
The alignment pipeline.
We start with the .fastq files and the reference for a particular genome
and carry out the gauntlet of steps to perform alignments, alignment
cleaning, snv calling, and effect prediction.
"""
from datetime import datetime
import os
import time
from celery import group
from celery import task
from django.conf import settings
from main.celery_util import assert_celery_running
from main.models import AlignmentGroup
from main.models import ReferenceGenome
from main.models import Dataset
from main.models import ExperimentSampleToAlignment
from pipeline.read_alignment import align_with_bwa_mem
from pipeline.variant_calling import find_variants_with_tool
from pipeline.variant_calling import VARIANT_TOOL_PARAMS_MAP
from pipeline.variant_calling import TOOL_FREEBAYES
from pipeline.variant_calling import TOOL_LUMPY
from pipeline.variant_calling import TOOL_PINDEL
from pipeline.variant_calling.common import get_or_create_vcf_output_dir
from pipeline.variant_calling.freebayes import merge_freebayes_parallel
from pipeline.variant_calling.freebayes import freebayes_regions
from pipeline.variant_calling.lumpy import merge_lumpy_vcf
from pipeline.variant_calling.pindel import merge_pindel_vcf
# List of variant callers to use. At time of writing, this was not hooked
# up to the ui and only used internally.
VARIANT_CALLING_OPTION__CALLER_OVERRIDE = 'enabled_variant_callers_override'
def run_pipeline(alignment_group_label, ref_genome, sample_list,
        skip_alignment=False, perform_variant_calling=True, alignment_options=None,
        variant_calling_options=None):
    """Runs the entire bioinformatics pipeline, including alignment and
    variant calling.

    Steps:
        * Create AlignmentGroup if not created
        * get_or_create ExperimentSampleToAlignments and respective Datasets
        * Kick off alignments
        * When all alignments are done, kick off variant calling

    Args:
        alignment_group_label: Name for this alignment.
        ref_genome: ReferenceGenome instance
        sample_list: List of sample instances. Must belong to same project as
            ReferenceGenomes.
        skip_alignment: If True, skip alignment.
        perform_variant_calling: Whether to run variant calling.
        alignment_options: Control aspects of alignment. Defaults to {}.
        variant_calling_options: Control aspects of calling variants.
            Defaults to {}.

    Returns:
        Tuple (alignment_group, alignment_task_group_async_result,
        variant_calling_async_result).
    """
    # Mutable default arguments ({}) are shared between calls; use None
    # sentinels and create fresh dicts per call instead.
    if alignment_options is None:
        alignment_options = {}
    if variant_calling_options is None:
        variant_calling_options = {}
    if not skip_alignment:
        _assert_pipeline_is_safe_to_run(alignment_group_label, sample_list)
    # Create AlignmentGroup, the entity which groups together the alignments
    # of individual samples, and results of variant calling which happens
    # for all samples together.
    alignment_group, _ = AlignmentGroup.objects.get_or_create(
            label=alignment_group_label,
            reference_genome=ref_genome,
            aligner=AlignmentGroup.ALIGNER.BWA)
    alignment_group.alignment_options.update(alignment_options)
    # The pipeline has two synchronous phases, each of whose components
    # maybe run in parallel:
    #     1) Alignments - run in parallel.
    #     2) Variant calling - each variant caller runs in parallel, but waits
    #        for all alignments to be complete before starting.
    #
    # NOTE: Nested chords in celery don't work so we need to break up the
    # pipeline into # two separate pipelines: 1) alignment and 2) variant
    # calling. This task is the first task in the variant calling pipeline which
    # polls the database until all alignments are complete before kicking off
    # parallel variant calling tasks.
    # NOTE: Since we don't want results to be passed as arguments in the
    # chain, use .si(...) and not .s(...)
    # See: http://stackoverflow.com/questions/15224234/celery-chaining-tasks-sequentially
    # First we create Models so that we can track status from the ui
    # immediately. The ui determines the status of each
    # ExperimentSampleToAlignment by looking at the status of its BWA dataset.
    sample_alignments_to_run = _get_or_create_sample_alignment_datasets(
            alignment_group, sample_list)
    # Before we continue, let's update the ref genome object. This is code
    # left over from when we were fighting a concurrency bug.
    # TODO: Revisit such calls and see if we can clean them up.
    ref_genome = ReferenceGenome.objects.get(uid=ref_genome.uid)
    # Now we aggregate the alignments that need to be run, collecting their
    # signatures in a Celery group so that these alignments can be run in
    # parallel.
    alignment_task_signatures = []
    for sample_alignment in sample_alignments_to_run:
        alignment_task_signatures.append(
                align_with_bwa_mem.si(
                        alignment_group, sample_alignment,
                        project=ref_genome.project))
    if len(alignment_task_signatures) > 0:
        alignment_task_group = group(alignment_task_signatures)
        alignment_task_group_async_result = alignment_task_group.apply_async()
    else:
        alignment_task_group_async_result = None
    # HACK(gleb): Force ALIGNING so that UI starts refreshing. This should be
    # right, but I'm open to removing if it's not right for some case I
    # didn't think of.
    alignment_group.status = AlignmentGroup.STATUS.ALIGNING
    alignment_group.start_time = datetime.now()
    alignment_group.end_time = None
    alignment_group.save(update_fields=['status', 'start_time', 'end_time'])
    # Aggregate variant callers, which run in parallel once all alignments
    # are done.
    if perform_variant_calling:
        variant_caller_group = _construct_variant_caller_group(
                alignment_group, variant_calling_options)
    else:
        variant_caller_group = None
    # Put together the whole pipeline.
    variant_calling_pipeline = start_variant_calling_pipeline_task.si(
            alignment_group)
    if variant_caller_group is not None:
        variant_calling_pipeline = (variant_calling_pipeline |
                variant_caller_group)
    # Add a final task which runs only after all previous tasks are complete.
    pipeline_completion = pipeline_completion_tasks.si(alignment_group)
    variant_calling_pipeline = variant_calling_pipeline | pipeline_completion
    # TODO(gleb): We had this to deal with race conditions. Do we still need it?
    ref_genome.save()
    # Run the pipeline. This is a non-blocking call when celery is running so
    # the rest of code proceeds immediately.
    variant_calling_async_result = variant_calling_pipeline.apply_async()
    return (
            alignment_group,
            alignment_task_group_async_result,
            variant_calling_async_result)
def _assert_pipeline_is_safe_to_run(alignment_group_label, sample_list):
    """Helper that checks that pipeline is ready to run.

    Raises:
        AssertionError if any problems.
    """
    assert len(alignment_group_label) > 0, "Name must be non-trivial string."
    assert len(sample_list) > 0, (
            "Must provide at least one ExperimentSample.")
    assert_celery_running()
    # Every dataset backing the samples must be fully processed.
    relevant_datasets = Dataset.objects.filter(
            experimentsample__in=sample_list)
    good_statuses = (Dataset.STATUS.READY, Dataset.STATUS.QC)
    for dataset in relevant_datasets:
        assert dataset.status in good_statuses, (
                "Dataset %s for sample %s has status %s. Expected %s." % (
                        dataset.label,
                        dataset.experimentsample_set.all()[0].label,
                        dataset.status, Dataset.STATUS.READY))
def _get_or_create_sample_alignment_datasets(alignment_group, sample_list):
    """Creates Dataset models that allow tracking status of alignment from ui.

    Does not start alignments.

    Args:
        alignment_group: AlignmentGroup the samples belong to.
        sample_list: ExperimentSample instances to be aligned.

    Returns list of ExperimentSampleToAlignments.
    """
    sample_alignments_to_run = []
    for sample in sample_list:
        sample_alignment, _ = ExperimentSampleToAlignment.objects.get_or_create(
                alignment_group=alignment_group, experiment_sample=sample)
        # Get or create a Dataset to store the alignment result.
        sample_alignment_bwa_datasets = sample_alignment.dataset_set.filter(
                type=Dataset.TYPE.BWA_ALIGN)
        # At most one BWA dataset should exist per sample alignment.
        assert len(sample_alignment_bwa_datasets) <= 1
        if len(sample_alignment_bwa_datasets) == 1:
            bwa_dataset = sample_alignment_bwa_datasets[0]
        else:
            bwa_dataset = Dataset.objects.create(
                    label=Dataset.TYPE.BWA_ALIGN,
                    type=Dataset.TYPE.BWA_ALIGN,
                    status=Dataset.STATUS.NOT_STARTED)
            sample_alignment.dataset_set.add(bwa_dataset)
        # Add it to the list of alignments to run, unless already done.
        if not bwa_dataset.status == Dataset.STATUS.READY:
            sample_alignments_to_run.append(sample_alignment)
    return sample_alignments_to_run
def _construct_variant_caller_group(alignment_group, variant_calling_options):
    """Returns celery Group of variant calling tasks that can be run
    in parallel.

    The returned object is a celery canvas: group(parallel caller tasks)
    chained into a single merge_variant_data task.
    """
    # Get fresh copy of ReferenceGenome to avoid potential issues with
    # race conditions.
    ref_genome = ReferenceGenome.objects.get(
        uid=alignment_group.reference_genome.uid)

    # Determine which variant callers to use. The caller-override option,
    # when present, replaces the globally-enabled caller list.
    if VARIANT_CALLING_OPTION__CALLER_OVERRIDE in variant_calling_options:
        effective_variant_callers = variant_calling_options[
            VARIANT_CALLING_OPTION__CALLER_OVERRIDE]
    else:
        effective_variant_callers = settings.ENABLED_VARIANT_CALLERS

    # List of tasks that can be run in parallel. These will be combined into a
    # single celery.group.
    parallel_tasks = []

    # Iterate through tools and kick off tasks.
    for tool in effective_variant_callers:
        # Common params for this tool.
        tool_params = VARIANT_TOOL_PARAMS_MAP[tool]
        if settings.FREEBAYES_PARALLEL and tool == TOOL_FREEBAYES:
            # Special handling for freebayes if running parallel. Break up
            # ReferenceGenome into regions and create separate job for each.
            fb_regions = freebayes_regions(ref_genome)
            assert len(fb_regions) >= 0
            for region_num, fb_region in enumerate(fb_regions):
                # Copy so each task carries its own region kwargs.
                region_params = dict(tool_params)
                region_params['tool_kwargs'] = {
                    'region': fb_region,
                    'region_num': region_num
                }
                parallel_tasks.append(find_variants_with_tool.si(
                    alignment_group, region_params,
                    project=ref_genome.project))
        elif tool in [TOOL_LUMPY, TOOL_PINDEL]:
            sample_alignment_list = (
                alignment_group.experimentsampletoalignment_set.all())

            # Lumpy only works for paired reads right now. Skip Lumpy if
            # any unpaired reads.
            any_unpaired = False
            for sa in sample_alignment_list:
                dataset_types = [ds.type for ds in
                        sa.experiment_sample.dataset_set.all()]
                # Absence of a FASTQ2 dataset means the sample is unpaired.
                if not Dataset.TYPE.FASTQ2 in dataset_types:
                    any_unpaired = True
                    break
            if any_unpaired:
                continue

            # TODO: What if some alignments failed?
            for sa in sample_alignment_list:
                # Create separate lumpy task for each sample.
                per_sample_params = dict(tool_params)
                per_sample_params['tool_kwargs'] = {
                    # NOTE(review): the sample alignment uid is reused as the
                    # 'region_num' key here -- confirm downstream consumers
                    # only use it as an opaque label.
                    'region_num': sa.uid,
                    'sample_alignments': [sa]
                }
                parallel_tasks.append(find_variants_with_tool.si(
                    alignment_group, per_sample_params,
                    project=ref_genome.project))
        else:
            # Default path: one task per tool over the whole genome.
            parallel_tasks.append(find_variants_with_tool.si(
                alignment_group, tool_params, project=ref_genome.project))

    # Chain the parallel group into the merge step.
    variant_calling_pipeline = (group(parallel_tasks) |
            merge_variant_data.si(alignment_group))

    return variant_calling_pipeline
@task
def start_variant_calling_pipeline_task(alignment_group):
    """First task in variant calling pipeline which waits for all alignments
    to be complete.

    Nested chords in celery don't work so we need to break up the pipeline into
    two separate pipelines: 1) alignment and 2) variant calling. This task is
    the first task in the variant calling pipeline which polls the database
    until all alignments are complete.

    Raises:
        Exception if any sample alignment's BWA dataset ends up FAILED.
    """
    print 'START VARIANT CALLING PIPELINE. WAITING FOR ALIGNMENTS TO COMPLETE.'
    POLL_INTERVAL_SEC = 5

    sample_alignment_list = ExperimentSampleToAlignment.objects.filter(
            alignment_group=alignment_group)

    # Busy-wait loop: re-read each sample alignment's BWA dataset status
    # from the db until all are READY, or any one is FAILED.
    all_samples_ready = False
    failed = False
    while not all_samples_ready:
        all_samples_ready = True
        for sa in sample_alignment_list:
            # Fresh fetch so we see status updates made by the worker tasks.
            sa_fresh = ExperimentSampleToAlignment.objects.get(id=sa.id)
            bwa_dataset = sa_fresh.dataset_set.get(label=Dataset.TYPE.BWA_ALIGN)
            if bwa_dataset.status == Dataset.STATUS.FAILED:
                failed = True
                # # DEBUG: Uncomment to see alignment error output
                # print '---------------ALIGNMENT ERROR OUTPUT---------------\n'
                # error_dataset = sa_fresh.dataset_set.get(
                #         label=Dataset.TYPE.BWA_ALIGN_ERROR)
                # with open(error_dataset.get_absolute_location(),'r') as fh:
                #     for line in fh:
                #         print line
                break
            if not bwa_dataset.status == Dataset.STATUS.READY:
                all_samples_ready = False
                break

        if failed:
            # Re-fetch to avoid clobbering fields updated elsewhere.
            alignment_group = AlignmentGroup.objects.get(id=alignment_group.id)
            alignment_group.status = AlignmentGroup.STATUS.FAILED
            alignment_group.save(update_fields=['status'])
            # Raising aborts the rest of the chained variant calling pipeline.
            raise Exception("Alignment failed.")

        if not all_samples_ready:
            time.sleep(POLL_INTERVAL_SEC)

    # All ready. Set VARIANT_CALLING.
    # NOTE(review): this saves the possibly-stale task argument object rather
    # than a fresh fetch as in the failure branch -- confirm intended.
    alignment_group.status = AlignmentGroup.STATUS.VARIANT_CALLING
    alignment_group.save(update_fields=['status'])
@task
def merge_variant_data(alignment_group):
    """Merges results of variant caller data after pipeline is complete.

    Runs the per-caller merge steps (freebayes-parallel, lumpy, pindel). On
    any error, writes the traceback to 'merge_variant_data.error' in the vcf
    output directory and marks the AlignmentGroup FAILED; the error is not
    re-raised so downstream completion handling still runs.
    """
    # Hoisted out of the except block so a missing-module error surfaces
    # immediately rather than while handling another exception.
    import traceback

    try:
        merge_freebayes_parallel(alignment_group)
        merge_lumpy_vcf(alignment_group)
        merge_pindel_vcf(alignment_group)
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt / SystemExit
        # still propagate instead of being silently recorded as a failure.
        # Log error.
        vcf_output_root = get_or_create_vcf_output_dir(alignment_group)
        merge_variant_data_error_path = os.path.join(
                vcf_output_root, 'merge_variant_data.error')
        with open(merge_variant_data_error_path, 'w') as error_output_fh:
            error_output_fh.write(traceback.format_exc())

        # Set AlignmentGroup status to failed.
        alignment_group.status = AlignmentGroup.STATUS.FAILED
        alignment_group.end_time = datetime.now()
        alignment_group.save(update_fields=['end_time', 'status'])
@task
def pipeline_completion_tasks(alignment_group):
    """Final set of synchronous steps after all alignments and variant callers
    are finished.

    Sets end_time and status on alignment_group.
    """
    print 'START PIPELINE COMPLETION...'

    # Get fresh copy of alignment_group.
    alignment_group = AlignmentGroup.objects.get(id=alignment_group.id)

    # Previous status should have been VARIANT_CALLING. If anything else,
    # then pipeline failed.
    if AlignmentGroup.STATUS.VARIANT_CALLING == alignment_group.status:
        alignment_group.status = AlignmentGroup.STATUS.COMPLETED
    else:
        alignment_group.status = AlignmentGroup.STATUS.FAILED
    alignment_group.end_time = datetime.now()
    alignment_group.save(update_fields=['end_time', 'status'])

    print 'PIPELINE COMPLETION DONE.'
|
woodymit/millstone
|
genome_designer/pipeline/pipeline_runner.py
|
Python
|
mit
| 16,101
|
[
"BWA"
] |
7dced44b5cffc80c390af7acf3f6693e55a90508612a68e18922a24e6b191ef7
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Modify CCSD object to get CCD method.
'''
from pyscf import gto, scf, cc
# Build the test molecule: HF at 1.1 A bond length, cc-pVDZ basis.
mol = gto.M(
    atom = 'H 0 0 0; F 0 0 1.1',
    basis = 'ccpvdz')
mf = scf.RHF(mol).run()

mycc = cc.CCSD(mf)
mycc.frozen = 1

# Monkey-patch the amplitude update so T1 is zeroed after every CCSD
# iteration, which turns the CCSD solver into CCD.
old_update_amps = mycc.update_amps
def update_amps(t1, t2, eris):
    # Delegate to the standard CCSD update, then discard T1. t1*0
    # preserves the array shape while forcing the amplitudes to zero.
    t1, t2 = old_update_amps(t1, t2, eris)
    return t1*0, t2
mycc.update_amps = update_amps
mycc.kernel()
print('CCD correlation energy', mycc.e_corr)
|
gkc1000/pyscf
|
examples/cc/43-ccd.py
|
Python
|
apache-2.0
| 500
|
[
"PySCF"
] |
c4e2c81be567204a2e487e08d3d1c1606a85d6d81a6b79ff45d13bb678123f3d
|
#!/usr/bin/env python2.4
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Extract message strings from python modules, page template files
and ZCML files.
$Id: extract.py 69460 2006-08-13 21:56:54Z philikon $
"""
__docformat__ = 'restructuredtext'
import os, sys, fnmatch
import time
import tokenize
import traceback
from pygettext import safe_eval, normalize, make_escapes
from zope.interface import implements
from zope.i18nmessageid import Message
from zope.app.locales.interfaces import IPOTEntry, IPOTMaker, ITokenEater
DEFAULT_CHARSET = 'UTF-8'
DEFAULT_ENCODING = '8bit'
pot_header = '''\
##############################################################################
#
# Copyright (c) 2003-2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
msgid ""
msgstr ""
"Project-Id-Version: %(version)s\\n"
"POT-Creation-Date: %(time)s\\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"
"Language-Team: Zope 3 Developers <zope3-dev@zope.org>\\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=%(charset)s\\n"
"Content-Transfer-Encoding: %(encoding)s\\n"
"Generated-By: zope/app/locales/extract.py\\n"
'''
class POTEntry(object):
    r"""This class represents a single message entry in the POT file.

    >>> make_escapes(0)
    >>> class FakeFile(object):
    ...     def write(self, data):
    ...         print data,

    Let's create a message entry:

    >>> entry = POTEntry(Message("test", default="default"))
    >>> entry.addComment("# Some comment")
    >>> entry.addLocationComment(os.path.join("path", "file"), 10)

    Then we feed it a fake file:

    >>> entry.write(FakeFile())
    # Some comment
    #: path/file:10
    #. Default: "default"
    msgid "test"
    msgstr ""
    <BLANKLINE>

    Multiline default values generate correct comments:

    >>> entry = POTEntry(Message("test", default="\nline1\n\tline2"))
    >>> entry.write(FakeFile())
    #. Default: ""
    #.  "line1\n"
    #.  "\tline2"
    msgid "test"
    msgstr ""
    <BLANKLINE>
    """

    implements(IPOTEntry)

    def __init__(self, msgid, comments=None):
        self.msgid = msgid
        self.comments = comments or ''

    def addComment(self, comment):
        """Append one free-form comment line to this entry."""
        self.comments += comment + '\n'

    def addLocationComment(self, filename, line):
        """Record a '#: file:line' source location, normalized to '/' separators."""
        self.comments += '#: %s:%s\n' % (
            filename.replace(os.sep, '/'), line)

    def write(self, file):
        """Serialize comments, default-value comments, msgid and an empty
        msgstr to *file* in PO format."""
        if self.comments:
            file.write(self.comments)
        if (isinstance(self.msgid, Message) and
            self.msgid.default is not None):
            default = self.msgid.default.strip()
            lines = normalize(default).split("\n")
            lines[0] = "#. Default: %s\n" % lines[0]
            for i in range(1, len(lines)):
                lines[i] = "#. %s\n" % lines[i]
            file.write("".join(lines))
        file.write('msgid %s\n' % normalize(self.msgid))
        file.write('msgstr ""\n')
        file.write('\n')

    def __cmp__(self, other):
        # Python 2 ordering hook: entries sort by their comment block,
        # which makes POT output ordering deterministic by location.
        return cmp(self.comments, other.comments)
class POTMaker(object):
    """This class inserts sets of strings into a POT file.
    """
    implements(IPOTMaker)

    def __init__ (self, output_fn, path):
        self._output_filename = output_fn
        self.path = path
        # msgid -> POTEntry
        self.catalog = {}

    def add(self, strings, base_dir=None):
        """Merge a {msgid: [(filename, lineno), ...]} mapping into the
        catalog, stripping *base_dir* from recorded file names."""
        for msgid, locations in strings.items():
            if msgid == '':
                continue
            if msgid not in self.catalog:
                self.catalog[msgid] = POTEntry(msgid)

            for filename, lineno in locations:
                if base_dir is not None:
                    filename = filename.replace(base_dir, '')
                self.catalog[msgid].addLocationComment(filename, lineno)

    def _getProductVersion(self):
        # First, try to get the product version
        fn = os.path.join(self.path, 'version.txt')
        if os.path.exists(fn):
            return open(fn, 'r').read().strip()

        # Second, try to find a Zope version
        from zope.app.applicationcontrol.zopeversion import ZopeVersionUtility
        return ZopeVersionUtility.getZopeVersion()

    def write(self):
        """Write the POT header followed by every catalog entry to the
        configured output file."""
        file = open(self._output_filename, 'w')
        file.write(pot_header % {'time':     time.ctime(),
                                 'version':  self._getProductVersion(),
                                 'charset':  DEFAULT_CHARSET,
                                 'encoding': DEFAULT_ENCODING})

        # Sort the catalog entries by filename
        catalog = self.catalog.values()
        catalog.sort()

        # Write each entry to the file
        for entry in catalog:
            entry.write(file)

        file.close()
class TokenEater(object):
    """This is almost 100% taken from `pygettext.py`, except that I
    removed all option handling and output a dictionary.

    >>> eater = TokenEater()
    >>> make_escapes(0)

    TokenEater eats tokens generated by the standard python module
    `tokenize`.

    >>> import tokenize
    >>> from StringIO import StringIO

    We feed it a (fake) file:

    >>> file = StringIO(
    ...     "_(u'hello ${name}', u'buenos dias', {'name': 'Bob'}); "
    ...     "_(u'hi ${name}', mapping={'name': 'Bob'})"
    ...     )
    >>> tokenize.tokenize(file.readline, eater)

    The catalog of collected message ids contains our example

    >>> catalog = eater.getCatalog()
    >>> items = catalog.items()
    >>> items.sort()
    >>> items
    [(u'hello ${name}', [(None, 1)]), (u'hi ${name}', [(None, 1)])]

    The key in the catalog is not a unicode string, it's a real
    message id with a default value:

    >>> msgid = items.pop(0)[0]
    >>> msgid
    u'hello ${name}'
    >>> msgid.default
    u'buenos dias'

    >>> msgid = items.pop(0)[0]
    >>> msgid
    u'hi ${name}'
    >>> msgid.default
    u''

    Note that everything gets converted to unicode.
    """
    implements(ITokenEater)

    def __init__(self):
        # msgid -> {(filename, lineno): isdocstring}
        self.__messages = {}
        # State-machine: current handler, advanced as tokens arrive.
        self.__state = self.__waiting
        # String fragments collected since the last ',' or '('.
        self.__data = []
        self.__lineno = -1
        self.__freshmodule = 1
        self.__curfile = None

    def __call__(self, ttype, tstring, stup, etup, line):
        # Token callback for tokenize.tokenize(); dispatch to current state
        # with the token's starting line number.
        self.__state(ttype, tstring, stup[0])

    def __waiting(self, ttype, tstring, lineno):
        # Idle state: watch for the translation marker '_'.
        if ttype == tokenize.NAME and tstring in ['_']:
            self.__state = self.__keywordseen

    def __suiteseen(self, ttype, tstring, lineno):
        # ignore anything until we see the colon
        if ttype == tokenize.OP and tstring == ':':
            self.__state = self.__suitedocstring

    def __suitedocstring(self, ttype, tstring, lineno):
        # ignore any intervening noise
        if ttype == tokenize.STRING:
            # NOTE(review): 'lineno' is passed positionally and so lands in
            # __addentry's 'default' parameter, not 'lineno' -- looks like a
            # bug inherited from the pygettext adaptation; verify.
            self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
            self.__state = self.__waiting
        elif ttype not in (tokenize.NEWLINE, tokenize.INDENT,
                           tokenize.COMMENT):
            # there was no class docstring
            self.__state = self.__waiting

    def __keywordseen(self, ttype, tstring, lineno):
        # Just saw '_'; a following '(' starts argument collection.
        if ttype == tokenize.OP and tstring == '(':
            self.__data = []
            self.__msgid = ''
            self.__default = ''
            self.__lineno = lineno
            self.__state = self.__openseen
        else:
            self.__state = self.__waiting

    def __openseen(self, ttype, tstring, lineno):
        if ((ttype == tokenize.OP and tstring == ')') or
            (ttype == tokenize.NAME and tstring == 'mapping')):
            # We've seen the last of the translatable strings.  Record the
            # line number of the first line of the strings and update the list
            # of messages seen.  Reset state for the next batch.  If there
            # were no strings inside _(), then just ignore this entry.
            if self.__data or self.__msgid:
                if self.__default:
                    msgid = self.__msgid
                    default = self.__default
                elif self.__msgid:
                    msgid = self.__msgid
                    default = ''.join(self.__data)
                else:
                    msgid = ''.join(self.__data)
                    default = None
                self.__addentry(msgid, default)
            self.__state = self.__waiting
        elif ttype == tokenize.OP and tstring == ',':
            # First ',' ends the msgid; second ends the default value.
            if not self.__msgid:
                self.__msgid = ''.join(self.__data)
            elif not self.__default:
                self.__default = ''.join(self.__data)
            self.__data = []
        elif ttype == tokenize.STRING:
            # Adjacent string literals concatenate into one fragment list.
            self.__data.append(safe_eval(tstring))

    def __addentry(self, msg, default=None, lineno=None, isdocstring=0):
        """Record *msg* (wrapped in a Message with *default*) as seen at
        (current file, lineno)."""
        if lineno is None:
            lineno = self.__lineno

        if default is not None:
            default = unicode(default)
        msg = Message(msg, default=default)
        entry = (self.__curfile, lineno)
        self.__messages.setdefault(msg, {})[entry] = isdocstring

    def set_filename(self, filename):
        """Set the file name recorded for subsequently-seen messages."""
        self.__curfile = filename
        self.__freshmodule = 1

    def getCatalog(self):
        """Return {msgid: [(filename, lineno), ...]} sorted deterministically
        by location and then by msgid."""
        catalog = {}
        # Sort the entries.  First sort each particular entry's keys, then
        # sort all the entries by their first item.
        reverse = {}
        for k, v in self.__messages.items():
            keys = v.keys()
            keys.sort()
            reverse.setdefault(tuple(keys), []).append((k, v))

        rkeys = reverse.keys()
        rkeys.sort()
        for rkey in rkeys:
            rentries = reverse[rkey]
            rentries.sort()
            for msgid, locations in rentries:
                catalog[msgid] = []

                locations = locations.keys()
                locations.sort()

                for filename, lineno in locations:
                    catalog[msgid].append((filename, lineno))

        return catalog
def find_files(dir, pattern, exclude=()):
    """Return paths under *dir* whose base name matches the fnmatch
    *pattern*, skipping any file or directory whose name is in *exclude*.

    Rewritten on os.walk: os.path.walk is deprecated since Python 2.6 and
    removed in Python 3. Semantics are unchanged -- excluded directory
    names are pruned from the walk, and excluded file names are never
    returned.
    """
    files = []
    for dirpath, dirnames, filenames in os.walk(dir):
        # In-place prune so os.walk does not descend into excluded dirs
        # (mirrors the names[:] filtering of the old visit callback).
        dirnames[:] = [d for d in dirnames if d not in exclude]
        files += [os.path.join(dirpath, name)
                  for name in fnmatch.filter(filenames, pattern)
                  if name not in exclude]
    return files
def py_strings(dir, domain="zope", exclude=()):
    """Retrieve all Python messages from `dir` that are in the `domain`.

    Returns the TokenEater catalog: {msgid: [(filename, lineno), ...]}.
    """
    eater = TokenEater()
    make_escapes(0)
    for filename in find_files(
            dir, '*.py', exclude=('extract.py', 'pygettext.py')+tuple(exclude)):
        fp = open(filename)
        try:
            eater.set_filename(filename)
            try:
                tokenize.tokenize(fp.readline, eater)
            except tokenize.TokenError, e:
                # Report and continue with the remaining files.
                print >> sys.stderr, '%s: %s, line %d, column %d' % (
                    e[0], filename, e[1][0], e[1][1])
        finally:
            fp.close()
    # One limitation of the Python message extractor is that it cannot
    # determine the domain of the string, since it is not contained anywhere
    # directly. The only way this could be done is by loading the module and
    # inspect the '_' function. For now we simply assume that all the found
    # strings have the domain the user specified.
    return eater.getCatalog()
def zcml_strings(dir, domain="zope", site_zcml=None):
    """Retrieve all ZCML messages from `dir` that are in the `domain`.

    Parses (without executing) site.zcml and returns the i18n strings
    collected by the configuration machinery for *domain*.
    """
    from zope.app.appsetup import config
    import zope
    dirname = os.path.dirname
    if site_zcml is None:
        # TODO this assumes a checkout directory structure
        # (three levels up from the zope package to find site.zcml).
        site_zcml = os.path.join(dirname(dirname(dirname(zope.__file__))),
                                 "site.zcml")
    context = config(site_zcml, features=("devmode",), execute=False)
    return context.i18n_strings.get(domain, {})
def tal_strings(dir, domain="zope", include_default_domain=False, exclude=()):
    """Retrieve all TAL messages from `dir` that are in the `domain`.

    Returns {msgid: [(filename, lineno), ...]} with column numbers dropped.
    """
    # We import zope.tal.talgettext here because we can't rely on the
    # right sys path until app_dir has run
    from zope.tal.talgettext import POEngine, POTALInterpreter
    from zope.tal.htmltalparser import HTMLTALParser
    engine = POEngine()

    class Devnull(object):
        # Sink for the interpreter's rendered output; we only want the
        # message catalog side effect.
        def write(self, s):
            pass

    for filename in find_files(dir, '*.pt', exclude=tuple(exclude)):
        try:
            engine.file = filename
            p = HTMLTALParser()
            p.parseFile(filename)
            program, macros = p.getCode()
            POTALInterpreter(program, macros, engine, stream=Devnull(),
                             metal=False)()
        except: # Hee hee, I love bare excepts!
            # NOTE(review): deliberate best-effort -- a broken template is
            # reported and skipped rather than aborting extraction.
            print 'There was an error processing', filename
            traceback.print_exc()

    # See whether anything in the domain was found
    if not engine.catalog.has_key(domain):
        return {}
    # We do not want column numbers.
    catalog = engine.catalog[domain].copy()
    # When the Domain is 'default', then this means that none was found;
    # Include these strings; yes or no?
    if include_default_domain:
        catalog.update(engine.catalog['default'])
    for msgid, locations in catalog.items():
        catalog[msgid] = map(lambda l: (l[0], l[1][0]), locations)
    return catalog
|
Donkyhotay/MoonPy
|
zope/app/locales/extract.py
|
Python
|
gpl-3.0
| 14,466
|
[
"VisIt"
] |
9233fbfc5273dddd28514a9c8fe7d0544ad04d1ebb9ff3a6eb6b5ab1af3c6231
|
'''
Set up Visual Sudio to build a specified MPIR configuration
Copyright (C) 2011, 2012, 2013, 2014 Brian Gladman
'''
from __future__ import print_function
from operator import itemgetter
from os import listdir, walk, unlink, makedirs
from os.path import split, splitext, isdir, relpath, join, exists
from os.path import dirname, normpath
from copy import deepcopy
from sys import argv, exit
from filecmp import cmp
from shutil import copy
from re import compile, search
from collections import defaultdict
from uuid import uuid4
from time import sleep
solution_name = 'mpir.sln'

# Python 2/3 compatibility: make input() behave like Python 2 raw_input().
try:
    input = raw_input
except NameError:
    pass

# project type codes and the matching MSBuild names / file extensions
app_type, lib_type, dll_type = 0, 1, 2
app_str = ('Application', 'StaticLibrary', 'DynamicLibrary')
app_ext = ('.exe', '.lib', '.dll')

# for script debugging
debug = False
# either add a prebuild step to the project files or do it here
add_prebuild = True
# output a build project for the C++ static library
add_cpp_lib = False

# The path to the mpir root directory
build_vc = 'build.vc10/'
mpir_dir = '../'
build_dir = mpir_dir + build_vc
cfg_dir = './cdata'
solution_dir = join(mpir_dir, build_vc)

# paths that might include source files(*.c, *.h, *.asm)
c_directories = ('', 'build.vc12', 'fft', 'mpf', 'mpq', 'mpz',
                 'printf', 'scanf')

# files that are to be excluded from the build
exclude_file_list = ('config.guess', 'cfg', 'getopt', 'getrusage',
                     'gettimeofday', 'cpuid', 'obsolete', 'win_timing',
                     'gmp-mparam', 'tal-debug', 'tal-notreent', 'new_fft',
                     'new_fft_with_flint', 'compat', 'udiv_w_sdiv')
# copy from file ipath to file opath but avoid copying if
# opath exists and is the same as ipath (this is to avoid
# triggering an unecessary rebuild).
def write_f(ipath, opath):
    """Copy the file *ipath* to *opath*, skipping the copy when the source
    is missing or a directory, or when the destination is a directory or
    already has identical contents (avoids triggering needless rebuilds).
    """
    if not exists(ipath) or isdir(ipath):
        return
    if exists(opath) and (isdir(opath) or cmp(ipath, opath)):
        return
    copy(ipath, opath)
# append a file (ipath) to an existing file (opath)
def append_f(ipath, opath):
    """Append the contents of file *ipath* to file *opath* in 8 KiB chunks.

    I/O failures are reported on stdout rather than raised, matching the
    best-effort behaviour of the other file helpers in this script.
    """
    try:
        with open(opath, 'ab') as out_file:
            try:
                with open(ipath, 'rb') as in_file:
                    buf = in_file.read(8192)
                    while buf:
                        out_file.write(buf)
                        buf = in_file.read(8192)
            except IOError:
                # BUG FIX: the original formatted the undefined name 'f'
                # here, which raised NameError instead of reporting.
                print('error reading {0:s} for input'.format(ipath))
                return
    except IOError:
        print('error opening {0:s} for output'.format(opath))
# copy files in a list from in_dir to out_dir
def copy_files(file_list, in_dir, out_dir):
    """Copy each file named in *file_list* from *in_dir* into *out_dir*,
    creating *out_dir* first if necessary.
    """
    try:
        makedirs(out_dir)
    except OSError:
        # BUG FIX: makedirs raises OSError (not IOError) on Python 2 when
        # the directory already exists; the original 'except IOError'
        # never caught it. On Python 3 IOError aliases OSError, so this
        # is backward compatible.
        pass
    for f in file_list:
        copy(join(in_dir, f), out_dir)
# Recursively search a given directory tree to find header,
# C and assembler code files that either replace or augment
# the generic C source files in the input list 'src_list'.
# As the directory tree is searched, files in each directory
# become the source code files for the current directory and
# the default source code files for its child directories.
#
# Lists of default header, C and assembler source code files
# are maintained as the tree is traversed and if a file in
# the current directory matches the name of a file in the
# default file list (name matches ignore file extensions),
# the name in the list is removed and is replaced by the new
# file found. On return each directory in the tree had an
# entry in the returned dictionary that contains:
#
# 1. The list of header files
#
# 2. The list of C source code files for the directory
#
# 3. The list of assembler code files that replace C files
#
# 4. The list of assembler files that are not C replacements
#
def find_asm(path, cf_list):
    """Walk the assembler source tree rooted at *path* and work out, for
    each directory, which generic C files from *cf_list* are replaced or
    augmented by local headers, C files and assembler files.

    Returns a dict keyed by path-relative-to-*path* ('' for the root),
    whose values are five-element lists:
      [0] header files, [1] C files, [2] asm files replacing C files,
      [3] additional asm files, [4] path relative to the MPIR root.
    Children inherit their parent's lists as defaults (top-down walk).
    """
    d = dict()
    for root, dirs, files in walk(path):
        if '.svn' in dirs:                  # ignore SVN directories
            dirs.remove('.svn')
        if 'fat' in dirs:                   # ignore fat directory
            dirs.remove('fat')
        relp = relpath(root, path)          # path from asm root
        relr = relpath(root, mpir_dir)      # path from MPIR root
        if relp == '.':                     # set C files as default
            relp = h = t = ''
            d[''] = [ [], deepcopy(cf_list), [], [], relr ]
        else:
            h, _ = split(relp)              # h = parent, t = this directory
            # copy defaults from this directories parent
            d[relp] = [ deepcopy(d[h][0]), deepcopy(d[h][1]),
                        deepcopy(d[h][2]), deepcopy(d[h][3]), relr]
        for f in files:                     # for the files in this directory
            n, x = splitext(f)
            if x == '.h':                   # if it is a header file, remove
                for cf in reversed(d[relp][0]): # any matching default
                    if cf[0] == n:
                        d[relp][0].remove(cf)
                d[relp][0] += [(n, x, relr)]    # and add the local header file
            if x == '.c':                   # if it is a C file, remove
                for cf in reversed(d[relp][1]): # any matching default
                    if cf[0] == n:
                        d[relp][1].remove(cf)
                d[relp][1] += [(n, x, relr)]    # and add the local C file
            if x == '.asm':                 # if it is an assembler file
                match = False
                for cf in reversed(d[relp][1]): # remove any matching C file
                    if cf[0] == n:
                        d[relp][1].remove(cf)
                        match = True
                        break
                for cf in reversed(d[relp][2]): # and remove any matching
                    if cf[0] == n:              # assembler file
                        d[relp][2].remove(cf)
                        match = True
                        break
                if match:                       # if a match was found, put the
                    d[relp][2] += [(n, x, relr)] # file in the replacement list
                else:                           # otherwise look for it in the
                    for cf in reversed(d[relp][3]): # additional files list
                        if cf[0] == n:
                            d[relp][3].remove(cf)
                            break
                    d[relp][3] += [(n, x, relr)] # additional assembler list
    for k in d:
        for i in range(4):
            d[k][i].sort(key=itemgetter(0))     # sort the four file lists
    return d
# create 4 lists of c, h, cc (or cpp) and asm (or as) files in a directory
def find_src(dir_list):
    """Create 4 lists of header (.h), C (.c), C++ (.cc/.cpp) and assembler
    (.asm/.as) files found in the MPIR sub-directories named in *dir_list*,
    skipping excluded names and sub-directories.

    Each entry is a (name, extension, directory) tuple; each list is sorted
    by directory, then name, then extension.
    """
    # list index from file extension
    ext_index = {'.h': 0, '.c': 1, '.cc': 2, '.cpp': 2, '.asm': 3, '.as': 3}
    src_lists = [[], [], [], []]
    for d in dir_list:
        for f in listdir(join(mpir_dir, d)):
            if f == '.svn':
                continue                    # ignore SVN directories
            # BUG FIX: test the full path, not the bare name -- the
            # original isdir(f) was relative to the CWD and so almost
            # never detected sub-directories.
            if not isdir(join(mpir_dir, d, f)):
                n, x = splitext(f)          # split into name + extension
                # keep files of a known type that are not excluded
                if x in ext_index and not n in exclude_file_list:
                    src_lists[ext_index[x]] += [(n, x, d)]
    for lst in src_lists:
        lst.sort(key=itemgetter(2, 0, 1))
    return src_lists
# scan the files in the input set and find the symbols
# defined in the files

# recognisers for the symbol-defining constructs used in the yasm
# sources: the PROC macros and the 'global' directives
fr_sym = compile(r'LEAF_PROC\s+(\w+)')
lf_sym = compile(r'FRAME_PROC\s+(\w+)')
wf_sym = compile(r'WIN64_GCC_PROC\s+(\w+)')
g3_sym = compile(r'global\s+___g(\w+)')
g2_sym = compile(r'global\s+__g(\w+)')
def get_symbols(setf, sym_dir):
    """Scan each (name, ext, dir) file tuple in *setf* and add the symbols
    it defines (PROC macros and 'global' directives) to sym_dir[f].

    *sym_dir* is a mapping of file tuple -> set of symbol names.
    """
    for f in setf:
        fn = join(mpir_dir, f[2], f[0] + f[1])
        with open(fn, 'r') as inf:
            lines = inf.readlines()
            for l in lines:
                # each PROC-style recogniser is tried independently
                m = fr_sym.search(l)
                if m:
                    sym_dir[f] |= set((m.groups(1)[0],))
                m = lf_sym.search(l)
                if m:
                    sym_dir[f] |= set((m.groups(1)[0],))
                m = wf_sym.search(l)
                if m:
                    sym_dir[f] |= set((m.groups(1)[0],))
                # try the triple-underscore 'global' form first so the
                # double-underscore pattern doesn't claim it
                m = g3_sym.search(l)
                if m:
                    sym_dir[f] |= set((m.groups(1)[0],))
                else:
                    m = g2_sym.search(l)
                    if m:
                        sym_dir[f] |= set((m.groups(1)[0],))
def file_symbols(cf):
    """Collect the symbols defined by the assembler files recorded in *cf*
    (a find_asm() result), skipping the 'fat' entry.

    Returns a defaultdict mapping each (name, ext, dir) file tuple to the
    set of symbols that file defines.
    """
    symbol_map = defaultdict(set)
    for key in cf:
        if key == 'fat':
            continue
        # entries [2] and [3] hold the replacement and additional .asm files
        asm_files = set(cf[key][2]) | set(cf[key][3])
        get_symbols(asm_files, symbol_map)
    return symbol_map
def gen_have_list(c, sym_dir, out_dir):
    """Append the sorted symbol lists for directory entry *c* (a find_asm()
    value) and write them all to <out_dir>/<c[4]>/cfg.h, one symbol per
    line.

    c[2]/c[3] are the replacement/additional asm file lists; the two sorted
    symbol lists are appended to *c* in place as c[5] and c[6].
    """
    # symbols from asm files that replace C files
    set_sym2 = set()
    for f in c[2]:
        set_sym2 |= sym_dir[f]
    # symbols from additional asm files
    set_sym3 = set()
    for f in c[3]:
        set_sym3 |= sym_dir[f]
    c += [sorted(list(set_sym2)), sorted(list(set_sym3))]
    fd = join(out_dir, c[4])
    try:
        makedirs(fd)
    except IOError:
        # NOTE(review): makedirs raises OSError on Python 2, so this clause
        # may not actually suppress the existing-directory case -- verify.
        pass
    with open(join(fd, 'cfg.h'), 'w') as outf:
        for sym in sorted(set_sym2 | set_sym3):
            print(sym, file=outf)
#     print('/* assembler symbols also available in C files */', file=outf)
#     for sym in sorted(set_sym2):
#       print(sym, file=outf)
#     print('/* assembler symbols not available in C files */', file=outf)
#     for sym in sorted(set_sym3):
#       print(sym, file=outf)
# generate Visual Studio 2010 IDE Filter
def filter_folders(cf_list, af_list, outf):
f1 = r''' <ItemGroup>
<Filter Include="Header Files" />
<Filter Include="Source Files" />
'''
f2 = r''' <Filter Include="Source Files\{0:s}" />
'''
f3 = r''' </ItemGroup>
'''
c_dirs = set(i[2] for i in cf_list)
a_dirs = set(i[2] for i in af_list)
if a_dirs:
c_dirs |= set((r'mpn\yasm',))
outf.write(f1)
for d in sorted(c_dirs):
if d:
t = d if d != r'mpn\generic' else r'mpn'
outf.write(f2.format(t))
outf.write(f3)
def filter_headers(hdr_list, relp, outf):
    """Write <ClInclude> filter entries for each header in *hdr_list*,
    prefixed with relative path *relp*, to the .filters file *outf*."""
    f1 = r'''  <ItemGroup>
'''
    f2 = r'''    <ClInclude Include="{}{}">
      <Filter>Header Files</Filter>
    </ClInclude>
'''
    f3 = r'''  </ItemGroup>
'''
    outf.write(f1)
    for h in hdr_list:
        outf.write(f2.format(relp, h))
    outf.write(f3)
def filter_csrc(cf_list, relp, outf):
    """Write <ClCompile> filter entries for the (name, ext, dir) tuples in
    *cf_list* to the .filters file *outf*; root-level files go in 'Source
    Files', others in their per-directory sub-folder."""
    f1 = r'''  <ItemGroup>
'''
    f2 = r'''    <ClCompile Include="{}{}">
      <Filter>Source Files</Filter>
    </ClCompile>
'''
    f3 = r'''    <ClCompile Include="{}{}\{}">
      <Filter>Source Files\{}</Filter>
    </ClCompile>
'''
    f4 = r'''  </ItemGroup>
'''
    outf.write(f1)
    for i in cf_list:
        if not i[2]:
            outf.write(f2.format(relp, i[0] + i[1]))
        else:
            # mpn\generic sources display under plain 'mpn'
            t = 'mpn' if i[2].endswith('generic') else i[2]
            outf.write(f3.format(relp, i[2], i[0] + i[1], t))
    outf.write(f4)
def filter_asrc(af_list, relp, outf):
    """Write <YASM> filter entries for the assembler tuples in *af_list* to
    the .filters file *outf*, all filed under Source Files\mpn\yasm."""
    f1 = r'''  <ItemGroup>
'''
    f2 = r'''    <YASM Include="{0:s}{2:s}\{1:s}">
      <Filter>Source Files\mpn\yasm</Filter>
    </YASM>
'''
    f3 = r'''  </ItemGroup>
'''
    outf.write(f1)
    for i in af_list:
        # NOTE(review): four format args are supplied but the template only
        # uses {0}-{2}; the trailing i[2] is harmlessly ignored.
        outf.write(f2.format(relp, i[0] + i[1], i[2], i[2]))
    outf.write(f3)
def gen_filter(name, hf_list, cf_list, af_list):
    """Generate the Visual Studio .filters file *name* (relative to the
    build directory) for the given header, C and assembler file lists."""
    f1 = r'''<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
'''
    f2 = r'''  <ItemGroup>
    <None Include="..\..\gmp-h.in" />
  </ItemGroup>
</Project>
'''
    fn = normpath(join(build_dir, name))
    # path prefix from the filter file's directory back to the MPIR root
    relp = split(relpath(mpir_dir, fn))[0] + '\\'
    try:
        makedirs(split(fn)[0])
    except IOError:
        # NOTE(review): makedirs raises OSError on Python 2, so this clause
        # may not actually suppress the existing-directory case -- verify.
        pass
    with open(fn, 'w') as outf:
        outf.write(f1)
        filter_folders(cf_list, af_list, outf)
        if hf_list:
            filter_headers(hf_list, relp, outf)
        filter_csrc(cf_list, relp, outf)
        if af_list:
            filter_asrc(af_list, relp, outf)
        outf.write(f2)
# generate vcxproj file
def vcx_proj_cfg(plat, outf):
f1 = r''' <ItemGroup Label="ProjectConfigurations">
'''
f2 = r''' <ProjectConfiguration Include="{1:s}|{0:s}">
<Configuration>{1:s}</Configuration>
<Platform>{0:s}</Platform>
</ProjectConfiguration>
'''
f3 = r''' </ItemGroup>
'''
outf.write(f1)
for pl in plat:
for conf in ('Release', 'Debug'):
outf.write(f2.format(pl, conf))
outf.write(f3)
def vcx_globals(name, guid, outf):
    """Write the Globals property group (root namespace and project GUID)
    to the vcxproj file *outf*."""
    f1 = r'''  <PropertyGroup Label="Globals">
    <RootNamespace>{0:s}</RootNamespace>
    <Keyword>Win32Proj</Keyword>
    <ProjectGuid>{1:s}</ProjectGuid>
  </PropertyGroup>
'''
    outf.write(f1.format(name, guid))
def vcx_default_cpp_props(outf):
    """Write the Microsoft.Cpp.Default.props import to *outf*."""
    f1 = r'''  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
'''
    outf.write(f1)
def vcx_library_type(plat, proj_type, outf):
    """Write per-configuration ConfigurationType property groups (using the
    app_str name for *proj_type*) to the vcxproj file *outf*."""
    f1 = r'''  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='{1:s}|{0:s}'" Label="Configuration">
    <ConfigurationType>{2:s}</ConfigurationType>
    <CharacterSet>MultiByte</CharacterSet>
  </PropertyGroup>
'''
    for pl in plat:
        for conf in ('Release', 'Debug'):
            outf.write(f1.format(pl, conf, app_str[proj_type]))
def vcx_cpp_props(outf):
    """Write the Microsoft.Cpp.props import to *outf*."""
    f1 = r'''  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
'''
    outf.write(f1)
def vcx_extensions(outf):
    """Write the ExtensionSettings import group (the vsyasm build rule)
    to *outf*."""
    f1 = r'''  <ImportGroup Label="ExtensionSettings">
    <Import Project="..\vsyasm.props" />
  </ImportGroup>
'''
    outf.write(f1)
def vcx_user_props(plat, outf):
    """Write per-configuration user property-sheet import groups to the
    vcxproj file *outf*."""
    f1 = r'''  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='{1:s}|{0:s}'" Label="PropertySheets">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" />
  </ImportGroup>
'''
    for pl in plat:
        for conf in ('Release', 'Debug'):
            outf.write(f1.format(pl, conf))
def vcx_target_name_and_dirs(name, plat, proj_type, outf):
    """Write the TargetName / IntDir / OutDir properties for every
    configuration to the vcxproj file *outf*."""
    f1 = r'''  <PropertyGroup>
    <_ProjectFileVersion>10.0.21006.1</_ProjectFileVersion>
'''
    f2 = r'''    <TargetName Condition="'$(Configuration)|$(Platform)'=='{1:s}|{0:s}'">{2:s}</TargetName>
    <IntDir Condition="'$(Configuration)|$(Platform)'=='{1:s}|{0:s}'">$(Platform)\$(Configuration)\</IntDir>
    <OutDir Condition="'$(Configuration)|$(Platform)'=='{1:s}|{0:s}'">$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
'''
    f3 = r'''  </PropertyGroup>
'''
    outf.write(f1)
    for pl in plat:
        for conf in ('Release', 'Debug'):
            outf.write(f2.format(pl, conf, name))
    outf.write(f3)
def yasm_options(plat, proj_type, outf):
    """Write the <YASM> tool options (DLL define for dll builds, and the
    platform-specific mpn include path) to the vcxproj file *outf*."""
    f1 = r'''    <YASM>
      <Defines>{0:s}</Defines>
      <IncludePaths>..\..\mpn\x86{1:s}w\</IncludePaths>
      <Debug>true</Debug>
      <ObjectFileName>$(IntDir)mpn\</ObjectFileName>
      <ObjectFile>$(IntDir)mpn\</ObjectFile>
    </YASM>
'''
    # NOTE(review): the caller passes the full platform list here, so
    # plat == 'Win32' is never true and the x86_64w path is always chosen
    # -- confirm whether the single platform 'pl' was intended.
    outf.write(f1.format('DLL' if proj_type == dll_type else '', '' if plat == 'Win32' else '_64'))
def compiler_options(plat, proj_type, is_debug, outf):
    """Write the <ClCompile> tool options (optimization level, preprocessor
    defines and runtime-library flavour) for one platform/configuration to
    the vcxproj file *outf*."""
    f1 = r'''    <ClCompile>
      <Optimization>{0:s}</Optimization>
      <IntrinsicFunctions>true</IntrinsicFunctions>
      <AdditionalIncludeDirectories>..\..\</AdditionalIncludeDirectories>
      <PreprocessorDefinitions>{1:s}%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <RuntimeLibrary>MultiThreaded{2:s}</RuntimeLibrary>
      <ProgramDataBaseFileName>$(TargetDir)$(TargetName).pdb</ProgramDataBaseFileName>
      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
    </ClCompile>
'''
    # base define string per project type; the leading '_'/'N' added below
    # turns 'DEBUG' into '_DEBUG' or 'NDEBUG'
    if proj_type == app_type:
        s1 = 'DEBUG;WIN32;_CONSOLE'
        s2 = ''
    if proj_type == dll_type:
        s1 = 'DEBUG;WIN32;HAVE_CONFIG_H;MSC_BUILD_DLL;'
        s2 = 'DLL'
    elif proj_type == lib_type:
        s1 = 'DEBUG;WIN32;_LIB;HAVE_CONFIG_H;'
        s2 = ''
    else:
        pass
    if plat == 'x64':
        s1 = s1 + '_WIN64;'
    if is_debug:
        opt, defines, crt = 'Disabled', '_' + s1, 'Debug' + s2
    else:
        opt, defines, crt = 'Full', 'N' + s1, s2
    outf.write(f1.format(opt, defines, crt))
def linker_options(outf):
    """Write the <Link> tool options (debug info, large-address-aware) to
    the vcxproj file *outf*."""
    f1 = r'''    <Link>
      <GenerateDebugInformation>true</GenerateDebugInformation>
      <LargeAddressAware>true</LargeAddressAware>
    </Link>
'''
    outf.write(f1)
def vcx_pre_build(name, plat, outf):
    """Write a PreBuildEvent that runs 'prebuild <config> <platform>' from
    the parent directory."""
    f1 = r'''    <PreBuildEvent>
      <Command>cd ..\
prebuild {0:s} {1:s}
      </Command>
    </PreBuildEvent>
'''
    outf.write(f1.format(name, plat))
def vcx_post_build(is_cpp, outf):
    # Write a post-build step that runs 'postbuild <target>' (copies the
    # built binary into the output tree).  'is_cpp' is currently unused
    # but kept for interface symmetry with vcx_pre_build.
    f1 = r'''
    <PostBuildEvent>
      <Command>cd ..\
postbuild "$(TargetPath)"
      </Command>
    </PostBuildEvent>
'''
    outf.write(f1)
def vcx_tool_options(config, plat, proj_type, is_cpp, af_list, outf):
    """Write one <ItemDefinitionGroup> per platform/configuration pair.

    Each group carries the pre-build step (C builds only, when
    add_prebuild is set), the YASM options (when assembler sources are
    present), the compiler options, and -- for non-library projects --
    the linker options, followed by the post-build step.
    """
    f1 = r'''  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='{1:s}|{0:s}'">
'''
    f2 = r'''  </ItemDefinitionGroup>
'''
    for pl in plat:
        for is_debug in (False, True):
            outf.write(f1.format(pl, 'Debug' if is_debug else 'Release'))
            if add_prebuild and not is_cpp:
                vcx_pre_build(config, pl, outf)
            if af_list:
                # BUG FIX: the whole platform tuple used to be passed here,
                # so yasm_options' "plat == 'Win32'" test never matched and
                # Win32 builds picked up the x86_64w include path; pass the
                # current platform instead.
                yasm_options(pl, proj_type, outf)
            compiler_options(pl, proj_type, is_debug, outf)
            if proj_type != lib_type:
                linker_options(outf)
            vcx_post_build(is_cpp, outf)
            outf.write(f2)
def vcx_hdr_items(hdr_list, relp, outf):
    # Write an <ItemGroup> of <ClInclude> entries, one per header file,
    # with paths made project-relative via the 'relp' prefix.
    f1 = r'''  <ItemGroup>
'''
    f2 = r'''    <ClInclude Include="{}{}" />
'''
    f3 = r'''  </ItemGroup>
'''
    outf.write(f1)
    for i in hdr_list:
        outf.write(f2.format(relp, i))
    outf.write(f3)
def vcx_c_items(cf_list, plat, relp, outf):
    # Write an <ItemGroup> of <ClCompile> entries for the C sources.
    # Each entry in cf_list is a (basename, extension, subdir) tuple.
    # Sources living in an mpn subdirectory get a per-configuration
    # <ObjectFileName> override so same-named objects from different
    # directories cannot collide in the intermediate directory.
    f1 = r'''  <ItemGroup>
'''
    f2 = r'''    <ClCompile Include="{0:s}{1[0]:s}{1[1]:s}" />
'''
    f3 = r'''    <ClCompile Include="{0:s}{1[2]:s}\{1[0]:s}{1[1]:s}">
'''
    f4 = r'''      <ObjectFileName Condition="'$(Configuration)|$(Platform)'=='{0:s}|{1:s}'">$(IntDir){2:s}\</ObjectFileName>
'''
    f5 = r'''    </ClCompile>
'''
    f6 = r'''  </ItemGroup>
'''
    outf.write(f1)
    for nxd in cf_list:
        if nxd[2] == '':
            # top-level source: plain entry, no overrides needed
            outf.write(f2.format(relp, nxd))
        else:
            # mpn sub-directory source: object-file overrides for every
            # configuration/platform pair; '...generic' sources share 'mpn\'
            outf.write(f3.format(relp, nxd))
            for cf in ('Release', 'Debug'):
                for pl in plat:
                    outf.write(f4.format(cf, pl, 'mpn' if nxd[2].endswith('generic') else nxd[2]))
            outf.write(f5)
    outf.write(f6)
def vcx_a_items(af_list, relp, outf):
    # Write an <ItemGroup> of <YASM> entries, one per assembler source;
    # af_list holds (basename, extension, subdir) tuples.
    f1 = r'''  <ItemGroup>
'''
    f2 = r'''    <YASM Include="{0:s}{1[2]:s}\{1[0]:s}{1[1]:s}" />
'''
    f3 = r'''  </ItemGroup>
'''
    outf.write(f1)
    for nxd in af_list:
        outf.write(f2.format(relp, nxd))
    outf.write(f3)
def gen_vcxproj(proj_name, file_name, guid, config, plat, proj_type,
                is_cpp, hf_list, cf_list, af_list):
    # Generate a complete .vcxproj file for one MPIR build configuration.
    #
    # proj_name -- target name ('mpir' or 'mpirxx')
    # file_name -- .vcxproj path relative to the build directory
    # guid      -- braced project GUID string
    # config    -- mpn configuration name passed to the prebuild step
    # plat      -- tuple of platforms ('Win32', 'x64' or both)
    # proj_type -- app_type, lib_type or dll_type
    # is_cpp    -- True for the C++ (mpirxx) project
    # hf_list / cf_list / af_list -- headers, C/C++ sources, .asm sources
    f1 = r'''<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
'''
    f2 = r'''  <PropertyGroup Label="UserMacros" />
'''
    f3 = r'''  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
'''
    f4 = r'''  <ImportGroup Label="ExtensionTargets">
    <Import Project="..\vsyasm.targets" />
  </ImportGroup>
'''
    f5 = r'''  <ItemGroup>
    <None Include="..\..\gmp-h.in" />
  </ItemGroup>
</Project>
'''
    fn = normpath(join(build_dir, file_name))
    # prefix leading from the project directory back to the MPIR root
    relp = split(relpath(mpir_dir, fn))[0] + '\\'
    with open(fn, 'w') as outf:
        outf.write(f1)
        vcx_proj_cfg(plat, outf)
        vcx_globals(proj_name, guid, outf)
        vcx_default_cpp_props(outf)
        vcx_library_type(plat, proj_type, outf)
        vcx_cpp_props(outf)
        if af_list:
            vcx_extensions(outf)          # YASM build-customisation props
        vcx_user_props(plat, outf)
        outf.write(f2)
        vcx_target_name_and_dirs(proj_name, plat, proj_type, outf)
        vcx_tool_options(config, plat, proj_type, is_cpp, af_list, outf)
        if hf_list:
            vcx_hdr_items(hf_list, relp, outf)
        vcx_c_items(cf_list, plat, relp, outf)
        vcx_a_items(af_list, relp, outf)
        outf.write(f3)
        if af_list:
            outf.write(f4)                # YASM build-customisation targets
        outf.write(f5)
# add a project file to the solution
# GUID identifying a Visual Studio "solution folder" entry
folder_guid = "{2150E333-8FDC-42A3-9474-1A3956D46DE8}"
# GUID identifying a Visual C++ project entry
vcxproject_guid = "{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}"
# regex fragments: a braced GUID and a quoted project/file name
s_guid = r'\s*(\{\w{8}-\w{4}-\w{4}-\w{4}-\w{12}\})\s*'
s_name = r'\s*\"([a-zA-Z][-.\\_a-zA-Z0-9]*\s*)\"\s*'
re_guid = compile(r'\s*\"\s*' + s_guid + r'\"\s*')
# matches: Project("{type-guid}") = "name", "path", "{project-guid}"
re_proj = compile(r'Project\s*\(\s*\"' + s_guid + r'\"\)\s*=\s*'
                  + s_name + r'\s*,\s*' + s_name + r'\s*,\s*\"' + s_guid + r'\"')
# matches the '{project-guid} = {folder-guid}' lines of NestedProjects
re_fmap = compile(r'\s*' + s_guid + r'\s*=\s*' + s_guid)
def read_solution_file(soln_name):
    # Parse an existing .sln file (if any) into three dictionaries:
    #   fd  -- solution-folder name -> folder GUID
    #   pd  -- project name -> (type GUID, project path, project GUID)
    #   p2f -- project GUID -> containing folder GUID (nesting map)
    # A missing solution file simply yields three empty dicts.
    fd, pd, p2f = {}, {}, {}
    solution_path = join(solution_dir, soln_name)
    if exists(solution_path):
        lines = open(solution_path).readlines()
        for i, ln in enumerate(lines):
            m = re_proj.search(ln)
            if m:
                # folder entries use the folder GUID as their type GUID and
                # repeat the folder name in the 'path' field
                if m.group(1) == folder_guid and m.group(2) == m.group(3):
                    fd[m.group(2)] = m.group(4)
                elif m.group(3).endswith('.vcxproj') or m.group(3).endswith('.pyproj'):
                    pd[m.group(2)] = (m.group(1), m.group(3), m.group(4))
            m = re_fmap.search(ln)
            if m:
                p2f[m.group(1)] = m.group(2)
    return fd, pd, p2f
sol_1 = '''Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio 2013
VisualStudioVersion = 12.0.30626.0
MinimumVisualStudioVersion = 10.0.40219.1
'''
sol_2 = '''Project("{}") = "{}", "{}", "{}"
EndProject
'''
sol_3 = '''Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Win32 = Debug|Win32
Debug|x64 = Debug|x64
Release|Win32 = Release|Win32
Release|x64 = Release|x64
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
GlobalSection(NestedProjects) = preSolution
'''
sol_4 = ''' {} = {}
'''
sol_5 = r''' EndGlobalSection
EndGlobal
'''
def write_solution_file(file_name, fd, pd, p2f):
    """Rewrite the solution file from the folder/project/nesting maps
    produced by read_solution_file (plus any additions)."""
    with open(join(solution_dir, file_name), 'w') as out:
        out.write(sol_1)
        # solution folders first, then the real projects
        for folder, folder_id in fd.items():
            out.write(sol_2.format(folder_guid, folder, folder, folder_id))
        for proj, (type_id, proj_path, proj_id) in pd.items():
            out.write(sol_2.format(type_id, proj, proj_path, proj_id))
        out.write(sol_3)
        # NestedProjects section: which project lives in which folder
        for proj_id, folder_id in p2f.items():
            out.write(sol_4.format(proj_id, folder_id))
        out.write(sol_5)
def add_proj_to_sln(soln_name, soln_folder, proj_name, file_name, guid):
    """Insert (or replace) one project entry in the named solution file,
    optionally nesting it under a solution folder."""
    folders, projects, nesting = read_solution_file(soln_name)
    f_guid = None
    if soln_folder:
        # reuse the existing folder GUID when present, otherwise mint one
        f_guid = folders.get(soln_folder)
        if f_guid is None:
            f_guid = '{' + str(uuid4()).upper() + '}'
            folders[soln_folder] = f_guid
    projects[proj_name] = (vcxproject_guid, file_name, guid)
    if soln_folder:
        nesting[guid] = f_guid
    write_solution_file(soln_name, folders, projects, nesting)
# compile list of C files
t = find_src(c_directories)
c_hdr_list = t[0]
c_src_list = t[1]
# warn about foreign file types found in the C directories
if t[2] or t[3]:
    print('found C++ and/or assembler file(s) in a C directory')
    if t[2]:
        for f in t[2]:
            print(f)
        print()
    if t[3]:
        for f in t[3]:
            print(f)
        print()
# compile list of C++ files
t = find_src(['cxx'])
cc_hdr_list = t[0]
cc_src_list = t[2]
if t[1] or t[3]:
    print('found C and/or assembler file(s) in a C++ directory')
    if t[1]:
        for f in t[1]:
            print(f)
        print()
    if t[3]:
        # NOTE(review): this prints cc_src_list although the test was on
        # t[3] -- looks like a copy/paste slip; confirm intent.
        for f in cc_src_list:
            print(f)
        print()
# compile list of C files in mpn\generic
t = find_src([r'mpn\generic'])
gc_hdr_list = t[0]
gc_src_list = t[1]
if t[2] or t[3]:
    print('found C++ and/or assembler file(s) in a C directory')
    if t[2]:
        # NOTE(review): prints gc_hdr_list (= t[0]) though the test was on
        # t[2]; same pattern below with gc_src_list vs t[3] -- confirm.
        for f in gc_hdr_list:
            print(f)
        print()
    if t[3]:
        for f in gc_src_list:
            print(f)
        print()
# prepare the generic C build
mpn_gc = dict((('gc', [gc_hdr_list, gc_src_list, [], []]),))
# prepare the list of Win32 builds
mpn_32 = find_asm(mpir_dir + 'mpn/x86w', gc_src_list)
syms32 = file_symbols(mpn_32)
del mpn_32['']
# prepare the list of x64 builds
mpn_64 = find_asm(mpir_dir + 'mpn/x86_64w', gc_src_list)
syms64 = file_symbols(mpn_64)
del mpn_64['']
# bits of the optional argv[1] select what runs; bit 2 enables the
# interactive project generation below
if len(argv) != 1 and not int(argv[1]) & 2:
    exit()
# menu index ranges: generic C build(s), then Win32 asm, then x64 asm
nd_gc = len(mpn_gc)
nd_32 = nd_gc + len(mpn_32)
nd_nd = nd_32 + len(mpn_64)
# now ask user which builds they wish to generate
while True:
    cnt = 0
    for v in sorted(mpn_gc):
        cnt += 1
        print('{0:2d}. {1:24s} '.format(cnt, v))
    for v in sorted(mpn_32):
        cnt += 1
        print('{0:2d}. {1:24s} (win32)'.format(cnt, v))
    for v in sorted(mpn_64):
        cnt += 1
        print('{0:2d}. {1:24s} (x64)'.format(cnt, v))
    fs = 'Space separated list of builds (1..{0:d}, 0 to exit)? '
    s = input(fs.format(cnt))
    n_list = [int(c) for c in s.split()]
    if 0 in n_list:
        exit()
    if any(n < 1 or n > nd_nd for n in n_list):
        print('list contains invalid build numbers')
        sleep(2)
    else:
        break
# multiple builds must each have their own prebuilds
if len(n_list) > 1:
    add_prebuild = True
# now gnerate the requested builds
for n in n_list:
if 0 < n <= nd_gc:
config = sorted(mpn_gc)[n - 1]
mode = ('Win32', 'x64')
mpn_f = mpn_gc[config]
elif nd_gc < n <= nd_32:
config = sorted(mpn_32)[n - 1 - nd_gc]
if len(argv) == 1 or int(argv[1]) & 1:
gen_have_list(mpn_32[config], syms32, cfg_dir)
mode = ('Win32', )
mpn_f = mpn_32[config]
elif nd_32 < n <= nd_nd:
config = sorted(mpn_64)[n - 1 - nd_32]
if len(argv) == 1 or int(argv[1]) & 1:
gen_have_list(mpn_64[config], syms64, cfg_dir)
mode = ('x64', )
mpn_f = mpn_64[config]
else:
print('internal error')
exit()
if mode[0] == 'x64':
for l in mpn_f[1:]:
for t in l:
if t[0].startswith('preinv_'):
if 'x64' in mode and t[0] == 'preinv_divrem_1':
l.remove(t)
print(config, mode)
if not add_prebuild:
# generate mpir.h and gmp.h from gmp_h.in
gmp_h = '''
#ifdef _WIN32
# ifdef _WIN64
# define _LONG_LONG_LIMB 1
# define GMP_LIMB_BITS 64
# else
# define GMP_LIMB_BITS 32
# endif
# define __GMP_BITS_PER_MP_LIMB GMP_LIMB_BITS
# define SIZEOF_MP_LIMB_T (GMP_LIMB_BITS >> 3)
# define GMP_NAIL_BITS 0
#endif
'''
try:
lines = open(join(mpir_dir, 'gmp-h.in'), 'r').readlines()
except IOError:
print('error attempting to read from gmp_h.in')
exit()
try:
tfile = join(mpir_dir, 'tmp.h')
with open(tfile, 'w') as outf:
first = True
for line in lines:
if search(r'@\w+@', line):
if first:
first = False
outf.writelines(gmp_h)
else:
outf.writelines([line])
# write result to mpir.h but only overwrite the existing
# version if this version is different (don't trigger an
# unnecessary rebuild)
write_f(tfile, join(mpir_dir, 'mpir.h'))
write_f(tfile, join(mpir_dir, 'gmp.h'))
unlink(tfile)
except IOError:
print('error attempting to create mpir.h from gmp-h.in')
exit()
# generate config.h
try:
tfile = join(mpir_dir, 'tmp.h')
with open(tfile, 'w') as outf:
for i in sorted(mpn_f[5] + mpn_f[6]):
outf.writelines(['#define HAVE_NATIVE_{0:s} 1\n'.format(i)])
append_f(join(build_dir, 'cfg.h'), tfile)
write_f(tfile, join(mpir_dir, 'config.h'))
unlink(tfile)
except IOError:
print('error attempting to write to {0:s}'.format(tfile))
exit()
# generate longlong.h and copy gmp-mparam.h
try:
li_file = None
for i in mpn_f[0]:
if i[0] == 'longlong_inc':
li_file = join(mpir_dir, join(i[2], r'longlong_inc.h'))
if i[0] == 'gmp-mparam':
write_f(join(mpir_dir, join(i[2], 'gmp-mparam.h')),
join(mpir_dir, 'gmp-mparam.h'))
if not li_file or not exists(li_file):
print('error attempting to read {0:s}'.format(li_file))
exit()
tfile = join(mpir_dir, 'tmp.h')
write_f(join(mpir_dir, 'longlong_pre.h'), tfile)
append_f(li_file, tfile)
append_f(join(mpir_dir, 'longlong_post.h'), tfile)
write_f(tfile, join(mpir_dir, 'longlong.h'))
unlink(tfile)
except IOError:
print('error attempting to generate longlong.h')
exit()
# generate the vcxproj and the IDE filter files
# and add/replace project in the solution file
hf_list = ('config.h', 'gmp-impl.h', 'longlong.h', 'mpir.h', 'gmp-mparam.h')
af_list = sorted(mpn_f[2] + mpn_f[3])
# find the gmp-mparam.h file to be used
for name, ty, loc in mpn_f[0]:
if name == 'gmp-mparam':
loc = loc.replace('mpn\\x86w', '', 1)
loc = loc.replace('mpn\\x86_64w', '', 1)
if loc.startswith('\\'):
loc = loc[1:]
mp_dir = loc if loc else config
break
else:
mp_dir = config
proj_name = 'mpir'
cf = config.replace('\\', '_')
# set up DLL build
guid = '{' + str(uuid4()) + '}'
vcx_name = 'dll_mpir_' + cf
vcx_path = 'dll_mpir_' + cf + '\\' + vcx_name + '.vcxproj'
gen_filter(vcx_path + '.filters', hf_list,
c_src_list + cc_src_list + mpn_f[1], af_list)
gen_vcxproj(proj_name, vcx_path, guid, mp_dir, mode, dll_type,
False, hf_list, c_src_list + cc_src_list + mpn_f[1], af_list)
add_proj_to_sln(solution_name, '', vcx_name, vcx_path, guid)
# set up LIB build
guid = '{' + str(uuid4()) + '}'
vcx_name = 'lib_mpir_' + cf
vcx_path = 'lib_mpir_' + cf + '\\' + vcx_name + '.vcxproj'
gen_filter(vcx_path + '.filters', hf_list, c_src_list + mpn_f[1], af_list)
gen_vcxproj(proj_name, vcx_path, guid, mp_dir, mode, lib_type,
False, hf_list, c_src_list + mpn_f[1], af_list)
add_proj_to_sln(solution_name, '', vcx_name, vcx_path, guid)
# C++ library build
if add_cpp_lib:
guid = '{' + str(uuid4()) + '}'
proj_name = 'mpirxx'
mode = ('Win32', 'x64')
vcx_name = 'lib_mpir_cxx'
vcx_path = 'lib_mpir_cxx\\' + vcx_name + '.vcxproj'
th = hf_list + ('mpirxx.h',)
gen_filter(vcx_path + '.filters', th, cc_src_list, '')
gen_vcxproj(proj_name, vcx_path, guid, config, mode, lib_type, True, th, cc_src_list, '')
add_proj_to_sln('mpir.sln', '', vcx_name, vcx_path, guid)
# the following code is for diagnostic purposes only
if debug:
for x in sorted(mpn_f[0] + mpn_f[1]):
print(x)
print()
for x in sorted(mpn_f[2] + mpn_f[3]):
print(x)
print()
# mpn_files = dict()
# mpn_files.update(mpn_32)
# mpn_files.update(mpn_64)
for x in mpn_f[config]:
print(x)
if False:
print('1:')
for y in mpn_files[x][0]:
print(y)
print('2:')
for y in mpn_files[x][1]:
print(y)
print('3:')
for y in mpn_files[x][2]:
print(y)
print('4:')
for y in mpn_files[x][3]:
print(y)
print()
for y in sorted(x[2] + x[3]):
print(y)
print()
print()
if debug:
mpn_dirs = ('mpn/generic', 'mpn/x86_64w', 'mpn/x86w' )
# compile a list of files in directories in 'dl' under root 'r' with extension 'p'
def findf(r, dl, p):
l = []
for d in dl:
for root, dirs, files in walk(r + d):
relp = relpath(root, r) # path relative to mpir root directory
if '.svn' in dirs:
dirs.remove('.svn') # ignore SVN directories
if d == '' or root.endswith(build_vc):
for d in reversed(dirs): # don't scan build.vc10 subdirectories
dirs.remove(d)
for f in files:
if f.endswith(p):
l += [(tuple(relp.split('\\')), f)]
return sorted(l)
hdr_list = findf(mpir_dir, c_directories, '.h')
for x in hdr_list:
print(x)
print()
src_list = findf(mpir_dir, c_directories, '.c')
for x in src_list:
print(x)
print()
cpp_list = findf(mpir_dir, ['cpp'], '.cc')
for x in cpp_list:
print(x)
print()
gnc_list = findf(mpir_dir + 'mpn/', ['generic'], '.c')
for x in gnc_list:
print(x)
print()
w32_list = findf(mpir_dir + 'mpn/', ['x86w'], '.asm')
for x in w32_list:
print(x)
print()
x64_list = findf(mpir_dir + 'mpn/', ['x86_64w'], '.asm')
for x in x64_list:
print(x)
print()
nd = dict([])
for d, f in gnc_list:
n, x = splitext(f)
nd[n] = nd.get(n, []) + [(d, 'c')]
for d, f in x64_list:
n, x = splitext(f)
nd[n] = nd.get(n, []) + [(d, 'asm')]
for d, f in w32_list:
n, x = splitext(f)
nd[n] = nd.get(n, []) + [(d, 'asm')]
for x in nd:
print(x, nd[x])
|
omco/mpir
|
build.vc10/mpir_config.py
|
Python
|
lgpl-3.0
| 31,141
|
[
"Brian"
] |
5bccf05960d5fb0e520749775a57a27ad29967080dae9503cef939dc2d18be23
|
import os
import pyscf
from pyscf.lib.logger import perf_counter, process_clock
def setup_logger():
    # Build a pyscf Logger (verbose=5) and record the benchmark host's
    # environment: CPU model (first 'model name' line of /proc/cpuinfo),
    # memory info (first line of /proc/meminfo) and OMP_NUM_THREADS.
    # Linux-only: relies on the /proc filesystem.
    log = pyscf.lib.logger.Logger(verbose=5)
    with open('/proc/cpuinfo') as f:
        for line in f:
            if 'model name' in line:
                log.note(line[:-1])   # [:-1] strips the trailing newline
                break
    with open('/proc/meminfo') as f:
        log.note(f.readline()[:-1])
    log.note('OMP_NUM_THREADS=%s\n', os.environ.get('OMP_NUM_THREADS', None))
    return log
def get_cpu_timings():
    # Return the (process_clock(), perf_counter()) pair imported from
    # pyscf.lib.logger at module top -- presumably CPU time and wall-clock
    # time respectively, matching the stdlib names.
    return process_clock(), perf_counter()
|
sunqm/pyscf
|
examples/2-benchmark/benchmarking_utils.py
|
Python
|
apache-2.0
| 534
|
[
"PySCF"
] |
09ad38c5c241080519c67cbf42ab64aa7431bd81ebebdc1d3e7bc64b3e61dea0
|
#
# Copyright (c) 2015, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Greg Landrum based on code from Thomas Mueller
# August 2012
#
#pylint: disable=C0111,W0311
try:
from rdkit.Avalon import pyAvalonTools
except ImportError:
raise ImportError("This code requires the RDKit to be built with AvalonTools support")
from rdkit.Chem.MolKey import InchiInfo
from rdkit import RDConfig
from rdkit import Chem
from collections import namedtuple
import logging
import os,re,uuid,base64,hashlib,tempfile
class MolIdentifierException(Exception):
    """Raised when a molecule key cannot be constructed (e.g. a missing
    stereo category)."""
    pass


class BadMoleculeException(Exception):
    """Raised for structurally invalid input (blank input or a malformed
    CTAB/molfile)."""
    pass
# version prefix embedded in every generated key ('<version>|<hash>')
MOL_KEY_VERSION = '1'

# Bit flags carried in a key's error field (OR-ed together).  The low bits
# mirror the Avalon structure-checker results; the high bits are added by
# this module for InChI/RDKit conversion failures and null structures.
ERROR_DICT = dict(BAD_MOLECULE=1,
                  ALIAS_CONVERSION_FAILED=2,
                  TRANSFORMED=4,
                  FRAGMENTS_FOUND=8,
                  EITHER_WARNING=16,
                  STEREO_ERROR=32,
                  DUBIOUS_STEREO_REMOVED=64,
                  ATOM_CLASH=128,
                  ATOM_CHECK_FAILED=256,
                  SIZE_CHECK_FAILED=512,
                  RECHARGED=1024,
                  STEREO_FORCED_BAD=2048,
                  STEREO_TRANSFORMED=4096,
                  TEMPLATE_TRANSFORMED=8192,
                  INCHI_COMPUTATION_ERROR=65536,
                  RDKIT_CONVERSION_ERROR=131072,
                  INCHI_READWRITE_ERROR=262144,
                  NULL_MOL=524288)
INCHI_COMPUTATION_ERROR = ERROR_DICT['INCHI_COMPUTATION_ERROR']
RDKIT_CONVERSION_ERROR = ERROR_DICT['RDKIT_CONVERSION_ERROR']
INCHI_READWRITE_ERROR = ERROR_DICT['INCHI_READWRITE_ERROR']
NULL_MOL = ERROR_DICT['NULL_MOL']
# any of these bits means the structure could not be keyed normally
BAD_SET = pyAvalonTools.StruChkResult.bad_set | INCHI_COMPUTATION_ERROR | RDKIT_CONVERSION_ERROR | INCHI_READWRITE_ERROR | NULL_MOL

# picks apart an InChI's stereo layers: (prefix, t-layer, tail)
GET_STEREO_RE = re.compile(r'^InChI=1S(.*?)/(t.*?)/m\d/s1(.*$)')
# inputs that mean "no structure"
NULL_SMILES_RE = re.compile(r'^\s*$|^\s*NO_STRUCTURE\s*$', re.IGNORECASE)
PATTERN_NULL_MOL = r'^([\s0]+[1-9]+[\s]+V[\w]*)'
# column of the chiral flag within a molfile counts line
CHIRAL_POS = 12

T_NULL_MOL = (NULL_MOL, '')  # the NULL mol result tuple

# numeric codes for the stereo categories accepted/produced by this module
stereo_code_dict = {}
stereo_code_dict['DEFAULT'] = 0
stereo_code_dict['S_ACHIR'] = 1
stereo_code_dict['S_ABS'] = 2
stereo_code_dict['S_REL'] = 3
stereo_code_dict['S_PART'] = 4
stereo_code_dict['S_UNKN'] = 5
stereo_code_dict['S_ABS_ACHIR'] = 6
stereo_code_dict['R_ONE'] = 11
stereo_code_dict['R_REL'] = 12
stereo_code_dict['R_OTHER'] = 13
stereo_code_dict['MX_ENANT'] = 21
stereo_code_dict['MX_DIAST'] = 22
stereo_code_dict['MX_SP2'] = 31
stereo_code_dict['MX_DIAST_ABS'] = 32
stereo_code_dict['MX_DIAST_REL'] = 33
stereo_code_dict['OTHER'] = 100
stereo_code_dict['UNDEFINED'] = 200
def _fix_all(pat, sbt, my_string) :
try :
new_string = re.sub(pat, sbt, my_string)
return new_string
except :
return None
def _fix_line_ends(my_string):
    r"""Normalise line endings: '\r\n' and bare '\r' both become '\n'."""
    return _fix_all('\r\n{0,1}', '\n', my_string)
def _fix_chemdraw_header(my_string):
    """Replace the ChemDraw '0V2000' counts-line tag with the standard 'V2000'."""
    return _fix_all('0V2000', 'V2000', my_string)
def _ctab_has_atoms(ctab_lines):
''' look at atom count position (line 4, characters 0:3)
Return True if the count is >0, False if 0.
Throw BadMoleculeException if there are no characters
at the required position or if they cannot be converted
to a positive integer
'''
try:
str_a_count = ctab_lines[3][0:3]
a_count = int(str_a_count)
if a_count < 0:
raise BadMoleculeException('Atom count negative')
if a_count > 0:
rval = True
else:
rval = False
except IndexError:
raise BadMoleculeException('Invalid molfile format')
except ValueError:
raise BadMoleculeException('Expected integer')
return rval
def _ctab_remove_chiral_flag(ctab_lines):
''' read the chiral flag (line 4, characters 12:15)
and set it to 0. Return True if it was 1, False if 0.
Throw BadMoleculeException if there are no characters
at the required position or if they where not 0 or 1
'''
try:
str_a_count = ctab_lines[3][12:15]
a_count = int(str_a_count)
if a_count == 0:
rval = False
elif a_count == 1:
rval = True
orig_line = ctab_lines[3]
ctab_lines[3] = orig_line[:CHIRAL_POS] + ' 0' + orig_line[CHIRAL_POS + 3:]
else:
raise BadMoleculeException('Expected chiral flag 0 or 1')
except IndexError:
raise BadMoleculeException('Invalid molfile format')
except ValueError:
raise BadMoleculeException('Expected integer, got {0}'.format(str_a_count))
return rval
# module-level guard so the Avalon checker is initialised at most once
__initCalled = False


def initStruchk(configDir=None, logFile=None):
    # Initialise the Avalon 'struchk' structure checker.
    #
    # configDir defaults to $RDDataDir/struchk and must end with a path
    # separator for the option strings below; logFile defaults to a fresh
    # NamedTemporaryFile, closed immediately so struchk can reopen it.
    # Raises ValueError if pyAvalonTools rejects the option string.
    global __initCalled
    if configDir is None:
        configDir = os.path.join(RDConfig.RDDataDir, 'struchk')
    if configDir[-1] != os.path.sep:
        configDir += os.path.sep
    if logFile is None:
        fd = tempfile.NamedTemporaryFile(suffix='.log', delete=False)
        fd.close()
        logFile = fd.name
    # struchk option string: transform tables, checks, charge limits, log
    struchk_init = '''-tm
-ta {0}checkfgs.trn
-tm
-or
-ca {0}checkfgs.chk
-cc
-cl 3
-cs
-cn 999
-l {1}\n'''.format(configDir, logFile)
    initRes = pyAvalonTools.InitializeCheckMol(struchk_init)
    if initRes:
        raise ValueError('bad result from InitializeCheckMol: ' + str(initRes))
    __initCalled = True
def CheckCTAB(ctab, isSmiles=True):
    # Run the Avalon structure checker over a SMILES string or CTAB block.
    #
    # Returns the (error_bits, fixed_structure) pair produced by
    # pyAvalonTools.CheckMoleculeString, or T_NULL_MOL for recognised
    # "no structure" inputs.  Raises BadMoleculeException for empty input
    # or a CTAB with fewer than four lines.
    if not __initCalled:
        initStruchk()
    mol_str = ctab
    if not mol_str:
        raise BadMoleculeException('Unexpected blank or NULL molecule')
    else:
        # normalise line endings and the ChemDraw counts-line quirk first
        mol_str = _fix_line_ends(mol_str)
        mol_str = _fix_chemdraw_header(mol_str)
    if isSmiles:  # branch for NULL_MOL checks
        if mol_str and NULL_SMILES_RE.match(mol_str):
            rval = T_NULL_MOL
        else:
            rval = pyAvalonTools.CheckMoleculeString(mol_str, isSmiles)
    else:
        # decompose the ctab into lines
        # the line terminator may be \n or \r\n, or even r'\n'
        ctab_lines = mol_str.split('\n')
        if len(ctab_lines) <= 3:
            raise BadMoleculeException('Not enough lines in CTAB')
        # the chiral flag is cleared before checking so it cannot affect
        # the result; zero-atom CTABs count as null structures
        _ctab_remove_chiral_flag(ctab_lines)
        if not _ctab_has_atoms(ctab_lines):
            rval = T_NULL_MOL
        else:  # reassemble the ctab lines into one string.
            mol_str = '\n'.join(ctab_lines)
            rval = pyAvalonTools.CheckMoleculeString(mol_str, isSmiles)
    return rval
InchiResult = namedtuple('InchiResult', ['error', 'inchi', 'fixed_ctab'])


def GetInchiForCTAB(ctab):
    """Run struchk on a CTAB and compute its fixed-H InChI.

    Returns an InchiResult(error, inchi, fixed_ctab): 'error' carries the
    struchk warning/error bits OR-ed with any INCHI_*/RDKIT_* conversion
    error bits, and 'inchi' is None when conversion failed.

    >>> from rdkit.Chem.MolKey import MolKey
    >>> from rdkit.Avalon import pyAvalonTools
    >>> res = MolKey.GetInchiForCTAB(pyAvalonTools.Generate2DCoords('c1cn[nH]c1C(Cl)Br',True))
    >>> res.inchi
    'InChI=1/C4H4BrClN2/c5-4(6)3-1-2-7-8-3/h1-2,4H,(H,7,8)/t4?/f/h8H'
    >>> res = MolKey.GetInchiForCTAB(pyAvalonTools.Generate2DCoords('c1c[nH]nc1C(Cl)Br',True))
    >>> res.inchi
    'InChI=1/C4H4BrClN2/c5-4(6)3-1-2-7-8-3/h1-2,4H,(H,7,8)/t4?/f/h7H'
    >>>
    """
    inchi = None
    (strucheck_err, fixed_mol) = CheckCTAB(ctab, False)
    if strucheck_err & BAD_SET:
        # FIX (consistency): this early return used to be a plain tuple
        # while the normal path returns InchiResult; wrap it the same way
        # (backwards compatible -- namedtuples are tuples).
        return InchiResult(strucheck_err, None, fixed_mol)
    conversion_err = 0
    try:
        # skip sanitization: struchk has already vetted the structure
        r_mol = Chem.MolFromMolBlock(fixed_mol, sanitize=False)
        if r_mol:
            inchi = Chem.MolToInchi(r_mol, '/FixedH /SUU')
            if not inchi:
                conversion_err = INCHI_COMPUTATION_ERROR
        else:
            conversion_err = RDKIT_CONVERSION_ERROR
    except Chem.InchiReadWriteError:
        conversion_err = INCHI_READWRITE_ERROR
    # keep warnings from strucheck
    return InchiResult(strucheck_err | conversion_err, inchi, fixed_mol)
def _make_racemate_inchi(inchi):
    """ Normalize the stereo information (t-layer) to one selected isomer. """
    # set stereo type = 3 (racemate) for consistency
    # reset inverted flag to m0 - not inverted
    #
    # NOTE(review): GET_STEREO_RE captures three groups (prefix, t-layer,
    # tail), but only groups 1 and 2 are used below, and both the
    # 'InChI=1S' prefix and the tail are dropped from the rebuilt string.
    # That looks inconsistent with the pattern -- verify the intended
    # output before relying on this helper.
    new_stereo = '/m0/s3/'
    stereo_match = GET_STEREO_RE.match(inchi)
    if stereo_match:
        inchi = stereo_match.group(1) + new_stereo + stereo_match.group(2)
    return inchi
def _get_identification_string(err, ctab, inchi, stereo_category=None, extra_stereo=None):
    """Build the pre-hash identification string for a molecule.

    Null molecules get a random UUID string; bad molecules get a
    CTAB-derived string; otherwise the string is the InChI plus the
    mandatory stereo category and any extra stereo information.
    """
    if err & NULL_MOL:
        return _get_null_mol_identification_string(extra_stereo)
    if err & BAD_SET:  # bad molecules get special key
        return _get_bad_mol_identification_string(ctab, stereo_category, extra_stereo)
    parts = []
    if inchi:
        parts.append(inchi)
        if not stereo_category:
            raise MolIdentifierException('Stereo category may not be left undefined')
        parts.append('ST=' + stereo_category)
    if extra_stereo:
        parts.append('XTR=' + extra_stereo)
    return '/'.join(parts)
def _get_null_mol_identification_string(extra_stereo) :
key_string = str(uuid.uuid1 ())
return key_string
def _get_bad_mol_identification_string(ctab, stereo_category, extra_stereo):
    """Key string for molecules that failed struchk: the normalised CTAB
    body (header lines dropped, flattened to one line) plus whatever
    stereo information is available."""
    parts = []
    if ctab:
        # normalise line endings / ChemDraw quirk, drop the 3 header lines
        body = _fix_chemdraw_header(_fix_line_ends(ctab))
        body = '\n'.join(body.split('\n')[3:])
        # make a handy one-line string
        parts.append(body.replace('\n', r'\n'))
    if stereo_category:
        parts.append('ST={0}'.format(stereo_category))
    if extra_stereo:
        parts.append('XTR={0}'.format(extra_stereo))
    return '/'.join(parts)
def _identify(err, ctab, inchi, stereo_category, extra_structure_desc=None):
    """ Compute the molecule key based on the inchi string,
        stereo category as well as extra structure
        information """
    key_string = _get_identification_string(err, ctab, inchi, stereo_category,
                                            extra_structure_desc)
    if not key_string:
        return None
    digest = hashlib.md5(key_string.encode('UTF-8')).digest()  # pylint: disable=E1101
    return '{0}|{1}'.format(MOL_KEY_VERSION, base64.b64encode(digest).decode())
def _get_chiral_identification_string(n_def, n_udf) :
assert n_def >= 0
assert n_udf >= 0
id_str = 'OTHER'
if n_def == 0 : # no defined stereocenters
if n_udf == 0 : # no undefined ones either
id_str = 'S_ACHIR' # -> achiral
elif n_udf == 1 : # one undefined, no defined
id_str = 'R_ONE' # -> racemate by convention
else: # several undefined, no defined
id_str = 'S_UNKN' # -> can't say anything based on the drawing
else: # some stereo defined
if n_udf == 0 : # fully specified stereo
id_str = 'S_ABS' # -> absolute stereo
else: # multiple possibilities
id_str = 'S_PART' # -> assume single compound (can usually be separated)
return id_str
def ErrorBitsToText(err):
    " returns a list of error bit descriptions for the error code provided "
    return [name for name, bit in ERROR_DICT.items() if err & bit]
# full result of key generation: the hashed key plus everything it was
# derived from
MolKeyResult = namedtuple('MolKeyResult',
                          ['mol_key', 'error', 'inchi', 'fixed_ctab', 'stereo_code', 'stereo_comment'])


def GetKeyForCTAB(ctab, stereo_info=None, stereo_comment=None, logger=None):
    """Compute the MolKey for a CTAB block.

    stereo_info may carry a coded stereo category (a stereo_code_dict key,
    optionally followed by extra text); stereo_comment overrides any extra
    text.  Raises NotImplementedError when struchk reports a fatal error.

    >>> from rdkit.Chem.MolKey import MolKey
    >>> from rdkit.Avalon import pyAvalonTools
    >>> res=MolKey.GetKeyForCTAB(pyAvalonTools.Generate2DCoords('c1ccccc1C(F)Cl',True))
    >>> res.mol_key
    '1|L7676nfGsSIU33wkx//NCg=='
    >>> res.stereo_code
    'R_ONE'
    >>> res=MolKey.GetKeyForCTAB(pyAvalonTools.Generate2DCoords('c1ccccc1[C@H](F)Cl',True))
    >>> res.mol_key
    '1|Aj38EIxf13RuPDQG2A0UMw=='
    >>> res.stereo_code
    'S_ABS'
    >>> res=MolKey.GetKeyForCTAB(pyAvalonTools.Generate2DCoords('c1ccccc1[C@@H](F)Cl',True))
    >>> res.mol_key
    '1|9ypfMrhxn1w0ncRooN5HXw=='
    >>> res.stereo_code
    'S_ABS'
    >>> res=MolKey.GetKeyForCTAB(pyAvalonTools.Generate2DCoords('c1cccc(C(Br)Cl)c1[C@@H](F)Cl',True))
    >>> res.mol_key
    '1|c96jMSlbn7O9GW5d5uB9Mw=='
    >>> res.stereo_code
    'S_PART'
    >>> res=MolKey.GetKeyForCTAB(pyAvalonTools.Generate2DCoords('c1cccc([C@H](Br)Cl)c1[C@@H](F)Cl',True))
    >>> res.mol_key
    '1|+B+GCEardrJteE8xzYdGLA=='
    >>> res.stereo_code
    'S_ABS'
    >>> res=MolKey.GetKeyForCTAB(pyAvalonTools.Generate2DCoords('c1cccc(C(Br)Cl)c1C(F)Cl',True))
    >>> res.mol_key
    '1|5H9R3LvclagMXHp3Clrc/g=='
    >>> res.stereo_code
    'S_UNKN'
    >>> res=MolKey.GetKeyForCTAB(pyAvalonTools.Generate2DCoords('c1cccc(C(Br)Cl)c1C(F)Cl',True),stereo_info='S_REL')
    >>> res.mol_key
    '1|cqKWVsUEY6QNpGCbDaDTYA=='
    >>> res.stereo_code
    'S_REL'
    >>> res.inchi
    'InChI=1/C8H6BrCl2F/c9-7(10)5-3-1-2-4-6(5)8(11)12/h1-4,7-8H/t7?,8?'
    """
    if logger is None:
        logger = logging
    try:
        err, inchi, fixed_mol = GetInchiForCTAB(ctab)
    except BadMoleculeException:
        # unparseable input: substitute a no-struct (random UUID) key
        logger.warn('Corrupt molecule substituting no-struct: --->\n{0}\n<----'.format(ctab))
        err = NULL_MOL
        key = _identify(err, '', '', None, None)
        return MolKeyResult(key, err, '', '', None, None)
    # read or estimate stereo category and/or extra structure description
    stereo_category = None
    extra_structure_desc = stereo_comment
    if stereo_info:  # check stereo_info field for coded stereo category and extra stereo info
        info_flds = stereo_info.split(' ', 1)
        code_fld = info_flds[0]
        if code_fld in stereo_code_dict:
            stereo_category = code_fld
            if (not stereo_comment) and len(info_flds) > 1:
                extra_structure_desc = info_flds[1].strip()
        else:
            logger.warn('stereo code {0} not recognized. Using default value for ctab.'.format(code_fld))
    if not (err & BAD_SET):
        # count sp3 stereocentres from the InChI and derive the category
        # when none (or only 'DEFAULT') was supplied
        (n_stereo, n_undef_stereo, is_meso,
         dummy) = InchiInfo.InchiInfo(inchi).get_sp3_stereo()['main']['non-isotopic']
        if stereo_category == None or stereo_category == 'DEFAULT':  # compute if not set
            stereo_category = _get_chiral_identification_string(n_stereo - n_undef_stereo,
                                                                n_undef_stereo)
    else:
        raise NotImplementedError("currently cannot generate correct keys for molecules with struchk errors")
    key = _identify(err, fixed_mol, inchi, stereo_category, extra_structure_desc)
    return MolKeyResult(key, err, inchi, fixed_mol, stereo_category, extra_structure_desc)
#------------------------------------
#
# doctest boilerplate
#
def _test():
    # Run this module's doctests (the examples in the docstrings above).
    import doctest, sys
    return doctest.testmod(sys.modules["__main__"])


if __name__ == '__main__':
    import sys
    # exit status = number of failed doctests (0 on success)
    failed, tried = _test()
    sys.exit(failed)
|
adalke/rdkit
|
rdkit/Chem/MolKey/MolKey.py
|
Python
|
bsd-3-clause
| 16,577
|
[
"RDKit"
] |
b313bb98e3a397335c835f2ddbb065d0167443b21cb59aef3140a66f52980c87
|
#!/usr/bin/env python
""" This script instantiate a DFC client against a given service,
and hammers it with read request (listDirectory) for a given time.
It produces two files : time.txt and clock.txt which contain time measurement,
using time.time and time.clock (see respective doc)
It assumes that the DB has been filled with the scripts in generateDB
Tunable parameters:
* maxDuration : time it will run. Cannot be too long, otherwise job
is killed because staled
* port: list of ports on which we can find a service (assumes all the service running on one machine)
* hostname: name of the host hosting the service
* readDepth: depth of the path when reading
The depths are to be put in relation with the depths you used to generate the db
"""
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()

import os
import random
import time

from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient

# pick one of the service ports at random (all services on one host)
port = random.choice([9196, 9197, 9198, 9199])
hostname = 'yourmachine.somewhere.something'
servAddress = 'dips://%s:%s/DataManagement/FileCatalog' % (hostname, port)

maxDuration = 1800  # 30mn

fc = FileCatalogClient(servAddress)

# depth of the random directory path queried; must match the depth used
# when generating the DB
# lfc size = 9, huge db small req = 12, huge db big = 6
readDepth = 12

# two measurement files: wall-clock (time.time) and CPU (time.clock)
f = open('time.txt', 'w')
f2 = open('clock.txt', 'w')
f.write("QueryStart\tQueryEnd\tQueryTime\textra(port %s)\n" % port)
f2.write("QueryStart\tQueryEnd\tQueryClock\textra(port %s)\n" % port)

start = time.time()
done = False
while not done:
    # Between 0 and 3 because in generate we have 4 subdirs per dir. Adapt :-)
    rndTab = [random.randint(0, 3) for i in xrange(readDepth)]
    dirPath = '/' + '/'.join(map(str, rndTab))
    before = time.time()
    beforeC = time.clock()
    res = fc.listDirectory(dirPath)
    afterC = time.clock()
    after = time.time()
    queryTime = after - before
    queryTimeC = afterC - beforeC
    # record either the error message or "path #files #subdirs"
    if not res['OK']:
        extra = res['Message']
    else:
        out = res['Value']['Successful'][dirPath]
        extra = "%s %s %s" % (dirPath, len(out['Files']), len(out['SubDirs']))
    # flush + fsync each sample so measurements survive the job being
    # killed as stale (see module docstring)
    f.write("%s\t%s\t%s\t%s\n" % (before, after, queryTime, extra))
    f.flush()
    os.fsync(f)
    f2.write("%s\t%s\t%s\t%s\n" % (beforeC, afterC, queryTimeC, extra))
    f2.flush()
    os.fsync(f2)
    if (time.time() - start > maxDuration):
        done = True
f.close()
f2.close()
|
Andrew-McNab-UK/DIRAC
|
tests/Performance/DFCPerformance/readPerf.py
|
Python
|
gpl-3.0
| 2,365
|
[
"DIRAC"
] |
cda0a516f797be070891ae72d05e02d67319b1bfb07de2748dfb92f748650756
|
# dump stokes-netcdf
# Copyright (C) 2007-2008 Kengo Ichiki <kichiki@users.sourceforge.net>
# $Id: stncdump.py,v 1.11 2008/06/03 02:55:55 kichiki Exp $
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import sys
#sys.path.append('/somewhere/ryuon/stokes/python')
import stokes
def usage():
print '$Id: stncdump.py,v 1.11 2008/06/03 02:55:55 kichiki Exp $'
print 'USAGE:'
print '\t-f or --file : stokes-nc-file'
print '\t-line : all particles are in a single line for each time\n'\
'\t\t (default: one particle per line)'
print '\t-step n : write the config at step n\n'\
'\t\t n starts from 1 and ends at 1001 for 1000-step run.\n'
print '\t-com x y z : shift the COM to (x,y,z) with -step option.\n'
sys.exit ()
def main():
    """Dump particle configurations from a stokes-netcdf file.

    Parses the command line (see usage()), then either:
      * with -step n: prints the configuration at that single step as a
        scheme-like '(define x #(...))' block (optionally COM-shifted), or
      * otherwise: prints header info plus one row per (time, particle)
        (default) or one row per time (-line).
    """
    filename = ''
    flag_line = 0
    # step == -1 means "dump the whole trajectory"; user input is 1-based.
    step = -1
    flag_com = 0
    x0 = 0.0
    y0 = 0.0
    z0 = 0.0
    # hand-rolled argv parsing; i advances by the number of consumed tokens
    i = 1
    while i < len(sys.argv):
        if sys.argv[i] == '-f' or sys.argv[i] == '--file':
            filename = sys.argv[i+1]
            i += 2
        elif sys.argv[i] == '-line':
            flag_line = 1
            i += 1
        elif sys.argv[i] == '-step':
            step = int(sys.argv[i+1])
            step -= 1
            i += 2
        elif sys.argv[i] == '-com':
            flag_com = 1
            x0 = float(sys.argv[i+1])
            y0 = float(sys.argv[i+2])
            z0 = float(sys.argv[i+3])
            i += 4
        else:
            usage()
    if filename == '': usage()
    nc = stokes.stokes_nc_open (filename)
    # pos[] : center of particles
    pos = stokes.darray(nc.np * nc.nvec)
    # q[] : quaternion
    if nc.flag_q != 0:
        q = stokes.darray(nc.np * nc.nquat)
    else:
        q = []
    # lattice
    lattice = stokes.darray(3)
    stokes.stokes_nc_get_array1d (nc, 'l', lattice)
    # extract the config at the step
    if step >= 0:
        if step > nc.ntime:
            print 'out of the range %d <= %d'%(step, nc.ntime)
            sys.exit(1)
        # read the config at the step
        t = stokes.stokes_nc_get_time_step (nc, step)
        stokes.stokes_nc_get_data (nc, "x", step, pos)
        if nc.flag_q != 0:
            stokes.stokes_nc_get_data (nc, "q", step, q)
        # center of mass of all particles (only used with -com)
        comx = 0.0
        comy = 0.0
        comz = 0.0
        if flag_com != 0:
            for i in range(nc.np):
                comx += pos[i*3]
                comy += pos[i*3+1]
                comz += pos[i*3+2]
            comx /= float(nc.np)
            comy /= float(nc.np)
            comz /= float(nc.np)
        # print the config
        # print arguments as a comment
        print '; config at %d step of %s, generated by'%(step+1, filename)
        print '; ',
        for i in range(len(sys.argv)):
            print '%s'%(sys.argv[i]),
        print ''
        print '; at time %f'%(t)
        print '(define x #('
        for i in range(nc.np):
            # shift each particle so the COM lands on (x0, y0, z0)
            print '  %f %f %f ; %d'%(pos[i*3] - comx + x0,
                                     pos[i*3+1] - comy + y0,
                                     pos[i*3+2] - comz + z0,
                                     i)
        print '))'
        if nc.flag_q != 0:
            stokes.stokes_nc_get_data (nc, "q", step, q)
            print '(define q #('
            for i in range(nc.np):
                print '  %f %f %f %f ; %d'\
                    %(q[i*4],q[i*4+1],q[i*4+2],q[i*4+3],i)
            print '))'
        sys.exit(1)
    # done!
    if flag_line == 0:
        # print some general informations
        stokes.stokes_nc_print_actives(nc, stokes.get_stdout())
        print ''
        print 'ntime = %d'%(nc.ntime)
        print ''
        # imposed flows
        if nc.flag_ui0 == 1:
            ui0 = stokes.darray(3)
            stokes.stokes_nc_get_array1d (nc, "Ui0", ui0)
            print 'ui0 = %f %f %f'%(ui0[0], ui0[1], ui0[2])
        if nc.flag_oi0 == 1:
            oi0 = stokes.darray(3)
            stokes.stokes_nc_get_array1d (nc, "Oi0", oi0)
            print 'oi0 = %f %f %f'%(oi0[0], oi0[1], oi0[2])
        if nc.flag_ei0 == 1:
            ei0 = stokes.darray(5)
            stokes.stokes_nc_get_array1d (nc, "Ei0", ei0)
            print 'ei0 = %f %f %f %f %f'\
                %(ei0[0], ei0[1], ei0[2], ei0[3], ei0[4])
        print ''
        print 'lattice %f %f %f\n'%(lattice[0], lattice[1], lattice[2])
    # print the infos about the output format
    if flag_line == 0:
        if nc.flag_q != 0:
            print '# t, i, x, y, z, q1, q2, q3, q4'
        else:
            print '# t, i, x, y, z'
    else:
        if nc.flag_q != 0:
            print '# t, x, y, z, q1, q2, q3, q4 (for particle 0),'\
                ' ... upto particle %d'%(nc.np)
        else:
            print '# t, x, y, z (for particle 0), x, y, z (for 1),'\
                ' ... upto particle %d'%(nc.np)
    # full trajectory dump, one time step at a time
    for i in range(nc.ntime):
        t = stokes.stokes_nc_get_time_step (nc, i)
        stokes.stokes_nc_get_data (nc, "x", i, pos)
        if nc.flag_q != 0:
            stokes.stokes_nc_get_data (nc, "q", i, q)
        if flag_line == 0:
            for j in range(nc.np):
                x = pos[j*3]
                y = pos[j*3+1]
                z = pos[j*3+2]
                if nc.flag_q != 0:
                    # NOTE(review): q[0..3] is printed for every particle j;
                    # probably q[j*4 .. j*4+3] was intended -- confirm.
                    print '%f %d %f %f %f %f %f %f %f'\
                        %(t, j, x, y, z, q[0], q[1], q[2], q[3])
                else:
                    print '%f %d %f %f %f'%(t, j, x, y, z)
        else:
            print t,
            for j in range(nc.np):
                x = pos[j*3]
                y = pos[j*3+1]
                z = pos[j*3+2]
                print x, y, z,
                if nc.flag_q != 0:
                    # NOTE(review): same q[0..3] indexing as above -- confirm.
                    print q[0], q[1], q[2], q[3],
            print ''
if __name__ == "__main__":
    main()
|
kichiki/stokes
|
python/stncdump.py
|
Python
|
gpl-2.0
| 6,474
|
[
"NetCDF"
] |
ba99f198be2e96c64962f3034fd0d4b878270fbde9422e3b779ce96ee55f5f26
|
import numpy as np
import Grid
import pf_dynamic_sph
import os
import sys
from timeit import default_timer as timer
if __name__ == "__main__":
    start = timer()
    # ---- INITIALIZE GRIDS ----
    # Real-space cartesian grid (used here only to size/label the output).
    (Lx, Ly, Lz) = (21, 21, 21)
    (dx, dy, dz) = (0.375, 0.375, 0.375)
    xgrid = Grid.Grid('CARTESIAN_3D')
    xgrid.initArray('x', -Lx, Lx, dx); xgrid.initArray('y', -Ly, Ly, dy); xgrid.initArray('z', -Lz, Lz, dz)
    NGridPoints_cart = (1 + 2 * Lx / dx) * (1 + 2 * Ly / dy) * (1 + 2 * Lz / dz)
    NGridPoints_desired = (1 + 2 * Lx / dx) * (1 + 2 * Lz / dz)
    # Spherical momentum-space grid: Ntheta angular x Nk radial points.
    Ntheta = 50
    # NOTE(review): np.ceil returns a float; recent numpy requires an integer
    # `num` argument for np.linspace -- confirm the numpy version in use.
    Nk = np.ceil(NGridPoints_desired / Ntheta)
    theta_max = np.pi
    thetaArray, dtheta = np.linspace(0, theta_max, Ntheta, retstep=True)
    # k_max = np.sqrt((np.pi / dx)**2 + (np.pi / dy)**2 + (np.pi / dz)**2)
    # UV cutoff: radius of a sphere whose volume matches the cartesian cell's
    # momentum-space volume.
    k_max = ((2 * np.pi / dx)**3 / (4 * np.pi / 3))**(1 / 3)
    k_min = 1e-5
    kArray, dk = np.linspace(k_min, k_max, Nk, retstep=True)
    if dk < k_min:
        print('k ARRAY GENERATION ERROR')
    kgrid = Grid.Grid("SPHERICAL_2D")
    kgrid.initArray_premade('k', kArray)
    kgrid.initArray_premade('th', thetaArray)
    # for imdyn evolution
    tMax = 1e5
    # tMax = 6e4
    CoarseGrainRate = 100
    dt = 10
    tgrid = np.arange(0, tMax + dt, dt)
    gParams = [xgrid, kgrid, tgrid]
    NGridPoints = kgrid.size()
    print('Total time steps: {0}'.format(tgrid.size))
    print('UV cutoff: {0}'.format(k_max))
    print('dk: {0}'.format(dk))
    print('dtheta: {0}'.format(dtheta))
    print('NGridPoints: {0}'.format(NGridPoints))
    # Basic parameters
    # Impurity mass mI, boson mass mB, density n0, boson-boson coupling gBB.
    mI = 1
    # mI = 10
    mB = 1
    n0 = 1
    gBB = (4 * np.pi / mB) * 0.05
    sParams = [mI, mB, n0, gBB]
    # Toggle parameters
    toggleDict = {'Location': 'cluster', 'Dynamics': 'imaginary', 'Coupling': 'twophonon', 'Grid': 'spherical', 'Longtime': 'false', 'CoarseGrainRate': CoarseGrainRate}
    # ---- SET OUTPUT DATA FOLDER ----
    # Base data path depends on the machine; the inner path encodes the toggles.
    if toggleDict['Location'] == 'home':
        datapath = '/home/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}'.format(NGridPoints_cart, mI / mB)
    elif toggleDict['Location'] == 'work':
        datapath = '/media/kis/Storage/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}'.format(NGridPoints_cart, mI / mB)
    elif toggleDict['Location'] == 'cluster':
        datapath = '/n/scratchlfs02/demler_lab/kis/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}'.format(NGridPoints_cart, mI / mB)
    if toggleDict['Dynamics'] == 'real':
        innerdatapath = datapath + '/redyn'
    elif toggleDict['Dynamics'] == 'imaginary':
        innerdatapath = datapath + '/imdyn'
    if toggleDict['Grid'] == 'cartesian':
        innerdatapath = innerdatapath + '_cart'
    elif toggleDict['Grid'] == 'spherical':
        innerdatapath = innerdatapath + '_spherical'
    if toggleDict['Coupling'] == 'frohlich':
        innerdatapath = innerdatapath + '_froh'
    elif toggleDict['Coupling'] == 'twophonon':
        innerdatapath = innerdatapath
    if toggleDict['Longtime'] == 'true':
        innerdatapath = innerdatapath + '_longtime'
    elif toggleDict['Longtime'] == 'false':
        innerdatapath = innerdatapath
    # if os.path.isdir(datapath[0:-14]) is False:
    #     os.mkdir(datapath[0:-14])
    # if os.path.isdir(datapath) is False:
    #     os.mkdir(datapath)
    # if os.path.isdir(innerdatapath) is False:
    #     os.mkdir(innerdatapath)
# # # ---- SINGLE FUNCTION RUN ----
# runstart = timer()
# P = 1.4
# aIBi = -0.1
# print(innerdatapath)
# # aSi = aSi_grid(kgrid, 0, mI, mB, n0, gBB); aIBi = aIBi - aSi
# # print(aIBi)
# cParams = [P, aIBi]
# dynsph_ds = pf_dynamic_sph.quenchDynamics_DataGeneration(cParams, gParams, sParams, toggleDict)
# dynsph_ds.to_netcdf(innerdatapath + '/P_{:.3f}_aIBi_{:.2f}.nc'.format(P, aIBi))
# end = timer()
# print('Time: {:.2f}'.format(end - runstart))
# # # ---- SINGLE FUNCTION RUN GAUSSIAN COMPARISON ----
# runstart = timer()
# thetaArray = np.linspace(0, np.pi, 1e3)
# kArray = np.arange(0.1, 5.1, 0.1)
# kgrid = Grid.Grid("SPHERICAL_2D")
# kgrid.initArray_premade('k', kArray)
# kgrid.initArray_premade('th', thetaArray)
# # print('{:.2E}'.format(kgrid.size()))
# tMax = 20
# dt = 0.1
# tgrid = np.arange(0, tMax + dt, dt)
# gParams = [xgrid, kgrid, tgrid]
# mI = 1e9
# mB = 1
# n0 = 1
# gBB = (4 * np.pi / mB) * 0.065
# sParams = [mI, mB, n0, gBB]
# P = 0.05
# aIBi = -1.2
# cParams = [P, aIBi]
# datapath = datapath[0:-22] + '{:.2E}/massRatio=inf'.format(kgrid.size())
# if toggleDict['Dynamics'] == 'real':
# innerdatapath = datapath + '/redyn_spherical'
# filepath = innerdatapath + '/cs_mfrt_aIBi_{:.2f}.npy'.format(aIBi)
# elif toggleDict['Dynamics'] == 'imaginary':
# innerdatapath = datapath + '/imdyn_spherical'
# filepath = innerdatapath + '/cs_mfit_aIBi_{:.2f}.npy'.format(aIBi)
# if os.path.isdir(datapath) is False:
# os.mkdir(datapath)
# if os.path.isdir(innerdatapath) is False:
# os.mkdir(innerdatapath)
# dynsph_ds = pf_dynamic_sph.quenchDynamics_DataGeneration(cParams, gParams, sParams, toggleDict)
# # dynsph_ds.to_netcdf(innerdatapath + '/P_{:.3f}_aIBi_{:.2f}.nc'.format(P, aIBi))
# energy_vec = np.zeros(tgrid.size)
# CSAmp_ds = dynsph_ds['Real_CSAmp'] + 1j * dynsph_ds['Imag_CSAmp']
# for ind, t in enumerate(tgrid):
# CSAmp = CSAmp_ds.sel(t=t).values
# energy_vec[ind] = pf_dynamic_sph.Energy(CSAmp, kgrid, P, aIBi, mI, mB, n0, gBB)
# NB_Vec = dynsph_ds['Nph'].values
# Zfactor_Vec = np.abs((dynsph_ds['Real_DynOv'] + 1j * dynsph_ds['Imag_DynOv']).values)
# tVec = tgrid
# Params = [aIBi, mB, n0, gBB]
# data = [Params, tVec, NB_Vec, Zfactor_Vec, energy_vec]
# np.save(filepath, data)
# end = timer()
# print('Time: {:.2f}'.format(end - runstart))
    # ---- SET CPARAMS (RANGE OVER MULTIPLE aIBi, P VALUES) ----
    # Build the full (P, aIBi) parameter scan; one SLURM array task per pair.
    cParams_List = []
    aIBi_Vals = np.array([-15.0, -12.5, -10.0, -9.0, -8.0, -7.0, -5.0, -3.5, -2.0, -1.0, -0.75, -0.5, -0.1])  # used by many plots (spherical)
    P_Vals = np.concatenate((np.linspace(0.1, 0.8, 10, endpoint=False), np.linspace(0.8, 4.0, 40, endpoint=False), np.linspace(4.0, 5.0, 2)))
    # P_Vals = np.concatenate((np.array([0.1, 0.4, 0.6]), np.linspace(0.8, 2.8, 20), np.linspace(3.0, 5.0, 3)))
    # P_Vals = np.concatenate((np.linspace(0.1, 7.0, 16, endpoint=False), np.linspace(7.0, 10.0, 15), np.linspace(11.0, 15.0, 3)))
    for ind, aIBi in enumerate(aIBi_Vals):
        for P in P_Vals:
            cParams_List.append([P, aIBi])
    print(len(cParams_List))
# CANCELLED cParams_List[63-127]
# missedVals = [6, 7, 13, 14, 19, 24, 25, 26, 27, 30, 31, 32, 33, 34, 44, 45, 46, 47, 56, 57, 58, 59, 65, 66, 67, 68, 70, 71, 74, 75, 76]
# cParams_List = [cParams_List[i] for i in missedVals]
# print(len(cParams_List))
# print(P_Vals)
# # ---- COMPUTE DATA ON COMPUTER ----
# print(innerdatapath)
# runstart = timer()
# for ind, cParams in enumerate(cParams_List):
# loopstart = timer()
# [P, aIBi] = cParams
# dynsph_ds = pf_dynamic_sph.quenchDynamics_DataGeneration(cParams, gParams, sParams, toggleDict)
# # dynsph_ds.to_netcdf(innerdatapath + '/P_{:.3f}_aIBi_{:.2f}.nc'.format(P, aIBi))
# loopend = timer()
# print('Index: {:d}, P: {:.2f}, aIBi: {:.2f} Time: {:.2f}'.format(ind, P, aIBi, loopend - loopstart))
# end = timer()
# print('Total Time: {:.2f}'.format(end - runstart))
    # ---- COMPUTE DATA ON CLUSTER ----
    runstart = timer()
    # One SLURM array task computes exactly one (P, aIBi) pair.
    # NOTE(review): os.getenv returns None if the variable is unset, so
    # int(...) raises TypeError when run outside a SLURM array job --
    # confirm the script is only ever launched via sbatch --array.
    taskCount = int(os.getenv('SLURM_ARRAY_TASK_COUNT'))
    taskID = int(os.getenv('SLURM_ARRAY_TASK_ID'))
    if(taskCount != len(cParams_List)):
        print('ERROR: TASK COUNT MISMATCH')
        print(taskCount, len(cParams_List))
        P = float('nan')
        aIBi = float('nan')
        sys.exit()
    else:
        cParams = cParams_List[taskID]
        [P, aIBi] = cParams
    # Run the (imaginary-time) quench dynamics and save the dataset as netCDF.
    dynsph_ds = pf_dynamic_sph.quenchDynamics_DataGeneration(cParams, gParams, sParams, toggleDict)
    dynsph_ds.to_netcdf(innerdatapath + '/P_{:.3f}_aIBi_{:.2f}.nc'.format(P, aIBi))
    end = timer()
    print('Task ID: {:d}, P: {:.2f}, aIBi: {:.2f} Time: {:.2f}'.format(taskID, P, aIBi, end - runstart))
|
kseetharam/genPolaron
|
datagen_qdynamics_imdyn_sph.py
|
Python
|
mit
| 8,468
|
[
"Gaussian"
] |
8a466b74517e26672fe087de4a2c33e535d269a5f2665bc3f7f28fcbe4965803
|
# -*- coding: UTF-8 -*-
# Copyright 2017 Luc Saffre
#
# License: BSD (see file COPYING for details)
from lino.api import rt, _
from lino.utils.mldbc import babel_named as named
def objects():
    """Demo fixture: yield three named groups followed by three plain users."""
    Group = rt.models.groups.Group
    User = rt.models.users.User
    UserTypes = rt.models.users.UserTypes
    group_titles = (
        _("Hitchhiker's Guide to the Galaxy"),
        _("Star Trek"),
        _("Harry Potter"),
    )
    for title in group_titles:
        yield named(Group, title)
    for username in ("andy", "bert", "chloe"):
        yield User(username=username, user_type=UserTypes.user)
|
khchine5/xl
|
lino_xl/lib/groups/fixtures/demo.py
|
Python
|
bsd-2-clause
| 625
|
[
"Galaxy"
] |
2b879edf5eb7da37a9fc53079377332d55767d639ae7e688798bec45afe0cffb
|
import numpy as np
from layers import *
from connections import *
from neurons import *
from util import *
from train import *
from cost import *
class NetworkConstructor(object):
    """Base class with helpers for assembling layered networks.

    Maintains parallel lists of neuron layers, connection layers and the
    trainable Weight objects collected from them.  NOTE(review): assumes
    the subclass initializes ``NeuronLayers``, ``ConnectionLayers`` and
    ``Weights`` before these helpers are used.
    """
    def set_input_layer(self, NeuronLayer):
        # Designate this layer as the network input.
        self.InputLayer = NeuronLayer
    def set_output_layer(self, NeuronLayer):
        # Designate this layer as the network output.
        self.OutputLayer = NeuronLayer
    def add_neuron_layer(self, NeuronLayer):
        """Append a neuron layer and register its trainable bias weights."""
        self.NeuronLayers.append(NeuronLayer)
        # Flatten the (possibly multi-dimensional) neuron array and collect
        # each neuron's Bias if it is marked trainable.
        for neuron in NeuronLayer.neurons.reshape(NeuronLayer.size):
            if neuron.Bias.trainable:
                self.Weights.append(neuron.Bias)
    def add_connection_layer(self, ConnectionLayer):
        """Append a connection layer and register its trainable weights.

        A Weight shared by several connections (weight sharing) is added
        only once.  NOTE(review): the parameter shadows the module-level
        ``ConnectionLayer`` class used elsewhere in this class.
        """
        self.ConnectionLayers.append(ConnectionLayer)
        for connection in ConnectionLayer.connections:
            if connection.Weight not in self.Weights:
                if connection.Weight.trainable:
                    self.Weights.append(connection.Weight)
    def auto_add_layer_fullyconnected(self, NeuronLayer):
        """Add a layer and fully connect it to the previous layer (if any)."""
        self.add_neuron_layer(NeuronLayer)
        if len(self.NeuronLayers)==1:
            # First layer added becomes the input layer.
            self.set_input_layer(NeuronLayer)
        else:
            self.add_connection_layer(ConnectionLayer(self.NeuronLayers[-2], \
                self.NeuronLayers[-1], connection_type='full'))
        # Newest layer is always treated as the current output layer.
        self.set_output_layer(NeuronLayer)
    def auto_add_layer_convolution(self, NeuronLayer):
        """Add a layer connected to the previous one by convolution."""
        self.add_neuron_layer(NeuronLayer)
        if len(self.NeuronLayers)==1:
            self.set_input_layer(NeuronLayer)
        else:
            self.add_connection_layer(ConvolutionConnectionLayer(self.NeuronLayers[-2], \
                self.NeuronLayers[-1]))
        self.set_output_layer(NeuronLayer)
    def auto_add_layer_maxpooling(self, NeuronLayer):
        """Add a layer connected to the previous one by max pooling."""
        self.add_neuron_layer(NeuronLayer)
        if len(self.NeuronLayers)==1:
            self.set_input_layer(NeuronLayer)
        else:
            self.add_connection_layer(MaxPoolingConnectionLayer(self.NeuronLayers[-2], \
                self.NeuronLayers[-1]))
        self.set_output_layer(NeuronLayer)
class FeedForwardNetwork(NetworkConstructor):
    """Feed-forward network with a single input layer.

    Inherits layer-assembly helpers from NetworkConstructor and adds
    forward inference, backpropagation and weight updates.
    """
    def __init__(self):
        # Bookkeeping lists used by the NetworkConstructor helpers.
        self.NeuronLayers = []
        self.ConnectionLayers = []
        self.Weights = []
        self.InputLayer = None
        self.OutputLayer = None
        self.trained = False
        self.Cost = Cost()
    def infer(self, input_array):
        """Run a forward pass and return the output-layer activations."""
        self.InputLayer.activate(input_array)
        # Connection layers are assumed stored in input-to-output order.
        for connection_layer in self.ConnectionLayers:
            connection_layer.forward_pass()
        output_array = self.OutputLayer.get_activations()
        return(output_array)
    def backpropagate_errors(self, true_output_array):
        """Propagate cost gradients from the output layer backwards."""
        cost_wd = self.Cost.cost_derivative(true_output_array, self.OutputLayer.get_activations())
        self.OutputLayer.set_output_errors(cost_wd)
        self.OutputLayer.backward_pass()
        # Walk the connection layers in reverse (output-to-input) order.
        for connection_layer in list(reversed(self.ConnectionLayers)):
            connection_layer.backward_pass()
    def update(self, lr):
        """Apply accumulated gradients to every trainable weight."""
        for weight in self.Weights:
            weight.update(lr)
    def study(self, input_array, true_output_array):
        """Forward + backward pass without updating weights."""
        self.infer(input_array)
        self.backpropagate_errors(true_output_array)
    def learn(self, input_array, true_output_array, lr=1.):
        """One training step: forward, backward, then weight update."""
        self.study(input_array, true_output_array)
        self.update(lr)
class DoubleFeedForwardNetwork(NetworkConstructor):
    """Feed-forward network with two separate input layers.

    Same training machinery as FeedForwardNetwork, but infer() takes a
    pair of input arrays, one per input layer.

    NOTE(review): NetworkConstructor's helpers set ``self.InputLayer``,
    which this class never initializes; ``InputLayer1``/``InputLayer2``
    appear to require manual assignment -- confirm the intended wiring.
    """
    def __init__(self):
        self.NeuronLayers = []
        self.ConnectionLayers = []
        self.Weights = []
        self.InputLayer1 = None
        self.InputLayer2 = None
        self.OutputLayer = None
        self.trained = False
        self.Cost = Cost()
    def infer(self, input_array):
        """Forward pass; ``input_array`` is a pair (array1, array2)."""
        input_array1 = input_array[0]
        input_array2 = input_array[1]
        self.InputLayer1.activate(input_array1)
        self.InputLayer2.activate(input_array2)
        for connection_layer in self.ConnectionLayers:
            connection_layer.forward_pass()
        output_array = self.OutputLayer.get_activations()
        return(output_array)
    def backpropagate_errors(self, true_output_array):
        """Propagate cost gradients from the output layer backwards."""
        cost_wd = self.Cost.cost_derivative(true_output_array, self.OutputLayer.get_activations())
        self.OutputLayer.set_output_errors(cost_wd)
        self.OutputLayer.backward_pass()
        for connection_layer in list(reversed(self.ConnectionLayers)):
            connection_layer.backward_pass()
    def update(self, lr):
        """Apply accumulated gradients to every trainable weight."""
        for weight in self.Weights:
            weight.update(lr)
    def study(self, input_array, true_output_array):
        """Forward + backward pass without updating weights."""
        self.infer(input_array)
        self.backpropagate_errors(true_output_array)
    def learn(self, input_array, true_output_array, lr=1.):
        """One training step: forward, backward, then weight update."""
        self.study(input_array, true_output_array)
        self.update(lr)
|
yohanyee/simple-neural-net
|
classes/construct.py
|
Python
|
mit
| 4,902
|
[
"NEURON"
] |
55a0e8534dbf851c4634f8afaf336ba205313ca922fba2112cc203616d15ec10
|
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy.special import gamma
from scipy.stats import norm
from sklearn.neighbors import BallTree
from astroML.density_estimation import GaussianMixture1D
from astroML.plotting import plot_mcmc,hist
# hack to fix an import issue in older versions of pymc
# NOTE(review): scipy.misc.derivative was removed in recent SciPy releases;
# this shim (and pymc 2.x itself) only works with old scipy/pymc versions.
import scipy
scipy.derivative = scipy.misc.derivative
import pymc
def get_logp(S, model):
    """Evaluate log(p) at every MCMC sample of a pyMC model.

    S is a sampled pymc.MCMC object; model is the dict of model variables.
    Returns a 1-d array with one log-probability per retained sample.
    """
    map_estimate = pymc.MAP(model)
    # stack traces into a (n_stochastics, n_samples) array
    samples = np.array([S.trace(name)[:] for name in S.stochastics])
    n_samples = samples.shape[1]
    # MAP.func returns -log(p); negate to recover log(p)
    return np.array([-map_estimate.func(samples[:, idx])
                     for idx in range(n_samples)])
def estimate_bayes_factor(traces, logp, r=0.05, return_list=False):
    """Estimate the Bayes factor from the local density of MCMC samples.

    traces is a (D, N) array of samples, logp the matching log(p) values.
    Returns the per-sample estimates when return_list is True, otherwise
    the median and a robust (IQR-based) spread.
    """
    D, N = traces.shape
    # volume of a D-dimensional ball of radius r
    ball_volume = np.pi ** (0.5 * D) / gamma(0.5 * D + 1) * (r ** D)
    # local density estimate: count neighbors within r of each sample
    tree = BallTree(traces.T)
    neighbor_counts = tree.query_radius(traces.T, r=r, count_only=True)
    BF = logp + np.log(N) + np.log(ball_volume) - np.log(neighbor_counts)
    if return_list:
        return BF
    p25, p50, p75 = np.percentile(BF, [25, 50, 75])
    # 0.7413 * IQR approximates one standard deviation for a gaussian
    return p50, 0.7413 * (p75 - p25)
#------------------------------------------------------------
# Generate the data
# Draw N samples from a known two-component gaussian mixture; these serve
# as the "observed" data for both model fits below.
mu1_in = 0
sigma1_in = 0.3
mu2_in = 1
sigma2_in = 0.3#1
ratio_in = 1.5
N = 200
np.random.seed(10)
gm = GaussianMixture1D([mu1_in, mu2_in],
                       [sigma1_in, sigma2_in],
                       [ratio_in, 1])
x_sample = gm.sample(N)
#------------------------------------------------------------
# Set up pyMC model: single gaussian
# 2 parameters: (mu, sigma)
M1_mu = pymc.Uniform('M1_mu', -5, 5, value=0)
# sigma is sampled in log-space to keep it positive
M1_log_sigma = pymc.Uniform('M1_log_sigma', -10, 10, value=0)
@pymc.deterministic
def M1_sigma(M1_log_sigma=M1_log_sigma):
    return np.exp(M1_log_sigma)
@pymc.deterministic
def M1_tau(M1_sigma=M1_sigma):
    # pymc parameterizes Normal by precision tau = 1/sigma^2
    return 1. / M1_sigma ** 2
M1 = pymc.Normal('M1', M1_mu, M1_tau, observed=True, value=x_sample)
model1 = dict(M1_mu=M1_mu, M1_log_sigma=M1_log_sigma,
              M1_sigma=M1_sigma,
              M1_tau=M1_tau, M1=M1)
#------------------------------------------------------------
# Set up pyMC model: double gaussian
# 5 parameters: (mu1, mu2, sigma1, sigma2, ratio)
def doublegauss_like(x, mu1, mu2, sigma1, sigma2, ratio):
    """Log-likelihood of x under a two-component gaussian mixture.

    ratio is the weight of component 1 relative to component 2; the mixing
    fraction of component 1 is ratio / (1 + ratio).
    """
    weight1 = ratio / (1. + ratio)
    weight2 = 1 - weight1
    density = (weight1 * norm(mu1, sigma1).pdf(x)
               + weight2 * norm(mu2, sigma2).pdf(x))
    # guard against log(0): clip exact zeros to a tiny positive value
    density[density == 0] = 1E-16
    total = np.log(density).sum()
    if np.isinf(total):
        # pymc convention for "probability zero" states
        raise pymc.ZeroProbability
    return total
def rdoublegauss(mu1, mu2, sigma1, sigma2, ratio, size=None):
    """Draw random variates from a two-component (double) gaussian.

    Parameters
    ----------
    mu1, mu2 : float
        Means of the two components.
    sigma1, sigma2 : float
        Standard deviations of the two components.
    ratio : float
        Weight of component 1 relative to component 2; component 1 is
        chosen with probability ratio / (1 + ratio).
    size : int or tuple of int, optional
        Output shape; None gives a single (0-d) draw.

    Returns
    -------
    R : ndarray
        Samples with shape ``size`` (0-d array when size is None).
    """
    r1 = ratio / (1. + ratio)
    # uniform draws decide which component each sample comes from
    R = np.asarray(np.random.random(size))
    Rshape = R.shape
    # BUGFIX: was np.atleast1d, which does not exist (AttributeError);
    # np.atleast_1d is the correct name.  The 1-d view lets boolean-mask
    # assignment work even for a scalar (0-d) draw.
    R = np.atleast_1d(R)
    mask1 = (R < r1)
    mask2 = ~mask1
    N1 = mask1.sum()
    N2 = R.size - N1
    R[mask1] = norm(mu1, sigma1).rvs(N1)
    R[mask2] = norm(mu2, sigma2).rvs(N2)
    return R.reshape(Rshape)
# Wrap the mixture likelihood/sampler as a pymc Stochastic.
# NOTE(review): np.float was removed in NumPy 1.24; use float or np.float64
# with modern NumPy.
DoubleGauss = pymc.stochastic_from_dist('doublegauss',
                                        logp=doublegauss_like,
                                        random=rdoublegauss,
                                        dtype=np.float,
                                        mv=True)
# set up our Stochastic variables, mu1, mu2, sigma1, sigma2, ratio
M2_mu1 = pymc.Uniform('M2_mu1', -5, 5, value=0)
M2_mu2 = pymc.Uniform('M2_mu2', -5, 5, value=1)
# sigmas are sampled in log-space to keep them positive
M2_log_sigma1 = pymc.Uniform('M2_log_sigma1', -10, 10, value=0)
M2_log_sigma2 = pymc.Uniform('M2_log_sigma2', -10, 10, value=0)
@pymc.deterministic
def M2_sigma1(M2_log_sigma1=M2_log_sigma1):
    return np.exp(M2_log_sigma1)
@pymc.deterministic
def M2_sigma2(M2_log_sigma2=M2_log_sigma2):
    return np.exp(M2_log_sigma2)
M2_ratio = pymc.Uniform('M2_ratio', 1E-3, 1E3, value=1)
M2 = DoubleGauss('M2', M2_mu1, M2_mu2, M2_sigma1, M2_sigma2, M2_ratio,
                 observed=True, value=x_sample)
model2 = dict(M2_mu1=M2_mu1, M2_mu2=M2_mu2,
              M2_log_sigma1=M2_log_sigma1, M2_log_sigma2=M2_log_sigma2,
              M2_sigma1=M2_sigma1, M2_sigma2=M2_sigma2,
              M2_ratio=M2_ratio, M2=M2)
#------------------------------------------------------------
# Set up MCMC sampling
def compute_MCMC_models(Niter=10000, burn=1000, rseed=0):
    """Sample both models with pyMC.

    Parameters
    ----------
    Niter : int
        Total MCMC iterations per model.
    burn : int
        Initial samples discarded as burn-in.
    rseed : int
        Seed for pymc's numpy RNG (reproducibility).

    Returns
    -------
    trace1, logp1, trace2, logp2
        Stacked (n_params, n_samples) traces and per-sample log(p)
        values for the single- and double-gaussian models.
    """
    pymc.numpy.random.seed(rseed)
    S1 = pymc.MCMC(model1)
    S1.sample(iter=Niter, burn=burn)
    # stack the post-burn-in traces into a (n_params, n_samples) array
    trace1 = np.vstack([S1.trace('M1_mu')[:],
                        S1.trace('M1_sigma')[:]])
    logp1 = get_logp(S1, model1)
    S2 = pymc.MCMC(model2)
    S2.sample(iter=Niter, burn=burn)
    trace2 = np.vstack([S2.trace('M2_mu1')[:],
                        S2.trace('M2_mu2')[:],
                        S2.trace('M2_sigma1')[:],
                        S2.trace('M2_sigma2')[:],
                        S2.trace('M2_ratio')[:]])
    logp2 = get_logp(S2, model2)
    return trace1, logp1, trace2, logp2
trace1, logp1, trace2, logp2 = compute_MCMC_models()
#------------------------------------------------------------
# Compute Odds ratio with density estimation technique
BF1, dBF1 = estimate_bayes_factor(trace1, logp1, r=0.02)
BF1_list = estimate_bayes_factor(trace1,logp1,r=0.02,return_list = True)
BF2, dBF2 = estimate_bayes_factor(trace2, logp2, r=0.05)
BF2_list = estimate_bayes_factor(trace2,logp2,r=0.05,return_list = True)
# NOTE(review): Python 2 print statements below; this script does not run
# unmodified under Python 3.
print "Bayes Factor (Single Gaussian): Median = {0:.3f}, p75-p25 = {1:.3f}".format(BF1,dBF1)
print "Bayes Factor (Double Gaussian): Median = {0:.3f}, p75-p25 = {1:.3f}".format(BF2,dBF2)
print np.sum(BF1_list),np.sum(BF2_list)
# keep only BF samples within one robust spread of the median, for plotting
BF1_list_plot = BF1_list[(BF1_list >= BF1-1.*dBF1) & (BF1_list <= BF1+1.*dBF1)]
BF2_list_plot = BF2_list[(BF2_list >= BF2-1.*dBF2) & (BF2_list <= BF2+1.*dBF2)]
ax = plt.figure().add_subplot(111)
hist(BF1_list_plot,bins='knuth',ax=ax,normed=True,color='red',alpha=0.25)
# NOTE(review): the full BF2_list (not the clipped BF2_list_plot) is plotted
# here, leaving BF2_list_plot unused -- possibly unintended; confirm.
hist(BF2_list,bins='knuth',ax=ax,normed=True,color='green',alpha=0.25)
ax.figure.savefig('figure_5-24_BFhist.png',dpi=300)
|
AndrewRook/machine_learning
|
figure_5-24.py
|
Python
|
mit
| 6,544
|
[
"Gaussian"
] |
114c12d877635de7add2edc61816eb5baf922b1924e1171c83d3ad0892662a8a
|
#!/usr/bin/env python
"""
With this command you can log in to DIRAC.
There are two options:
- using a user certificate, creating a proxy.
- go through DIRAC Authorization Server by selecting your Identity Provider.
Example:
# Login with default group
$ dirac-login
# Choose another group
$ dirac-login dirac_user
# Return token
$ dirac-login dirac_user --token
"""
import os
import sys
import copy
import getpass
import DIRAC
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.Core.Security import Locations
from DIRAC.Core.Security.ProxyFile import writeToProxyFile
from DIRAC.Core.Security.ProxyInfo import getProxyInfo, formatProxyInfoAsString
from DIRAC.Core.Security.X509Chain import X509Chain # pylint: disable=import-error
from DIRAC.Core.Utilities.NTP import getClockDeviation
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
from DIRAC.Resources.IdProvider.IdProviderFactory import IdProviderFactory
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.FrameworkSystem.private.authorization.utils.Tokens import (
writeTokenDictToTokenFile,
readTokenFromFile,
getTokenFileLocation,
)
class Params:
"""This class describes the input parameters"""
def __init__(self):
"""C`r"""
self.group = None
self.scopes = []
self.outputFile = None
self.lifetime = None
self.issuer = None
self.certLoc = None
self.keyLoc = None
self.result = "proxy"
self.authWith = "certificate"
self.enableCS = True
def disableCS(self, _arg) -> dict:
"""Set issuer
:param arg: issuer
"""
self.enableCS = False
return S_OK()
def setIssuer(self, arg: str) -> dict:
"""Set issuer
:param arg: issuer
"""
self.useDIRACAS(None)
self.issuer = arg
return S_OK()
def useDIRACAS(self, _arg) -> dict:
"""Use DIRAC AS
:param _arg: unuse
"""
self.authWith = "diracas"
return S_OK()
def useCertificate(self, _arg) -> dict:
"""Use certificate
:param _arg: unuse
"""
os.environ["DIRAC_USE_ACCESS_TOKEN"] = "false"
self.authWith = "certificate"
self.result = "proxy"
return S_OK()
def setCertificate(self, arg: str) -> dict:
"""Set certificate file path
:param arg: path
"""
if not os.path.exists(arg):
DIRAC.gLogger.error(f"{arg} does not exist.")
DIRAC.exit(1)
self.useCertificate(None)
self.certLoc = arg
return S_OK()
def setPrivateKey(self, arg: str) -> dict:
"""Set private key file path
:param arg: path
"""
if not os.path.exists(arg):
DIRAC.gLogger.error(f"{arg} is not exist.")
DIRAC.exit(1)
self.useCertificate(None)
self.keyLoc = arg
return S_OK()
def setOutputFile(self, arg: str) -> dict:
"""Set output file location
:param arg: output file location
"""
self.outputFile = arg
return S_OK()
def setLifetime(self, arg: str) -> dict:
"""Set proxy lifetime
:param arg: lifetime
"""
self.lifetime = arg
return S_OK()
def setProxy(self, _arg) -> dict:
"""Return proxy
:param _arg: unuse
"""
os.environ["DIRAC_USE_ACCESS_TOKEN"] = "false"
self.result = "proxy"
return S_OK()
def setToken(self, _arg) -> dict:
"""Return tokens
:param _arg: unuse
"""
os.environ["DIRAC_USE_ACCESS_TOKEN"] = "true"
self.useDIRACAS(None)
self.result = "token"
return S_OK()
def authStatus(self, _arg) -> dict:
"""Get authorization status
:param _arg: unuse
"""
result = self.getAuthStatus()
if result["OK"]:
self.howToSwitch()
DIRAC.exit(0)
gLogger.fatal(result["Message"])
DIRAC.exit(1)
def registerCLISwitches(self):
"""Register CLI switches"""
Script.registerArgument(
"group: select a DIRAC group for authorization, can be determined later.", mandatory=False
)
Script.registerArgument(["scope: scope to add to authorization request."], mandatory=False)
Script.registerSwitch("T:", "lifetime=", "set access lifetime in hours", self.setLifetime)
Script.registerSwitch(
"O:",
"save-output=",
"where to save the authorization result(e.g: proxy or tokens). By default we will try to find a standard place.",
self.setOutputFile,
)
Script.registerSwitch("I:", "issuer=", "set issuer.", self.setIssuer)
Script.registerSwitch(
"",
"use-certificate",
"in case you want to generate a proxy using a certificate. By default.",
self.useCertificate,
)
Script.registerSwitch(
"", "use-diracas", "in case you want to authorize with DIRAC Authorization Server.", self.useDIRACAS
)
Script.registerSwitch("C:", "certificate=", "user certificate location", self.setCertificate)
Script.registerSwitch("K:", "key=", "user key location", self.setPrivateKey)
Script.registerSwitch("", "proxy", "return proxy in case of successful authorization", self.setProxy)
Script.registerSwitch("", "token", "return tokens in case of successful authorization", self.setToken)
Script.registerSwitch("", "status", "print user authorization status", self.authStatus)
Script.registerSwitch("", "nocs", "disable CS.", self.disableCS)
def doOAuthMagic(self):
"""Magic method with tokens
:return: S_OK()/S_ERROR()
"""
params = {}
if self.issuer:
params["issuer"] = self.issuer
result = IdProviderFactory().getIdProvider("DIRACCLI", **params)
if not result["OK"]:
return result
idpObj = result["Value"]
if self.group and self.group not in self.scopes:
self.scopes.append(f"g:{self.group}")
if self.result == "proxy" and self.result not in self.scopes:
self.scopes.append(self.result)
if self.lifetime:
self.scopes.append("lifetime:%s" % (int(self.lifetime or 12) * 3600))
idpObj.scope = "+".join(self.scopes) if self.scopes else ""
# Submit Device authorisation flow
result = idpObj.deviceAuthorization()
if not result["OK"]:
return result
if self.result == "proxy":
self.outputFile = self.outputFile or Locations.getDefaultProxyLocation()
# Save new proxy certificate
result = writeToProxyFile(idpObj.token["proxy"].encode("UTF-8"), self.outputFile)
if not result["OK"]:
return result
gLogger.notice(f"Proxy is saved to {self.outputFile}.")
else:
# Revoke old tokens from token file
self.outputFile = getTokenFileLocation(self.outputFile)
if os.path.isfile(self.outputFile):
result = readTokenFromFile(self.outputFile)
if not result["OK"]:
gLogger.error(result["Message"])
elif result["Value"]:
oldToken = result["Value"]
for tokenType in ["access_token", "refresh_token"]:
result = idpObj.revokeToken(oldToken[tokenType], tokenType)
if result["OK"]:
gLogger.notice(f"{tokenType} is revoked from", self.outputFile)
else:
gLogger.error(result["Message"])
# Save new tokens to token file
result = writeTokenDictToTokenFile(idpObj.token, self.outputFile)
if not result["OK"]:
return result
self.outputFile = result["Value"]
gLogger.notice(f"New token is saved to {self.outputFile}.")
if not DIRAC.gConfig.getValue("/DIRAC/Security/Authorization/issuer"):
gLogger.notice("To continue use token you need to add /DIRAC/Security/Authorization/issuer option.")
if not self.issuer:
DIRAC.exit(1)
DIRAC.gConfig.setOptionValue("/DIRAC/Security/Authorization/issuer", self.issuer)
# Try to get user authorization information from token
result = readTokenFromFile(self.outputFile)
if not result["OK"]:
return result
gLogger.notice(result["Value"].getInfoAsString())
return S_OK()
def loginWithCertificate(self):
"""Login with certificate"""
# Search certificate and key
if not self.certLoc or not self.keyLoc:
cakLoc = Locations.getCertificateAndKeyLocation()
if not cakLoc:
return S_ERROR("Can't find user certificate and key")
self.certLoc = self.certLoc or cakLoc[0]
self.keyLoc = self.keyLoc or cakLoc[1]
chain = X509Chain()
# Load user cert and key
result = chain.loadChainFromFile(self.certLoc)
if result["OK"]:
result = chain.loadKeyFromFile(self.keyLoc, password=getpass.getpass("Enter Certificate password:"))
if not result["OK"]:
return result
# Read user credentials
result = chain.getCredentials(withRegistryInfo=False)
if not result["OK"]:
return result
credentials = result["Value"]
# Remember a clean proxy to then upload it in step 2
proxy = copy.copy(chain)
# Create local proxy with group
self.outputFile = self.outputFile or Locations.getDefaultProxyLocation()
result = chain.generateProxyToFile(self.outputFile, int(self.lifetime or 12) * 3600, self.group)
if not result["OK"]:
return S_ERROR(f"Couldn't generate proxy: {result['Message']}")
if self.enableCS:
# After creating the proxy, we can try to connect to the server
result = Script.enableCS()
if not result["OK"]:
return S_ERROR(f"Cannot contact CS: {result['Message']}")
gConfig.forceRefresh()
# Step 2: Upload proxy to DIRAC server
result = gProxyManager.getUploadedProxyLifeTime(credentials["subject"])
if not result["OK"]:
return result
uploadedProxyLifetime = result["Value"]
# Upload proxy to the server if it longer that uploaded one
if credentials["secondsLeft"] > uploadedProxyLifetime:
gLogger.notice("Upload proxy to server.")
return gProxyManager.uploadProxy(proxy)
return S_OK()
def howToSwitch(self) -> bool:
"""Helper message, how to switch access type(proxy or access token)"""
if "DIRAC_USE_ACCESS_TOKEN" in os.environ:
src, useTokens = ("env", os.environ.get("DIRAC_USE_ACCESS_TOKEN", "false").lower() in ("y", "yes", "true"))
else:
src, useTokens = (
"conf",
gConfig.getValue("/DIRAC/Security/UseTokens", "false").lower() in ("y", "yes", "true"),
)
msg = f"\nYou are currently using {'access token' if useTokens else 'proxy'} to access new HTTP DIRAC services."
msg += f" To use a {'proxy' if useTokens else 'access token'} instead, do the following:\n"
if src == "conf":
msg += f" set /DIRAC/Security/UseTokens={not useTokens} in dirac.cfg\nor\n"
msg += f" export DIRAC_USE_ACCESS_TOKEN={not useTokens}\n"
gLogger.notice(msg)
return useTokens
def getAuthStatus(self):
    """Try to get user authorization status.

    :return: S_OK()/S_ERROR()
    """
    # Refresh the configuration from the CS before inspecting the credential,
    # so registry information is up to date.
    result = Script.enableCS()
    if not result["OK"]:
        return S_ERROR("Cannot contact CS.")
    gConfig.forceRefresh()

    # NOTE(review): self.result appears to record which login flow ran
    # ("proxy" or token) — confirm where it is assigned.
    if self.result == "proxy":
        result = getProxyInfo(self.outputFile)
        if result["OK"]:
            gLogger.notice(formatProxyInfoAsString(result["Value"]))
    else:
        # Token flow: read the token back from disk and pretty-print it.
        result = readTokenFromFile(self.outputFile)
        if result["OK"]:
            gLogger.notice(result["Value"].getInfoAsString())

    return result
@Script()
def main():
    """Entry point: parse switches, authenticate (certificate or OAuth) and report status."""
    p = Params()
    p.registerCLISwitches()

    # Check time: a badly skewed clock breaks certificate/token validity checks.
    deviation = getClockDeviation()
    if not deviation["OK"]:
        gLogger.warn(deviation["Message"])
    elif deviation["Value"] > 60:
        gLogger.fatal(f"Your host's clock seems to deviate by {(int(deviation['Value']) / 60):.0f} minutes!")
        sys.exit(1)

    # Parse command line without contacting the CS yet.
    Script.disableCS()
    Script.parseCommandLine(ignoreErrors=True)

    # It's server installation?
    if gConfig.useServerCertificate():
        # In this case you do not need to login.
        gLogger.notice(
            "You have run the command in a DIRAC server installation environment, which eliminates the need for login."
        )
        DIRAC.exit(1)

    p.group, p.scopes = Script.getPositionalArgs(group=True)

    # If you have chosen to use a certificate then a proxy will be generated locally using the specified certificate
    if p.authWith == "certificate":
        result = p.loginWithCertificate()
    # Otherwise, you must log in to the authorization server to gain access
    else:
        result = p.doOAuthMagic()

    # Print authorization status
    if result["OK"] and p.enableCS:
        result = p.getAuthStatus()

    if not result["OK"]:
        gLogger.fatal(result["Message"])
        sys.exit(1)

    p.howToSwitch()
    sys.exit(0)


if __name__ == "__main__":
    main()
|
ic-hep/DIRAC
|
src/DIRAC/FrameworkSystem/scripts/dirac_login.py
|
Python
|
gpl-3.0
| 13,913
|
[
"DIRAC"
] |
2a70fbaaa94a664dd42f7c7f0c344ff4e12756b9dd1003b3fa2e1ca135562527
|
"""Simple XML-RPC Server.
This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
class instance, or by extending the SimpleXMLRPCServer
class.
It can also be used to handle XML-RPC requests in a CGI
environment using CGIXMLRPCRequestHandler.
A list of possible usage patterns follows:
1. Install functions:
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
2. Install an instance:
class MyFuncs:
def __init__(self):
# make all of the string functions available through
# string.func_name
import string
self.string = string
def _listMethods(self):
# implement this method so that system.listMethods
# knows to advertise the strings methods
return list_public_methods(self) + \
['string.' + method for method in list_public_methods(self.string)]
def pow(self, x, y): return pow(x, y)
def add(self, x, y) : return x + y
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(MyFuncs())
server.serve_forever()
3. Install an instance with custom dispatch method:
class Math:
def _listMethods(self):
# this method must be present for system.listMethods
# to work
return ['add', 'pow']
def _methodHelp(self, method):
# this method must be present for system.methodHelp
# to work
if method == 'add':
return "add(2,3) => 5"
elif method == 'pow':
return "pow(x, y[, z]) => number"
else:
# By convention, return empty
# string if no help is available
return ""
def _dispatch(self, method, params):
if method == 'pow':
return pow(*params)
elif method == 'add':
return params[0] + params[1]
else:
raise 'bad method'
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(Math())
server.serve_forever()
4. Subclass SimpleXMLRPCServer:
class MathServer(SimpleXMLRPCServer):
def _dispatch(self, method, params):
try:
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC to prevent potential security
# problems
func = getattr(self, 'export_' + method)
except AttributeError:
raise Exception('method "%s" is not supported' % method)
else:
return func(*params)
def export_add(self, x, y):
return x + y
server = MathServer(("localhost", 8000))
server.serve_forever()
5. CGI script:
server = CGIXMLRPCRequestHandler()
server.register_function(pow)
server.handle_request()
"""
# Written by Brian Quinlan (brian@sweetapp.com).
# Based on code written by Fredrik Lundh.
import xmlrpclib
from xmlrpclib import Fault
import SocketServer
import BaseHTTPServer
import sys
import os
import traceback
import re
try:
import fcntl
except ImportError:
fcntl = None
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
    """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d

    Resolves a dotted attribute name to an object.  Raises
    an AttributeError if any attribute in the chain starts with a '_'.

    If the optional allow_dotted_names argument is false, dots are not
    supported and this function operates similar to getattr(obj, attr).
    """
    parts = attr.split('.') if allow_dotted_names else [attr]
    for name in parts:
        # Refuse to traverse private attributes for safety.
        if name.startswith('_'):
            raise AttributeError(
                'attempt to access private attribute "%s"' % name
                )
        obj = getattr(obj, name)
    return obj
def list_public_methods(obj):
    """Returns a list of attribute strings, found in the specified
    object, which represent callable attributes"""
    public = []
    for member in dir(obj):
        # Skip private names; keep only callable attributes.
        if member.startswith('_'):
            continue
        if hasattr(getattr(obj, member), '__call__'):
            public.append(member)
    return public
def remove_duplicates(lst):
    """remove_duplicates([2,2,2,1,3,3]) => [3,1,2]

    Returns a copy of a list without duplicates. Every list
    item must be hashable and the order of the items in the
    resulting list is not defined.
    """
    # Use the items as dict keys to deduplicate.  Materialize with list()
    # so callers always get a real list: on Python 3 dict.keys() is a view
    # object, not a list (on Python 2 this is a no-op copy).
    u = {}
    for x in lst:
        u[x] = 1
    return list(u.keys())
class SimpleXMLRPCDispatcher:
    """Mix-in class that dispatches XML-RPC requests.

    This class is used to register XML-RPC method handlers
    and then to dispatch them. This class doesn't need to be
    instanced directly when used by SimpleXMLRPCServer but it
    can be instanced when used by the MultiPathXMLRPCServer.
    """

    def __init__(self, allow_none=False, encoding=None):
        # Registry of method-name -> callable for explicitly registered functions.
        self.funcs = {}
        # Optional fallback instance whose methods serve XML-RPC calls.
        self.instance = None
        # Whether xmlrpclib may marshal None values in responses.
        self.allow_none = allow_none
        # Encoding for marshalled responses (None -> xmlrpclib default).
        self.encoding = encoding

    def register_instance(self, instance, allow_dotted_names=False):
        """Registers an instance to respond to XML-RPC requests.

        Only one instance can be installed at a time.

        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))

        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called. Methods beginning with an '_'
        are considered private and will not be called by
        SimpleXMLRPCServer.

        If a registered function matches a XML-RPC request, then it
        will be called instead of the registered instance.

        If the optional allow_dotted_names argument is true and the
        instance does not have a _dispatch method, method names
        containing dots are supported and resolved, as long as none of
        the name segments start with an '_'.

            *** SECURITY WARNING: ***

            Enabling the allow_dotted_names options allows intruders
            to access your module's global variables and may allow
            intruders to execute arbitrary code on your machine.  Only
            use this option on a secure, closed network.
        """
        self.instance = instance
        self.allow_dotted_names = allow_dotted_names

    def register_function(self, function, name=None):
        """Registers a function to respond to XML-RPC requests.

        The optional name argument can be used to set a Unicode name
        for the function.
        """
        if name is None:
            name = function.__name__
        self.funcs[name] = function

    def register_introspection_functions(self):
        """Registers the XML-RPC introspection methods in the system
        namespace.

        see http://xmlrpc.usefulinc.com/doc/reserved.html
        """
        self.funcs.update({'system.listMethods' : self.system_listMethods,
                           'system.methodSignature' : self.system_methodSignature,
                           'system.methodHelp' : self.system_methodHelp})

    def register_multicall_functions(self):
        """Registers the XML-RPC multicall method in the system
        namespace.

        see http://www.xmlrpc.com/discuss/msgReader$1208"""
        self.funcs.update({'system.multicall' : self.system_multicall})

    def _marshaled_dispatch(self, data, dispatch_method=None, path=None):
        """Dispatches an XML-RPC method from marshalled (XML) data.

        XML-RPC methods are dispatched from the marshalled (XML) data
        using the _dispatch method and the result is returned as
        marshalled data. For backwards compatibility, a dispatch
        function can be provided as an argument (see comment in
        SimpleXMLRPCRequestHandler.do_POST) but overriding the
        existing method through subclassing is the preferred means
        of changing method dispatch behavior.
        """
        try:
            params, method = xmlrpclib.loads(data)

            # generate response
            if dispatch_method is not None:
                response = dispatch_method(method, params)
            else:
                response = self._dispatch(method, params)
            # wrap response in a singleton tuple
            response = (response,)
            response = xmlrpclib.dumps(response, methodresponse=1,
                                       allow_none=self.allow_none, encoding=self.encoding)
        except Fault, fault:
            # Application-level fault: marshal it back verbatim.
            response = xmlrpclib.dumps(fault, allow_none=self.allow_none,
                                       encoding=self.encoding)
        except:
            # report exception back to server
            exc_type, exc_value, exc_tb = sys.exc_info()
            response = xmlrpclib.dumps(
                xmlrpclib.Fault(1, "%s:%s" % (exc_type, exc_value)),
                encoding=self.encoding, allow_none=self.allow_none,
                )

        return response

    def system_listMethods(self):
        """system.listMethods() => ['add', 'subtract', 'multiple']

        Returns a list of the methods supported by the server."""
        methods = self.funcs.keys()
        if self.instance is not None:
            # Instance can implement _listMethod to return a list of
            # methods
            if hasattr(self.instance, '_listMethods'):
                methods = remove_duplicates(
                        methods + self.instance._listMethods()
                    )
            # if the instance has a _dispatch method then we
            # don't have enough information to provide a list
            # of methods
            elif not hasattr(self.instance, '_dispatch'):
                methods = remove_duplicates(
                        methods + list_public_methods(self.instance)
                    )
        methods.sort()
        return methods

    def system_methodSignature(self, method_name):
        """system.methodSignature('add') => [double, int, int]

        Returns a list describing the signature of the method. In the
        above example, the add method takes two integers as arguments
        and returns a double result.

        This server does NOT support system.methodSignature."""
        # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html
        return 'signatures not supported'

    def system_methodHelp(self, method_name):
        """system.methodHelp('add') => "Adds two integers together"

        Returns a string containing documentation for the specified method."""
        method = None
        if method_name in self.funcs:
            method = self.funcs[method_name]
        elif self.instance is not None:
            # Instance can implement _methodHelp to return help for a method
            if hasattr(self.instance, '_methodHelp'):
                return self.instance._methodHelp(method_name)
            # if the instance has a _dispatch method then we
            # don't have enough information to provide help
            elif not hasattr(self.instance, '_dispatch'):
                try:
                    method = resolve_dotted_attribute(
                                self.instance,
                                method_name,
                                self.allow_dotted_names
                                )
                except AttributeError:
                    pass

        # Note that we aren't checking that the method actually
        # be a callable object of some kind
        if method is None:
            return ""
        else:
            import pydoc
            return pydoc.getdoc(method)

    def system_multicall(self, call_list):
        """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
[[4], ...]

        Allows the caller to package multiple XML-RPC calls into a single
        request.

        See http://www.xmlrpc.com/discuss/msgReader$1208
        """
        results = []
        for call in call_list:
            method_name = call['methodName']
            params = call['params']
            try:
                # XXX A marshalling error in any response will fail the entire
                # multicall. If someone cares they should fix this.
                results.append([self._dispatch(method_name, params)])
            except Fault, fault:
                results.append(
                    {'faultCode' : fault.faultCode,
                     'faultString' : fault.faultString}
                    )
            except:
                exc_type, exc_value, exc_tb = sys.exc_info()
                results.append(
                    {'faultCode' : 1,
                     'faultString' : "%s:%s" % (exc_type, exc_value)}
                    )
        return results

    def _dispatch(self, method, params):
        """Dispatches the XML-RPC method.

        XML-RPC calls are forwarded to a registered function that
        matches the called XML-RPC method name. If no such function
        exists then the call is forwarded to the registered instance,
        if available.

        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))

        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called.

        Methods beginning with an '_' are considered private and will
        not be called.
        """
        func = None
        try:
            # check to see if a matching function has been registered
            func = self.funcs[method]
        except KeyError:
            if self.instance is not None:
                # check for a _dispatch method
                if hasattr(self.instance, '_dispatch'):
                    return self.instance._dispatch(method, params)
                else:
                    # call instance method directly
                    try:
                        func = resolve_dotted_attribute(
                            self.instance,
                            method,
                            self.allow_dotted_names
                            )
                    except AttributeError:
                        pass

        if func is not None:
            return func(*params)
        else:
            raise Exception('method "%s" is not supported' % method)
class SimpleXMLRPCRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Simple XML-RPC request handler class.

    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.
    """

    # Class attribute listing the accessible path components;
    # paths not on this list will result in a 404 error.
    rpc_paths = ('/', '/RPC2')

    #if not None, encode responses larger than this, if possible
    encode_threshold = 1400 #a common MTU

    #Override form StreamRequestHandler: full buffering of output
    #and no Nagle.
    wbufsize = -1
    disable_nagle_algorithm = True

    # a re to match a gzip Accept-Encoding
    aepattern = re.compile(r"""
                            \s* ([^\s;]+) \s*            #content-coding
                            (;\s* q \s*=\s* ([0-9\.]+))? #q
                            """, re.VERBOSE | re.IGNORECASE)

    def accept_encodings(self):
        # Parse the Accept-Encoding header into a {coding: q-value} dict;
        # a coding with no explicit q defaults to 1.0.
        r = {}
        ae = self.headers.get("Accept-Encoding", "")
        for e in ae.split(","):
            match = self.aepattern.match(e)
            if match:
                v = match.group(3)
                v = float(v) if v else 1.0
                r[match.group(1)] = v
        return r

    def is_rpc_path_valid(self):
        if self.rpc_paths:
            return self.path in self.rpc_paths
        else:
            # If .rpc_paths is empty, just assume all paths are legal
            return True

    def do_POST(self):
        """Handles the HTTP POST request.

        Attempts to interpret all HTTP POST requests as XML-RPC calls,
        which are forwarded to the server's _dispatch method for handling.
        """

        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return

        try:
            # Get arguments by reading body of request.
            # We read this in chunks to avoid straining
            # socket.read(); around the 10 or 15Mb mark, some platforms
            # begin to have problems (bug #792570).
            max_chunk_size = 10*1024*1024
            size_remaining = int(self.headers["content-length"])
            L = []
            while size_remaining:
                chunk_size = min(size_remaining, max_chunk_size)
                L.append(self.rfile.read(chunk_size))
                size_remaining -= len(L[-1])
            data = ''.join(L)

            data = self.decode_request_content(data)
            if data is None:
                return #response has been sent

            # In previous versions of SimpleXMLRPCServer, _dispatch
            # could be overridden in this class, instead of in
            # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
            # check to see if a subclass implements _dispatch and dispatch
            # using that method if present.
            response = self.server._marshaled_dispatch(
                    data, getattr(self, '_dispatch', None), self.path
                )
        except Exception, e: # This should only happen if the module is buggy
            # internal error, report as HTTP server error
            self.send_response(500)

            # Send information about the exception if requested
            if hasattr(self.server, '_send_traceback_header') and \
                    self.server._send_traceback_header:
                self.send_header("X-exception", str(e))
                self.send_header("X-traceback", traceback.format_exc())

            self.send_header("Content-length", "0")
            self.end_headers()
        else:
            # got a valid XML RPC response
            self.send_response(200)
            self.send_header("Content-type", "text/xml")
            if self.encode_threshold is not None:
                if len(response) > self.encode_threshold:
                    # Only gzip-compress when the client advertised support.
                    q = self.accept_encodings().get("gzip", 0)
                    if q:
                        try:
                            response = xmlrpclib.gzip_encode(response)
                            self.send_header("Content-Encoding", "gzip")
                        except NotImplementedError:
                            pass
            self.send_header("Content-length", str(len(response)))
            self.end_headers()
            self.wfile.write(response)

    def decode_request_content(self, data):
        #support gzip encoding of request; returns None after having sent an
        #error response, otherwise the decoded request body.
        encoding = self.headers.get("content-encoding", "identity").lower()
        if encoding == "identity":
            return data
        if encoding == "gzip":
            try:
                return xmlrpclib.gzip_decode(data)
            except NotImplementedError:
                self.send_response(501, "encoding %r not supported" % encoding)
            except ValueError:
                self.send_response(400, "error decoding gzip content")
        else:
            self.send_response(501, "encoding %r not supported" % encoding)
        self.send_header("Content-length", "0")
        self.end_headers()

    def report_404 (self):
        # Report a 404 error
        self.send_response(404)
        response = 'No such page'
        self.send_header("Content-type", "text/plain")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)

    def log_request(self, code='-', size='-'):
        """Selectively log an accepted request."""

        if self.server.logRequests:
            BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)
class SimpleXMLRPCServer(SocketServer.TCPServer,
                         SimpleXMLRPCDispatcher):
    """Simple XML-RPC server.

    Simple XML-RPC server that allows functions and a single instance
    to be installed to handle requests. The default implementation
    attempts to dispatch XML-RPC calls to the functions or instance
    installed in the server. Override the _dispatch method inhereted
    from SimpleXMLRPCDispatcher to change this behavior.
    """

    allow_reuse_address = True

    # Warning: this is for debugging purposes only! Never set this to True in
    # production code, as will be sending out sensitive information (exception
    # and stack trace details) when exceptions are raised inside
    # SimpleXMLRPCRequestHandler.do_POST
    _send_traceback_header = False

    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None, bind_and_activate=True):
        self.logRequests = logRequests

        # Initialize the dispatcher first, then the TCP server (which may
        # bind/listen immediately when bind_and_activate is true).
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
        SocketServer.TCPServer.__init__(self, addr, requestHandler, bind_and_activate)

        # [Bug #1222790] If possible, set close-on-exec flag; if a
        # method spawns a subprocess, the subprocess shouldn't have
        # the listening socket open.
        if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
            flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
            flags |= fcntl.FD_CLOEXEC
            fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
class MultiPathXMLRPCServer(SimpleXMLRPCServer):
    """Multipath XML-RPC Server
    This specialization of SimpleXMLRPCServer allows the user to create
    multiple Dispatcher instances and assign them to different
    HTTP request paths.  This makes it possible to run two or more
    'virtual XML-RPC servers' at the same port.
    Make sure that the requestHandler accepts the paths in question.
    """
    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None, bind_and_activate=True):

        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, allow_none,
                                    encoding, bind_and_activate)
        # Map of HTTP path -> SimpleXMLRPCDispatcher instance.
        self.dispatchers = {}
        self.allow_none = allow_none
        self.encoding = encoding

    def add_dispatcher(self, path, dispatcher):
        # Register (or replace) the dispatcher serving the given path.
        self.dispatchers[path] = dispatcher
        return dispatcher

    def get_dispatcher(self, path):
        return self.dispatchers[path]

    def _marshaled_dispatch(self, data, dispatch_method=None, path=None):
        # Route the request to the dispatcher registered for its path.
        try:
            response = self.dispatchers[path]._marshaled_dispatch(
               data, dispatch_method, path)
        except:
            # report low level exception back to server
            # (each dispatcher should have handled their own
            # exceptions)
            exc_type, exc_value = sys.exc_info()[:2]
            response = xmlrpclib.dumps(
                xmlrpclib.Fault(1, "%s:%s" % (exc_type, exc_value)),
                encoding=self.encoding, allow_none=self.allow_none)
        return response
class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
    """Simple handler for XML-RPC data passed through CGI."""

    def __init__(self, allow_none=False, encoding=None):
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)

    def handle_xmlrpc(self, request_text):
        """Handle a single XML-RPC request"""

        response = self._marshaled_dispatch(request_text)

        # Emit CGI headers followed by the marshalled response body.
        print 'Content-Type: text/xml'
        print 'Content-Length: %d' % len(response)
        print
        sys.stdout.write(response)

    def handle_get(self):
        """Handle a single HTTP GET request.

        Default implementation indicates an error because
        XML-RPC uses the POST method.
        """

        code = 400
        message, explain = \
                 BaseHTTPServer.BaseHTTPRequestHandler.responses[code]

        response = BaseHTTPServer.DEFAULT_ERROR_MESSAGE % \
            {
             'code' : code,
             'message' : message,
             'explain' : explain
            }
        print 'Status: %d %s' % (code, message)
        print 'Content-Type: %s' % BaseHTTPServer.DEFAULT_ERROR_CONTENT_TYPE
        print 'Content-Length: %d' % len(response)
        print
        sys.stdout.write(response)

    def handle_request(self, request_text=None):
        """Handle a single XML-RPC request passed through a CGI post method.

        If no XML data is given then it is read from stdin. The resulting
        XML-RPC response is printed to stdout along with the correct HTTP
        headers.
        """

        if request_text is None and \
            os.environ.get('REQUEST_METHOD', None) == 'GET':
            self.handle_get()
        else:
            # POST data is normally available through stdin
            try:
                length = int(os.environ.get('CONTENT_LENGTH', None))
            except (TypeError, ValueError):
                length = -1
            if request_text is None:
                request_text = sys.stdin.read(length)

            self.handle_xmlrpc(request_text)
if __name__ == '__main__':
    # Demo: serve pow() and an anonymous add() on localhost:8000.
    print 'Running XML-RPC server on port 8000'
    server = SimpleXMLRPCServer(("localhost", 8000))
    server.register_function(pow)
    server.register_function(lambda x,y: x+y, 'add')
    server.serve_forever()
|
ktan2020/legacy-automation
|
win/Lib/SimpleXMLRPCServer.py
|
Python
|
mit
| 26,386
|
[
"Brian"
] |
430838afdf189553318cfeb3a48aa5765372845366cf66b15a12f2b7e96d6f38
|
# Copyright Tilde Materials Informatics
# Distributed under the MIT License
from __future__ import print_function
from setuptools import setup, find_packages
from codecs import open
import os
import sys
# Search for required system packages
missing_packages = []

# numpy must already be importable (it is also a runtime requirement).
try:
    import numpy
    from numpy import linalg
except ImportError:
    missing_packages.append('numpy')

# distutils.sysconfig.get_makefile_filename fails when the Python dev
# headers / toolchain are absent.
try:
    from distutils.sysconfig import get_makefile_filename
except ImportError:
    missing_packages.append('build-essential')
    missing_packages.append('python-dev')

# Probe for libffi via pkg-config; a non-zero exit means the dev package
# is missing.
import subprocess
child = subprocess.Popen(["pkg-config", "libffi"], stdout=subprocess.PIPE)
status = child.communicate()[0]
if child.returncode != 0:
    missing_packages.append('libffi-dev')

if missing_packages:
    print("Please install the following required packages (or equivalents) on your system:")
    print("".join([" * %s\n" % p for p in missing_packages]))
    print()
    print("Installation will now exit.")
    sys.exit(1)

# convert documentation to rst
try:
    import pypandoc
    long_description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError, RuntimeError) as e:
    # NOTE(review): this checks sys.argv[2]; upload commands are usually
    # detected via sys.argv[1] or sys.argv[-1] — confirm intended index.
    if len(sys.argv) > 2 and sys.argv[2] == "upload":
        raise e.__class__("PyPI servers need reStructuredText as README! Please install pypandoc to convert markdown "
                          "to rst")
    else:
        long_description = ''

packages = find_packages(exclude=["tests", "tests.*"])

install_requires = [
    'numpy >= 1.9',
    'ujson',
    'bcrypt',
    'importlib',
    'pg8000',
    'sqlalchemy >= 1.0.12',
    'argparse',
    'ase >= 3.11',
    'spglib >= 1.9.1',
    'tornado >= 4.3.0',
    'sockjs-tornado',
    'websocket-client',
    'futures',
    'httplib2']

setup(
    name='tilde',
    version='0.8.1',
    description='Materials informatics framework for ab initio data repositories',
    long_description=long_description,
    url='https://github.com/tilde-lab/tilde',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Chemistry',
        'Topic :: Scientific/Engineering :: Physics',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5'
    ],
    keywords='CRYSTAL Quantum-ESPRESSO VASP ab-initio materials informatics first-principles',
    packages=packages,
    include_package_data=True,
    install_requires=install_requires,
    tests_require=['nose',],
    test_suite='nose.collector',
    scripts=[
        "utils/tilde.sh",
        "utils/entry.py"
    ]
)
|
ansobolev/tilde
|
setup.py
|
Python
|
mit
| 3,058
|
[
"ASE",
"CRYSTAL",
"ESPResSo",
"VASP"
] |
f9c052dbeb8984985324776d53add883e84a48255e4202d22bf40517d91f064e
|
# -*- coding: utf-8 -*-
"""
The module contains the basic network architectures
+-------------------------+------------+---------+-----------------+----------+
| Network Type | Function | Count of|Support train fcn| Error fcn|
| | | layers | | |
+=========================+============+=========+=================+==========+
| Single-layer perceptron | newp | 1 | train_delta | SSE |
+-------------------------+------------+---------+-----------------+----------+
| Multi-layer perceptron | newff | >=1 | train_gd, | SSE |
| | | | train_gdm, | |
| | | | train_gda, | |
| | | | train_gdx, | |
| | | | train_rprop, | |
| | | | train_bfgs*, | |
| | | | train_cg | |
+-------------------------+------------+---------+-----------------+----------+
| Competitive layer | newc | 1 | train_wta, | SAE |
| | | | train_cwta* | |
+-------------------------+------------+---------+-----------------+----------+
| LVQ | newlvq | 2 | train_lvq | MSE |
+-------------------------+------------+---------+-----------------+----------+
| Elman | newelm | >=1 | train_gdx | MSE |
+-------------------------+------------+---------+-----------------+----------+
| Hopfield                | newhop     | 1       | None            | None     |
+-------------------------+------------+---------+-----------------+----------+
| Hemming | newhem | 2 | None | None |
+-------------------------+------------+---------+-----------------+----------+
.. note:: \* - default function
"""
from .core import Net
from . import trans
from . import layer
from . import train
from . import error
from . import init
import numpy as np
def newff(minmax, size, transf=None):
    """
    Create multilayer perceptron

    :Parameters:
        minmax: list of list, the outer list is the number of input neurons,
            inner lists must contain 2 elements: min and max
            Range of input value
        size: the length of list equal to the number of layers
            except input layer, the element of the list is the neuron number
            for corresponding layer
            Contains the number of neurons for each layer
        transf: list (default TanSig)
            List of activation function for each layer
    :Returns:
        net: Net
    :Example:
        >>> # create neural net with 2 inputs
        >>> # input range for each input is [-0.5, 0.5]
        >>> # 3 neurons for hidden layer, 1 neuron for output
        >>> # 2 layers including hidden layer and output layer
        >>> net = newff([[-0.5, 0.5], [-0.5, 0.5]], [3, 1])
        >>> net.ci
        2
        >>> net.co
        1
        >>> len(net.layers)
        2
    """
    net_ci = len(minmax)
    net_co = size[-1]

    if transf is None:
        transf = [trans.TanSig()] * len(size)
    assert len(transf) == len(size)

    layers = []
    for i, nn in enumerate(size):
        # Each layer's input count equals the previous layer's neuron count;
        # the first layer is fed directly by the network inputs.
        layer_ci = size[i - 1] if i > 0 else net_ci
        l = layer.Perceptron(layer_ci, nn, transf[i])
        l.initf = init.initnw
        layers.append(l)
    # Feed-forward chain: element k lists the source of layer k
    # (-1 denotes the network input); the final entry wires the output.
    connect = [[i - 1] for i in range(len(layers) + 1)]

    net = Net(minmax, net_co, layers, connect, train.train_bfgs, error.SSE())
    return net
def newp(minmax, cn, transf=trans.HardLim()):
    """
    Create one layer perceptron

    :Parameters:
        minmax: list of list, the outer list is the number of input neurons,
            inner lists must contain 2 elements: min and max
            Range of input value
        cn: int, number of output neurons
            Number of neurons
        transf: func (default HardLim)
            Activation function
    :Returns:
        net: Net
    :Example:
        >>> # create network with 2 inputs and 10 neurons
        >>> net = newp([[-1, 1], [-1, 1]], 10)
    """
    input_count = len(minmax)
    single_layer = layer.Perceptron(input_count, cn, transf)
    # One layer fed by the network input (-1); its output is the net output.
    connections = [[-1], [0]]
    return Net(minmax, cn, [single_layer], connections, train.train_delta, error.SSE())
def newc(minmax, cn):
    """
    Create competitive layer (Kohonen network)

    :Parameters:
        minmax: list of list, the outer list is the number of input neurons,
            inner lists must contain 2 elements: min and max
            Range of input value
        cn: int, number of output neurons
            Number of neurons
    :Returns:
        net: Net
    :Example:
        >>> # create network with 2 inputs and 10 neurons
        >>> net = newc([[-1, 1], [-1, 1]], 10)
    """
    input_count = len(minmax)
    competitive_layer = layer.Competitive(input_count, cn)
    # Single competitive layer fed by the network input (-1).
    connections = [[-1], [0]]
    return Net(minmax, cn, [competitive_layer], connections, train.train_cwta, error.SAE())
def newlvq(minmax, cn0, pc):
    """
    Create a learning vector quantization (LVQ) network

    :Parameters:
        minmax: list of list, the outer list is the number of input neurons,
            inner lists must contain 2 elements: min and max
            Range of input value
        cn0: int
            Number of neurons in input layer
        pc: list
            List of percent, sum(pc) == 1
    :Returns:
        net: Net
    :Example:
        >>> # create network with 2 inputs,
        >>> # 2 layers and 10 neurons in each layer
        >>> net = newlvq([[-1, 1], [-1, 1]], 10, [0.6, 0.4])
    """
    pc = np.asfarray(pc)
    assert sum(pc) == 1
    ci = len(minmax)
    cn1 = len(pc)
    assert cn0 > cn1

    layer_inp = layer.Competitive(ci, cn0)
    layer_out = layer.Perceptron(cn0, cn1, trans.PureLin())
    layer_out.initf = None
    layer_out.np['b'].fill(0.0)
    layer_out.np['w'].fill(0.0)
    # Assign each output class a contiguous share of the competitive
    # neurons, proportionally to pc (cumulative-sum slicing).
    inx = np.floor(cn0 * pc.cumsum())
    for n, i in enumerate(inx):
        st = 0 if n == 0 else inx[n - 1]
        layer_out.np['w'][n][st:i].fill(1.0)
    net = Net(minmax, cn1, [layer_inp, layer_out],
              [[-1], [0], [1]], train.train_lvq, error.MSE())

    return net
def newelm(minmax, size, transf=None):
    """
    Create a Elman recurrent network

    :Parameters:
        minmax: list of list, the outer list is the number of input neurons,
            inner lists must contain 2 elements: min and max
            Range of input value
        size: the length of list equal to the number of layers
            except input layer, the element of the list is the neuron number
            for corresponding layer
            Contains the number of neurons for each layer
    :Returns:
        net: Net
    :Example:
        >>> # 1 input, input range is [-1, 1], 1 output neuron,
        >>> # 1 layer including output layer
        >>> net = newelm([[-1, 1]], [1], [trans.PureLin()])
        >>> net.layers[0].np['w'][:] = 1 # set weight for input neurons to 1
        >>> net.layers[0].np['b'][:] = 0 # set bias for all input neurons to 0
        >>> net.sim([[1], [1] ,[1], [3]])
        array([[ 1.],
               [ 2.],
               [ 3.],
               [ 6.]])
    """
    net_ci = len(minmax)
    net_co = size[-1]

    if transf is None:
        transf = [trans.TanSig()] * len(size)
    assert len(transf) == len(size)

    layers = []
    for i, nn in enumerate(size):
        # The first layer also receives the fed-back output of itself
        # (context units), hence the extra size[0] inputs.
        layer_ci = size[i - 1] if i > 0 else net_ci + size[0]
        l = layer.Perceptron(layer_ci, nn, transf[i])
        #l.initf = init.InitRand([-0.1, 0.1], 'wb')
        layers.append(l)
    connect = [[i - 1] for i in range(len(layers) + 1)]
    # recurrent set: layer 0 reads both the net input (-1) and its own
    # previous output (0).
    connect[0] = [-1, 0]

    net = Net(minmax, net_co, layers, connect, train.train_gdx, error.MSE())
    return net
def newhop(target, transf=None, max_init=10, delta=0):
    """
    Create a Hopfield recurrent network

    :Parameters:
        target: array like (l x net.co)
            train target patterns
        transf: func (default HardLims)
            Activation function
        max_init: int (default 10)
            Maximum of recurrent iterations
        delta: float (default 0)
            Minimum difference between 2 outputs for stop recurrent cycle
    :Returns:
        net: Net
    :Example:
        >>> net = newhop([[-1, -1, -1], [1, -1, 1]])
        >>> output = net.sim([[-1, 1, -1], [1, -1, 1]])
    """

    target = np.asfarray(target)
    assert target.ndim == 2

    ci = len(target[0])
    if transf is None:
        transf = trans.HardLims()
    l = layer.Reccurent(ci, ci, transf, max_init, delta)
    w = l.np['w']
    b = l.np['b']

    # init weight: Hebbian outer-product rule with zeroed diagonal
    # (no neuron feeds back directly to itself).
    for i in range(ci):
        for j in range(ci):
            if i == j:
                w[i, j] = 0.0
            else:
                w[i, j] = np.sum(target[:, i] * target[:, j]) / ci
        b[i] = 0.0
    l.initf = None

    minmax = transf.out_minmax if hasattr(transf, 'out_minmax') else [-1, 1]

    net = Net([minmax] * ci, ci, [l], [[-1], [0]], None, None)
    return net
def newhem(target, transf=None, max_iter=10, delta=0):
    """
    Create a Hemming recurrent network with 2 layers

    :Parameters:
        target: array like (l x net.co)
            train target patterns
        transf: func (default SatLinPrm(0.1, 0, 10))
            Activation function of input layer
        max_iter: int (default 10)
            Maximum of recurrent iterations
        delta: float (default 0)
            Minimum difference between 2 outputs for stop recurrent cycle
    :Returns:
        net: Net
    :Example:
        >>> net = newhem([[-1, -1, -1], [1, -1, 1]])
        >>> output = net.sim([[-1, 1, -1], [1, -1, 1]])

    """
    target = np.asfarray(target)
    assert target.ndim == 2
    cn = target.shape[0]
    ci = target.shape[1]
    if transf is None:
        transf = trans.SatLinPrm(0.1, 0, 10)
    layer_inp = layer.Perceptron(ci, cn, transf)

    # init input layer: each neuron scores the similarity of the input
    # to one stored target pattern (weights = pattern / 2, bias = ci / 2)
    layer_inp.initf = None
    layer_inp.np['b'][:] = float(ci) / 2
    for i, tar in enumerate(target):
        layer_inp.np['w'][i][:] = tar / 2

    layer_out = layer.Reccurent(cn, cn, trans.SatLinPrm(1, 0, 1e6),
                                max_iter, delta)
    # init output (competition) layer: self weight 1, small negative
    # lateral weights -1/cn so the strongest neuron suppresses the others
    layer_out.initf = None
    layer_out.np['b'][:] = 0
    eps = - 1.0 / cn
    for i in range(cn):
        layer_out.np['w'][i][:] = [eps] * cn
        layer_out.np['w'][i][i] = 1

    # create network
    minmax = [[-1, 1]] * ci
    layers = [layer_inp, layer_out]
    connect = [[-1], [0], [1]]
    net = Net(minmax, cn, layers, connect, None, None)
    return net
|
blagasz/python-ann
|
neurolab/net.py
|
Python
|
gpl-2.0
| 10,929
|
[
"NEURON"
] |
bfb50d6a4a7f23c79357c208063aff98fef5199af349a6ff156d0d7793d235d3
|
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import config, defaults, pprint, time
from lib import *
import wato
# Python 2.3 does not have 'set' in normal namespace.
# But it can be imported from 'sets'
try:
    set()
except NameError:
    from sets import Set as set

# Guard for load_plugins(): remembers the UI language the dashboard
# plugins were last loaded for, so a language change triggers a reload.
loaded_with_language = False
# Registry that plugin files in web/plugins/dashboard add their
# dashboard declarations to.
builtin_dashboards = {}

# Declare constants to be used in the definitions of the dashboards
GROW = 0  # dashlet dimension grows as long as free raster cells are available
MAX = -1  # dashlet dimension consumes all remaining space in its direction

# These settings might go into the config module, sometime in future,
# in order to allow the user to customize this.
header_height = 60 # Distance from top of the screen to the lower border of the heading
screen_margin = 5 # Distance from the left border of the main-frame to the dashboard area
dashlet_padding = 21, 5, 5, 0 # Margin (N, E, S, W) between outer border of dashlet and its content
corner_overlap = 22  # presumably the overlap (px) of the shadow corner images - used on the JS side, confirm
title_height = 0 # Height of dashlet title-box
raster = 10, 10 # Raster the dashlet choords are measured in
# Load plugins in web/plugins/dashboard and declare permissions,
# note: these operations produce language-specific results and
# thus must be reinitialized everytime a language-change has
# been detected.
def load_plugins():
    """Load the dashboard plugins and declare one permission per dashboard.

    Re-runs whenever the UI language has changed, because plugin loading
    produces language-specific (localized) results.
    """
    global loaded_with_language, dashboards
    # Nothing to do if the plugins are already loaded for this language.
    if loaded_with_language == current_language:
        return

    # Plugins in web/plugins/dashboard may register custom dashboards by
    # adding entries to builtin_dashboards.
    load_web_plugins("dashboard", globals())

    # Remember the language only after loading, so a broken plugin keeps
    # raising on every call instead of only the first time.
    loaded_with_language = current_language

    # In future there will be user editable dashboards just like views;
    # currently only the builtin dashboards exist.
    dashboards = builtin_dashboards

    # Declare permissions for all dashboards
    config.declare_permission_section("dashboard", _("Dashboards"))
    for board_name, board in dashboards.items():
        config.declare_permission("dashboard.%s" % board_name,
                                  board["title"],
                                  board.get("description", ''),
                                  config.builtin_role_ids)
def permitted_dashboards():
    """Return the (name, dashboard) pairs the current user may access."""
    result = []
    for board_name, board in dashboards.items():
        if config.may("dashboard.%s" % board_name):
            result.append((board_name, board))
    return result
# HTML page handler for generating the (a) dashboard. The name
# of the dashboard to render is given in the HTML variable 'name'.
# This defaults to "main".
def page_dashboard():
    """HTML page handler: render the dashboard named by the HTML variable
    'name' (defaulting to "main"), after permission checks."""
    board_name = html.var("name", "main")
    if board_name not in dashboards:
        raise MKGeneralException("No such dashboard: '<b>%s</b>'" % board_name)
    if not config.may("dashboard.%s" % board_name):
        raise MKAuthException(_("You are not allowed to access this dashboard."))
    render_dashboard(board_name)
def add_wato_folder_to_url(url, wato_folder):
    """Append a wato_folder HTML variable to a Check_MK URL.

    URLs containing a slash are assumed to be non-Check_MK URLs and are
    returned unchanged, as is any URL when no folder is given."""
    if not wato_folder:
        return url
    if '/' in url:
        # do not append wato_folder to non-Check_MK-urls
        return url
    if '?' in url:
        separator = "&"
    else:
        separator = "?"
    return url + separator + "wato_folder=" + html.urlencode(wato_folder)
# Actual rendering function
def render_dashboard(name):
    """Render the dashboard `name`: HTML header, one <div> per dashlet and
    the Javascript bootstrap that sizes and auto-refreshes the dashlets.

    Assumes `name` has already been validated (see page_dashboard)."""
    board = dashboards[name]

    # The dashboard may be called with "wato_folder" set. In that case
    # the dashboard is assumed to restrict the shown data to a specific
    # WATO subfolder or file. This could be a configurable feature in
    # future, but currently we assume, that *all* dashboards are filename
    # sensitive.
    wato_folder = html.var("wato_folder")
    # When an empty wato_folder attribute is given a user really wants
    # to see only the hosts contained in the root folder. So don't ignore
    # the root folder anymore.
    #if not wato_folder: # ignore wato folder in case of root folder
    #    wato_folder = None

    # The title of the dashboard needs to be prefixed with the WATO path,
    # in order to make it clear to the user, that he is seeing only partial
    # data.
    title = board["title"]
    # header_height is a module-level layout constant; it is zeroed here
    # when the header line is suppressed, and read by the JS block below.
    global header_height
    if title is None:
        # If the title is none, hide the header line
        html.set_render_headfoot(False)
        header_height = 0
        title = ''
    elif wato_folder is not None:
        title = wato.api.get_folder_title(wato_folder) + " - " + title

    html.header(title, javascripts=["dashboard"], stylesheets=["pages", "dashboard", "status", "views"])

    html.write("<div id=dashboard class=\"dashboard_%s\">\n" % name) # Container of all dashlets

    refresh_dashlets = [] # Dashlets with automatic refresh, for Javascript
    for nr, dashlet in enumerate(board["dashlets"]):
        # dashlets using the 'urlfunc' method will dynamically compute
        # an url (using HTML context variables at their wish).
        if "urlfunc" in dashlet:
            dashlet["url"] = dashlet["urlfunc"]()

        # dashlets using the 'url' method will be refreshed by us. Those
        # dashlets using static content (such as an iframe) will not be
        # refreshed by us but need to do that themselves.
        if "url" in dashlet:
            refresh_dashlets.append([nr, dashlet.get("refresh", 0),
                str(add_wato_folder_to_url(dashlet["url"], wato_folder))])

        # Paint the dashlet's HTML code
        render_dashlet(nr, dashlet, wato_folder)

    html.write("</div>\n")

    # Put list of all autorefresh-dashlets into Javascript and also make sure,
    # that the dashbaord is painted initially. The resize handler will make sure
    # that every time the user resizes the browser window the layout will be re-computed
    # and all dashlets resized to their new positions and sizes.
    html.javascript("""
var header_height = %d;
var screen_margin = %d;
var title_height = %d;
var dashlet_padding = Array%s;
var corner_overlap = %d;
var refresh_dashlets = %r;
var dashboard_name = '%s';
set_dashboard_size();
window.onresize = function () { set_dashboard_size(); }
window.onload = function () { set_dashboard_size(); }
dashboard_scheduler(1);
""" % (header_height, screen_margin, title_height, dashlet_padding,
       corner_overlap, refresh_dashlets, name))

    html.body_end() # omit regular footer with status icons, etc.
# Create the HTML code for one dashlet. Each dashlet has an id "dashlet_%d",
# where %d is its index (in board["dashlets"]). Javascript uses that id
# for the resizing. Within that div there is an inner div containing the
# actual dashlet content. The margin between the inner and outer div is
# used for stylish layout stuff (shadows, etc.)
def render_dashlet(nr, dashlet, wato_folder):
    """Render one dashlet as <div id="dashlet_%d"> with an inner content
    div; the margin between the two is used for shadows and the title.

    `dashlet` is the declaration dict from board["dashlets"]; recognized
    keys visible here: shadow, title, title_url, background, iframefunc,
    view, iframe, reload_on_resize, content, refresh."""
    html.write('<div class=dashlet id="dashlet_%d">' % nr)
    # render shadow
    if dashlet.get("shadow", True):
        for p in [ "nw", "ne", "sw", "se", "n", "s", "w", "e" ]:
            html.write('<img id="dashadow_%s_%d" class="shadow %s" src="images/dashadow-%s.png">' %
                (p, nr, p, p))

    if dashlet.get("title"):
        url = dashlet.get("title_url", None)
        if url:
            title = '<a href="%s">%s</a>' % (url, dashlet["title"])
        else:
            title = dashlet["title"]
        html.write('<div class="title" id="dashlet_title_%d">%s</div>' % (nr, title))
    if dashlet.get("background", True):
        bg = " background"
    else:
        bg = ""
    html.write('<div class="dashlet_inner%s" id="dashlet_inner_%d">' % (bg, nr))

    # Optional way to render a dynamic iframe URL
    if "iframefunc" in dashlet:
        dashlet["iframe"] = dashlet["iframefunc"]()

    # The method "view" is a shortcut for "iframe" with a certain url
    if "view" in dashlet:
        dashlet["iframe"] = "view.py?view_name=%s&_display_options=HRSIXL&_body_class=dashlet" % dashlet["view"]
        if dashlet.get("reload_on_resize"):
            dashlet["onload"] = "dashlet_add_dimensions('dashlet_%d', this)" % nr

    # The content is rendered only if it is fixed. In the
    # other cases the initial (re)-size will paint the content.
    if "content" in dashlet: # fixed content
        html.write(dashlet["content"])
    elif "iframe" in dashlet: # fixed content containing iframe
        # reload_on_resize dashlets start blank; the JS side fills in the
        # URL (registered below) once the final dimensions are known.
        if not dashlet.get("reload_on_resize"):
            url = add_wato_folder_to_url(dashlet["iframe"], wato_folder)
        else:
            url = 'about:blank'

        # Fix of iPad >:-P
        html.write('<div style="width: 100%; height: 100%; -webkit-overflow-scrolling:touch; overflow: hidden;">')
        html.write('<iframe id="dashlet_iframe_%d" allowTransparency="true" frameborder="0" width="100%%" '
                   'height="100%%" src="%s"> </iframe>' % (nr, url))
        html.write('</div>')
        if dashlet.get("reload_on_resize"):
            html.javascript('reload_on_resize["%d"] = "%s"' %
                            (nr, add_wato_folder_to_url(dashlet["iframe"], wato_folder)))

    html.write("</div></div>\n")
# Here comes the brain stuff: An intelligent liquid layout algorithm.
# It is called via ajax, mainly because I was not eager to code this
# directly in Javascript (though this would be possible and probably
# more lean.)
# Compute position and size of all dashlets
def ajax_resize():
    """AJAX handler: compute position and size (in pixels) of all dashlets
    of the dashboard given by the HTML variables 'name', 'width', 'height'.

    Works on a coarse raster: absolute-sized dashlets are placed first,
    then all elastic (GROW/MAX) dashlets are expanded one raster cell at a
    time, round-robin, so growth is distributed fairly. The result is
    written as the repr() of a list of
    [nr, visible, left, top, width, height] entries, consumed by the
    Javascript side."""
    # computation with vectors
    class vec:
        def __init__(self, xy):
            self._data = xy

        def __div__(self, xy):
            # Python 2 division operator; elementwise integer division here
            return vec((self._data[0] / xy[0], self._data[1] / xy[1]))

        def __repr__(self):
            return repr(self._data)

        def __getitem__(self, i):
            return self._data[i]

        def make_absolute(self, size):
            # Convert a 1-based coordinate (negative = measured from the
            # right/bottom edge) into a 0-based absolute raster coordinate.
            n = []
            for i in [0, 1]:
                if self._data[i] < 0:
                    n.append(size[i] + self._data[i] + 1) # Here was a bug fixed by Markus Lengler
                else:
                    n.append(self._data[i] - 1) # make begin from 0
            return vec(n)

        # Compute the initial size of the dashlet. If MAX is used,
        # then the dashlet consumes all space in its growing direction,
        # regardless of any other dashlets.
        def initial_size(self, position, rastersize):
            n = []
            for i in [0, 1]:
                if self._data[i] == MAX:
                    n.append(rastersize[i] - abs(position[i]) + 1)
                elif self._data[i] == GROW:
                    n.append(1)
                else:
                    n.append(self._data[i])
            return n

        def compute_grow_by(self, size):
            # Growth direction per axis: -1 (left/up), 0 (fixed), +1 (right/down)
            n = []
            for i in [0, 1]:
                if size[i] != GROW: # absolute size, no growth
                    n.append(0)
                elif self._data[i] < 0:
                    n.append(-1) # grow direction left, up
                else:
                    n.append(1) # grow direction right, down
            return n

        def __add__(self, b):
            return vec((self[0] + b[0], self[1] + b[1]))

    board = dashboards[html.var("name")]
    screensize = vec((int(html.var("width")), int(html.var("height"))))
    rastersize = screensize / raster
    used_matrix = {} # keep track of used raster elements

    # first place all dashlets at their absolute positions
    positions = []
    for nr, dashlet in enumerate(board["dashlets"]):
        # Relative position is as noted in the declaration. 1,1 => top left origin,
        # -1,-1 => bottom right origin, 0 is not allowed here
        rel_position = vec(dashlet["position"]) # starting from 1, negative means: from right/bottom

        # Compute the absolute position, this time from 0 to rastersize-1
        abs_position = rel_position.make_absolute(rastersize)

        # The size in raster-elements. A 0 for a dimension means growth. No negative values here.
        size = vec(dashlet["size"])

        # Compute the minimum used size for the dashlet. For growth-dimensions we start with 1
        used_size = size.initial_size(rel_position, rastersize)

        # Now compute the rectangle that is currently occupied. The choords
        # of bottomright are *not* included.
        if rel_position[0] > 0:
            left = abs_position[0]
            right = left + used_size[0]
        else:
            right = abs_position[0]
            left = right - used_size[0]
        if rel_position[1] > 0:
            top = abs_position[1]
            bottom = top + used_size[1]
        else:
            bottom = abs_position[1]
            top = bottom - used_size[1]

        # Allocate used squares in matrix. If not all squares we need are free,
        # then the dashboard is too small for all dashlets (as it seems).
        # (Translated from German:) TEST: place the dashlet at 0/0 if there
        # is no space for it.
        try:
            for x in range(left, right):
                for y in range(top, bottom):
                    if (x,y) in used_matrix:
                        raise Exception()
                    used_matrix[(x,y)] = True
            # Helper variable for how to grow, both x and y in [-1, 0, 1]
            grow_by = rel_position.compute_grow_by(size)
            positions.append((nr, True, left, top, right, bottom, grow_by))
        except:
            # Overlap detected: mark the dashlet as invisible instead of failing
            positions.append((nr, False, left, top, right, bottom, (0,0)))

    # now resize all elastic dashlets to the max, but only
    # by one raster at a time, in order to be fair
    # NOTE(review): try_resize is effectively disabled - its first statement
    # returns False unconditionally, so everything below it (which also
    # references undefined xmax/ymax and indexes used_matrix wrongly) is
    # unreachable dead code. try_allocate below is what is actually used.
    def try_resize(x, y, width, height):
        return False
        if x + width >= xmax or y + height >= ymax:
            return False
        for xx in range(x, x + width):
            for yy in range(y, y + height):
                if used_matrix[xx][yy]:
                    return False
        for xx in range(x, x + width):
            for yy in range(y, y + height):
                used_matrix[xx][yy] = True
        return True

    # (Translated from German:) This is WRONG! Actually only the *increment*
    # would have to be checked for being free in the matrix; the current
    # rectangle must be excluded - it is already allocated.
    def try_allocate(left, top, right, bottom):
        # Try if all needed squares are free
        for x in range(left, right):
            for y in range(top, bottom):
                if (x,y) in used_matrix:
                    return False
        # Allocate all needed squares
        for x in range(left, right):
            for y in range(top, bottom):
                used_matrix[(x,y)] = True
        return True

    # Now try to expand all elastic rectangles as far as possible
    at_least_one_expanded = True
    while at_least_one_expanded:
        at_least_one_expanded = False
        new_positions = []
        for (nr, visible, left, top, right, bottom, grow_by) in positions:
            if visible:
                # html.write(repr((nr, left, top, right, bottom, grow_by)))
                # try to grow in X direction by one
                if grow_by[0] > 0 and right < rastersize[0] and try_allocate(right, top, right+1, bottom):
                    at_least_one_expanded = True
                    right += 1
                elif grow_by[0] < 0 and left > 0 and try_allocate(left-1, top, left, bottom):
                    at_least_one_expanded = True
                    left -= 1
                # try to grow in Y direction by one
                if grow_by[1] > 0 and bottom < rastersize[1] and try_allocate(left, bottom, right, bottom+1):
                    at_least_one_expanded = True
                    bottom += 1
                elif grow_by[1] < 0 and top > 0 and try_allocate(left, top-1, right, top):
                    at_least_one_expanded = True
                    top -= 1
            new_positions.append((nr, visible, left, top, right, bottom, grow_by))
        positions = new_positions

    # Convert raster coordinates into pixel values; where a dashlet has a
    # title, shift the content down by title_height and shrink it accordingly.
    resize_info = []
    for nr, visible, left, top, right, bottom, grow_by in positions:
        # html.write(repr((nr, left, top, right, bottom, grow_by)))
        # html.write("<br>")
        title = board["dashlets"][nr].get("title")
        if title:
            th = title_height
        else:
            th = 0
        resize_info.append([nr,
                            visible and 1 or 0,
                            left * raster[0],
                            top * raster[1] + th,
                            (right - left) * raster[0],
                            (bottom - top) * raster[1] - th])

    html.write(repr(resize_info))
def dashlet_overview():
    """Render the static welcome dashlet with the Check_MK logo and links
    to the online documentation."""
    html.write(
        '<table class=dashlet_overview>'
        '<tr><td valign=top>'
        '<a href="http://mathias-kettner.de/check_mk.html"><img style="margin-right: 30px;" src="images/check_mk.trans.120.png"></a>'
        '</td>'
        '<td><h2>Check_MK Multisite</h2>'
        # Typo fixed in user-visible text: "Multsite" -> "Multisite"
        'Welcome to Check_MK Multisite. If you want to learn more about Multisite, please visit '
        'our <a href="http://mathias-kettner.de/checkmk_multisite.html">online documentation</a>. '
        'Multisite is part of <a href="http://mathias-kettner.de/check_mk.html">Check_MK</a> - an Open Source '
        'project by <a href="http://mathias-kettner.de">Mathias Kettner</a>.'
        '</td>'
    )
    html.write('</tr></table>')
def dashlet_mk_logo():
    """Render the Check_MK logo, linked to the project homepage."""
    logo_html = ('<a href="http://mathias-kettner.de/check_mk.html">'
                 '<img style="margin-right: 30px;" src="images/check_mk.trans.120.png"></a>')
    html.write(logo_html)
def dashlet_hoststats():
    """Dashlet showing host state statistics (pie chart plus count table).

    Each table row is (title, pie color, target view URL fragment,
    livestatus "Stats:" headers counting hosts in that state)."""
    table = [
       ( _("Up"), "#0b3",
        "searchhost&is_host_scheduled_downtime_depth=0&hst0=on",
        "Stats: state = 0\n" \
        "Stats: scheduled_downtime_depth = 0\n" \
        "StatsAnd: 2\n"),

       ( _("Down"), "#f00",
        "searchhost&is_host_scheduled_downtime_depth=0&hst1=on",
        "Stats: state = 1\n" \
        "Stats: scheduled_downtime_depth = 0\n" \
        "StatsAnd: 2\n"),

       ( _("Unreachable"), "#f80",
        "searchhost&is_host_scheduled_downtime_depth=0&hst2=on",
        "Stats: state = 2\n" \
        "Stats: scheduled_downtime_depth = 0\n" \
        "StatsAnd: 2\n"),

       ( _("In Downtime"), "#0af",
        "searchhost&search=1&is_host_scheduled_downtime_depth=1",
        "Stats: scheduled_downtime_depth > 0\n" \
       )
    ]
    # presumably excludes summary/pseudo hosts (those carrying a _REALNAME
    # custom variable) from the statistics - confirm against livestatus docs
    filter = "Filter: custom_variable_names < _REALNAME\n"
    render_statistics(html.var('id', "hoststats"), "hosts", table, filter)
def dashlet_servicestats():
    """Dashlet showing service state statistics (pie chart plus count table).

    Each table row is (title, pie color, target view URL fragment,
    livestatus "Stats:" headers counting services in that state).
    States on down hosts and in downtimes are counted separately, so
    every service falls into exactly one class."""
    table = [
       ( _("OK"), "#0b3",
        "searchsvc&hst0=on&st0=on&is_in_downtime=0",
        "Stats: state = 0\n" \
        "Stats: scheduled_downtime_depth = 0\n" \
        "Stats: host_scheduled_downtime_depth = 0\n" \
        "Stats: host_state = 0\n" \
        "Stats: host_has_been_checked = 1\n" \
        "StatsAnd: 5\n"),

       ( _("In Downtime"), "#0af",
        "searchsvc&is_in_downtime=1",
        "Stats: scheduled_downtime_depth > 0\n" \
        "Stats: host_scheduled_downtime_depth > 0\n" \
        "StatsOr: 2\n"),

       ( _("On Down host"), "#048",
        "searchsvc&hst1=on&hst2=on&hstp=on&is_in_downtime=0",
        "Stats: scheduled_downtime_depth = 0\n" \
        "Stats: host_scheduled_downtime_depth = 0\n" \
        "Stats: host_state != 0\n" \
        "StatsAnd: 3\n"),

       ( _("Warning"), "#ff0",
        "searchsvc&hst0=on&st1=on&is_in_downtime=0",
        "Stats: state = 1\n" \
        "Stats: scheduled_downtime_depth = 0\n" \
        "Stats: host_scheduled_downtime_depth = 0\n" \
        "Stats: host_state = 0\n" \
        "Stats: host_has_been_checked = 1\n" \
        "StatsAnd: 5\n"),

       ( _("Unknown"), "#f80",
        "searchsvc&hst0=on&st3=on&is_in_downtime=0",
        "Stats: state = 3\n" \
        "Stats: scheduled_downtime_depth = 0\n" \
        "Stats: host_scheduled_downtime_depth = 0\n" \
        "Stats: host_state = 0\n" \
        "Stats: host_has_been_checked = 1\n" \
        "StatsAnd: 5\n"),

       ( _("Critical"), "#f00",
        "searchsvc&hst0=on&st2=on&is_in_downtime=0",
        "Stats: state = 2\n" \
        "Stats: scheduled_downtime_depth = 0\n" \
        "Stats: host_scheduled_downtime_depth = 0\n" \
        "Stats: host_state = 0\n" \
        "Stats: host_has_been_checked = 1\n" \
        "StatsAnd: 5\n"),
    ]
    # presumably excludes services of summary/pseudo hosts (those carrying
    # a _REALNAME custom variable) - confirm against livestatus docs
    filter = "Filter: host_custom_variable_names < _REALNAME\n"
    render_statistics(html.var('id', "servicestats"), "services", table, filter)
def render_statistics(pie_id, what, table, filter):
    """Render a livestatus statistics dashlet: a two-half pie chart on a
    <canvas> plus a table of counts linking to matching views.

    pie_id: HTML id prefix for the canvas element
    what:   livestatus table to query ("hosts" or "services")
    table:  list of (title, color, view url fragment, "Stats:" headers)
    filter: additional livestatus "Filter:" headers appended to the query
    """
    html.write("<div class=stats>")
    pie_diameter = 130
    pie_left_aspect = 0.5
    pie_right_aspect = 0.8

    # Is the query restricted to a certain WATO-path?
    wato_folder = html.var("wato_folder")
    if wato_folder:
        # filter += "Filter: host_state = 0"
        filter += "Filter: host_filename ~ ^/wato/%s/\n" % wato_folder.replace("\n", "")

    # Is the query restricted to a host contact group?
    host_contact_group = html.var("host_contact_group")
    if host_contact_group:
        filter += "Filter: host_contact_groups >= %s\n" % host_contact_group.replace("\n", "")

    # Is the query restricted to a service contact group?
    service_contact_group = html.var("service_contact_group")
    if service_contact_group:
        filter += "Filter: service_contact_groups >= %s\n" % service_contact_group.replace("\n", "")

    # One summed-stats query for all classes at once; result has one
    # count per "Stats:" group in `table`, in order.
    query = "GET %s\n" % what
    for entry in table:
        query += entry[3]
    query += filter

    result = html.live.query_summed_stats(query)
    pies = zip(table, result)
    total = sum([x[1] for x in pies])

    html.write('<canvas class=pie width=%d height=%d id="%s_stats" style="float: left"></canvas>' %
            (pie_diameter, pie_diameter, pie_id))
    html.write('<img src="images/globe.png" class="globe">')

    html.write('<table class="hoststats%s" style="float:left">' % (
        len(pies) > 1 and " narrow" or ""))
    # Pad the table to at least 6 rows so all stats dashlets line up,
    # then append a "Total" row without color or query.
    table_entries = pies
    while len(table_entries) < 6:
        table_entries = table_entries + [ (("", "#95BBCD", "", ""), " ") ]
    table_entries.append(((_("Total"), "", "all%s" % what, ""), total))

    for (name, color, viewurl, query), count in table_entries:
        url = "view.py?view_name=" + viewurl + "&filled_in=filter&search=1&wato_folder=" \
              + html.urlencode(html.var("wato_folder", ""))
        if host_contact_group:
            url += '&opthost_contactgroup=' + host_contact_group
        if service_contact_group:
            url += '&optservice_contactgroup=' + service_contact_group
        html.write('<tr><th><a href="%s">%s</a></th>' % (url, name))
        style = ''
        if color:
            style = ' style="background-color: %s"' % color
        html.write('<td class=color%s>'
                   '</td><td><a href="%s">%s</a></td></tr>' % (style, url, count))

    html.write("</table>")

    r = 0.0
    pie_parts = []
    if total > 0:
        # Count number of non-empty classes
        num_nonzero = 0
        for info, value in pies:
            if value > 0:
                num_nonzero += 1

        # Each non-zero class gets at least a view pixels of visible thickness.
        # We reserve that space right now. All computations are done in percent
        # of the radius.
        separator = 0.02 # 2% of radius per separator
        remaining_separatorspace = num_nonzero * separator # space for separators
        remaining_radius = 1 - remaining_separatorspace # remaining space
        remaining_part = 1.0 # keep track of remaining part, 1.0 = 100%

        # Loop over classes, begin with most outer sphere. Inner spheres show
        # worse states and appear larger to the user (which is the reason we
        # are doing all this stuff in the first place)
        # NOTE(review): pies[::1] is just a copy in original order; given the
        # comment above, [::-1] may have been intended - confirm before changing.
        for (name, color, viewurl, q), value in pies[::1]:
            if value > 0 and remaining_part > 0: # skip empty classes
                # compute radius of this sphere *including all inner spheres!* The first
                # sphere always gets a radius of 1.0, of course.
                radius = remaining_separatorspace + remaining_radius * (remaining_part ** (1/3.0))
                pie_parts.append('chart_pie("%s", %f, %f, %r, true);' % (pie_id, pie_right_aspect, radius, color))
                pie_parts.append('chart_pie("%s", %f, %f, %r, false);' % (pie_id, pie_left_aspect, radius, color))

                # compute relative part of this class
                part = float(value) / total # ranges from 0 to 1
                remaining_part -= part
                remaining_separatorspace -= separator

    html.write("</div>")
    # Emit the pie-drawing helper plus the pre-computed chart_pie() calls.
    html.javascript("""
function chart_pie(pie_id, x_scale, radius, color, right_side) {
    var context = document.getElementById(pie_id + "_stats").getContext('2d');
    if (!context)
        return;
    var pie_x = %(x)f;
    var pie_y = %(y)f;
    var pie_d = %(d)f;
    context.fillStyle = color;
    context.save();
    context.translate(pie_x, pie_y);
    context.scale(x_scale, 1);
    context.beginPath();
    if(right_side)
        context.arc(0, 0, (pie_d / 2) * radius, 1.5 * Math.PI, 0.5 * Math.PI, false);
    else
        context.arc(0, 0, (pie_d / 2) * radius, 0.5 * Math.PI, 1.5 * Math.PI, false);
    context.closePath();
    context.fill();
    context.restore();
    context = null;
}
if (has_canvas_support()) {
    %(p)s
}
""" % { "x" : pie_diameter / 2, "y": pie_diameter/2, "d" : pie_diameter, 'p': '\n'.join(pie_parts) })
def dashlet_pnpgraph():
    """Render a PNP graph dashlet, taking all parameters from HTML
    request variables."""
    site = html.var("site")
    host = html.var("host")
    service = html.var("service")
    source = int(html.var("source", 0))
    view = int(html.var("view", 0))
    render_pnpgraph(site, host, service, source, view)
def dashlet_nodata():
    """Render a placeholder dashlet showing a message (HTML variable
    'message', default: localized "No data available.")."""
    message = html.var("message", _("No data available."))
    html.write("<div class=nograph><div class=msg>")
    html.write(message)
    html.write("</div></div>")
def render_pnpgraph(site, host, service = None, source = 0, view = 0):
    """Write an <img> tag for a PNP4Nagios graph, linked to the PNP page.

    Without a service the host graph ("_HOST_") is shown; without a site
    the local url prefix from defaults is used."""
    if not host:
        html.message("Invalid URL to this dashlet. Missing <tt>host</tt>")
        return
    if not service:
        service = "_HOST_"

    if site:
        base_url = html.site_status[site]["site"]["url_prefix"]
    else:
        base_url = defaults.url_prefix
    base_url += "pnp4nagios/index.php/"

    # NOTE(review): "view" appears twice in the query string (view=0 and
    # view=%d); this looks unintended - confirm which one PNP honors
    # before touching the URL.
    var_part = "?host=%s&srv=%s&view=0&source=%d&view=%d&theme=multisite&_t=%d" % \
               (pnp_cleanup(host), pnp_cleanup(service), source, view, int(time.time()))
    pnp_url = base_url + "graph" + var_part
    img_url = base_url + "image" + var_part
    html.write('<a href="%s"><img border=0 src="%s"></a>' % (pnp_url, img_url))
|
v-a/check_mk
|
web/htdocs/dashboard.py
|
Python
|
gpl-2.0
| 27,831
|
[
"VisIt"
] |
6a96a3452a0847f354c4516fa6451f369eb97289776590b660530596524d5825
|
#!/usr/bin/env python
# VTK regression test: warp a grid-overlaid image with a thin plate
# spline that has been discretized into a displacement grid.
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# first, create an image to warp
imageGrid = vtk.vtkImageGridSource()
imageGrid.SetGridSpacing(16,16,0)
imageGrid.SetGridOrigin(0,0,0)
imageGrid.SetDataExtent(0,255,0,255,0,0)
imageGrid.SetDataScalarTypeToUnsignedChar()

# Lookup table mapping the grid values to black lines with alpha,
# so the grid can be blended over the photo below.
table = vtk.vtkLookupTable()
table.SetTableRange(0,1)
table.SetValueRange(1.0,0.0)
table.SetSaturationRange(0.0,0.0)
table.SetHueRange(0.0,0.0)
table.SetAlphaRange(0.0,1.0)
table.Build()

alpha = vtk.vtkImageMapToColors()
alpha.SetInputConnection(imageGrid.GetOutputPort())
alpha.SetLookupTable(table)

reader1 = vtk.vtkBMPReader()
reader1.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/masonry.bmp")

# Composite the semi-transparent grid on top of the masonry photo.
blend = vtk.vtkImageBlend()
blend.AddInputConnection(0,reader1.GetOutputPort(0))
blend.AddInputConnection(0,alpha.GetOutputPort(0))

# next, create a ThinPlateSpline transform
# p1: target landmarks (corners fixed, inner square axis-aligned)
p1 = vtk.vtkPoints()
p1.SetNumberOfPoints(8)
p1.SetPoint(0,0,0,0)
p1.SetPoint(1,0,255,0)
p1.SetPoint(2,255,0,0)
p1.SetPoint(3,255,255,0)
p1.SetPoint(4,96,96,0)
p1.SetPoint(5,96,159,0)
p1.SetPoint(6,159,159,0)
p1.SetPoint(7,159,96,0)

# p2: source landmarks (the inner square's points rotated one step,
# producing a twist in the middle of the image)
p2 = vtk.vtkPoints()
p2.SetNumberOfPoints(8)
p2.SetPoint(0,0,0,0)
p2.SetPoint(1,0,255,0)
p2.SetPoint(2,255,0,0)
p2.SetPoint(3,255,255,0)
p2.SetPoint(4,96,159,0)
p2.SetPoint(5,159,159,0)
p2.SetPoint(6,159,96,0)
p2.SetPoint(7,96,96,0)

thinPlate = vtk.vtkThinPlateSplineTransform()
thinPlate.SetSourceLandmarks(p2)
thinPlate.SetTargetLandmarks(p1)
thinPlate.SetBasisToR2LogR()

# convert the thin plate spline into a grid (sampled displacement field)
transformToGrid = vtk.vtkTransformToGrid()
transformToGrid.SetInput(thinPlate)
transformToGrid.SetGridSpacing(16,16,1)
transformToGrid.SetGridOrigin(-0.5,-0.5,0)
transformToGrid.SetGridExtent(0,16,0,16,0,0)
transformToGrid.Update()

transform = vtk.vtkGridTransform()
transform.SetDisplacementGridConnection(transformToGrid.GetOutputPort())

# apply the grid warp to the image
# NOTE(review): transform2 is created but never connected to the
# pipeline below - it has no effect on the test output.
transform2 = vtk.vtkTransform()
transform2.RotateZ(30)

reslice = vtk.vtkImageReslice()
reslice.SetInputConnection(blend.GetOutputPort())
reslice.SetResliceTransform(transform.GetInverse())
reslice.SetInterpolationModeToLinear()
reslice.SetOptimization(1)

# set the window/level to 255.0/127.5 to view full range
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(reslice.GetOutputPort())
viewer.SetColorWindow(255.0)
viewer.SetColorLevel(127.5)
viewer.SetZSlice(0)
viewer.Render()
# --- end of script --
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Filters/Hybrid/Testing/Python/TestGridWarpLinear.py
|
Python
|
bsd-3-clause
| 2,541
|
[
"VTK"
] |
4be0ed7bd523daa107c5ea56db527abe96532d7fc937e19d13926ceceb5af7f2
|
# Copyright (c) 2011, Joerg Raedler (Berlin, Germany)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer. Redistributions in binary form must
# reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys, Gnuplot, CSV, netCDF, MATLAB, CSVlocale
# Mapping of export format name -> human readable description.
# Each key must match a submodule DyMat.Export.<name> (imported above)
# that provides an export(dm, varList, fileName, formatOptions) function.
formats = {
    'CSV'       : 'Comma separated values - read by many spreadsheet programs',
    'CSVlocale' : 'Simple CSV with locale number formatting',
    'Gnuplot'   : 'File format read by gnuplot, a famous plotting package',
    'netCDF'    : 'netCDF is a format for structured multi-dimensional data',
    'MATLAB'    : 'MATLAB files are binary files of matrix data',
}
def export(fmt, dm, varList, fileName=None, formatOptions=None):
    """Export the data of the DyMatFile object `dm` to a data file.

    :Arguments:
        - string: fmt
            export format, must be one of the keys of `formats`
        - DyMolaMat object: dm
        - sequence of strings: varList
            names of the variables to export
        - optional string: fileName
            if not given, it will be derived from the mat file name
        - optional dictionary: formatOptions
            format specific options, passed through to the exporter
    :Returns:
        - None
    :Raises:
        - Exception if `fmt` is not a known export format
    """
    # Use None instead of a mutable {} default; restore the original
    # behavior of forwarding an empty dict when nothing was passed.
    if formatOptions is None:
        formatOptions = {}
    if fmt not in formats:
        raise Exception('Unknown export format specified!')
    # All format modules are imported by the package header, so they are
    # guaranteed to be present in sys.modules here.
    m = sys.modules['DyMat.Export.%s' % fmt]
    return m.export(dm, varList, fileName, formatOptions)
|
arktools/openfdm
|
python/pyopenfdm/deps/DyMat/DyMat/Export/__init__.py
|
Python
|
gpl-3.0
| 2,525
|
[
"NetCDF"
] |
ec7e99bd0d526950117c9ed673eaad289ae8f83d5523917b6091c71028c35427
|
"""Test Axis device."""
from copy import deepcopy
from unittest import mock
from unittest.mock import Mock, patch
import axis as axislib
from axis.event_stream import OPERATION_INITIALIZED
import pytest
import respx
from homeassistant.components import axis, zeroconf
from homeassistant.components.axis.const import (
CONF_EVENTS,
CONF_MODEL,
DOMAIN as AXIS_DOMAIN,
)
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.config_entries import SOURCE_ZEROCONF
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.helpers import device_registry as dr
from tests.common import MockConfigEntry, async_fire_mqtt_message
MAC = "00408C123456"
FORMATTED_MAC = "00:40:8c:12:34:56"
MODEL = "model"
NAME = "name"
DEFAULT_HOST = "1.2.3.4"
ENTRY_OPTIONS = {CONF_EVENTS: True}
ENTRY_CONFIG = {
CONF_HOST: DEFAULT_HOST,
CONF_USERNAME: "root",
CONF_PASSWORD: "pass",
CONF_PORT: 80,
CONF_MODEL: MODEL,
CONF_NAME: NAME,
}
API_DISCOVERY_RESPONSE = {
"method": "getApiList",
"apiVersion": "1.0",
"data": {
"apiList": [
{"id": "api-discovery", "version": "1.0", "name": "API Discovery Service"},
{"id": "param-cgi", "version": "1.0", "name": "Legacy Parameter Handling"},
]
},
}
API_DISCOVERY_BASIC_DEVICE_INFO = {
"id": "basic-device-info",
"version": "1.1",
"name": "Basic Device Information",
}
API_DISCOVERY_MQTT = {"id": "mqtt-client", "version": "1.0", "name": "MQTT Client API"}
API_DISCOVERY_PORT_MANAGEMENT = {
"id": "io-port-management",
"version": "1.0",
"name": "IO Port Management",
}
APPLICATIONS_LIST_RESPONSE = """<reply result="ok">
<application Name="vmd" NiceName="AXIS Video Motion Detection" Vendor="Axis Communications" Version="4.2-0" ApplicationID="143440" License="None" Status="Running" ConfigurationPage="local/vmd/config.html" VendorHomePage="http://www.axis.com" />
</reply>"""
BASIC_DEVICE_INFO_RESPONSE = {
"apiVersion": "1.1",
"data": {
"propertyList": {
"ProdNbr": "M1065-LW",
"ProdType": "Network Camera",
"SerialNumber": MAC,
"Version": "9.80.1",
}
},
}
LIGHT_CONTROL_RESPONSE = {
"apiVersion": "1.1",
"method": "getLightInformation",
"data": {
"items": [
{
"lightID": "led0",
"lightType": "IR",
"enabled": True,
"synchronizeDayNightMode": True,
"lightState": False,
"automaticIntensityMode": False,
"automaticAngleOfIlluminationMode": False,
"nrOfLEDs": 1,
"error": False,
"errorInfo": "",
}
]
},
}
MQTT_CLIENT_RESPONSE = {
"apiVersion": "1.0",
"context": "some context",
"method": "getClientStatus",
"data": {"status": {"state": "active", "connectionStatus": "Connected"}},
}
PORT_MANAGEMENT_RESPONSE = {
"apiVersion": "1.0",
"method": "getPorts",
"data": {
"numberOfPorts": 1,
"items": [
{
"port": "0",
"configurable": False,
"usage": "",
"name": "PIR sensor",
"direction": "input",
"state": "open",
"normalState": "open",
}
],
},
}
VMD4_RESPONSE = {
"apiVersion": "1.4",
"method": "getConfiguration",
"context": "Axis library",
"data": {
"cameras": [{"id": 1, "rotation": 0, "active": True}],
"profiles": [
{"filters": [], "camera": 1, "triggers": [], "name": "Profile 1", "uid": 1}
],
},
}
BRAND_RESPONSE = """root.Brand.Brand=AXIS
root.Brand.ProdFullName=AXIS M1065-LW Network Camera
root.Brand.ProdNbr=M1065-LW
root.Brand.ProdShortName=AXIS M1065-LW
root.Brand.ProdType=Network Camera
root.Brand.ProdVariant=
root.Brand.WebURL=http://www.axis.com
"""
IMAGE_RESPONSE = """root.Image.I0.Enabled=yes
root.Image.I0.Name=View Area 1
root.Image.I0.Source=0
root.Image.I1.Enabled=no
root.Image.I1.Name=View Area 2
root.Image.I1.Source=0
"""
PORTS_RESPONSE = """root.Input.NbrOfInputs=1
root.IOPort.I0.Configurable=no
root.IOPort.I0.Direction=input
root.IOPort.I0.Input.Name=PIR sensor
root.IOPort.I0.Input.Trig=closed
root.Output.NbrOfOutputs=0
"""
PROPERTIES_RESPONSE = f"""root.Properties.API.HTTP.Version=3
root.Properties.API.Metadata.Metadata=yes
root.Properties.API.Metadata.Version=1.0
root.Properties.EmbeddedDevelopment.Version=2.16
root.Properties.Firmware.BuildDate=Feb 15 2019 09:42
root.Properties.Firmware.BuildNumber=26
root.Properties.Firmware.Version=9.10.1
root.Properties.Image.Format=jpeg,mjpeg,h264
root.Properties.Image.NbrOfViews=2
root.Properties.Image.Resolution=1920x1080,1280x960,1280x720,1024x768,1024x576,800x600,640x480,640x360,352x240,320x240
root.Properties.Image.Rotation=0,180
root.Properties.System.SerialNumber={MAC}
"""
PTZ_RESPONSE = ""
STREAM_PROFILES_RESPONSE = """root.StreamProfile.MaxGroups=26
root.StreamProfile.S0.Description=profile_1_description
root.StreamProfile.S0.Name=profile_1
root.StreamProfile.S0.Parameters=videocodec=h264
root.StreamProfile.S1.Description=profile_2_description
root.StreamProfile.S1.Name=profile_2
root.StreamProfile.S1.Parameters=videocodec=h265
"""
VIEW_AREAS_RESPONSE = {"apiVersion": "1.0", "method": "list", "data": {"viewAreas": []}}
def mock_default_vapix_requests(respx: respx, host: str = DEFAULT_HOST) -> None:
    """Mock default Vapix requests responses.

    Registers canned responses for every Vapix / param.cgi endpoint the
    integration queries during setup, so a device can be set up fully
    offline.

    NOTE(review): the parameter intentionally shadows the module-level
    ``respx`` import (callers pass the module itself, positionally), and
    the annotation is the module object rather than a type.
    """
    # JSON CGI endpoints (API discovery and the per-API CGIs).
    respx.post(f"http://{host}:80/axis-cgi/apidiscovery.cgi").respond(
        json=API_DISCOVERY_RESPONSE,
    )
    respx.post(f"http://{host}:80/axis-cgi/basicdeviceinfo.cgi").respond(
        json=BASIC_DEVICE_INFO_RESPONSE,
    )
    respx.post(f"http://{host}:80/axis-cgi/io/portmanagement.cgi").respond(
        json=PORT_MANAGEMENT_RESPONSE,
    )
    respx.post(f"http://{host}:80/axis-cgi/lightcontrol.cgi").respond(
        json=LIGHT_CONTROL_RESPONSE,
    )
    respx.post(f"http://{host}:80/axis-cgi/mqtt/client.cgi").respond(
        json=MQTT_CLIENT_RESPONSE,
    )
    # NOTE(review): STREAM_PROFILES_RESPONSE is a plain param.cgi text blob,
    # but here it is sent through the json= channel -- confirm this is what
    # the stream-profile API handler expects.
    respx.post(f"http://{host}:80/axis-cgi/streamprofile.cgi").respond(
        json=STREAM_PROFILES_RESPONSE,
    )
    respx.post(f"http://{host}:80/axis-cgi/viewarea/info.cgi").respond(
        json=VIEW_AREAS_RESPONSE
    )
    # Legacy param.cgi endpoints return plain-text key=value listings.
    respx.get(
        f"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.Brand"
    ).respond(
        text=BRAND_RESPONSE,
        headers={"Content-Type": "text/plain"},
    )
    respx.get(
        f"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.Image"
    ).respond(
        text=IMAGE_RESPONSE,
        headers={"Content-Type": "text/plain"},
    )
    # The Input/IOPort/Output groups all share the same canned PORTS_RESPONSE.
    respx.get(
        f"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.Input"
    ).respond(
        text=PORTS_RESPONSE,
        headers={"Content-Type": "text/plain"},
    )
    respx.get(
        f"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.IOPort"
    ).respond(
        text=PORTS_RESPONSE,
        headers={"Content-Type": "text/plain"},
    )
    respx.get(
        f"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.Output"
    ).respond(
        text=PORTS_RESPONSE,
        headers={"Content-Type": "text/plain"},
    )
    respx.get(
        f"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.Properties"
    ).respond(
        text=PROPERTIES_RESPONSE,
        headers={"Content-Type": "text/plain"},
    )
    respx.get(
        f"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.PTZ"
    ).respond(
        text=PTZ_RESPONSE,
        headers={"Content-Type": "text/plain"},
    )
    respx.get(
        f"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.StreamProfile"
    ).respond(
        text=STREAM_PROFILES_RESPONSE,
        headers={"Content-Type": "text/plain"},
    )
    # Applications (ACAP) listing is XML; VMD4 configuration is JSON.
    respx.post(f"http://{host}:80/axis-cgi/applications/list.cgi").respond(
        text=APPLICATIONS_LIST_RESPONSE,
        headers={"Content-Type": "text/xml"},
    )
    respx.post(f"http://{host}:80/local/vmd/control.cgi").respond(json=VMD4_RESPONSE)
async def setup_axis_integration(hass, config=ENTRY_CONFIG, options=ENTRY_OPTIONS):
    """Create the Axis device.

    Adds a mocked config entry to hass, sets it up against the canned Vapix
    responses and returns the config entry.
    """
    config_entry = MockConfigEntry(
        domain=AXIS_DOMAIN,
        data=deepcopy(config),  # deepcopy so tests cannot mutate the module-level defaults
        options=deepcopy(options),
        version=3,
        unique_id=FORMATTED_MAC,
    )
    config_entry.add_to_hass(hass)
    # All HTTP traffic during setup is answered by the respx mocks.
    with respx.mock:
        mock_default_vapix_requests(respx)
        await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
    return config_entry
async def test_device_setup(hass):
    """Successful setup.

    Verifies device properties read via param.cgi, that all four platforms
    are forwarded, and that the device registry entry is created.
    """
    with patch(
        "homeassistant.config_entries.ConfigEntries.async_forward_entry_setup",
        return_value=True,
    ) as forward_entry_setup:
        config_entry = await setup_axis_integration(hass)
        device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
    # Without basic-device-info the firmware version comes from param.cgi.
    assert device.api.vapix.firmware_version == "9.10.1"
    assert device.api.vapix.product_number == "M1065-LW"
    assert device.api.vapix.product_type == "Network Camera"
    assert device.api.vapix.serial_number == "00408C123456"
    entry = device.config_entry
    # One forward per supported platform, in declaration order.
    assert len(forward_entry_setup.mock_calls) == 4
    assert forward_entry_setup.mock_calls[0][1] == (entry, "binary_sensor")
    assert forward_entry_setup.mock_calls[1][1] == (entry, "camera")
    assert forward_entry_setup.mock_calls[2][1] == (entry, "light")
    assert forward_entry_setup.mock_calls[3][1] == (entry, "switch")
    assert device.host == ENTRY_CONFIG[CONF_HOST]
    assert device.model == ENTRY_CONFIG[CONF_MODEL]
    assert device.name == ENTRY_CONFIG[CONF_NAME]
    assert device.unique_id == FORMATTED_MAC
    device_registry = dr.async_get(hass)
    device_entry = device_registry.async_get_device(
        identifiers={(AXIS_DOMAIN, device.unique_id)}
    )
    assert device_entry.configuration_url == device.api.config.url
async def test_device_info(hass):
    """Verify other path of device information works.

    With the basic-device-info API advertised, device properties come from
    basicdeviceinfo.cgi (firmware 9.80.1) instead of param.cgi (9.10.1).
    """
    api_discovery = deepcopy(API_DISCOVERY_RESPONSE)
    api_discovery["data"]["apiList"].append(API_DISCOVERY_BASIC_DEVICE_INFO)
    with patch.dict(API_DISCOVERY_RESPONSE, api_discovery):
        config_entry = await setup_axis_integration(hass)
        device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
    assert device.api.vapix.firmware_version == "9.80.1"
    assert device.api.vapix.product_number == "M1065-LW"
    assert device.api.vapix.product_type == "Network Camera"
    assert device.api.vapix.serial_number == "00408C123456"
async def test_device_support_mqtt(hass, mqtt_mock):
    """Successful setup.

    When the device advertises the MQTT client API, the integration
    subscribes to the device topic and creates entities from MQTT events.
    """
    api_discovery = deepcopy(API_DISCOVERY_RESPONSE)
    api_discovery["data"]["apiList"].append(API_DISCOVERY_MQTT)
    with patch.dict(API_DISCOVERY_RESPONSE, api_discovery):
        await setup_axis_integration(hass)
    mqtt_mock.async_subscribe.assert_called_with(f"{MAC}/#", mock.ANY, 0, "utf-8")
    topic = f"{MAC}/event/tns:onvif/Device/tns:axis/Sensor/PIR/$source/sensor/0"
    message = b'{"timestamp": 1590258472044, "topic": "onvif:Device/axis:Sensor/PIR", "message": {"source": {"sensor": "0"}, "key": {}, "data": {"state": "1"}}}'
    # No binary sensors exist until the first event arrives over MQTT.
    assert len(hass.states.async_entity_ids(BINARY_SENSOR_DOMAIN)) == 0
    async_fire_mqtt_message(hass, topic, message)
    await hass.async_block_till_done()
    assert len(hass.states.async_entity_ids(BINARY_SENSOR_DOMAIN)) == 1
    pir = hass.states.get(f"{BINARY_SENSOR_DOMAIN}.{NAME}_pir_0")
    assert pir.state == STATE_ON
    assert pir.name == f"{NAME} PIR 0"
async def test_update_address(hass):
    """Test update address works.

    A zeroconf discovery of the same device (matched by MAC) at a new IP
    should update the stored host on the existing config entry.
    """
    config_entry = await setup_axis_integration(hass)
    device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
    assert device.api.config.host == "1.2.3.4"
    with patch(
        "homeassistant.components.axis.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry, respx.mock:
        # The flow probes the device at the new address, so mock it there.
        mock_default_vapix_requests(respx, "2.3.4.5")
        await hass.config_entries.flow.async_init(
            AXIS_DOMAIN,
            data=zeroconf.ZeroconfServiceInfo(
                host="2.3.4.5",
                addresses=["2.3.4.5"],
                hostname="mock_hostname",
                name="name",
                port=80,
                properties={"macaddress": MAC},
                type="mock_type",
            ),
            context={"source": SOURCE_ZEROCONF},
        )
        await hass.async_block_till_done()
    assert device.api.config.host == "2.3.4.5"
    # The entry is reloaded once after the address change.
    assert len(mock_setup_entry.mock_calls) == 1
async def test_device_unavailable(hass, mock_rtsp_event, mock_rtsp_signal_state):
    """Successful setup.

    Entities should go unavailable when the RTSP connection drops and
    recover their previous state when it is restored.
    """
    await setup_axis_integration(hass)
    # Provide an entity that can be used to verify connection state on
    mock_rtsp_event(
        topic="tns1:AudioSource/tnsaxis:TriggerLevel",
        data_type="triggered",
        data_value="10",
        source_name="channel",
        source_idx="1",
    )
    await hass.async_block_till_done()
    assert hass.states.get(f"{BINARY_SENSOR_DOMAIN}.{NAME}_sound_1").state == STATE_OFF
    # Connection to device has failed
    mock_rtsp_signal_state(connected=False)
    await hass.async_block_till_done()
    assert (
        hass.states.get(f"{BINARY_SENSOR_DOMAIN}.{NAME}_sound_1").state
        == STATE_UNAVAILABLE
    )
    # Connection to device has been restored
    mock_rtsp_signal_state(connected=True)
    await hass.async_block_till_done()
    assert hass.states.get(f"{BINARY_SENSOR_DOMAIN}.{NAME}_sound_1").state == STATE_OFF
async def test_device_reset(hass):
    """Successfully reset device.

    async_reset should tear down the device cleanly and report success.
    """
    config_entry = await setup_axis_integration(hass)
    device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
    result = await device.async_reset()
    assert result is True
async def test_device_not_accessible(hass):
    """Failed setup schedules a retry of setup.

    CannotConnect during setup must not register the device.
    """
    with patch.object(axis.device, "get_device", side_effect=axis.errors.CannotConnect):
        await setup_axis_integration(hass)
    assert hass.data[AXIS_DOMAIN] == {}
async def test_device_trigger_reauth_flow(hass):
    """Failed authentication trigger a reauthentication flow.

    AuthenticationRequired during setup must start a config flow (reauth)
    and must not register the device.
    """
    with patch.object(
        axis.device, "get_device", side_effect=axis.errors.AuthenticationRequired
    ), patch.object(hass.config_entries.flow, "async_init") as mock_flow_init:
        await setup_axis_integration(hass)
        mock_flow_init.assert_called_once()
    assert hass.data[AXIS_DOMAIN] == {}
async def test_device_unknown_error(hass):
    """Unknown errors are handled.

    An unexpected exception during setup must not register the device.
    """
    with patch.object(axis.device, "get_device", side_effect=Exception):
        await setup_axis_integration(hass)
    assert hass.data[AXIS_DOMAIN] == {}
async def test_new_event_sends_signal(hass):
    """Make sure that new event send signal.

    An initialized event must result in exactly one dispatcher signal.
    """
    entry = Mock()
    entry.data = ENTRY_CONFIG
    axis_device = axis.device.AxisNetworkDevice(hass, entry)
    with patch.object(axis.device, "async_dispatcher_send") as mock_dispatch_send:
        axis_device.async_event_callback(action=OPERATION_INITIALIZED, event_id="event")
        await hass.async_block_till_done()
    assert len(mock_dispatch_send.mock_calls) == 1
    # Each call record is a (name, args, kwargs) triple.
    assert len(mock_dispatch_send.mock_calls[0]) == 3
async def test_shutdown():
    """Successful shutdown.

    Shutdown must stop the event stream exactly once; hass and the entry
    are plain mocks because no integration setup is needed here.
    """
    hass = Mock()
    entry = Mock()
    entry.data = ENTRY_CONFIG
    axis_device = axis.device.AxisNetworkDevice(hass, entry)
    axis_device.api = Mock()
    await axis_device.shutdown(None)
    assert len(axis_device.api.stream.stop.mock_calls) == 1
async def test_get_device_fails(hass):
    """Device unauthorized yields authentication required error."""
    # The axis library Unauthorized error is translated to the
    # integration's AuthenticationRequired.
    with patch(
        "axis.vapix.Vapix.request", side_effect=axislib.Unauthorized
    ), pytest.raises(axis.errors.AuthenticationRequired):
        await axis.device.get_device(hass, host="", port="", username="", password="")
async def test_get_device_device_unavailable(hass):
    """Device unavailable yields cannot connect error."""
    # The axis library RequestError is translated to CannotConnect.
    with patch(
        "axis.vapix.Vapix.request", side_effect=axislib.RequestError
    ), pytest.raises(axis.errors.CannotConnect):
        await axis.device.get_device(hass, host="", port="", username="", password="")
async def test_get_device_unknown_error(hass):
    """Device yield unknown error."""
    # Any other axis library error is treated as AuthenticationRequired.
    with patch(
        "axis.vapix.Vapix.request", side_effect=axislib.AxisException
    ), pytest.raises(axis.errors.AuthenticationRequired):
        await axis.device.get_device(hass, host="", port="", username="", password="")
|
rohitranjan1991/home-assistant
|
tests/components/axis/test_device.py
|
Python
|
mit
| 16,933
|
[
"VMD"
] |
dd5d71f990f20dc90f517ebe69abf131d146f7d2272718be09053f795395734e
|
# L. Amber Wilcox-O'Hearn 2011
# test_article_randomiser.py
from recluse import article_randomiser
import unittest, StringIO, random, subprocess, bz2, os
class ArticleRandomiserTest(unittest.TestCase):
    """Tests for recluse.article_randomiser (Python 2 code base)."""

    def test_randomise(self):
        """Articles are deterministically split into train/devel/test
        given a seeded Random instance."""
        r = random.Random(999)
        article_separation_line = "---END.OF.DOCUMENT---\n"
        a1 = ["Anarchism.\n", "Anarchism is a political philosophy which considers the state undesirable, unnecessary and harmful, and instead promotes a stateless society, or anarchy. It seeks to diminish or even abolish authority in the conduct of human relations. Anarchists may widely disagree on what additional criteria are required in anarchism. \"The Oxford Companion to Philosophy\" says, \"there is no single defining position that all anarchists hold, and those considered anarchists at best share a certain family resemblance.\"", "---END.OF.DOCUMENT---\n"]
        a2 = ["Hidehiko Shimizu.\n", "Hidehiko Shimizu (born 4 November 1954) is a former Japanese football player. He has played for Nissan Motors.\n", "---END.OF.DOCUMENT---\n"]
        a3 = ["Some other thing.\n", "this\n", "could\n", "be a line or three.\n", "---END.OF.DOCUMENT---\n"]
        a4 = ["Finally.\n", "Another one.\n", "---END.OF.DOCUMENT---\n"]
        newline_list = ["\n"]
        article_file_obj = a1 + newline_list + a2 + newline_list + a3 + newline_list + a4
        train_file_obj = StringIO.StringIO()
        devel_file_obj = StringIO.StringIO()
        test_file_obj = StringIO.StringIO()
        ar = article_randomiser.Randomiser(article_file_obj, train_file_obj, devel_file_obj, test_file_obj, r)
        ar.randomise()
        # assertEqual produces a readable expected-vs-actual diff on failure;
        # the old bare asserts concatenated both values into one opaque string.
        self.assertEqual(train_file_obj.getvalue(), "".join(a2 + a4))
        self.assertEqual(devel_file_obj.getvalue(), "".join(a1))
        self.assertEqual(test_file_obj.getvalue(), "".join(a3))

    def test_command_line(self):
        """Running the randomiser as a script splits the sample corpus into
        train/devel/test files with the expected article counts."""
        randomize = subprocess.Popen(['python', 'recluse/article_randomiser.py', "---END.OF.DOCUMENT---", '.5', '.3', '.2'], stdin=-1, stdout=-1, stderr=-1 )
        test_data_reader = bz2.BZ2File('recluse/test/data/small_westbury.txt.bz2', 'r')
        randomize.communicate(input=test_data_reader.read())
        self.assertEqual(randomize.returncode, 0)
        # BUG FIX: the original wrote `self.assertEqual(...), num_x`, which
        # discards the Popen object as a no-op tuple expression instead of
        # passing it as the assertion message.
        num_train = subprocess.Popen(['grep', '-c', 'END', 'train'], stdout=-1)
        self.assertEqual(int(num_train.stdout.read()), 11, num_train)
        num_devel = subprocess.Popen(['grep', '-c', 'END', 'devel'], stdout=-1)
        self.assertEqual(int(num_devel.stdout.read()), 5, num_devel)
        num_test = subprocess.Popen(['grep', '-c', 'END', 'test'], stdout=-1)
        self.assertEqual(int(num_test.stdout.read()), 2, num_test)
        # Clean up the files written by the script.
        os.remove('train')
        os.remove('devel')
        os.remove('test')
if __name__ == '__main__':
unittest.main()
|
ambimorph/recluse
|
recluse/test/test_article_randomiser.py
|
Python
|
agpl-3.0
| 2,916
|
[
"Amber"
] |
bc9eae0d7f373dd2dddb4f75ae3dcb512320591059b7cd330e86ac6a57f7a73d
|
#!/usr/bin/env python
from __future__ import division
__author__ = "Jens Reeder"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Greg Caporaso", "Jens Reeder", "Jai Ram Rideout",
"Jose Antonio Navas Molina"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
from qiime.util import make_option
from os.path import split, splitext, join
from qiime.util import (make_option, parse_command_line_parameters,
load_qiime_config, get_options_lookup)
from qiime.parallel.identify_chimeric_seqs import ParallelChimericSequenceIdentifier
qiime_config = load_qiime_config()
options_lookup = get_options_lookup()
script_info = {}
script_info['brief_description'] = """Parallel chimera detection"""
script_info[
'script_description'] = """This script works like the identify_chimeric_seqs.py script, but is intended to make use of multicore/multiprocessor environments to perform analyses in parallel."""
script_info['script_usage'] = []
# copied from identify_chimeric_seqs.py
script_info['script_usage'].append(("""blast_fragments example""", """For each sequence provided as input, the blast_fragments method splits the input sequence into n roughly-equal-sized, non-overlapping fragments, and assigns taxonomy to each fragment against a reference database. The BlastTaxonAssigner (implemented in assign_taxonomy.py) is used for this. The taxonomies of the fragments are compared with one another (at a default depth of 4), and if contradictory assignments are returned the sequence is identified as chimeric. For example, if an input sequence was split into 3 fragments, and the following taxon assignments were returned:
========== ==========================================================
fragment1: Archaea;Euryarchaeota;Methanobacteriales;Methanobacterium
fragment2: Archaea;Euryarchaeota;Halobacteriales;uncultured
fragment3: Archaea;Euryarchaeota;Methanobacteriales;Methanobacterium
========== ==========================================================
The sequence would be considered chimeric at a depth of 3 (Methanobacteriales vs. Halobacteriales), but non-chimeric at a depth of 2 (all Euryarchaeota).
blast_fragments begins with the assumption that a sequence is non-chimeric, and looks for evidence to the contrary. This is important when, for example, no taxonomy assignment can be made because no blast result is returned. If a sequence is split into three fragments, and only one returns a blast hit, that sequence would be considered non-chimeric. This is because there is no evidence (i.e., contradictory blast assignments) for the sequence being chimeric. This script can be run by the following command, where the resulting data is written to $PWD/blast_fragments_chimeric_seqs.txt and using default parameters (i.e., number of fragments ("-n 3"), taxonomy depth ("-d 4") and maximum E-value ("-e 1e-30")). ALWAYS SPECIFY ABSOLUTE FILE PATHS (absolute path represented here as $PWD, but will generally look something like /home/ubuntu/my_analysis/).""", """%prog -i $PWD/inseqs.fasta -t $PWD/id_to_tax.txt -r $PWD/refseqs.fasta -o $PWD/blast_fragments_chimeric_seqs.txt -m blast_fragments"""))
script_info['script_usage'].append(("""ChimeraSlayer Example:""",
"""Identify chimeric sequences using the ChimeraSlayer algorithm against a user provided reference database. The input sequences need to be provided in aligned (Py)Nast format and the reference database needs to be provided as aligned FASTA (-a). Note that the reference database needs to be the same that was used to build the alignment of the input sequences! ALWAYS SPECIFY ABSOLUTE FILE PATHS (absolute path represented here as $PWD, but will generally look something like /home/ubuntu/my_analysis/).""",
"""%prog -i $PWD/inseqs_aligned.fasta -o $PWD/chimera_slayer_chimeric_seqs.txt"""))
script_info[
'output_description'] = """The result of parallel_identify_chimeric_seqs.py is a text file that identifies which sequences are chimeric."""
script_info['required_options'] = [
options_lookup['fasta_as_primary_input'],
]
chimera_detection_method_choices = ['blast_fragments', 'ChimeraSlayer']
script_info['optional_options'] = [
make_option('-a', '--aligned_reference_seqs_fp',
type='existing_filepath',
default=qiime_config['pynast_template_alignment_fp'],
help='Path to (Py)Nast aligned reference sequences. '
'REQUIRED when method ChimeraSlayer [default: %default]'),
make_option('-t', '--id_to_taxonomy_fp',
type='existing_filepath',
help='Path to tab-delimited file mapping sequences to assigned '
'taxonomy. Each assigned taxonomy is provided as a comma-separated '
'list. [default: %default; REQUIRED when method is blast_fragments]'),
make_option('-r', '--reference_seqs_fp',
type='existing_filepath',
help='Path to reference sequences (used to build a blast db when method blast_fragments). '
'[default: %default; REQUIRED when method blast_fragments' +
' if no blast_db is provided;]'),
make_option('-b', '--blast_db', type='blast_db',
help='Database to blast against. Must provide either --blast_db or '
'--reference_seqs_fp when method is blast_fragments [default: %default]'),
make_option('-m', '--chimera_detection_method',
type='choice', help='Chimera detection method. Choices: ' +
" or ".join(chimera_detection_method_choices) +
'. [default:%default]',
choices=chimera_detection_method_choices, default='ChimeraSlayer'),
make_option('-n', '--num_fragments',
type='int', help='Number of fragments to split sequences into' +
' (i.e., number of expected breakpoints + 1) [default: %default]',
default=3),
make_option('-d', '--taxonomy_depth',
type='int', help='Number of taxonomic divisions to consider' +
' when comparing taxonomy assignments [default: %default]',
default=4),
make_option('-e', '--max_e_value',
type='float', help='Max e-value to assign taxonomy' +
' [default: %default]', default=1e-30),
make_option('--min_div_ratio',
type='float', help='min divergence ratio ' +
'(passed to ChimeraSlayer). If set to None uses ' +
'ChimeraSlayer default value. ' +
' [default: %default]', default=None),
make_option('-o', '--output_fp',
type='new_filepath',
help='Path to store output [default: derived from input_seqs_fp]'),
# Define parallel-script-specific parameters
options_lookup['jobs_to_start'],
options_lookup['retain_temp_files'],
options_lookup['suppress_submit_jobs'],
options_lookup['poll_directly'],
options_lookup['cluster_jobs_fp'],
options_lookup['suppress_polling'],
options_lookup['job_prefix'],
options_lookup['seconds_to_sleep']
]
script_info['version'] = __version__
def main():
    """Entry point: validate command-line options and launch the parallel
    chimeric-sequence identification run."""
    option_parser, opts, args = parse_command_line_parameters(**script_info)
    # Create dict of command-line options.  vars() exposes the optparse
    # Values attributes directly; the original `eval(str(opts))` round-
    # tripped every value through repr(), which is fragile (breaks on any
    # non-literal value) and needlessly evaluates generated text.
    params = dict(vars(opts))
    # Additional option checks (copied from scripts/identify_chimeric_seqs.py).
    if opts.chimera_detection_method == 'blast_fragments':
        # blast_fragments needs a blast database (or sequences to build one)
        # plus a taxonomy map, and at least two fragments to compare.
        if not (opts.blast_db or opts.reference_seqs_fp):
            option_parser.error('Must provide either --blast_db or' +
                                ' --reference_seqs_fp and --id_to_taxonomy_fp when' +
                                ' method is blast_fragments.')
        if not opts.id_to_taxonomy_fp:
            option_parser.error('Must provide --id_to_taxonomy_fp when method' +
                                ' is blast_fragments.')
        if opts.num_fragments < 2:
            option_parser.error('Invalid number of fragments (-n %d) Must be >= 2.'
                                % opts.num_fragments)
    elif opts.chimera_detection_method == 'ChimeraSlayer':
        # ChimeraSlayer needs an aligned reference database instead.
        if not opts.aligned_reference_seqs_fp:
            option_parser.error("Must provide --aligned_reference_seqs_fp " +
                                "when using method ChimeraSlayer")
    # Set the output_fp if not set: derive "<input basename>_chimeric.txt".
    output_fp = opts.output_fp
    if not output_fp:
        input_basename = splitext(split(opts.input_fasta_fp)[1])[0]
        output_fp = '%s_chimeric.txt' % input_basename
        params['output_fp'] = output_fp
    # Find the output dir path based on the output file path.
    output_dir, _ = split(output_fp)
    if output_dir == "":
        output_dir = "./"
    parallel_runner = ParallelChimericSequenceIdentifier(
        cluster_jobs_fp=opts.cluster_jobs_fp,
        jobs_to_start=opts.jobs_to_start,
        retain_temp_files=opts.retain_temp_files,
        suppress_polling=opts.suppress_polling,
        seconds_to_sleep=opts.seconds_to_sleep)
    parallel_runner(opts.input_fasta_fp,
                    output_dir,
                    params,
                    job_prefix=opts.job_prefix,
                    poll_directly=opts.poll_directly,
                    suppress_submit_jobs=opts.suppress_submit_jobs)
if __name__ == "__main__":
main()
|
wasade/qiime
|
scripts/parallel_identify_chimeric_seqs.py
|
Python
|
gpl-2.0
| 9,500
|
[
"BLAST"
] |
2018815a1a7b48ccba4f1c6dc2e5f5f7aef5800ea361b450690ace710d3f64a7
|
import unittest
from splinter import Browser
from .fake_webapp import EXAMPLE_APP
from .base import WebDriverTests
class PhantomJSBrowserTest(WebDriverTests, unittest.TestCase):
    """Run the shared WebDriver test suite against the phantomjs driver.

    The overrides below replace base-class tests for features the
    phantomjs driver does not implement (alerts, prompts, right/double
    click), asserting that NotImplementedError is raised instead.
    """

    @classmethod
    def setUpClass(cls):
        # One browser instance is shared across the whole class for speed.
        cls.browser = Browser("phantomjs")
    @classmethod
    def tearDownClass(cls):
        cls.browser.quit()
    def setUp(self):
        # Reset to the example app before every test.
        self.browser.visit(EXAMPLE_APP)
    def test_get_alert(self):
        with self.assertRaises(NotImplementedError):
            self.browser.get_alert()
    def test_right_click(self):
        with self.assertRaises(NotImplementedError):
            self.browser.find_by_id('visible').right_click()
    def test_double_click(self):
        with self.assertRaises(NotImplementedError):
            self.browser.find_by_id('visible').double_click()
    def test_access_prompts_and_be_able_to_fill_then(self):
        with self.assertRaises(NotImplementedError):
            self.browser.get_alert()
    def test_access_confirm_and_accept_and_dismiss_them_using_with(self):
        with self.assertRaises(NotImplementedError):
            self.browser.get_alert()
    def test_access_confirm_and_accept_and_dismiss_them(self):
        with self.assertRaises(NotImplementedError):
            self.browser.get_alert()
    def test_access_alerts_using_with(self):
        with self.assertRaises(NotImplementedError):
            self.browser.get_alert()
    def test_access_alerts_and_accept_them(self):
        with self.assertRaises(NotImplementedError):
            self.browser.get_alert()
    def test_can_work_on_popups(self):
        # FIXME: Check https://github.com/detro/ghostdriver/issues/180 to see if
        # we can implement this test
        pass
class PhantomJSBrowserTestWithCustomHeaders(unittest.TestCase):
    """Verify custom HTTP headers are sent by a phantomjs-backed Browser."""

    @classmethod
    def setUpClass(cls):
        custom_headers = {'X-Splinter-Customheaders-1': 'Hello',
                          'X-Splinter-Customheaders-2': 'Bye'}
        cls.browser = Browser("phantomjs", custom_headers=custom_headers)
    def test_create_a_phantomjs_with_custom_headers(self):
        # The /headers endpoint echoes back the request headers as text.
        self.browser.visit(EXAMPLE_APP + 'headers')
        self.assertTrue(
            self.browser.is_text_present('X-Splinter-Customheaders-1: Hello'))
        self.assertTrue(
            self.browser.is_text_present('X-Splinter-Customheaders-2: Bye'))
    @classmethod
    def tearDownClass(cls):
        cls.browser.quit()
|
lrowe/splinter
|
tests/test_webdriver_phantomjs.py
|
Python
|
bsd-3-clause
| 2,438
|
[
"VisIt"
] |
02c3da19b27b8fb825b056e6405d1a817f20996d0450b84ec24bde42bf75029f
|
from __future__ import print_function, division
from brian import (Network, NeuronGroup, SpikeMonitor,
PoissonGroup, Connection,
mV, ms, Hz)
import sys
import matplotlib.pyplot as plt
import numpy as np
import itertools as itt
# Parameter sweep: input rates (Hz), synaptic weights (mV) and input counts.
fin = [f*Hz for f in range(10, 41, 5)]
win = [w*mV for w in np.arange(0.5, 2.1, 0.5)]
Nin = [n for n in range(100, 181, 20)]
# LIF neuron parameters: membrane time constant, spike threshold, reset value.
tau = 10*ms
Vth = 15*mV
reset = 0*mV
# One simulated neuron per (N, f, w) combination; configs order is fixed by
# itertools.product, so configs[i] corresponds to neuron index i.
configs = [c for c in itt.product(Nin, fin, win)]
Nsims = len(configs)
print("Number of configurations: {}".format(Nsims))
# Leaky integrate-and-fire membrane equation (no synaptic dynamics; inputs
# add instantaneous voltage jumps of size w).
lifeq = "dV/dt = -V/tau : volt"
sim = Network()
nrn = NeuronGroup(Nsims, lifeq, threshold="V>=Vth", reset="V=reset")
inputgroups = []
connections = []
print("Setting up ...")
for idx, c in enumerate(configs):
    n, f, w = c
    # Each neuron gets its own Poisson population of n sources at rate f,
    # all connected with weight w.
    inp = PoissonGroup(n, f)
    conn = Connection(inp, nrn[idx], state="V", weight=w)
    inputgroups.append(inp)
    connections.append(conn)
    print("\r{}/{}".format(idx+1, Nsims), end="")
    sys.stdout.flush()
print()
spikemon = SpikeMonitor(nrn)
sim.add(*inputgroups)
sim.add(*connections)
sim.add(nrn)
sim.add(spikemon)
duration = 1000*ms
print("Running for {} s".format(duration))
sim.run(duration, report="text")
plt.figure()
# Mean steady-state input per configuration: N inputs at rate f, each adding
# w, low-pass filtered by the membrane -> N*f*w*tau.
inputvolts = np.array([c[0]*c[1]*c[2]*tau for c in configs])
# BUG FIX: index the monitor explicitly by neuron id. The original iterated
# spikemon.spiketimes.itervalues(); plain-dict iteration order is arbitrary
# in Python 2, so the rates were not guaranteed to line up with `configs`.
spikerates = np.array([len(spikemon.spiketimes[idx]) for idx in range(Nsims)])
for idx in range(Nsims):
    iv = inputvolts[idx]
    sr = spikerates[idx]
    plt.plot(iv, sr, "b.")
    print("{} mV -> {} Hz".format(iv*1000, sr/duration))
# Theoretical LIF f-I curve (output rate vs mean input) and its inverse
# (mean input inferred from the measured output rate).
ivsorted = np.sort(inputvolts)
theofout = 1.0/(tau*np.log(ivsorted/(ivsorted-Vth)))
theovin = Vth/(1-np.exp(-1.0/(tau*spikerates)))
plt.plot(ivsorted, theofout, "r-")
sidx = np.argsort(theovin)
plt.plot(theovin[sidx], spikerates[sidx], "g-")
# configs entries are (N, f_in, w) tuples -- see itt.product(Nin, fin, win).
Narr = np.array([c[0] for c in configs])
# BUG FIX: the original swapped these two -- it took c[1] (the input rate)
# as the weight and c[2] (the weight) as the rate, corrupting the inferred
# input-frequency comparison below.
farr = np.array([c[1] for c in configs])
Warr = np.array([c[2] for c in configs])
# Input rate inferred from the measured output rate by inverting the LIF
# transfer function, to be compared against the actual input rate farr.
theofin = Vth/((1-np.exp(-1.0/(tau*spikerates)))*Narr*Warr*tau)
plt.figure()
plt.plot(theofin, farr, "b.")
plt.plot([min(theofin), max(theofin)], [min(theofin), max(theofin)], 'k--')
plt.show()
|
achilleas-k/brian-scripts
|
thesis_stuff/test_in_out.py
|
Python
|
apache-2.0
| 2,076
|
[
"Brian"
] |
bccb34702e7ba4b14f642085118cb832d5d6425656e93489726a562743d2d4ba
|
"""The suite of window functions."""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy import special, linalg
from scipy.fftpack import fft
from scipy._lib.six import string_types
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'chebwin',
'slepian', 'cosine', 'hann', 'exponential', 'tukey', 'get_window']
def boxcar(M, sym=True):
    """Return a boxcar or rectangular window.

    Included for completeness, this is equivalent to no window at all.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        Whether the window is symmetric. (Has no effect for boxcar.)

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1.

    Examples
    --------
    Plot the window and its frequency response:

    >>> from scipy import signal
    >>> from scipy.fftpack import fft, fftshift
    >>> import matplotlib.pyplot as plt

    >>> window = signal.boxcar(51)
    >>> plt.plot(window)
    >>> plt.title("Boxcar window")
    >>> plt.ylabel("Amplitude")
    >>> plt.xlabel("Sample")

    >>> plt.figure()
    >>> A = fft(window, 2048) / (len(window)/2.0)
    >>> freq = np.linspace(-0.5, 0.5, len(A))
    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
    >>> plt.plot(freq, response)
    >>> plt.axis([-0.5, 0.5, -120, 0])
    >>> plt.title("Frequency response of the boxcar window")
    >>> plt.ylabel("Normalized magnitude [dB]")
    >>> plt.xlabel("Normalized frequency [cycles per sample]")

    """
    if M < 1:
        # Honor the documented contract for "zero or less" points: return an
        # empty array.  np.ones raises ValueError for a negative size, so
        # without this guard the docstring's promise was broken for M < 0.
        return np.array([])
    return np.ones(M, float)
def triang(M, sym=True):
    """Return a triangular window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.
        When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, float)
    # A periodic (non-symmetric) even-length window is built as a
    # symmetric window one sample longer, then truncated.
    trim = (not sym) and M % 2 == 0
    if trim:
        M += 1
    half = np.arange(1, (M + 1) // 2 + 1)
    if M % 2 == 0:
        # Even length: there is no single peak sample; mirror the ramp.
        ramp = (2 * half - 1.0) / M
        w = np.concatenate((ramp, ramp[::-1]))
    else:
        # Odd length: the peak sample is shared, so drop it when mirroring.
        ramp = 2 * half / (M + 1.0)
        w = np.concatenate((ramp, ramp[-2::-1]))
    return w[:-1] if trim else w
def parzen(M, sym=True):
    """Return a Parzen window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.
        When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, float)
    trim = (not sym) and M % 2 == 0
    if trim:
        M += 1
    # Sample positions centered on zero.
    idx = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0)
    outer = np.extract(idx < -(M - 1) / 4.0, idx)
    inner = np.extract(abs(idx) <= (M - 1) / 4.0, idx)
    # Piecewise cubic B-spline: outer quarters and central half,
    # assembled by mirroring the outer segment.
    w_outer = 2 * (1 - np.abs(outer) / (M / 2.0)) ** 3.0
    w_inner = (1 - 6 * (np.abs(inner) / (M / 2.0)) ** 2.0 +
               6 * (np.abs(inner) / (M / 2.0)) ** 3.0)
    w = np.concatenate((w_outer, w_inner, w_outer[::-1]))
    return w[:-1] if trim else w
def bohman(M, sym=True):
    """Return a Bohman window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.
        When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, float)
    trim = (not sym) and M % 2 == 0
    if trim:
        M += 1
    # Interior samples mapped onto |x| in (0, 1); the endpoints are
    # exactly zero by definition and appended afterwards.
    frac = np.abs(np.linspace(-1, 1, M)[1:-1])
    core = (1 - frac) * np.cos(np.pi * frac) + 1.0 / np.pi * np.sin(np.pi * frac)
    w = np.concatenate(([0], core, [0]))
    return w[:-1] if trim else w
def blackman(M, sym=True):
    r"""Return a Blackman window.

    A taper formed from the first three terms of a cosine sum,
    designed for near-minimal spectral leakage; only slightly worse
    than a Kaiser window.

    .. math::  w(n) = 0.42 - 0.5 \cos(2\pi n/M) + 0.08 \cos(4\pi n/M)

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.
        When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of
           power spectra, Dover Publications, New York.
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, float)
    trim = (not sym) and M % 2 == 0
    if trim:
        M += 1
    k = np.arange(0, M)
    w = (0.42 - 0.5 * np.cos(2.0 * np.pi * k / (M - 1)) +
         0.08 * np.cos(4.0 * np.pi * k / (M - 1)))
    return w[:-1] if trim else w
def nuttall(M, sym=True):
    """Return a minimum 4-term Blackman-Harris window according to Nuttall.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.
        When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, float)
    trim = (not sym) and M % 2 == 0
    if trim:
        M += 1
    # Nuttall's minimum-sidelobe 4-term cosine coefficients.
    coeffs = [0.3635819, 0.4891775, 0.1365995, 0.0106411]
    theta = np.arange(0, M) * 2 * np.pi / (M - 1.0)
    # Alternating-sign cosine series: c0 - c1*cos + c2*cos(2.) - c3*cos(3.)
    w = np.zeros(M)
    for k, c in enumerate(coeffs):
        w += (-1) ** k * c * np.cos(k * theta)
    return w[:-1] if trim else w
def blackmanharris(M, sym=True):
    """Return a minimum 4-term Blackman-Harris window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.
        When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, float)
    trim = (not sym) and M % 2 == 0
    if trim:
        M += 1
    # Harris' minimum 4-term cosine coefficients.
    coeffs = [0.35875, 0.48829, 0.14128, 0.01168]
    theta = np.arange(0, M) * 2 * np.pi / (M - 1.0)
    # Alternating-sign cosine series: c0 - c1*cos + c2*cos(2.) - c3*cos(3.)
    w = np.zeros(M)
    for k, c in enumerate(coeffs):
        w += (-1) ** k * c * np.cos(k * theta)
    return w[:-1] if trim else w
def flattop(M, sym=True):
    """Return a flat top window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.
        When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, float)
    trim = (not sym) and M % 2 == 0
    if trim:
        M += 1
    # 5-term flat-top cosine coefficients (unnormalized; the peak is
    # their alternating sum, 1.0002, not exactly 1).
    coeffs = [0.2156, 0.4160, 0.2781, 0.0836, 0.0069]
    theta = np.arange(0, M) * 2 * np.pi / (M - 1.0)
    # Alternating-sign cosine series.
    w = np.zeros(M)
    for k, c in enumerate(coeffs):
        w += (-1) ** k * c * np.cos(k * theta)
    return w[:-1] if trim else w
def bartlett(M, sym=True):
    r"""Return a Bartlett window.

    Very similar to a triangular window, except that the end points are
    at zero. Often used to taper a signal without generating too much
    ripple in the frequency domain.

    .. math:: w(n) = \frac{2}{M-1} \left(
              \frac{M-1}{2} - \left|n - \frac{M-1}{2}\right|
              \right)

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.
        When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The triangular window, with the first and last samples equal to
        zero and the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
           Biometrika 37, 1-16, 1950.
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, float)
    trim = (not sym) and M % 2 == 0
    if trim:
        M += 1
    pos = np.arange(0, M)
    # Rising ramp up to the midpoint, falling ramp after it.
    w = np.where(np.less_equal(pos, (M - 1) / 2.0),
                 2.0 * pos / (M - 1), 2.0 - 2.0 * pos / (M - 1))
    return w[:-1] if trim else w
def hann(M, sym=True):
    r"""Return a Hann window.

    A raised-cosine (sine-squared) taper whose ends touch zero.

    .. math::  w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right)
               \qquad 0 \leq n \leq M-1

    Named for the Austrian meteorologist Julius von Hann; also known as
    the Cosine Bell and, erroneously, as the "Hanning" window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.
        When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of
           power spectra, Dover Publications, New York.
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, float)
    trim = (not sym) and M % 2 == 0
    if trim:
        M += 1
    k = np.arange(0, M)
    w = 0.5 - 0.5 * np.cos(2.0 * np.pi * k / (M - 1))
    return w[:-1] if trim else w


# Backwards-compatible alias for the common (mis)spelling.
hanning = hann
def tukey(M, alpha=0.5, sym=True):
    r"""Return a Tukey window, also known as a tapered cosine window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    alpha : float, optional
        Shape parameter of the Tukey window, representing the fraction
        of the window inside the cosine tapered region.
        If zero, the Tukey window is equivalent to a rectangular window.
        If one, the Tukey window is equivalent to a Hann window.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.
        When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] Harris, Fredric J. (Jan 1978). "On the use of Windows for
           Harmonic Analysis with the Discrete Fourier Transform".
           Proceedings of the IEEE 66 (1): 51-83.
           doi:10.1109/PROC.1978.10837
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, float)
    # Degenerate shapes: no taper at all, or fully tapered (a Hann window).
    if alpha <= 0:
        return np.ones(M, 'd')
    elif alpha >= 1.0:
        return hann(M, sym=sym)
    trim = (not sym) and M % 2 == 0
    if trim:
        M += 1
    idx = np.arange(0, M)
    # Number of samples in each cosine-tapered edge.
    width = int(np.floor(alpha * (M - 1) / 2.0))
    rise = idx[0:width + 1]
    flat = idx[width + 1:M - width - 1]
    fall = idx[M - width - 1:]
    w_rise = 0.5 * (1 + np.cos(np.pi * (-1 + 2.0 * rise / alpha / (M - 1))))
    w_flat = np.ones(flat.shape)
    w_fall = 0.5 * (1 + np.cos(np.pi * (-2.0 / alpha + 1 +
                                        2.0 * fall / alpha / (M - 1))))
    w = np.concatenate((w_rise, w_flat, w_fall))
    return w[:-1] if trim else w
def barthann(M, sym=True):
    """Return a modified Bartlett-Hann window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.
        When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, float)
    trim = (not sym) and M % 2 == 0
    if trim:
        M += 1
    # Distance of each sample from the window center, in [0, 0.5].
    dist = np.abs(np.arange(0, M) / (M - 1.0) - 0.5)
    # Linear (Bartlett) term plus raised-cosine (Hann) term.
    w = 0.62 - 0.48 * dist + 0.38 * np.cos(2 * np.pi * dist)
    return w[:-1] if trim else w
def hamming(M, sym=True):
    r"""Return a Hamming window.

    A raised-cosine taper with non-zero endpoints, optimized to
    minimize the nearest side lobe.

    .. math::  w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right)
               \qquad 0 \leq n \leq M-1

    Named for R. W. Hamming, an associate of J. W. Tukey; described in
    Blackman and Tukey.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.
        When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of
           power spectra, Dover Publications, New York.
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, float)
    trim = (not sym) and M % 2 == 0
    if trim:
        M += 1
    k = np.arange(0, M)
    w = 0.54 - 0.46 * np.cos(2.0 * np.pi * k / (M - 1))
    return w[:-1] if trim else w
def kaiser(M, beta, sym=True):
    r"""Return a Kaiser window.

    A taper formed from a modified zeroth-order Bessel function:

    .. math::  w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
               \right)/I_0(\beta)
               \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2}

    It is a very good approximation to the Digital Prolate Spheroidal
    Sequence (Slepian) window, which maximizes the energy in the main
    lobe relative to total energy. Varying `beta` approximates many
    other windows (0: rectangular, 5: ~Hamming, 6: ~Hann,
    8.6: ~Blackman); a value of 14 is a good starting point. Note that
    for large `beta` the window narrows, so `M` must be large enough
    to sample the spike, otherwise NaNs are returned.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    beta : float
        Shape parameter, determines trade-off between main-lobe width
        and side lobe level. As beta gets large, the window narrows.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.
        When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis
           by digital computer", Editors: F.F. Kuo and J.F. Kaiser,
           p 218-285. John Wiley and Sons, New York, (1966).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, float)
    trim = (not sym) and M % 2 == 0
    if trim:
        M += 1
    idx = np.arange(0, M)
    center = (M - 1) / 2.0
    # Bessel-function taper normalized so the peak equals 1.
    w = (special.i0(beta * np.sqrt(1 - ((idx - center) / center) ** 2.0)) /
         special.i0(beta))
    return w[:-1] if trim else w
def gaussian(M, std, sym=True):
    r"""Return a Gaussian window.

    .. math::  w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    std : float
        The standard deviation, sigma.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.
        When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, float)
    trim = (not sym) and M % 2 == 0
    if trim:
        M += 1
    # Sample offsets from the window center.
    offsets = np.arange(0, M) - (M - 1.0) / 2.0
    denom = 2 * std * std
    w = np.exp(-offsets ** 2 / denom)
    return w[:-1] if trim else w
def general_gaussian(M, p, sig, sym=True):
    r"""Return a window with a generalized Gaussian shape.

    .. math::  w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} }

    The half-power point is at

    .. math::  (2 \log(2))^{1/(2 p)} \sigma

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    p : float
        Shape parameter. p = 1 is identical to `gaussian`, p = 0.5 is
        the same shape as the Laplace distribution.
    sig : float
        The standard deviation, sigma.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.
        When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, float)
    trim = (not sym) and M % 2 == 0
    if trim:
        M += 1
    # Sample offsets from the window center.
    offsets = np.arange(0, M) - (M - 1.0) / 2.0
    w = np.exp(-0.5 * np.abs(offsets / sig) ** (2 * p))
    return w[:-1] if trim else w
# `chebwin` contributed by Kumar Appaiah.
def chebwin(M, at, sym=True):
    r"""Return a Dolph-Chebyshev window.

    This window has the narrowest possible main lobe for a given length
    `M` and sidelobe equiripple attenuation `at`, and is defined through
    its frequency response via Chebyshev polynomials.  The time-domain
    samples are recovered with an inverse DFT, so power-of-two `M` are
    the fastest to generate.

    Parameters
    ----------
    M : int
        Number of points in the output window.  If zero or less, an empty
        array is returned.
    at : float
        Attenuation (in dB).
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value always normalized to 1.

    References
    ----------
    .. [1] C. Dolph, "A current distribution for broadside arrays which
           optimizes the relationship between beam width and side-lobe
           level", Proceedings of the IEEE, Vol. 34, Issue 6
    .. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal
           Filter", American Meteorological Society (April 1997)
    """
    # Below ~45 dB the noise bandwidth is not monotonic in the attenuation,
    # which makes the window a poor choice for spectral analysis.
    if np.abs(at) < 45:
        warnings.warn("This window is not suitable for spectral analysis "
                      "for attenuation values lower than about 45dB because "
                      "the equivalent noise bandwidth of a Chebyshev window "
                      "does not grow monotonically with increasing sidelobe "
                      "attenuation when the attenuation is smaller than "
                      "about 45 dB.")
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    needs_trim = not sym and M % 2 == 0
    if needs_trim:
        M += 1

    # beta parameter from the requested attenuation.
    order = M - 1.0
    beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
    x = beta * np.cos(np.pi * np.arange(M) / M)

    # Evaluate the Chebyshev polynomial T_order(x) piecewise from its
    # analytic definition; the scipy.special expansion is numerically
    # unreliable here.
    p = np.zeros(x.shape)
    inside = np.abs(x) <= 1
    p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
    p[x < -1] = (1 - 2 * (order % 2)) * np.cosh(order * np.arccosh(-x[x < -1]))
    p[inside] = np.cos(order * np.arccos(x[inside]))

    # Inverse transform and symmetric fill-in, handled separately for
    # odd and even lengths (the even case needs a half-sample phase shift).
    if M % 2:
        w = np.real(fft(p))
        half = (M + 1) // 2
        w = w[:half]
        w = np.concatenate((w[half - 1:0:-1], w))
    else:
        p = p * np.exp(1.j * np.pi / M * np.arange(M))
        w = np.real(fft(p))
        half = M // 2 + 1
        w = np.concatenate((w[half - 1:0:-1], w[1:half]))
    w = w / max(w)
    return w[:-1] if needs_trim else w
def slepian(M, width, sym=True):
    """Return a digital Slepian (DPSS) window.

    The window maximizes the energy concentration in the main lobe and is
    also known as the digital prolate spheroidal sequence (DPSS).  It is
    computed as the dominant eigenvector of a symmetric tridiagonal
    matrix parameterized by the bandwidth.

    Parameters
    ----------
    M : int
        Number of points in the output window.  If zero or less, an empty
        array is returned.
    width : float
        Bandwidth
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value always normalized to 1.
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    needs_trim = not sym and M % 2 == 0
    if needs_trim:
        M += 1

    # `width` is the full bandwidth; halved once by definition and once
    # more to match the behavior of the old implementation.
    bw = width / 2.0 / 2.0

    m = np.arange(M, dtype='d')
    # Banded (tridiagonal) symmetric matrix whose top eigenvector is the
    # DPSS window: off-diagonal on row 0, diagonal on row 1.
    H = np.zeros((2, M))
    H[0, 1:] = m[1:] * (M - m[1:]) / 2
    H[1, :] = ((M - 1 - 2 * m) / 2) ** 2 * np.cos(2 * np.pi * bw)

    # Only the largest eigenpair (index M-1) is needed.
    _, vec = linalg.eig_banded(H, select='i', select_range=(M - 1, M - 1))
    win = vec.ravel() / vec.max()
    return win[:-1] if needs_trim else win
def cosine(M, sym=True):
    """Return a window with a simple cosine shape.

    The samples are one half-period of a sine, peaking at the window
    center.

    Parameters
    ----------
    M : int
        Number of points in the output window.  If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    Notes
    -----
    .. versionadded:: 0.13.0
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    needs_trim = not sym and M % 2 == 0
    if needs_trim:
        M += 1
    w = np.sin(np.pi / M * (np.arange(0, M) + .5))
    return w[:-1] if needs_trim else w
def exponential(M, center=None, tau=1., sym=True):
    r"""Return an exponential (or Poisson) window.

    The samples decay as ``w(n) = exp(-|n - center| / tau)``.

    Parameters
    ----------
    M : int
        Number of points in the output window.  If zero or less, an empty
        array is returned.
    center : float, optional
        Location of the window peak; defaults to ``(M-1) / 2``.  Must be
        left at its default for symmetric windows.
    tau : float, optional
        Decay constant.  For ``center = 0`` use ``tau = -(M-1) / ln(x)``
        if ``x`` is the fraction of the window remaining at the end.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    S. Gade and H. Herlufsen, "Windows to FFT analysis (Part I)",
    Technical Review 3, Bruel & Kjaer, 1987.
    """
    # An off-center peak cannot produce a symmetric window.
    if sym and center is not None:
        raise ValueError("If sym==True, center must be None.")
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    needs_trim = not sym and M % 2 == 0
    if needs_trim:
        M += 1
    if center is None:
        center = (M - 1) / 2
    samples = np.arange(0, M)
    w = np.exp(-np.abs(samples - center) / tau)
    return w[:-1] if needs_trim else w
def get_window(window, Nx, fftbins=True):
    """
    Return a window of length `Nx` and type `window`.

    Parameters
    ----------
    window : string, float, or tuple
        The type of window to create.  Parameterless windows can be given
        as a plain string; windows that need parameters are given as a
        tuple ``(name, param1, ...)``.  A bare float is interpreted as the
        beta parameter of a Kaiser window.
    Nx : int
        The number of samples in the window.
    fftbins : bool, optional
        If True, create a "periodic" window ready to use with ifftshift
        and be multiplied by the result of an fft (SEE ALSO fftfreq).

    Returns
    -------
    get_window : ndarray
        Returns a window of length `Nx` and type `window`

    Notes
    -----
    Supported types: boxcar, triang, blackman, hamming, hann, bartlett,
    flattop, parzen, bohman, blackmanharris, nuttall, barthann,
    kaiser (needs beta), gaussian (needs std),
    general_gaussian (needs power, width), slepian (needs width),
    chebwin (needs attenuation), exponential (needs decay scale),
    tukey (needs taper fraction).
    """
    sym = not fftbins

    # Every accepted alias, mapped to its window function.
    alias_groups = [
        (blackman, ('blackman', 'black', 'blk')),
        (triang, ('triangle', 'triang', 'tri')),
        (hamming, ('hamming', 'hamm', 'ham')),
        (bartlett, ('bartlett', 'bart', 'brt')),
        (hann, ('hanning', 'hann', 'han')),
        (blackmanharris, ('blackmanharris', 'blackharr', 'bkh')),
        (parzen, ('parzen', 'parz', 'par')),
        (bohman, ('bohman', 'bman', 'bmn')),
        (nuttall, ('nuttall', 'nutl', 'nut')),
        (barthann, ('barthann', 'brthan', 'bth')),
        (flattop, ('flattop', 'flat', 'flt')),
        (kaiser, ('kaiser', 'ksr')),
        (gaussian, ('gaussian', 'gauss', 'gss')),
        (general_gaussian, ('general gaussian', 'general_gaussian',
                            'general gauss', 'general_gauss', 'ggs')),
        (boxcar, ('boxcar', 'box', 'ones', 'rect', 'rectangular')),
        (slepian, ('slepian', 'slep', 'optimal', 'dpss', 'dss')),
        (cosine, ('cosine', 'halfcosine')),
        (chebwin, ('chebwin', 'cheb')),
        (exponential, ('exponential', 'poisson')),
        (tukey, ('tukey', 'tuk')),
    ]
    winfuncs = {}
    for func, aliases in alias_groups:
        for alias in aliases:
            winfuncs[alias] = func

    # Aliases that require extra parameters and therefore must be passed
    # as a tuple rather than a bare string.
    needs_params = set(['kaiser', 'ksr', 'gaussian', 'gauss', 'gss',
                        'general gaussian', 'general_gaussian',
                        'general gauss', 'general_gauss', 'ggs',
                        'slepian', 'optimal', 'slep', 'dss', 'dpss',
                        'chebwin', 'cheb', 'exponential', 'poisson',
                        'tukey', 'tuk'])

    try:
        beta = float(window)
    except (TypeError, ValueError):
        args = ()
        if isinstance(window, tuple):
            winstr = window[0]
            if len(window) > 1:
                args = window[1:]
        elif isinstance(window, string_types):
            if window in needs_params:
                raise ValueError("The '" + window + "' window needs one or "
                                 "more parameters -- pass a tuple.")
            winstr = window
        else:
            raise ValueError("%s as window type is not supported." %
                             str(type(window)))
        if winstr not in winfuncs:
            raise ValueError("Unknown window type.")
        winfunc = winfuncs[winstr]
        params = (Nx,) + args + (sym,)
    else:
        # A numeric `window` selects a Kaiser window with that beta.
        winfunc = kaiser
        params = (Nx, beta, sym)
    return winfunc(*params)
|
Dapid/scipy
|
scipy/signal/windows.py
|
Python
|
bsd-3-clause
| 54,555
|
[
"Gaussian"
] |
f3b3a94f17e77105769de67c906804ded45642f4ce360dd1e24fe308405b6854
|
import urllib,xbmcplugin,xbmcgui,xbmcaddon,xbmc,os
# Handle to this add-on's settings and metadata (Kodi add-on API).
ADDON = xbmcaddon.Addon(id='plugin.video.kodi_maintenance')
# Per-add-on user-data (profile) directory.
DIR=os.path.join(ADDON.getAddonInfo('profile'))
# HTML template shipped with the add-on; used as the body of the outgoing email.
THEHTML = xbmc.translatePath(os.path.join(ADDON.getAddonInfo('path'),'theemail.html'))
def CATEGORIES():
    # Root menu of the add-on.  If no email account is configured yet,
    # prompt the user and open the settings dialog first.
    if ADDON.getSetting('email')=='':
        Show_Dialog('','You Need To Enter Your Email Details','')
        ADDON.openSettings()
    # addDir is defined elsewhere in this file; presumably it adds a menu
    # entry with (label, url, mode, icon, fanart) -- TODO confirm.
    # 'ME' makes send_email() mail the user's own address; the empty string
    # routes through the recipient picker (getOther).
    addDir('Email Me My Log','ME',2,'','')
    addDir('Email Someone Else My Log','',2,'','')
def search_entered():
    # Ask the user to type a new email address and persist it in the
    # comma-separated 'favs' setting so getOther() can offer it next time.
    favs = ADDON.getSetting('favs').split(',')
    keyboard = xbmc.Keyboard('', 'Email')
    keyboard.doModal()
    if keyboard.isConfirmed():
        # NOTE(review): this local deliberately shadows the function name.
        search_entered = keyboard.getText()
        if not search_entered in favs:
            favs.append(search_entered)
            ADDON.setSetting('favs', ','.join(favs))
        return search_entered
    # NOTE(review): implicitly returns None when the keyboard is cancelled;
    # callers do not appear to guard against that -- verify.
def getOther():
    # Let the user pick a recipient from a select dialog:
    # Cancel / New address / previously used addresses ('favs' setting).
    NAME=['[COLOR red]Cancel[/COLOR]','[COLOR green]New Email Address[/COLOR]']
    # No stored favorites yet: go straight to manual entry.
    if ADDON.getSetting('favs') =='':
        return search_entered()
    favs = ADDON.getSetting('favs').split(',')
    for title in favs:
        # Skip empty/1-char fragments left over from the comma-joined setting.
        if len(title)>1:
            NAME.append(title)
    EMAIL=NAME[xbmcgui.Dialog().select('Please Select Email', NAME)]
    if EMAIL =='[COLOR green]New Email Address[/COLOR]':
        return search_entered()
    else:
        # Either a stored address or the literal Cancel label, which
        # send_email() checks for explicitly.
        return EMAIL
def getMessage():
    # Help text shown when Gmail rejects the SMTP login because "less
    # secure apps" access is disabled on the Google account.
    return '''Seems you are using gmail and havent enabled insecure apps on your google account\n\nSimply Log into your acount online once logged in visit:\n\n[COLOR royalblue]https://www.google.com/settings/security/lesssecureapps[/COLOR]\n\nAnd "Turn On" Access for less secure apps\n\n\nThen This Emailer Will Work :)\n\nThanks\nTeam [COLOR royalblue]X[/COLOR]unity[COLOR royalblue]T[/COLOR]alk'''
def send_email(TOWHO,LOG):
    # Email the log file at path LOG.  TOWHO == 'ME' mails the configured
    # account itself; anything else routes through the recipient picker.
    # Shows a progress dialog throughout.
    dp = xbmcgui.DialogProgress()
    dp.create(".Kodi Log Emailer",'Logging Into Your Email')
    dp.update(0)
    THESMTP ,THEPORT = Servers()
    fromaddr=ADDON.getSetting('email')
    if TOWHO =='ME':
        toaddr=fromaddr
    else:
        toaddr=getOther()
    # getOther() returns this literal label when the user picked Cancel.
    if toaddr =='[COLOR red]Cancel[/COLOR]':
        Show_Dialog('No Email Sent','','Email Cancelled')
    else:
        import datetime
        TODAY=datetime.datetime.today().strftime('[%d-%m-%Y %H:%M]')
        # Python 2 email API module paths (this runs under Kodi's Python 2).
        from email.MIMEMultipart import MIMEMultipart
        from email.MIMEText import MIMEText
        fromaddr = '"Hi Message From Yourself" <%s>'% (fromaddr)
        msg = MIMEMultipart()
        msg['From'] = fromaddr
        msg['To'] = toaddr
        msg['Subject'] = "Your Kodi Log "+str(TODAY)
        # HTML body comes from the template bundled with the add-on.
        # NOTE(review): file handle from open() is never closed.
        body = open(THEHTML).read()
        content = MIMEText(body, 'html')
        msg.attach(content)
        # Derive a display filename: Windows separator first, POSIX fallback.
        try:filename = LOG.rsplit('\\', 1)[1]
        except:filename = LOG.rsplit('/', 1)[1]
        # NOTE(review): file() is Python-2-only and the handle is not closed.
        f = file(LOG)
        attachment = MIMEText(f.read())
        # Rename .log -> .txt so mail providers don't block the attachment.
        attachment.add_header('Content-Disposition', 'attachment', filename=filename.replace('log','txt'))
        msg.attach(attachment)
        import smtplib
        server = smtplib.SMTP(str(THESMTP), int(THEPORT))
        dp.update(50, 'Attaching Your Email',filename.replace('log','txt'))
        # STARTTLS handshake: EHLO, upgrade to TLS, EHLO again.
        server.ehlo()
        server.starttls()
        server.ehlo()
        try:server.login(ADDON.getSetting('email').encode('UTF-8'),ADDON.getSetting('password').encode('UTF-8'))
        except Exception as e:
            # Gmail's "less secure apps" rejection carries /answer/787 in the
            # error; replace it with the friendlier explanation.
            if 'gmail' in THESMTP:
                if '/answer/787' in str(e):
                    e=getMessage()
            # NOTE(review): progress dialog is left open on this error path.
            return showText('[COLOR red]ERROR !![/COLOR]',str(e).replace('\\n','[CR]'))
        text = msg.as_string()
        dp.update(75, 'Sending........',filename.replace('log','txt'))
        server.sendmail(fromaddr, toaddr, text)
        dp.close()
        Show_Dialog('Email Sent To','[COLOR green]'+toaddr+'[/COLOR]','Also Check Junk Folder')
def Servers():
    # Resolve the provider name chosen in the add-on settings to its
    # (SMTP host, port) pair.  The 'Custom' entry reads host and port
    # straight from the user's settings.
    chosen = ADDON.getSetting('server')
    server_list =[('Gmail','smtp.gmail.com','587'),
        ('Outlook/Hotmail','smtp-mail.outlook.com','587'),
        ('Office365','smtp.office365.com','587'),
        ('Yahoo Mail','smtp.mail.yahoo.com','465'),
        ('Yahoo Mail Plus','smtp.mail.yahoo.co.uk','465'),
        ('Yahoo Mail Deutschland','smtp.mail.yahoo.com','465'),
        ('Yahoo Mail AU/NZ','smtp.mail.yahoo.au','465'),
        ('AOL','smtp.att.yahoo.com','465'),
        ('NTL @ntlworld','smtp.ntlworld.com','465'),
        ('BT Connect','smtp.btconnect.com','25'),
        ('O2 Deutschland','smtp.1and1.com','587'),
        ('1&1 Deutschland','smtp.1und1.de','587'),
        ('Verizon','smtp.zoho.com','465'),
        ('Mail','smtp.mail.com','587'),
        ('GMX','smtp.gmx.com','465'),
        ('Custom',ADDON.getSetting('custom_server'),ADDON.getSetting('custom_port'))]
    # Same behaviour as the original append loop: an unknown provider name
    # leaves no match and raises IndexError here.
    matches = [(smtp, port) for name, smtp, port in server_list if name == chosen]
    return matches[0]
def EmailLog(TOWHO):
    # List every *.log file in Kodi's log directory, let the user pick
    # one, then hand it off to send_email().
    nameSelect=[]
    logSelect=[]
    import glob
    folder = xbmc.translatePath('special://logpath')
    xbmc.log(folder)
    # NOTE(review): the loop variable 'file' shadows the Python 2 builtin.
    for file in glob.glob(folder+'/*.log'):
        # Display name = basename, uppercased (Windows separator first,
        # POSIX separator as fallback).
        try:nameSelect.append(file.rsplit('\\', 1)[1].upper())
        except:nameSelect.append(file.rsplit('/', 1)[1].upper())
        logSelect.append(file)
    # NOTE(review): Dialog.select returns -1 on cancel, which would pick
    # the last log here -- verify intended.
    LOG = logSelect[xbmcgui.Dialog().select('Please Select Log', nameSelect)]
    send_email(TOWHO,LOG)
def showText(heading, text):
    # Open Kodi's text-viewer window and fill in its heading and body.
    # The window's controls are not available immediately after
    # ActivateWindow, so poll until they respond or retries run out.
    window_id = 10147  # Kodi TextViewer dialog (renamed from 'id', which shadowed the builtin)
    xbmc.executebuiltin('ActivateWindow(%d)' % window_id)
    xbmc.sleep(100)
    win = xbmcgui.Window(window_id)
    retry = 50
    while retry > 0:
        try:
            xbmc.sleep(10)
            retry -= 1
            win.getControl(1).setLabel(heading)
            win.getControl(5).setText(text)
            return
        except Exception:
            # Controls not created yet -- retry.  Narrowed from a bare
            # except so KeyboardInterrupt/SystemExit are not swallowed.
            pass
    # Falls through silently if the window never became ready.
def Show_Dialog(line1,line2,line3):
    # Modal OK dialog with the add-on's title and up to three body lines.
    xbmcgui.Dialog().ok('.Kodi Log Emailer', line1, line2, line3)
|
repotvsupertuga/repo
|
instal/plugin.program.tvsupertugamanutencao/resources/mail/mail_file.py
|
Python
|
gpl-2.0
| 6,183
|
[
"VisIt"
] |
fe96adcb35898f38ad626d9a203e522a23facafe7bc1cb681ce8b3d9ca138ca7
|
# Photovoltaics surface
#
# Ladybug: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Ladybug.
#
# Copyright (c) 2013-2015, Djordje Spasic and Jason Sensibaugh <djordjedspasic@gmail.com and sensij@yahoo.com>
# Ladybug is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Ladybug is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ladybug; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Use this component to calculate amount of electrical energy that can be produced by a surface
if a certain percentage of it is covered with Photovoltaics.
Component based on NREL PVWatts v1 fixed tilt calculator for crystalline silicon (c-Si) photovoltaics.
Sources:
http://www.nrel.gov/docs/fy14osti/60272.pdf
https://pvpmc.sandia.gov
-
Provided by Ladybug 0.0.60
input:
_PVsurface: - Input planar Surface (not a polysurface) on which the PV modules will be applied. If you have a polysurface, explode it (using "Deconstruct Brep" component) and then feed its Faces(F) output to _PVsurface. Surface normal should be faced towards the sun.
- Or input surface Area, in square meters (example: "100").
- Or input PV system size (nameplate DC power rating), in kiloWatts at standard test conditions (example: "4 kw").
_epwFile: Input .epw file path by using grasshopper's "File Path" component.
PVsurfacePercent_: The percentage of surface which will be used for PV modules (range 0-100).
-
If not supplied, default value of 100 (all surface area will be covered in PV modules) is used.
PVsurfaceTiltAngle_: The angle from horizontal of the inclination of the PVsurface. Example: 0 = horizontal, 90 = vertical. (range 0-180)
-
If not supplied, but surface inputted into "_PVsurface", PVsurfaceTiltAngle will be calculated from an angle PVsurface closes with XY plane.
If not supplied, but surface NOT inputted into "_PVsurface" (instead, a surface area or system size inputted), location's latitude will be used as default value.
PVsurfaceAzimuthAngle_: The orientation angle (clockwise from the true north) of the PVsurface normal vector. (range 0-360)
-
If not supplied, but surface inputted into "_PVsurface", PVsurfaceAzimuthAngle will be calculated from an angle PVsurface closes with its north.
If not supplied, but surface NOT inputted into "_PVsurface" (instead, a surface area or system size inputted), default value of 180 (south-facing) for locations in the northern hemisphere or 0 (north-facing) for locations in the southern hemisphere, will be used.
DCtoACderateFactor_: Factor which accounts for various locations and instances in a PV system where power is lost from DC system nameplate to AC power. It ranges from 0 to 1.
It can be calculated with Ladybug's "DC to AC derate factor" component.
-
If not supplied, default value of 0.85 will be used.
moduleActiveAreaPercent_: Percentage of the module's area excluding module framing and gaps between cells.
-
If not supplied, default value of 90(%) will be used.
moduleType_: Module type and mounting configuration:
-
0 = Glass/cell/glass, Close (flush) roof mount (pv array mounted parallel and relatively close to the plane of the roof (between two and six inches))
1 = Glass/cell/polymer sheet, Insulated back (pv curtain wall, pv skylights)
2 = Glass/cell/polymer sheet, Open rack (ground mount array, flat/sloped roof array that is tilted, pole-mount solar panels, solar carports, solar canopies)
3 = Glass/cell/glass, Open rack (the same as upper "2" type, just with a glass on the back part of the module).
-
If not supplied, default type: "Glass/cell/glass, Close (flush) roof mount" (0) is used.
moduleEfficiency_: The ratio of energy output from the PV module to input energy from the sun. It ranges from 0 to 100 (%).
Current typical module efficiencies for crystalline silicon modules range from 14-20%
-
If not defined, default value of 15(%) will be used.
north_: Input a vector to be used as a true North direction, or a number between 0 and 360 that represents the clockwise degrees off from the Y-axis.
-
If not supplied, default North direction will be set to the Y-axis (0 degrees).
albedo_: Average reflection coefficient of the area surrounding the PV surface. It ranges from 0 for very dark to 1 for bright white or metallic surface. Here are some specific values:
-
Dry asphalt 0.12
Wet Asphalt 0.18
Bare soil 0.17
Grass 0.20
Concrete 0.30
Granite 0.32
Dry sand 0.35
Copper 0.74
Wet snow 0.65
Fresh snow 0.82
Aluminum 0.85
-
If not supplied default value of 0.20 (Grass) will be used.
annualHourlyData_: An optional list of hourly data from Ladybug's "Import epw" component (e.g. dryBulbTemperature), which will be used for "conditionalStatement_".
conditionalStatement_: This input allows users to calculate the Photovoltaics surface component results only for those annualHourlyData_ values which fit specific conditions or criteria. To use this input correctly, hourly data, such as dryBulbTemperature or windSpeed, must be plugged into the "annualHourlyData_" input. The conditional statement input here should be a valid condition statement in Python, such as "a>25" or "b<3" (without the quotation marks).
conditionalStatement_ accepts "and" and "or" operators. To visualize the hourly data, English letters should be used as variables, and each letter alphabetically corresponds to each of the lists (in their respective order): "a" always represents the 1st list, "b" always represents the 2nd list, etc.
For example, if you have an hourly dryBulbTemperature connected as the first list, and windSpeed connected as the second list (both to the annualHourlyData_ input), and you want to plot the data for the time period when temperature is between 18C and 23C, and windSpeed is larger than 3m/s, the conditionalStatement_ should be written as "18<a<23 and b>3" (without the quotation marks).
_runIt: ...
output:
readMe!: ...
ACenergyPerHour: AC power output for each hour during a year, in kWh
ACenergyPerMonth: Total AC power output for each month, in kWh
ACenergyPerYear: Total AC power output for a whole year, in kWh
averageDailyACenergyPerMonth: An average AC power output per day in each month, in kWh/day
averageDailyACenergyPerYear: An average AC power output per day in a whole year, in kWh/day
totalRadiationPerHour: Total Incident POA (Plane of array) irradiance for each hour during a year, in kWh/m2
moduleTemperaturePerHour: Module temperature for each hour during year, in C
cellTemperaturePerHour: Cell temperature for each hour during year, in C
nameplateDCpowerRating: DC rating or system size of the PV system. In kW
PVcoverArea: An area of the inputted _PVsurface which will be covered with Photovoltaics. In m2
PVcoverActiveArea: coverArea with excluded module framing and gaps between cells. In m2
"""
# Grasshopper component metadata (name, icon label, version shown on canvas).
ghenv.Component.Name = "Ladybug_Photovoltaics Surface"
ghenv.Component.NickName = "PhotovoltaicsSurface"
ghenv.Component.Message = 'VER 0.0.60\nJUL_06_2015'
ghenv.Component.Category = "Ladybug"
ghenv.Component.SubCategory = "7 | WIP"
#compatibleLBVersion = VER 0.0.59\nMAY_26_2015
# Older Grasshopper builds may not expose this property; ignore if missing.
try: ghenv.Component.AdditionalHelpFromDocStrings = "1"
except: pass

import Grasshopper.Kernel as gh
import rhinoscriptsyntax as rs
import scriptcontext as sc
import Rhino
import math
import re
def getEpwData(epwFile, albedo):
    # Read location data and the hourly weather series needed by the PV
    # calculation from an .epw file (parsed via the shared lb_preparation
    # sticky object).
    #
    # Returns a 17-tuple:
    #   (locationName, latitude, longitude, timeZone, elevation,
    #    Ta, ws, DNI, DHI, yearsHOY, monthsHOY, daysHOY, hoursHOY, HOYs,
    #    albedo, validEpwData, printMsg)
    # On failure every data slot is None, validEpwData is False and
    # printMsg explains the problem.
    if epwFile:
        try:
            # location data
            locationName, latitude, longitude, timeZone, elevation, locationString = lb_preparation.epwLocation(epwFile)
            # weather data
            weatherData = lb_preparation.epwDataReader(epwFile, locationName)
            dryBulbTemperature, dewPointTemperature, relativeHumidity, windSpeed, windDirection, directNormalRadiation, diffuseHorizontalRadiation, globalHorizontalRadiation, directNormalIlluminance, diffuseHorizontalIlluminance, globalHorizontalIlluminance, totalSkyCover, liquidPrecipitationDepth, barometricPressure, modelYear = weatherData
            if (albedo == None) or (albedo < 0) or (albedo > 1):
                albedo = 0.2  # default ground reflectance (grass)
            # Ladybug hourly lists carry a 7-item header; strip it to get
            # the 8760 hourly values.
            Ta = dryBulbTemperature[7:]
            ws = windSpeed[7:]
            DNI = directNormalRadiation[7:]
            DHI = diffuseHorizontalRadiation[7:]
            yearsHOY = modelYear[7:]
            # Month / day-of-month / hour-of-day labels for each of the
            # 8760 hours of the (non-leap) year.
            numberOfDaysMonth = [31,28,31,30,31,30,31,31,30,31,30,31]
            monthsHOY = []
            daysHOY = []
            hoursHOY = []
            for monthIndex, daysInMonth in enumerate(numberOfDaysMonth):
                for day in range(1, daysInMonth + 1):
                    for hour in range(1, 25):
                        monthsHOY.append(monthIndex + 1)
                        daysHOY.append(day)
                        hoursHOY.append(hour)
            HOYs = range(1,8761)
            validEpwData = True
            printMsg = "ok"
            return locationName, float(latitude), float(longitude), float(timeZone), float(elevation), Ta, ws, DNI, DHI, yearsHOY, monthsHOY, daysHOY, hoursHOY, HOYs, albedo, validEpwData, printMsg
        except Exception:
            # Parsing failed: something is wrong with "_epwFile" input.
            # ("except Exception:" replaces the Python-2-only
            # "except Exception, e" spelling; works on IronPython 2.7 too.)
            locationName = latitude = longitude = timeZone = elevation = Ta = ws = DNI = DHI = yearsHOY = monthsHOY = daysHOY = hoursHOY = HOYs = albedo = None
            validEpwData = False
            printMsg = "Something is wrong with \"_epwFile\" input."
    else:
        locationName = latitude = longitude = timeZone = elevation = Ta = ws = DNI = DHI = yearsHOY = monthsHOY = daysHOY = hoursHOY = HOYs = albedo = None
        validEpwData = False
        printMsg = "Please supply .epw file path to \"_epwFile\" input"
    return locationName, latitude, longitude, timeZone, elevation, Ta, ws, DNI, DHI, yearsHOY, monthsHOY, daysHOY, hoursHOY, HOYs, albedo, validEpwData, printMsg
def PVsurfaceInputData(PVsurface, PVsurfacePercent, unitAreaConversionFactor, DCtoACderateFactor, moduleActiveAreaPercent, moduleType, moduleEfficiency):
    # Interpret the _PVsurface input, which may be a Rhino surface (brep),
    # a plain number (surface area in m2) or a string like "4 kw"
    # (nameplate DC rating), and derive system size / areas from it.
    #
    # Returns an 11-tuple:
    #   (PVsurfaceInputType, nameplateDCpowerRating, srfArea, activeArea,
    #    PVsurfacePercent, DCtoACderateFactor, moduleActiveAreaPercent,
    #    moduleType, moduleEfficiency, validPVsurfaceData, printMsg)
    if (PVsurface == None):
        PVsurfaceInputType = nameplateDCpowerRating = srfArea = activeArea = PVsurfacePercent = DCtoACderateFactor = moduleActiveAreaPercent = moduleType = moduleEfficiency = None
        validPVsurfaceData = False
        printMsg = "Please input Surface (not polysurface) to \"_PVsurface\".\nOr input surface Area in square meters (example: \"100\").\nOr input Nameplate DC power rating in kiloWatts (example: \"4 kw\")."
        return PVsurfaceInputType, nameplateDCpowerRating, srfArea, activeArea, PVsurfacePercent, DCtoACderateFactor, moduleActiveAreaPercent, moduleType, moduleEfficiency, validPVsurfaceData, printMsg

    # Out-of-range or missing optional inputs fall back to their defaults.
    if (PVsurfacePercent == None) or (PVsurfacePercent < 0) or (PVsurfacePercent > 100):
        PVsurfacePercent = 100  # default value 100%
    if (DCtoACderateFactor == None) or (DCtoACderateFactor < 0) or (DCtoACderateFactor > 1):
        DCtoACderateFactor = 0.85  # default value (corresponds to 11.42% of PVWatts v5 Total Losses)
    if (moduleActiveAreaPercent == None) or (moduleActiveAreaPercent < 0) or (moduleActiveAreaPercent > 100):
        moduleActiveAreaPercent = 90  # default value in %
    if (moduleType == None) or (moduleType < 0) or (moduleType > 3):
        moduleType = 0  # Glass/cell/glass, Close (flush) roof mount
    if (moduleEfficiency == None) or (moduleEfficiency < 0) or (moduleEfficiency > 100):
        moduleEfficiency = 15  # for crystalline silicon

    # Percent-to-fraction conversions use float literals: under (Iron)Python 2
    # an integer input such as the default 90 would otherwise truncate to 0
    # via integer division (90/100 == 0).
    # check PVsurface input
    obj = rs.coercegeometry(PVsurface)
    if isinstance(obj, Rhino.Geometry.Brep):
        # input is a brep
        PVsurfaceInputType = "brep"
        facesCount = obj.Faces.Count
        if facesCount > 1:
            # inputted polysurface
            PVsurfaceInputType = nameplateDCpowerRating = srfArea = activeArea = PVsurfacePercent = DCtoACderateFactor = moduleActiveAreaPercent = moduleType = moduleEfficiency = None
            validPVsurfaceData = False
            printMsg = "The brep you supplied to \"_PVsurface\" is a polysurface. Please supply a surface"
            return PVsurfaceInputType, nameplateDCpowerRating, srfArea, activeArea, PVsurfacePercent, DCtoACderateFactor, moduleActiveAreaPercent, moduleType, moduleEfficiency, validPVsurfaceData, printMsg
        else:
            # inputted brep with a single surface
            srfArea = Rhino.Geometry.AreaMassProperties.Compute(obj).Area * (PVsurfacePercent/100.0)  # in m2
            srfArea = srfArea * unitAreaConversionFactor  # in m2
            activeArea = srfArea * (moduleActiveAreaPercent/100.0)  # in m2
            nameplateDCpowerRating = activeArea * (1 * (moduleEfficiency/100.0))  # in kW
            validPVsurfaceData = True
            printMsg = "ok"
            return PVsurfaceInputType, nameplateDCpowerRating, srfArea, activeArea, PVsurfacePercent, DCtoACderateFactor, moduleActiveAreaPercent, moduleType, moduleEfficiency, validPVsurfaceData, printMsg
    else:
        PVsurfaceInputType = "number"
        try:
            # input is a number (pv surface area in m2)
            srfArea = float(PVsurface) * (PVsurfacePercent/100.0)  # in m2
            srfArea = srfArea * unitAreaConversionFactor  # in m2
            activeArea = srfArea * (moduleActiveAreaPercent/100.0)  # in m2
            nameplateDCpowerRating = activeArea * (1 * (moduleEfficiency/100.0))  # in kW
            validPVsurfaceData = True
            printMsg = "ok"
            return PVsurfaceInputType, nameplateDCpowerRating, srfArea, activeArea, PVsurfacePercent, DCtoACderateFactor, moduleActiveAreaPercent, moduleType, moduleEfficiency, validPVsurfaceData, printMsg
        except (TypeError, ValueError):
            # Not a plain number; fall through to the "4 kw" string form.
            # (Narrowed from Python-2-only "except Exception, e".)
            pass
        # input is a string (nameplateDCpowerRating in kW)
        lowerString = PVsurface.lower()
        if "kw" in lowerString:
            nameplateDCpowerRating = float(lowerString.replace("kw","")) * (PVsurfacePercent/100.0)  # in kW
            activeArea = nameplateDCpowerRating / (1 * (moduleEfficiency/100.0))  # in m2
            srfArea = activeArea * (100.0/moduleActiveAreaPercent)  # in m2
            validPVsurfaceData = True
            printMsg = "ok"
            return PVsurfaceInputType, nameplateDCpowerRating, srfArea, activeArea, PVsurfacePercent, DCtoACderateFactor, moduleActiveAreaPercent, moduleType, moduleEfficiency, validPVsurfaceData, printMsg
        else:
            PVsurfaceInputType = nameplateDCpowerRating = srfArea = activeArea = PVsurfacePercent = DCtoACderateFactor = moduleActiveAreaPercent = moduleType = moduleEfficiency = None
            validPVsurfaceData = False
            printMsg = "Something is wrong with your \"PVsurface\" input data"
            return PVsurfaceInputType, nameplateDCpowerRating, srfArea, activeArea, PVsurfacePercent, DCtoACderateFactor, moduleActiveAreaPercent, moduleType, moduleEfficiency, validPVsurfaceData, printMsg
def checkAnnualHourlyInputData(annualHourlyData):
    """
    Validate the "annualHourlyData_" input and split it into per-field lists.

    annualHourlyData is expected to be a concatenation of one or more 8767-item
    lists from the "Ladybug_Import epw" component (a 7-item Ladybug header
    followed by 8760 hourly values).

    Returns a 4-tuple, always in the same order:
        validAnnualHourlyData (bool),
        annualHourlyDataLists (list of 8760-item value lists, or None),
        annualHourlyDataListsEpwNames (list of epw field names, or None),
        printMsg (str).
    """
    if annualHourlyData == []:
        # nothing supplied - that is valid, just empty
        annualHourlyDataLists = []
        annualHourlyDataListsEpwNames = []
        validAnnualHourlyData = True
        printMsg = "ok"
        return validAnnualHourlyData, annualHourlyDataLists, annualHourlyDataListsEpwNames, printMsg
    elif len(annualHourlyData) % 8767 != 0:
        annualHourlyDataLists = annualHourlyDataListsEpwNames = None
        validAnnualHourlyData = False
        printMsg = "Your annualHourlyData_ input is not correct. Please input complete 8767 items long list(s) from \"Ladybug_Import epw\" component"
        # BUGFIX: return the values in the same order as the other branches.
        # This branch previously returned annualHourlyDataLists first, so the
        # caller's "validAnnualHourlyData" variable silently received None.
        return validAnnualHourlyData, annualHourlyDataLists, annualHourlyDataListsEpwNames, printMsg
    else:
        annualHourlyDataLists = []
        annualHourlyDataListsEpwNames = []
        startIndex = 0
        endIndex = 8767
        for i in range(int(len(annualHourlyData)/8767)):
            untrimmedList = annualHourlyData[startIndex:endIndex]
            trimmedList = untrimmedList[7:]  # drop the 7-item Ladybug header
            annualHourlyDataListsName = untrimmedList[2]  # header item 2 holds the field name
            annualHourlyDataLists.append(trimmedList)
            annualHourlyDataListsEpwNames.append(annualHourlyDataListsName)
            startIndex += 8767
            endIndex += 8767
        validAnnualHourlyData = True
        printMsg = "ok"
        return validAnnualHourlyData, annualHourlyDataLists, annualHourlyDataListsEpwNames, printMsg
def checkConditionalStatement(conditionalStatement, annualHourlyDataLists, annualHourlyDataListsEpwNames, weatherPerHourDataSubLists, addZero):
    """
    Filter the hourly weather sublists by a user-supplied boolean expression.

    conditionalStatement: string such as "a>25 and b<80" where the letters a, b,
        c ... refer, in order, to the lists in annualHourlyDataLists; None = no filter.
    annualHourlyDataLists: hourly value lists (one per letter used in the statement).
    annualHourlyDataListsEpwNames: epw field names, used only for the report string.
    weatherPerHourDataSubLists: the hourly data lists to be filtered in lockstep.
    addZero: if True, hours failing the condition are kept as 0;
        if False, those hours are dropped entirely.

    Returns: (validConditionalStatement, weatherPerHourDataConditionalStatementSubLists,
              conditionalStatementForFinalPrint, printMsg)

    NOTE(review): the user string is exec()'d as Python source - acceptable inside
    a trusted Grasshopper canvas, but never expose this to untrusted input.
    """
    if conditionalStatement == None and len(annualHourlyDataLists) > 0: # conditionalStatement_ not inputted, annualHourlyData_ inputted
        validConditionalStatement = False
        weatherPerHourDataConditionalStatementSubLists = conditionalStatementForFinalPrint = None
        printMsg = "Please supply \"conditionalStatement_\" for inputted \"annualHourlyData_\" data."
        return validConditionalStatement, weatherPerHourDataConditionalStatementSubLists, conditionalStatementForFinalPrint, printMsg
    elif conditionalStatement == None and len(annualHourlyDataLists) == 0: # conditionalStatement_ not inputted, annualHourlyData_ not inputted
        conditionalStatement = "True"
    else: # conditionalStatement_ inputted
        if annualHourlyDataLists == []:
            validConditionalStatement = False
            weatherPerHourDataConditionalStatementSubLists = conditionalStatementForFinalPrint = None
            printMsg = "Please supply \"annualHourlyData_\" data for inputted \"conditionalStatement_\"."
            return validConditionalStatement, weatherPerHourDataConditionalStatementSubLists, conditionalStatementForFinalPrint, printMsg
        else: # both conditionalStatement_ and annualHourlyData_ inputted
            conditionalStatement = conditionalStatement.lower()
            # turn each single-letter variable into an indexed lookup, e.g. "a" -> "a[i]"
            conditionalStatement = re.sub(r"\b([a-z])\b", r"\1[i]", conditionalStatement)
            annualHourlyDataListsNames = map(chr, range(97, 123))
    # finalPrint conditional statements for "printOutput" function
    if conditionalStatement != "True": # a real condition was inputted
        # replace conditionalStatement annualHourlyDataListsNames[i] names with annualHourlyDataListsEpwNames:
        conditionalStatementForFinalPrint = conditionalStatement[:]
        for i in range(len(annualHourlyDataLists)):
            conditionalStatementForFinalPrint = conditionalStatementForFinalPrint.replace(annualHourlyDataListsNames[i]+"[i]", annualHourlyDataListsEpwNames[i])
    else:
        conditionalStatementForFinalPrint = "No condition"
    annualHourlyDataListsNames = map(chr, range(97, 123))
    # count how many of the letters a-z actually appear in the rewritten statement;
    # it must match the number of supplied hourly data lists
    numberOfLetters = 0
    for letter in annualHourlyDataListsNames:
        changedLetter = letter+"[i]"
        if changedLetter in conditionalStatement:
            numberOfLetters += 1
    if numberOfLetters != len(annualHourlyDataLists):
        validConditionalStatement = False
        weatherPerHourDataConditionalStatementSubLists = conditionalStatementForFinalPrint = None
        printMsg = "The number of a,b,c... variables you supplied in \"conditionalStatement_\" does not correspond to the number of \"annualHourlyData_\" lists you inputted. Please make the numbers of these two equal."
        return validConditionalStatement, weatherPerHourDataConditionalStatementSubLists, conditionalStatementForFinalPrint, printMsg
    else:
        # bind each hourly list to its single-letter name so the exec'd condition can see it
        for i in range(len(annualHourlyDataLists)):
            exec("%s = %s" % (annualHourlyDataListsNames[i],annualHourlyDataLists[i]))
        try:
            weatherPerHourDataConditionalStatementSubLists = []
            for i in range(len(weatherPerHourDataSubLists)):
                weatherPerHourDataConditionalStatementSubLists.append([])
            for i in range(len(weatherPerHourDataSubLists[0])):
                # evaluate the user's condition for hour i
                exec("conditionalSt = %s" % conditionalStatement)
                if addZero == True: # add 0 if conditionalStatement == False
                    if conditionalSt:
                        for k in range(len(weatherPerHourDataConditionalStatementSubLists)):
                            weatherPerHourDataConditionalStatementSubLists[k].append(weatherPerHourDataSubLists[k][i])
                    else:
                        for k in range(len(weatherPerHourDataConditionalStatementSubLists)):
                            weatherPerHourDataConditionalStatementSubLists[k].append(0)
                else: # skip the value
                    if conditionalSt:
                        for k in range(len(weatherPerHourDataConditionalStatementSubLists)):
                            weatherPerHourDataConditionalStatementSubLists[k].append(weatherPerHourDataSubLists[k][i])
        except Exception, e:
            validConditionalStatement = False
            weatherPerHourDataConditionalStatementSubLists = conditionalStatementForFinalPrint = None
            printMsg = "Your \"conditionalStatement_\" is incorrect. Please provide a valid conditional statement in Python, such as \"a>25 and b<80\" (without the quotation marks)"
            return validConditionalStatement, weatherPerHourDataConditionalStatementSubLists, conditionalStatementForFinalPrint, printMsg
        if len(weatherPerHourDataConditionalStatementSubLists[0]) == 0:
            validConditionalStatement = False
            weatherPerHourDataConditionalStatementSubLists = conditionalStatementForFinalPrint = None
            printMsg = "No \"annualHourlyData_\" coresponds to \"conditionalStatement_\". Please edit your \"conditionalStatement_\""
            return validConditionalStatement, weatherPerHourDataConditionalStatementSubLists, conditionalStatementForFinalPrint, printMsg
        else:
            validConditionalStatement = True
            printMsg = "ok"
            return validConditionalStatement, weatherPerHourDataConditionalStatementSubLists, conditionalStatementForFinalPrint, printMsg
def srfAzimuthAngle(PVsurfaceAzimuthAngle, PVsurfaceInputType, PVsurface, latitude):
    """
    Determine the PV surface azimuth in degrees (clockwise from north).

    The explicit "PVsurfaceAzimuthAngle_" input always wins, even when a surface
    has been supplied; out-of-range values (outside 0-360) fall back to the
    equator-facing default. When no angle is given, a brep input is measured by
    lb_photovoltaics and a numeric input defaults to equator-facing.

    Returns: (srfAzimuthD, surfaceTiltDCalculated), where surfaceTiltDCalculated
    is either a tilt in degrees or the string "needs to be calculated".
    """
    # always use "PVsurfaceAzimuthAngle" input, even in case surface has been inputted into the "_PVsurface" input
    if (PVsurfaceAzimuthAngle != None):
        if (PVsurfaceAzimuthAngle < 0) or (PVsurfaceAzimuthAngle > 360):
            # invalid angle: fall back to equator-facing orientation.
            # BUGFIX: use latitude >= 0 (was ">0"/"<0" with no else), so that
            # latitude == 0 no longer leaves srfAzimuthD unassigned (UnboundLocalError)
            if latitude >= 0:
                srfAzimuthD = 180 # equator facing for northern hemisphere
            else:
                srfAzimuthD = 0 # equator facing for southern hemisphere
        else:
            srfAzimuthD = PVsurfaceAzimuthAngle
        surfaceTiltDCalculated = "needs to be calculated"
    # nothing inputted into "PVsurfaceAzimuthAngle_" input, calculate the PVsurfaceAzimuthAngle from inputted "_PVsurface" surface
    elif (PVsurfaceAzimuthAngle == None):
        if PVsurfaceInputType == "brep":
            srfAzimuthD, surfaceTiltDCalculated = lb_photovoltaics.srfAzimuthAngle(PVsurface)
            if surfaceTiltDCalculated == None:
                surfaceTiltDCalculated = "needs to be calculated"
        # nothing inputted into "PVsurfaceAzimuthAngle_" input, use south orientation (180 for + latitude locations, 0 for - latitude locations)
        elif PVsurfaceInputType == "number":
            if latitude >= 0:
                srfAzimuthD = 180 # equator facing for northern hemisphere
            else:
                srfAzimuthD = 0 # equator facing for southern hemisphere
            surfaceTiltDCalculated = "needs to be calculated"
    return srfAzimuthD, surfaceTiltDCalculated
def srfTiltAngle(PVsurfaceTiltAngle, surfaceTiltDCalculated, PVsurfaceInputType, PVsurface, latitude):
    """
    Determine the PV surface tilt angle in degrees.

    An explicit "PVsurfaceTiltAngle_" input always wins (values outside 0-180
    clamp to horizontal = 0). Otherwise a tilt already derived by
    srfAzimuthAngle() (0/90/180) is reused; failing that, a brep input is
    measured via lb_photovoltaics and a numeric input defaults to abs(latitude).
    """
    if PVsurfaceTiltAngle is not None:
        # user-specified tilt takes precedence; out-of-range values become 0
        if 0 <= PVsurfaceTiltAngle <= 180:
            srfTiltD = PVsurfaceTiltAngle
        else:
            srfTiltD = 0
    else:
        # was the tilt already derived by the srfAzimuthAngle() step?
        if (surfaceTiltDCalculated == 0) or (surfaceTiltDCalculated == 90) or (surfaceTiltDCalculated == 180):
            srfTiltD = surfaceTiltDCalculated
        elif surfaceTiltDCalculated == "needs to be calculated":
            if PVsurfaceInputType == "brep":
                # measure the tilt from the supplied surface geometry
                srfTiltD = lb_photovoltaics.srfTiltAngle(PVsurface)
            elif PVsurfaceInputType == "number":
                # rule-of-thumb default: tilt equals the site's absolute latitude
                srfTiltD = abs(latitude)
    return srfTiltD
def angle2northClockwise(north):
    # temporary function, until "Sunpath" class from Labybug_ladbybug.py starts calculating sun positions counterclockwise
    """
    Convert a "north" input (an angle in degrees OR a Rhino vector) to a
    counterclockwise angle in radians plus the corresponding north unit vector.
    Falls back to (0, world Y axis) when the input is neither interpretable as
    a number nor as a vector.
    """
    try:
        # numeric input: rotate the world Y axis clockwise by "north" degrees
        northVec =Rhino.Geometry.Vector3d.YAxis
        northVec.Rotate(-math.radians(float(north)),Rhino.Geometry.Vector3d.ZAxis)
        northVec.Unitize()
        return 2*math.pi-math.radians(float(north)), northVec
    except Exception, e:
        try:
            # vector input: measure its angle from the Y axis in the world XY plane
            northVec =Rhino.Geometry.Vector3d(north)
            northVec.Unitize()
            return Rhino.Geometry.Vector3d.VectorAngle(Rhino.Geometry.Vector3d.YAxis, northVec, Rhino.Geometry.Plane.WorldXY), northVec
        except Exception, e:
            # unusable input: default to north = world Y axis (angle 0)
            return 0, Rhino.Geometry.Vector3d.YAxis
def correctSrfAzimuthDforNorth(north, srfAzimuthD):
# nothing inputted in "north_" - use default value: 0
if north == None:
northDeg = 0 # default
correctedSrfAzimuthD = srfAzimuthD
validNorth = True
printMsg = "ok"
else:
try: # check if it's a number
north = float(north)
if north < 0 or north > 360:
correctedSrfAzimuthD = northDeg = None
validNorth = False
printMsg = "Please input north angle value from 0 to 360."
return correctedSrfAzimuthD, validNorth, printMsg
except Exception, e: # check if it's a vector
north.Unitize()
northRad, northVec = angle2northClockwise(north)
northDeg = 360-math.degrees(northRad)
correctedSrfAzimuthD = northDeg + srfAzimuthD
if correctedSrfAzimuthD > 360:
correctedSrfAzimuthD = correctedSrfAzimuthD - 360
validNorth = True
printMsg = "ok"
return correctedSrfAzimuthD, northDeg, validNorth, printMsg
def radiation_ACenergy(latitude, longitude, timeZone, locationName, years, months, days, hours, HOYs, nameplateDCpowerRating, DCtoACderateFactor, srfArea, srfTiltD, srfAzimuthD, moduleType, moduleEfficiency, dryBulbTemperature, windSpeed, directNormalRadiation, diffuseHorizontalRadiation, albedo):
    """
    Run the hourly PVWatts-style simulation over the analysis hours (HOYs).

    For each hour it computes the sun position, the plane-of-array irradiance
    and the AC power output via lb_photovoltaics, then aggregates hourly AC
    energy into monthly, yearly and average-daily totals.

    Returns: ACenergyPerHour, ACenergyPerMonth, ACenergyPerYear,
             averageDailyACenergyPerMonth, averageDailyACenergyPerYear,
             totalRadiationPerHour, moduleTemperaturePerHour, cellTemperaturePerHour
    (the *PerHour lists carry a 7-item Ladybug header before the hourly values).
    """
    # solar radiation, AC power output, module temperature, cell temperature
    ACenergyPerHour = ["key:location/dataType/units/frequency/startsAt/endsAt", locationName, "AC power output", "kWh", "Hourly", (1, 1, 1), (12, 31, 24)]
    totalRadiationPerHour = ["key:location/dataType/units/frequency/startsAt/endsAt", locationName, "Total POA irradiance", "kWh/m2", "Hourly", (1, 1, 1), (12, 31, 24)]
    moduleTemperaturePerHour = ["key:location/dataType/units/frequency/startsAt/endsAt", locationName, "Module temperature", "C", "Hourly", (1, 1, 1), (12, 31, 24)]
    cellTemperaturePerHour = ["key:location/dataType/units/frequency/startsAt/endsAt", locationName, "Cell temperature", "C", "Hourly", (1, 1, 1), (12, 31, 24)]
    # cumulative hour-of-year at the start of each month; the trailing 9000 is a
    # sentinel so the hoyForMonths[k+1] lookup below never runs past the end
    hoyForMonths = [0, 744, 1416, 2160, 2880, 3624, 4344, 5088, 5832, 6552, 7296, 8016, 8760, 9000]
    numberOfDaysInThatMonth = [31,28,31,30,31,30,31,31,30,31,30,31]
    monthsOfYearHoyPac = [[],[],[],[],[],[],[],[],[],[],[],[]]
    averageDailyACenergyPerMonth = []
    for i,hoy in enumerate(HOYs):
        # hours[i]-1: epw hours run 1-24; presumably NRELsunPosition expects 0-23 - TODO confirm
        sunZenithD, sunAzimuthD, sunAltitudeD = lb_photovoltaics.NRELsunPosition(latitude, longitude, timeZone, years[i], months[i], days[i], hours[i]-1)
        Epoa, Eb, Ed_sky, Eground, AOI_R = lb_photovoltaics.POAirradiance(sunZenithD, sunAzimuthD, srfTiltD, srfAzimuthD, directNormalRadiation[i], diffuseHorizontalRadiation[i], albedo)
        Tm, Tcell, Pac = lb_photovoltaics.pvwatts(nameplateDCpowerRating, DCtoACderateFactor, AOI_R, Epoa, Eb, Ed_sky, Eground, moduleType, dryBulbTemperature[i], windSpeed[i], directNormalRadiation[i], diffuseHorizontalRadiation[i])
        Epoa = Epoa/1000 # to kWh/m2
        ACenergyPerHour.append(Pac)
        totalRadiationPerHour.append(Epoa)
        moduleTemperaturePerHour.append(Tm)
        cellTemperaturePerHour.append(Tcell)
        # bin this hour's AC output into its month
        for k,item in enumerate(hoyForMonths):
            if hoy >= hoyForMonths[k]+1 and hoy <= hoyForMonths[k+1]:
                monthsOfYearHoyPac[k].append(Pac)
    ACenergyPerMonth = [sum(monthPac) for monthPac in monthsOfYearHoyPac] # in kWh
    ACenergyPerYear = sum(ACenergyPerMonth) # in kWh
    for g,sumMonthPac in enumerate(ACenergyPerMonth):
        MonthPac = (sumMonthPac)/numberOfDaysInThatMonth[g]
        averageDailyACenergyPerMonth.append(MonthPac) # in kWh/day
    averageDailyACenergyPerYear = sum(averageDailyACenergyPerMonth)/12 # in kWh/day
    return ACenergyPerHour, ACenergyPerMonth, ACenergyPerYear, averageDailyACenergyPerMonth, averageDailyACenergyPerYear, totalRadiationPerHour, moduleTemperaturePerHour, cellTemperaturePerHour
def printOutput(unitAreaConversionFactor, north, latitude, longitude, timeZone, elevation, locationName, albedo, nameplateDCpowerRating, srfArea, activeArea, PVsurfacePercent, DCtoACderateFactor, srfTiltD, srfAzimuthD, moduleActiveAreaPercent, moduleType, moduleEfficiency, conditionalStatementForFinalPrint):
moduleTypesL = ["Glass/cell/glass Close (flush) roof mount", "Glass/cell/polymer sheet Insulated back", "Glass/cell/polymer sheet Open rack", "Glass/cell/glass Open rack"]
model = moduleTypesL[moduleType]
resultsCompletedMsg = "PVsurface component results successfully completed!"
printOutputMsg = \
"""
Input data:
Location: %s
Latitude: %s
Longitude: %s
Time zone: %s
Elevation: %s
North: %s
Albedo: %s
Surface percentage used for PV modules: %0.2f
Active area Percentage: %0.2f
Surface area (m2): %0.2f
Surface active area (m2): %0.2f
Nameplate DC power rating (kW): %0.2f
Overall DC to AC derate factor: %0.3f
Module type and mounting: %s
Module efficiency: %s
Array type: fixed tilt
Surface azimuth angle: %0.2f
Surface tilt angle: %0.2f
Caclulation based on the following condition:
%s
""" % (locationName, latitude, longitude, timeZone, elevation, north, albedo, PVsurfacePercent, moduleActiveAreaPercent, srfArea, activeArea, nameplateDCpowerRating, DCtoACderateFactor, model, moduleEfficiency, srfAzimuthD, srfTiltD, conditionalStatementForFinalPrint)
print resultsCompletedMsg
print printOutputMsg
# Component entry point: validate each input in turn (surface, epw file,
# hourly data, conditional statement) and simulate only when everything is
# valid and _runIt is True; otherwise surface the first failing printMsg
# as a Grasshopper warning balloon.
level = gh.GH_RuntimeMessageLevel.Warning
if sc.sticky.has_key("ladybug_release"):
    if sc.sticky["ladybug_release"].isCompatible(ghenv.Component):
        # fetch the shared Ladybug helper classes stored on the sticky dictionary
        lb_preparation = sc.sticky["ladybug_Preparation"]()
        lb_photovoltaics = sc.sticky["ladybug_Photovoltaics"]()
        if _PVsurface:
            unitConversionFactor = lb_preparation.checkUnits()
            unitAreaConversionFactor = unitConversionFactor**2
            PVsurfaceInputType, nameplateDCpowerRating, srfArea, activeArea, PVsurfacePercent, DCtoACderateFactor, moduleActiveAreaPercent, moduleType, moduleEfficiency, validPVsurfaceData, printMsg = PVsurfaceInputData(_PVsurface, PVsurfacePercent_, unitAreaConversionFactor, DCtoACderateFactor_, moduleActiveAreaPercent_, moduleType_, moduleEfficiency_)
            if validPVsurfaceData:
                locationName, latitude, longitude, timeZone, elevation, dryBulbTemperature, windSpeed, directNormalRadiation, diffuseHorizontalRadiation, years, months, days, hours, HOYs, albedo, validEpwData, printMsg = getEpwData(_epwFile, albedo_)
                if validEpwData:
                    validAnnualHourlyData, annualHourlyDataLists, annualHourlyDataListsEpwNames, printMsg = checkAnnualHourlyInputData(annualHourlyData_)
                    if validAnnualHourlyData:
                        validConditionalStatement, weatherPerHourDataConditionalStatementSubLists, conditionalStatementForFinalPrint, printMsg = checkConditionalStatement(conditionalStatement_, annualHourlyDataLists, annualHourlyDataListsEpwNames, [dryBulbTemperature, windSpeed, directNormalRadiation, diffuseHorizontalRadiation], True)
                        if validConditionalStatement:
                            dryBulbTemperatureCondStat, windSpeedCondStat, directNormalRadiationCondStat, diffuseHorizontalRadiationCondStat = weatherPerHourDataConditionalStatementSubLists
                            # all inputs ok
                            if _runIt:
                                # derive geometry angles, then run the hourly simulation and report
                                srfAzimuthD, surfaceTiltDCalculated = srfAzimuthAngle(PVsurfaceAzimuthAngle_, PVsurfaceInputType, _PVsurface, latitude)
                                correctedSrfAzimuthD, northDeg, validNorth, printMsg = correctSrfAzimuthDforNorth(north_, srfAzimuthD)
                                srfTiltD = srfTiltAngle(PVsurfaceTiltAngle_, surfaceTiltDCalculated, PVsurfaceInputType, _PVsurface, latitude)
                                ACenergyPerHour, ACenergyPerMonth, ACenergyPerYear, averageDailyACenergyPerMonth, averageDailyACenergyPerYear, totalRadiationPerHour, moduleTemperaturePerHour, cellTemperaturePerHour = radiation_ACenergy(latitude, longitude, timeZone, locationName, years, months, days, hours, HOYs, nameplateDCpowerRating, DCtoACderateFactor, srfArea, srfTiltD, correctedSrfAzimuthD, moduleType, moduleEfficiency, dryBulbTemperatureCondStat, windSpeedCondStat, directNormalRadiationCondStat, diffuseHorizontalRadiationCondStat, albedo)
                                printOutput(unitAreaConversionFactor, northDeg, latitude, longitude, timeZone, elevation, locationName, albedo, nameplateDCpowerRating, srfArea, activeArea, PVsurfacePercent, DCtoACderateFactor, srfTiltD, correctedSrfAzimuthD, moduleActiveAreaPercent, moduleType, moduleEfficiency, conditionalStatementForFinalPrint)
                                PVcoverArea = srfArea; PVcoverActiveArea = activeArea
                            else:
                                print "All inputs are ok. Please set the \"_runIt\" to True, in order to run the Photovoltaics surface component"
                        else:
                            print printMsg
                            ghenv.Component.AddRuntimeMessage(level, printMsg)
                    else:
                        print printMsg
                        ghenv.Component.AddRuntimeMessage(level, printMsg)
                else:
                    print printMsg
                    ghenv.Component.AddRuntimeMessage(level, printMsg)
            else:
                print printMsg
                ghenv.Component.AddRuntimeMessage(level, printMsg)
        else:
            printMsg = "Please input a Surface (not a polysurface) to \"_PVsurface\".\nOr input surface Area in square meters (example: \"100\").\nOr input Nameplate DC power rating in kiloWatts (example: \"4 kw\")."
            print printMsg
            ghenv.Component.AddRuntimeMessage(level, printMsg)
    else:
        printMsg = "You need a newer version of Ladybug to use this component." + \
            "Use updateLadybug component to update userObjects.\n" + \
            "If you have already updated userObjects drag the Ladybug_Ladybug component " + \
            "into the canvas and try again."
        print printMsg
        ghenv.Component.AddRuntimeMessage(level, printMsg)
else:
    printMsg = "First please let the Ladybug fly..."
    print printMsg
    ghenv.Component.AddRuntimeMessage(level, printMsg)
|
samuto/ladybug
|
src/Ladybug_Photovoltaics Surface.py
|
Python
|
gpl-3.0
| 38,221
|
[
"EPW"
] |
925849b5b407e211fd765360f10ef4483b9b740e7e983d2e26760c3df2411edb
|
from __future__ import print_function
__author__ = """Alex "O." Holcombe, Charles Ludowici, """ ## double-quotes will be silently removed, single quotes will be left, eg, O'Connor
import time, sys, platform, os
from math import atan, atan2, pi, cos, sin, sqrt, ceil, radians, degrees
import numpy as np
import psychopy, psychopy.info
import copy
from psychopy import visual, sound, monitors, logging, gui, event, core, data
# Load local helper functions; fail loudly (but non-fatally) if the helper file
# is not alongside this script.
try:
    from helpersAOH import accelerateComputer, openMyStimWindow
except Exception as e:
    print(e); print('Problem loading helpersAOH. Check that the file helpersAOH.py in the same directory as this file')
    print('Current directory is ',os.getcwd())
eyeTracking = False
if eyeTracking:
    try:
        import eyelinkEyetrackerForPsychopySUPA3
    except Exception as e:
        print(e)
        print('Problem loading eyelinkEyetrackerForPsychopySUPA3. Check that the file eyelinkEyetrackerForPsychopySUPA3.py in the same directory as this file')
        print('While a different version of pylink might make your eyetracking code work, your code appears to generally be out of date. Rewrite your eyetracker code based on the SR website examples')
        #Psychopy v1.83.01 broke this, pylink version prevents EyelinkEyetrackerForPsychopySUPA3 stuff from importing. But what really needs to be done is to change eyetracking code to more modern calls, as indicated on SR site
        eyeTracking = False
expname= "dot-jump" # experiment name, used when naming data files
demo = False; exportImages = False # demo mode / frame-capture mode flags
autopilot = False # when True the program generates responses itself (testing)
subject='test' # default subject code; replaced via the dialog box below
###############################
### Setup the screen parameters ##########
###############################
allowGUI = False
units='deg' #'cm'
waitBlank=False
refreshRate= 85 *1.0; #160 #set to the framerate of the monitor
fullscrn=True; #show in small window (0) or full screen (1)
scrn=True
# First-pass setup: confirm display options with the user, open a temporary
# window to measure the actual refresh rate, then close it again so the
# experiment-parameters dialog can be shown.
if True: #just so I can indent all the below
    #which screen to display the stimuli. 0 is home screen, 1 is second screen
    # create a dialog from dictionary
    infoFirst = { 'Autopilot':autopilot, 'Check refresh etc':True, 'Use second screen':scrn, 'Fullscreen (timing errors if not)': fullscrn, 'Screen refresh rate': refreshRate }
    OK = gui.DlgFromDict(dictionary=infoFirst,
        title='MOT',
        order=['Autopilot','Check refresh etc', 'Use second screen', 'Screen refresh rate', 'Fullscreen (timing errors if not)'],
        tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating',
            'Use second Screen': ''},
        )
    if not OK.OK:
        print('User cancelled from dialog box'); logging.info('User cancelled from dialog box'); core.quit()
    autopilot = infoFirst['Autopilot']
    checkRefreshEtc = infoFirst['Check refresh etc']
    scrn = infoFirst['Use second screen']
    print('scrn = ',scrn, ' from dialog box')
    fullscrn = infoFirst['Fullscreen (timing errors if not)']
    refreshRate = infoFirst['Screen refresh rate']
    #monitor parameters
    widthPix = 1024 #1440 #monitor width in pixels
    heightPix =768 #900 #monitor height in pixels
    monitorwidth = 37 #28.5 #monitor width in centimeters
    viewdist = 55.; #cm
    pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) /np.pi*180)
    bgColor = [-1,-1,-1] #black background
    monitorname = 'testMonitor' # 'mitsubishi' #in psychopy Monitors Center
    mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#fetch the most recent calib for this monitor
    mon.setSizePix( (widthPix,heightPix) )
    myWin = openMyStimWindow(mon,widthPix,heightPix,bgColor,allowGUI,units,fullscrn,scrn,waitBlank)
    myWin.setRecordFrameIntervals(False)
    trialsPerCondition = 2 #default value
    refreshMsg2 = ''
    if not checkRefreshEtc:
        refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
        refreshRateWrong = False
    else: #checkRefreshEtc
        # measure the real refresh rate and compare it with the assumed one
        runInfo = psychopy.info.RunTimeInfo(
            win=myWin,    ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
            refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
            verbose=True, ## True means report on everything
            userProcsDetailed=True  ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
            )
        print('Finished runInfo- which assesses the refresh and processes of this computer')
        refreshMsg1 = 'Median frames per second ='+ str( np.round(1000./runInfo["windowRefreshTimeMedian_ms"],1) )
        refreshRateTolerancePct = 3
        pctOff = abs( (1000./runInfo["windowRefreshTimeMedian_ms"]-refreshRate) / refreshRate)
        refreshRateWrong = pctOff > (refreshRateTolerancePct/100.)
        if refreshRateWrong:
            refreshMsg1 += ' BUT'
            refreshMsg1 += ' program assumes ' + str(refreshRate)
            refreshMsg2 = 'which is off by more than' + str(round(refreshRateTolerancePct,0)) + '%!!'
        else:
            refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
    myWinRes = myWin.size
    myWin.allowGUI =True
    myWin.close() #have to close window to show dialog box
    ##
### END Setup of the screen parameters ##############################################################################################
####################################
# Second dialog: collect the subject code, trials-per-condition, and (in
# autopilot mode) the simulated response offsets; also report the measured
# refresh rate and any resolution mismatch.
askUserAndConfirmExpParams = True
if autopilot:
    subject = 'autoTest'
###############################
### Ask user exp params ##############################################################################################
## askUserAndConfirmExpParams
if askUserAndConfirmExpParams:
    dlgLabelsOrdered = list() #new dialog box
    myDlg = gui.Dlg(title=expname, pos=(200,400))
    if not autopilot:
        myDlg.addField('Subject code :', subject)
        dlgLabelsOrdered.append('subject')
    else:
        myDlg.addField('Subject code :', subject)
        dlgLabelsOrdered.append('subject')
        myDlg.addField('autoPilotTime:', 0, tip='Auto response time relative to cue')
        myDlg.addField('randomTime:',False, tip = 'Add (rounded) gaussian N(0,2) error to time offset?')
        myDlg.addField('autoPilotSpace:',0, tip='Auto response position relative to cue')
        myDlg.addField('randomSpace:',False, tip = 'Add (rounded) gaussian N(0,2) error to space offset?')
        dlgLabelsOrdered.append('autoPilotTime')
        dlgLabelsOrdered.append('randomTime')
        dlgLabelsOrdered.append('autoPilotSpace')
        dlgLabelsOrdered.append('randomSpace')
    myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):', trialsPerCondition, tip=str(trialsPerCondition))
    dlgLabelsOrdered.append('trialsPerCondition')
    myDlg.addText(refreshMsg1, color='Black')
    if refreshRateWrong:
        myDlg.addText(refreshMsg2, color='Red')
    msgWrongResolution = ''
    if checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():
        msgWrongResolution = 'Instead of desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels, screen apparently '+ str(myWinRes[0])+ 'x'+ str(myWinRes[1])
        myDlg.addText(msgWrongResolution, color='Red')
        print(msgWrongResolution); logging.info(msgWrongResolution)
    myDlg.addText('Note: to abort press ESC at response time', color='DimGrey') #works in PsychoPy1.84
    #myDlg.addText('Note: to abort press ESC at a trials response screen', color=[-1.,1.,-1.]) #color names not working for some pre-1.84 versions
    myDlg.show()
    if myDlg.OK: #unpack information from dialogue box
        thisInfo = myDlg.data #this will be a list of data returned from each field added in order
        name=thisInfo[dlgLabelsOrdered.index('subject')]
        if len(name) > 0: #if entered something
            subject = name #change subject default name to what user entered
        trialsPerCondition = int( thisInfo[ dlgLabelsOrdered.index('trialsPerCondition') ] ) #convert string to integer
        print('trialsPerCondition=',trialsPerCondition)
        logging.info('trialsPerCondition ='+str(trialsPerCondition))
        if autopilot:
            autoSpace = thisInfo[dlgLabelsOrdered.index('autoPilotSpace')]
            autoTime = thisInfo[dlgLabelsOrdered.index('autoPilotTime')]
            randomTime = thisInfo[dlgLabelsOrdered.index('randomTime')]
            randomSpace = thisInfo[dlgLabelsOrdered.index('randomSpace')]
    else:
        print('User cancelled from dialog box.'); logging.info('User cancelled from dialog box')
        logging.flush()
        core.quit()
### Ask user exp params
## END askUserAndConfirmExpParams ###############################
##############################################################################################
# Data-file and logging setup: pick the output directory, snapshot this script
# alongside the data, configure log levels, then reopen the stimulus window.
if os.path.isdir('.'+os.sep+'dataRaw'):
    dataDir='dataRaw'
else:
    msg= 'dataRaw directory does not exist, so saving data in present working directory'
    print(msg); logging.info(msg)
    dataDir='.'
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
fileNameWithPath = dataDir+os.sep+subject+ '_' + expname+timeAndDateStr
if not demo and not exportImages:
    # NOTE(review): 'cp' makes this snapshot step POSIX-only - confirm intended platforms
    saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileNameWithPath + '.py'
    os.system(saveCodeCmd) #save a copy of the code as it was when that subject was run
    logF = logging.LogFile(fileNameWithPath+'.log',
        filemode='w',#if you set this to 'a' it will append instead of overwriting
        level=logging.INFO)#info, data, warnings, and errors will be sent to this logfile
if demo or exportImages:
    logging.console.setLevel(logging.ERROR) #only show this level's and higher messages
logging.console.setLevel(logging.WARNING) #DEBUG means set the console to receive nearly all messges, INFO is for everything else, INFO, EXP, DATA, WARNING and ERROR
if refreshRateWrong:
    logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)
longerThanRefreshTolerance = 0.27
longFrameLimit = round(1000./refreshRate*(1.0+longerThanRefreshTolerance),3) # round(1000/refreshRate*1.5,2)
msg = 'longFrameLimit='+ str(longFrameLimit) +' Recording trials where one or more interframe interval exceeded this figure '
logging.info(msg); print(msg)
if msgWrongResolution != '':
    logging.error(msgWrongResolution)
# reopen the experiment window (it was closed earlier to show the dialogs)
myWin = openMyStimWindow(mon,widthPix,heightPix,bgColor,allowGUI,units,fullscrn,scrn,waitBlank)
runInfo = psychopy.info.RunTimeInfo(
        win=myWin,    ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
        refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
        verbose=True, ## True means report on everything
        userProcsDetailed=True  ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
        )
msg = 'second window opening runInfo mean ms='+ str( runInfo["windowRefreshTimeAvg_ms"] )
logging.info(msg); print(msg)
logging.info(runInfo)
logging.info('gammaGrid='+str(mon.getGammaGrid()))
logging.info('linearizeMethod='+str(mon.getLinearizeMethod()))
####Functions. Save time by automating processes like stimulus creation and ordering
############################################################################
def oneFrameOfStim(n, itemFrames, SOAFrames, cueFrames, cuePos, trialObjects):
    """
    Draw whatever belongs on frame n of the stream (and the cue, if due).

    n: current frame number within the trial
    itemFrames: frames each item stays visible within its SOA slot
    SOAFrames: stimulus onset asynchrony, in frames
    cueFrames: number of frames the cue stays visible
    cuePos: serial (temporal) position of the cued item
    trialObjects: stimuli in presentation order
    Uses the module-level `cue` stimulus. Always returns True.
    """
    cueOnset = cuePos * SOAFrames          # first frame on which the cue appears
    cueOffset = cueOnset + cueFrames       # first frame on which the cue is gone
    currentItem = trialObjects[int(np.floor(n / SOAFrames))]
    withinItemWindow = (n % SOAFrames) < itemFrames
    if withinItemWindow:
        currentItem.draw()
        if cueOnset <= n < cueOffset:
            cue.draw()
    return True
def oneTrial(stimuli):
    """
    Run one trial: shuffle the temporal order of the dots, then present the
    stream frame by frame while timestamping every flip.

    Uses module-level state: myWin, trialClock, trialFrames, fixation,
    itemFrames, SOAFrames, cueFrames, cuePos.
    Returns: (True, shuffledStimuli, dotOrder, frameTimes).
    """
    dotOrder = np.arange(len(stimuli))
    np.random.shuffle(dotOrder)                      # randomise temporal order
    shuffledStimuli = [stimuli[idx] for idx in dotOrder]
    frameTimes = []
    # two flips: get the raster to the top of the screen (unless not in
    # blocking mode) and give the CPU a chance to finish other tasks
    myWin.flip(); myWin.flip()
    t0 = trialClock.getTime()
    for frameN in range(trialFrames):
        fixation.draw()
        oneFrameOfStim(frameN, itemFrames, SOAFrames, cueFrames, cuePos, shuffledStimuli)
        myWin.flip()
        frameTimes.append(trialClock.getTime() - t0)
    return True, shuffledStimuli, dotOrder, frameTimes
def getResponse(trialStimuli):
    """
    Collect the observer's spatial response for this trial.

    In autopilot mode the response is derived from cuePos plus the configured
    temporal/spatial offsets (autoTime/autoSpace, optionally jittered). Otherwise
    the stimuli are redrawn while waiting for a mouse click, and the click is
    matched to the nearest stimulus; pressing 'q' aborts.

    Returns: (accuracy, selected item, expStop, mousePos).
    Relies on module-level state: autopilot, stimuli, cuePos, nDots, myWin,
    instruction, progress, drawProgress and the auto*/random* settings.
    """
    if autopilot:
        spacing = 360./nDots
        # NOTE(review): 'spacing' is computed but never used in this branch
        autoResponseIdx = cuePos + autoTime #The serial position of the response in the stream
        if randomTime:
            autoResponseIdx += int(round( np.random.normal(0,2) ))
        itemAtTemporalSelection = trialStimuli[autoResponseIdx]
        unshuffledPositions = [dot.pos.tolist() for dot in stimuli]
        itemSpatial = unshuffledPositions.index(itemAtTemporalSelection.pos.tolist())
        itemSpatial = itemSpatial + autoSpace
        if randomSpace:
            itemSpatial += int(round( np.random.normal(0,2) ))
        # NOTE(review): hard-coded 23 looks like a wrap-around assuming a fixed
        # dot count; presumably this should depend on nDots - confirm
        while itemSpatial>23:
            itemSpatial = itemSpatial - 23
        #Once we have temporal pos of selected item relative to start of the trial
        #Need to get the serial spatial pos of this item, so that we can select items around it based on the autoSpace offset
        selectionTemporal = trialStimuli.index(stimuli[itemSpatial]) #This seems redundant, but it tests that the item we've selected in space is the cued item in time. if the temporal and spatial offsets are 0, it should be the same as cuePos.
        accuracy = cuePos == selectionTemporal
        mousePos = (stimuli[itemSpatial].pos[0],stimuli[itemSpatial].pos[1])
        expStop = False
        item = stimuli[itemSpatial]
        return accuracy, item, expStop, mousePos
    elif not autopilot:
        myMouse = event.Mouse(visible = False,win=myWin)
        responded = False
        expStop = False
        event.clearEvents()
        mousePos = (1e6,1e6)
        # NOTE(review): return value unused - presumably just drains the key buffer
        escape = event.getKeys()
        myMouse.setPos((0,0))
        myMouse.setVisible(True)
        while not responded:
            # keep the stimuli on screen while waiting for the click
            for item in trialStimuli:
                item.draw()
            instruction.draw()
            if drawProgress: #Draw progress message
                progress.draw()
            myWin.flip()
            button = myMouse.getPressed()
            mousePos = myMouse.getPos()
            escapeKey = event.getKeys()
            if button[0]:
                print('click detected')
                responded = True
                print('getResponse mousePos:',mousePos)
            elif len(escapeKey)>0:
                if escapeKey[0] == 'q':
                    # abort requested: return a dummy (incorrect) response
                    expStop = True
                    responded = True
                    return False, np.random.choice(trialStimuli), expStop, (0,0)
        # match the click to the nearest stimulus by Euclidean distance
        clickDistances = []
        for item in trialStimuli:
            x = mousePos[0] - item.pos[0]
            y = mousePos[1] - item.pos[1]
            distance = sqrt(x**2 + y**2)
            clickDistances.append(distance)
        if not expStop:
            minDistanceIdx = clickDistances.index(min(clickDistances))
            accuracy = minDistanceIdx == cuePos
            item = trialStimuli[minDistanceIdx]
            myMouse.setVisible(False)
            return accuracy, item, expStop, mousePos
def drawStimuli(nDots, radius, center, stimulusObject, sameEachTime = True):
    """Place nDots stimuli evenly around a circle.

    Parameters
    ----------
    nDots : int
        Number of positions around the circle.
    radius : float
        Circle radius (same units the stimuli use for .pos).
    center : sequence of 2 floats
        (x, y) centre of the circle.
    stimulusObject : object or list/tuple
        A single stimulus (shallow-copied into every position when
        sameEachTime is True), or a list/tuple of exactly nDots stimuli.
    sameEachTime : bool
        If True, every position gets a copy.copy() of stimulusObject.

    Returns the list of positioned stimuli, or None if arguments are
    inconsistent (an error message is printed in that case).
    """
    if len(center) != 2:
        print('Center coords must be list of length 2')
        return None
    if not sameEachTime and not isinstance(stimulusObject, (list, tuple)):
        print('You want different objects in each position, but your stimuli is not a list or tuple')
        return None
    if not sameEachTime and isinstance(stimulusObject, (list, tuple)) and len(stimulusObject) != nDots:
        print('You want different objects in each position, but the number of positions does not equal the number of items')
        return None
    spacing = 360. / nDots
    stimuli = []
    for dot in range(nDots):
        # Multiples of 90 deg are special-cased because computers can't store
        # the exact value of pi, so e.g. cos(pi/2) comes out as 6.123e-17
        # rather than exactly 0.
        angle = dot * spacing
        if angle == 0:
            xpos, ypos = radius, 0
        elif angle == 90:
            xpos, ypos = 0, radius
        elif angle == 180:
            xpos, ypos = -radius, 0
        elif angle == 270:
            xpos, ypos = 0, -radius
        else:
            # BUG FIX: the original used "elif angle%90!=0" here, which left
            # xpos/ypos carrying stale values for any unhandled multiple of 90.
            xpos = radius * cos(radians(angle))
            ypos = radius * sin(radians(angle))
        if sameEachTime:
            stim = copy.copy(stimulusObject)
        else:
            stim = stimulusObject[dot]
        # BUG FIX: the original validated `center` but never used it, so the
        # circle was always centred on (0, 0).  Offset by the requested
        # centre; behaviour is unchanged for the existing caller, which
        # passes center=(0, 0).
        stim.pos = (center[0] + xpos, center[1] + ypos)
        stimuli.append(stim)
    return stimuli
def checkTiming(ts):
    """Count frames whose inter-frame interval exceeded the tolerance.

    ts is the sequence of frame timestamps (seconds) recorded during the
    trial.  Relies on the module-level refreshRate and trialFrames globals.
    Returns the number of over-long ("dropped") frames and prints a warning
    when there were any.
    """
    intervalsMs = np.diff(ts) * 1000
    #print(intervalsMs)
    # Proportion longer than the nominal frame duration that counts as a miss
    tolerance = .3
    threshold = np.round(1000/refreshRate*(1.0+tolerance), 2)
    # Indices of frames that exceeded 130% of the expected duration
    longIdxs = np.where(intervalsMs > threshold)[0]
    nLong = len(longIdxs)
    if nLong > 0:
        print(nLong, 'frames of', trialFrames, 'were longer than',
              str(1000/refreshRate*(1.0+tolerance)))
    return nLong
######Create visual objects, noise masks, response prompts etc. ###########
######Draw your stimuli here if they don't change across trials, but other parameters do (like timing or distance)
######If you want to automate your stimuli. Do it in a function below and save clutter.
######For instance, maybe you want random pairs of letters. Write a function!
###########################################################################
#Calculate size of stimuli in original experiment
#Reconstruct the pixels-per-degree of the original study's display (1024x768
#on a 17-inch monitor) so sizes given in original-pixels can be converted to
#degrees of visual angle on this setup.
OGWidth = 1024
OGHeight = 768
OGDiag = sqrt(OGWidth**2 + OGHeight**2)
OGDiagInch = 17
OGDiagCM = OGDiagInch * 2.54
# NOTE(review): assumes a 57 cm viewing distance (where 1 cm ~ 1 deg) and
# computes the visual angle of the whole diagonal at once - confirm this
# matches the original study's geometry.
OGpixelPerDegree = OGDiag/((atan(OGDiagCM/57.))*(180/np.pi))
print('OGPPD', OGpixelPerDegree)
radiusPix = 200
radius = radiusPix/OGpixelPerDegree #circle radius, converted to degrees
center = (0,0) #circle centre
fixSize = .1 #fixation dot radius
fixation= visual.Circle(myWin, radius = fixSize , fillColor = (1,1,1), units=units)
#Cue: an unfilled ring drawn around the cued dot
cueRadiusPix = 360
cueRadiusDeg = cueRadiusPix/OGpixelPerDegree
cue = visual.Circle(myWin, radius = cueRadiusDeg, fillColor = None, lineColor = (1,1,1), units = units)
#Response-screen prompt, positioned just below the stimulus circle
instruction = visual.TextStim(myWin,pos=(0, -(radius+1)),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.75,units=units)
instructionText = 'Click the dot that was on screen with the cue.'
instruction.text = instructionText
#Progress message drawn between trials when drawProgress is set (main loop)
progress = visual.TextStim(myWin,pos=(0, -(radius+1.5)),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.75,units=units)
##Set up stimuli
stimulusSizePix = 20.
stimulusSizeDeg = stimulusSizePix/OGpixelPerDegree
stimulus = visual.Circle(myWin, radius = stimulusSizeDeg, fillColor = (1,1,1) )
nDots = 24 #number of dots around the circle
sameEachTime = True #same item each position?
stimuli = drawStimuli(nDots, radius, center, stimulus, sameEachTime)
###Trial timing parameters (milliseconds, converted to whole frames below)
SOAMS = 66.667 #stimulus-onset asynchrony between successive dots
itemMS = 22.222 #time each dot is visible
ISIMS = SOAMS - itemMS #blank gap between successive dots
trialMS = SOAMS * nDots
cueMS = itemMS
#Convert ms to frame counts at the measured refresh rate.  np.floor means
#actual durations can be slightly shorter than the nominal ms values.
SOAFrames = int(np.floor(SOAMS/(1000./refreshRate)))
itemFrames = int(np.floor(itemMS/(1000./refreshRate)))
ISIFrames = int(np.floor(ISIMS/(1000./refreshRate)))
trialFrames = int(nDots*SOAFrames)
cueFrames = int(np.floor(cueMS/(1000./refreshRate)))
print('cueFrames=',cueFrames)
print('itemFrames=',itemFrames)
print('refreshRate =', refreshRate)
print('cueMS from frames =', cueFrames*(1000./refreshRate))
print('num of SOAs in the trial:', trialFrames/SOAFrames)
###############
## Factorial design ###
###############
numResponsesPerTrial = 1 #default. Used to create headers for dataFile
stimList = []
#cuePositions = [dot for dot in range(nDots) if dot not in [0,nDots-1]]
#Fixed cue position for this version of the experiment
cuePositions = [10]
print('cuePositions: ',cuePositions)
#cuePositions = cuePositions[2:(nDots-3)] #drop the first and final two dots
#Set up the factorial design (list of all conditions)
for cuePos in cuePositions:
    stimList.append({'cuePos':cuePos})
#trialsPerCondition is assumed to be defined earlier in the script
trials = data.TrialHandler(stimList, nReps = trialsPerCondition)
####Create output file###
#########################################################################
dataFile = open(fileNameWithPath + '.txt', 'w')
# NOTE(review): numResponsesPerTrial is assigned 1 twice (here and above) -
# harmless, but one of the assignments is redundant.
numResponsesPerTrial = 1
#headers for initial datafile rows, they don't get repeated. These appear in the file in the order they appear here.
oneOffHeaders = [
    'subject',
    'task',
    'staircase',
    'trialNum'
]
for header in oneOffHeaders:
    print(header, '\t', end='', file=dataFile)
#Headers for duplicated datafile rows. These are repeated using numResponsesPerTrial. For instance, we might have two responses in a trial.
duplicatedHeaders = [
    'responseSpatialPos',
    'responseX',
    'responseY',
    'correctX',
    'correctY',
    'clickX',
    'clickY',
    'accuracy',
    'responsePosInStream',
    'correctPosInStream'
]
if numResponsesPerTrial == 1:
    for header in duplicatedHeaders:
        print(header, '\t', end='', file=dataFile)
elif numResponsesPerTrial > 1:
    for response in range(numResponsesPerTrial):
        for header in duplicatedHeaders:
            print(header+str(response), '\t', end='', file=dataFile)
#One column per serial position, recording each dot's re-mapped position
for pos in range(nDots):
    print('position'+str(pos),'\t',end='',file=dataFile)
#Headers done. Do a new line
print('longFrames',file=dataFile)
expStop = False
drawProgress = False #draw the progress message?
trialNum=0; numTrialsCorrect=0; framesSaved=0;
print('Starting experiment of',trials.nTotal,'trials. Current trial is trial ',trialNum)
#NextRemindCountText.setText( str(trialNum) + ' of ' + str(trials.nTotal) )
#NextRemindCountText.draw()
myWin.flip()
#end of header
trialClock = core.Clock()
stimClock = core.Clock()
#Optional EyeLink setup; eyeTracking and the related flags are defined
#earlier in the script.
if eyeTracking:
    if getEyeTrackingFileFromEyetrackingMachineAtEndOfExperiment:
        eyeMoveFile=('EyeTrack_'+subject+'_'+timeAndDateStr+'.EDF')
    tracker=Tracker_EyeLink(myWin,trialClock,subject,1, 'HV5',(255,255,255),(0,0,0),False,(widthPix,heightPix))
#Main experiment loop: one iteration per trial until all trials are done or
#the participant quits (expStop set inside getResponse).
while trialNum < trials.nTotal and expStop==False:
    print(float(trialNum)/trials.nTotal)
    if trials.nTotal > 0 and trialNum > 0:
        # NOTE(review): this float modulo test ("fraction % .2 == 0") is
        # fragile - floating-point remainders are almost never exactly 0, so
        # the progress message will rarely, if ever, be shown.  A counter
        # based check (e.g. every nTotal//5 trials) would be more reliable.
        if(float(trialNum)/trials.nTotal)%.2 == 0:
            print('setting progress text')
            progress.text = 'You have completed ' + str(trialNum) + ' of ' + str(trials.nTotal) + ' trials.'
            drawProgress = True
        else:
            drawProgress = False
    #Inter-trial fixation; the 1 s wait is skipped on autopilot
    fixation.draw()
    myWin.flip()
    if not autopilot:
        core.wait(1)
    trial = trials.next()
    # print('trial idx is',trials.thisIndex)
    cuePos = trial.cuePos
    # print(cuePos)
    print("Doing trialNum",trialNum)
    #Present the dot sequence; ts holds the per-flip timestamps for the
    #frame-timing check below
    trialDone, trialStimuli, trialStimuliOrder, ts = oneTrial(stimuli)
    #Shift positions so that the list starts at 1, which is positioned at (0,radius), and increases clockwise. This is what the MM code expects
    MMPositions = list() #Mixture modelling positions
    for dotPos in trialStimuliOrder:
        if dotPos < (nDots/4):
            MMPositions.append(dotPos + 19)
        elif dotPos >= (nDots/4):
            MMPositions.append(dotPos -5)
    nBlips = checkTiming(ts)
    if trialDone:
        accuracy, response, expStop, clickPos = getResponse(trialStimuli)
        responseCoord = response.pos.tolist()
        spatialCoords= [item.pos.tolist() for item in stimuli]
        try:
            responseSpatialRelativeToXAxis = spatialCoords.index(responseCoord)
        except ValueError:
            # NOTE(review): only a message is printed here; if the lookup
            # genuinely fails, responseSpatialRelativeToXAxis is undefined
            # and the next line raises NameError.  Consider aborting the
            # trial (or re-raising) instead of continuing.
            print('coord not in list')
        #Re-map from index-relative-to-+x-axis to the clockwise-from-top
        #numbering used for the data file (same shift as MMPositions above)
        if responseSpatialRelativeToXAxis < (nDots/4):
            responseSpatial = responseSpatialRelativeToXAxis + 19
        elif responseSpatialRelativeToXAxis >= (nDots/4):
            responseSpatial = responseSpatialRelativeToXAxis - 5
        trialPositions = [item.pos.tolist() for item in trialStimuli]
        responseTemporal = trialPositions.index(responseCoord)
        # print('trial positions in sequence:',trialPositions)
        # print('position of item nearest to click:',responseSpatial)
        # print('Position in sequence of item nearest to click:',responseTemporal)
        correctSpatial = trialStimuli[cuePos].pos
        correctTemporal = cuePos
        #Write one row per trial.  Note print's default separator inserts a
        #space around every '\t' argument, so fields are " \t "-separated -
        #this matches how the headers were written above.
        print(subject,'\t',
            'dot-jump','\t',
            False,'\t',
            trialNum,'\t',
            responseSpatial,'\t',
            responseCoord[0],'\t',
            responseCoord[1],'\t',
            correctSpatial[0],'\t',
            correctSpatial[1],'\t',
            clickPos[0],'\t',
            clickPos[1],'\t',
            accuracy,'\t',
            responseTemporal,'\t',
            correctTemporal,'\t',
            end='',
            file = dataFile
            )
        for dot in range(nDots):
            print(MMPositions[dot], '\t',end='', file=dataFile)
        print(nBlips, file=dataFile)
        trialNum += 1
        dataFile.flush()
if expStop:
    print('Participant cancelled experiment on trial', trialNum)
dataFile.flush()
|
alexholcombe/dot-jump
|
dataRaw/Fixed Cue/test_dot-jump21Nov2016_12-01.py
|
Python
|
gpl-3.0
| 25,538
|
[
"Gaussian"
] |
1109737c86df2ab38104444b8e9a5e1e569073a1652bcca9fcf853b6688ee5b8
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from robotide.lib.robot.model import SuiteVisitor
from robotide.lib.robot.utils import plural_or_not, secs_to_timestr
from .highlighting import HighlightingStream
class DottedOutput(object):
    """Minimal console logger: one status character per finished test.

    '.' = pass, 'x' = stopped via the robot-exit tag, 'f' = non-critical
    failure, highlighted 'F' = critical failure.  When the top-level suite
    ends, StatusReporter prints the failure details and summary.
    """

    def __init__(self, width=78, colors='AUTO', stdout=None, stderr=None):
        # Default to the real process streams so the report survives
        # redirection of sys.stdout/sys.stderr elsewhere.
        self._width = width
        self._stdout = HighlightingStream(stdout or sys.__stdout__, colors)
        self._stderr = HighlightingStream(stderr or sys.__stderr__, colors)

    def start_suite(self, suite):
        # Only the root suite gets the banner.
        if suite.parent:
            return
        banner = "Running suite '%s' with %d tests.\n" % (suite.name,
                                                          suite.test_count)
        self._stdout.write(banner)
        self._stdout.write('=' * self._width + '\n')

    def end_test(self, test):
        if test.passed:
            marker = '.'
        elif 'robot-exit' in test.tags:
            marker = 'x'
        elif not test.critical:
            marker = 'f'
        else:
            # Critical failure: highlighted so it stands out in the stream.
            self._stdout.highlight('F', 'FAIL')
            return
        self._stdout.write(marker)

    def end_suite(self, suite):
        if suite.parent:
            return
        self._stdout.write('\n')
        StatusReporter(self._stdout, self._width).report(suite)
        self._stdout.write('\n')

    def message(self, msg):
        # Surface warnings and errors immediately on stderr.
        if msg.level in ('WARN', 'ERROR'):
            self._stderr.error(msg.message, msg.level)

    def output_file(self, name, path):
        self._stdout.write('%-8s %s\n' % (name+':', path))
class StatusReporter(SuiteVisitor):
    """Walk a finished suite, echo the details of every critical failure,
    then print the run summary and final suite status."""

    def __init__(self, stream, width):
        self._stream = stream
        self._width = width

    def report(self, suite):
        # The visit triggers visit_test for each test, printing failures.
        suite.visit(self)
        stats = suite.statistics
        total = stats.all.total
        elapsed = secs_to_timestr(suite.elapsedtime/1000.0)
        summary = "%s\nRun suite '%s' with %d test%s in %s.\n\n" \
                  % ('=' * self._width, suite.name, total,
                     plural_or_not(total), elapsed)
        self._stream.write(summary)
        self._stream.highlight(suite.status + 'ED', suite.status)
        self._stream.write('\n%s\n' % stats.message)

    def visit_test(self, test):
        # Only critical failures that were not deliberate exits get details.
        if test.passed or not test.critical or 'robot-exit' in test.tags:
            return
        self._stream.write('-' * self._width + '\n')
        self._stream.highlight('FAIL')
        self._stream.write(': %s\n%s\n' % (test.longname,
                                           test.message.strip()))
|
fingeronthebutton/RIDE
|
src/robotide/lib/robot/output/console/dotted.py
|
Python
|
apache-2.0
| 3,029
|
[
"VisIt"
] |
da492b76c461520f472f398fe93371df4b1be6cabe840c0e760527e6f0556361
|
# -*- coding: utf-8 -*-
import datetime
from collections import OrderedDict
from gluon import current, IS_IN_SET, URL
from gluon.storage import Storage
from s3 import S3Method, S3Represent
from .controllers import deploy_index, inv_dashboard
# Organisation-type name used to distinguish National Societies from
# Suppliers/Partners (see ifrc_realm_entity)
RED_CROSS = "Red Cross / Red Crescent"
def config(settings):
"""
Template settings for IFRC's Resource Management System
http://eden.sahanafoundation.org/wiki/Deployments/IFRC
This version was developed for the Americas Zone
Hence Apellido Paterno (pr_person.middle_name) matches to auth_user.last_name
"""
T = current.T
# -------------------------------------------------------------------------
# System Name
#
settings.base.system_name = T("Resource Management System")
settings.base.system_name_short = T("RMS")
# -------------------------------------------------------------------------
# Custom Models
#
settings.base.custom_models = {"deploy": "RMS",
}
# -------------------------------------------------------------------------
# Pre-Populate
#
settings.base.prepopulate.append("RMS")
settings.base.prepopulate_demo.append("RMS/Demo")
# -------------------------------------------------------------------------
# Theme (folder to use for views/layout.html)
#
settings.base.theme = "RMS"
# Uncomment to disable responsive behavior of datatables
#settings.ui.datatables_responsive = False
# Uncomment to show a default cancel button in standalone create/update forms
settings.ui.default_cancel_button = True
# Limit Export Formats
settings.ui.export_formats = ("xls","pdf")
# @todo: configure custom icons
#settings.ui.custom_icons = {
# "male": "icon-male",
# "female": "icon-female",
# "medical": "icon-plus-sign-alt",
#}
# =========================================================================
# System Settings
# -------------------------------------------------------------------------
# Security Policy
settings.security.policy = 7
settings.security.map = True
# Authorization Settings
settings.auth.registration_requires_approval = True
settings.auth.registration_requires_verification = True
settings.auth.registration_requests_organisation = True
settings.auth.registration_organisation_required = True
settings.auth.registration_requests_site = True
settings.auth.registration_link_user_to = {"staff": T("Staff"),
"volunteer": T("Volunteer"),
#"member": T("Member")
}
# This hides the options from the UI
#settings.auth.registration_link_user_to_default = ["volunteer"]
#settings.auth.record_approval = True
# @ToDo: Should we fallback to organisation_id if site_id is None?
settings.auth.registration_roles = {"site_id": ["reader",
],
}
# Owner Entity
settings.auth.person_realm_human_resource_site_then_org = True
settings.auth.person_realm_member_org = True
# Activate entity role manager tabs for OrgAdmins
settings.auth.entity_role_manager = True
# Use HRM for the /default/person Profile
settings.auth.profile_controller = "hrm"
# -------------------------------------------------------------------------
def ifrc_realm_entity(table, row):
"""
Assign a Realm Entity to records
"""
tablename = table._tablename
# Do not apply realms for Master Data
# @ToDo: Restore Realms and add a role/functionality support for Master Data
if tablename in ("hrm_certificate",
"hrm_department",
"hrm_job_title",
"hrm_course",
"hrm_programme",
"member_membership_type",
"vol_award",
):
return None
db = current.db
s3db = current.s3db
realm_entity = 0
use_user_organisation = False
use_user_root_organisation = False
if tablename in ("inv_req",
"inv_track_item",
#"inv_send", # Only need to have site_id, to_site_id will manage via Recv, if-necessary
#"inv_recv", # Only need to have site_id, from_site_id will manage via Send, if-necessary
):
if tablename == "inv_req":
# Need a pr_realm with multiple inheritance
record_id = row["id"]
realm_name = "REQ_%s" % record_id
ritable = s3db.inv_req_item
query = (ritable.req_id == record_id) & \
(ritable.deleted == False)
request_items = db(query).select(ritable.site_id)
site_ids = set([ri.site_id for ri in request_items] + [row["site_id"]])
elif tablename == "inv_track_item":
# Inherit from inv_send &/or inv_recv
record = db(table.id == row["id"]).select(table.send_id,
table.recv_id,
limitby = (0, 1),
).first()
send_id = record.send_id
recv_id = record.recv_id
if send_id and recv_id:
# Need a pr_realm with dual inheritance
realm_name = "WB_%s" % send_id
send = db(stable.id == send_id).select(stable.site_id,
stable.to_site_id,
limitby = (0, 1),
).first()
site_ids = (send,site_id,
send.to_site_id,
)
elif send_id:
# Inherit from the Send
stable = s3db.inv_send
send = db(stable.id == send_id).select(stable.realm_entity,
limitby = (0, 1),
).first()
return send.realm_entity
elif recv_id:
# Inherit from the Recv
rtable = s3db.inv_recv
recv = db(rtable.id == recv_id).select(rtable.realm_entity,
limitby = (0, 1),
).first()
return recv.realm_entity
#elif tablename == "inv_send":
# record_id = row["id"]
# realm_name = "WB_%s" % record_id
# to_site_id = db(table.id == record_id).select(table.to_site_id,
# limitby = (0, 1),
# ).first().to_site_id
# site_ids = (row["site_id"],
# to_site_id,
# )
#elif tablename == "inv_recv":
# record_id = row["id"]
# realm_name = "GRN_%s" % record_id
# from_site_id = db(table.id == record_id).select(table.from_site_id,
# limitby = (0, 1),
# ).first().from_site_id
# site_ids = (row["site_id"],
# from_site_id
# )
# Find/create the Realm
rtable = s3db.pr_realm
realm = db(rtable.name == realm_name).select(rtable.pe_id,
limitby = (0, 1),
).first()
if not realm:
realm_id = rtable.insert(name = realm_name)
realm = Storage(id = realm_id)
s3db.update_super(rtable, realm)
realm_entity = realm.pe_id
# Lookup the PE ID for Sites involved
stable = s3db.org_site
sites = db(stable.site_id.belongs(site_ids)).select(stable.site_id,
stable.instance_type,
)
instance_types = {}
for site in sites:
instance_type = site.instance_type
if instance_type in instance_types:
instance_types[instance_type].append(site.site_id)
else:
instance_types[instance_type] = [site.site_id]
entities = []
for instance_type in instance_types:
itable = s3db.table(instance_type)
instances = db(itable.site_id.belongs(instance_types[instance_type])).select(itable.pe_id)
entities += [i.pe_id for i in instances]
# Get all current affiliations
rtable = s3db.pr_role
atable = s3db.pr_affiliation
query = (atable.deleted == False) & \
(atable.pe_id.belongs(entities)) & \
(rtable.deleted == False) & \
(rtable.id == atable.role_id)
affiliations = db(query).select(rtable.pe_id,
rtable.role,
)
# Remove affiliations which are no longer needed
from s3db.pr import OU, \
pr_add_affiliation, \
pr_remove_affiliation
for a in affiliations:
pe_id = a.pe_id
role = a.role
if pe_id not in entities:
pr_remove_affiliation(pe_id, realm_entity, role=role)
else:
entities.remove(pe_id)
# Add new affiliations
for e in entities:
pr_add_affiliation(pe_id, realm_entity, role="Parent", role_type=OU)
return realm_entity
elif tablename == "org_organisation":
# Suppliers & Partners should be in the realm of the user's root organisation
ottable = s3db.org_organisation_type
ltable = db.org_organisation_organisation_type
query = (ltable.organisation_id == row["id"]) & \
(ltable.organisation_type_id == ottable.id)
otype = db(query).select(ottable.name,
limitby = (0, 1),
).first()
if not otype or otype.name != RED_CROSS:
use_user_organisation = True
use_user_root_organisation = True
elif tablename in ("org_facility", "pr_forum", "pr_group"):
# Facilities, Forums and Groups should be in the realm of the user's organisation
use_user_organisation = True
elif tablename == "hrm_training":
# Inherit realm entity from the related HR record
htable = s3db.hrm_human_resource
query = (table.id == row["id"]) & \
(htable.person_id == table.person_id) & \
(htable.deleted != True)
rows = db(query).select(htable.realm_entity,
limitby = (0, 2)
)
if len(rows) == 1:
realm_entity = rows.first().realm_entity
else:
# Ambiguous => try course organisation
ctable = s3db.hrm_course
otable = s3db.org_organisation
query = (table.id == row["id"]) & \
(ctable.id == table.course_id) & \
(otable.id == ctable.organisation_id)
org = db(query).select(otable.pe_id,
limitby = (0, 1),
).first()
if org:
return org.pe_id
# otherwise: inherit from the person record
else:
# Entity reference fields
EID = "pe_id"
OID = "organisation_id"
SID = "site_id"
#GID = "group_id"
PID = "person_id"
# Owner Entity Foreign Key
realm_entity_fks = {"pr_contact": [("org_organisation", EID),
#("po_household", EID),
("pr_person", EID),
],
"pr_contact_emergency": EID,
"pr_physical_description": EID,
"pr_address": [("org_organisation", EID),
("pr_person", EID),
],
"pr_image": EID,
"pr_identity": PID,
"pr_education": PID,
#"pr_note": PID,
"hrm_human_resource": SID,
"hrm_training": PID,
"hrm_training_event": OID,
"inv_adj": SID,
"inv_adj_item": "adj_id",
"inv_inv_item": SID,
"inv_recv": SID,
"inv_send": SID,
#"inv_track_item": "track_org_id",
#"inv_req": "site_id",
"inv_req_item": "req_id",
#"po_household": "area_id",
#"po_organisation_area": "area_id",
}
# Default Foreign Keys (ordered by priority)
default_fks = (#"household_id",
"catalog_id",
"project_id",
"project_location_id",
)
# Link Tables
#realm_entity_link_table = {
# "project_task": Storage(tablename = "project_task_project",
# link_key = "task_id"
# )
# }
#if tablename in realm_entity_link_table:
# # Replace row with the record from the link table
# link_table = realm_entity_link_table[tablename]
# table = s3db[link_table.tablename]
# rows = db(table[link_table.link_key] == row.id).select(table.id,
# limitby = (0, 1),
# )
# if rows:
# # Update not Create
# row = rows.first()
# Check if there is a FK to inherit the realm_entity
fk = realm_entity_fks.get(tablename, None)
fks = [fk] if not isinstance(fk, list) else list(fk)
fks.extend(default_fks)
for default_fk in fks:
if isinstance(default_fk, tuple):
instance_type, fk = default_fk
else:
instance_type, fk = None, default_fk
if fk not in table.fields:
continue
# Inherit realm_entity from parent record
if fk == EID:
if instance_type:
ftable = s3db.table(instance_type)
if not ftable:
continue
else:
ftable = s3db.pr_person
query = (ftable[EID] == row[EID])
elif fk == SID:
# Need to get the entity from the instance, not the super
from s3db.pr import pr_get_pe_id
realm_entity = pr_get_pe_id("org_site",
row[SID])
break
elif fk == OID:
ftable = s3db.org_organisation
query = (ftable.id == row[OID])
else:
# PID & other FKs not in row, so need to load
ftablename = table[fk].type[10:] # reference tablename
ftable = s3db[ftablename]
query = (table.id == row["id"]) & \
(table[fk] == ftable.id)
record = db(query).select(ftable.realm_entity,
limitby = (0, 1),
).first()
if record:
return record.realm_entity
#else:
# Continue to loop through the rest of the default_fks
# Fall back to default get_realm_entity function
if use_user_organisation:
auth = current.auth
user = auth.user
if user:
# @ToDo - this might cause issues if the user's org is different
# from the realm that gave them permissions to create the record
if use_user_root_organisation:
organisation_id = auth.root_org()
else:
organisation_id = user.organisation_id
from s3db.pr import pr_get_pe_id
realm_entity = pr_get_pe_id("org_organisation",
organisation_id)
else:
# Prepop data - need to handle this separately
if tablename == "org_organisation":
# Use org_organisation_organisation
ltable = s3db.org_organisation_organisation
otable = s3db.org_organisation
query = (ltable.organisation_id == row["id"]) & \
(ltable.parent_id == otable.id)
parent = db(query).select(otable.realm_entity,
limitby = (0, 1),
).first()
if parent:
return parent.realm_entity
return realm_entity
settings.auth.realm_entity = ifrc_realm_entity
# -------------------------------------------------------------------------
# L10n (Localization) settings
#
settings.L10n.languages = OrderedDict([
("en", "English"),
("pt-br", "Portuguese (Brazil)"),
("es", "Spanish"),
("fr", "French"),
])
# Default Language
settings.L10n.default_language = "en"
# Default timezone for users
settings.L10n.timezone = "America/Bogota"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Unsortable 'pretty' date format (for use in English)
settings.L10n.date_format = "%d-%b-%Y"
# Make last name in person/user records mandatory
#settings.L10n.mandatory_lastname = True # mother's surname
settings.L10n.mandatory_middlename = True # father's surname
# Uncomment this to Translate Layer Names
settings.L10n.translate_gis_layer = True
# Translate Location Names
settings.L10n.translate_gis_location = True
# Uncomment this for Alternate Location Names
settings.L10n.name_alt_gis_location = True
# Uncomment this to Translate Organisation Names/Acronyms
settings.L10n.translate_org_organisation = True
# Names of Orgs with specific settings
HNRC = "Honduran Red Cross"
# -------------------------------------------------------------------------
# Finance settings
#
def currencies(default):
""" RMS- and NS-specific currencies (lazy setting) """
# Currencies that are common for all NS
currencies = {"EUR" : "Euros",
"CHF" : "Swiss Francs",
"USD" : "United States Dollars",
}
# NS-specific currencies
root_org = current.auth.root_org_name()
if root_org == HNRC:
currencies["HNL"] = "Honduran Lempira"
return currencies
settings.fin.currencies = currencies
def currency_default(default):
""" NS-specific default currencies (lazy setting) """
root_org = current.auth.root_org_name()
if root_org == HNRC:
default = "HNL"
#else:
# default = "USD"
return default
settings.fin.currency_default = currency_default
def currency_represent(currency):
""" NS-specific currency represent """
if currency == "HNL":
root_org = current.auth.root_org_name()
if root_org == HNRC:
return "L"
return currency
# -------------------------------------------------------------------------
# Map Settings
# Display Resources recorded to Admin-Level Locations on the map
# @ToDo: Move into gis_config?
settings.gis.display_L0 = True
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# GeoNames username
settings.gis.geonames_username = "rms_dev"
# @ToDo: Lazy fn once we have NS to enable this for
# (off for HN & off by default)
settings.gis.postcode_selector = False
# -------------------------------------------------------------------------
# Filter Manager
#settings.search.filter_manager = False
# Use the label 'Camp' instead of 'Shelter'
settings.ui.camp = True
# Requires enabling fancyZoom JS & CSS
#settings.ui.thumbnail = (60,60)
# -------------------------------------------------------------------------
# Default Summary
#
settings.ui.summary = ({"common": True,
"name": "add",
"widgets": [{"method": "create"}],
},
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}],
},
{"name": "charts",
"label": "Report",
"widgets": [{"method": "report", "ajax_init": True}],
},
{"name": "map",
"label": "Map",
"widgets": [{"method": "map", "ajax_init": True}],
},
)
# -------------------------------------------------------------------------
# Content Management
#
#settings.cms.hide_index = True
settings.cms.richtext = True
# -------------------------------------------------------------------------
# Messaging
# Parser
#settings.msg.parser = "IFRC"
# =========================================================================
# Module Settings
# -------------------------------------------------------------------------
# Members
#
settings.member.cv_tab = True
# -------------------------------------------------------------------------
# Organisations
#
# Enable the use of Organisation Branches
settings.org.branches = True
settings.org.branches_tree_view = True
# Set the length of the auto-generated org/site code the default is 10
#settings.org.site_code_len = 3
# Set the label for Sites
settings.org.site_label = "Office/Warehouse/Facility"
# -------------------------------------------------------------------------
# Human Resource Management
#
# Uncomment to allow Staff & Volunteers to be registered without an email address
settings.hrm.email_required = True
settings.hrm.mix_staff = True
# Uncomment to show the Organisation name in HR represents
settings.hrm.show_organisation = True
# Uncomment to allow HRs to have multiple Job Titles
#settings.hrm.multiple_job_titles = True
# Uncomment to have each root Org use a different Job Title Catalog
settings.hrm.org_dependent_job_titles = True
settings.hrm.staff_departments = False
settings.hrm.teams = False
# Uncomment to disable the use of HR Credentials
settings.hrm.use_credentials = False
# Uncomment to disable the use of HR Certificates
#settings.hrm.use_certificates = False
# Uncomment to filter certificates by (root) Organisation & hence not allow Certificates from other orgs to be added to a profile (except by Admin)
#settings.hrm.filter_certificates = True
# Uncomment to auto-create certificates for courses
settings.hrm.create_certificates_from_courses = "organisation_id"
settings.hrm.use_code = True
settings.hrm.use_description = None # Replaced by Medical Information
# Uncomment to enable the use of HR Education
settings.hrm.use_education = True
# Uncomment to hide Job Titles
settings.hrm.use_job_titles = False
settings.hrm.use_medical = "Medical Information"
settings.hrm.use_national_id = True
settings.hrm.use_skills = True
# Custom label for Organisations in HR module
settings.hrm.organisation_label = "National Society / Branch"
# Custom label for Top-level Organisations in HR module
settings.hrm.root_organisation_label = "National Society"
# Uncomment to consolidate tabs into a single CV
settings.hrm.cv_tab = True
settings.hrm.vol_experience = "programme"
# Uncomment to consolidate tabs into Staff Record (set to False to hide the tab)
settings.hrm.record_tab = "record"
# Use Locations for Training Events, not Facilities
settings.hrm.event_site = False
# Training Instructors are Multiple
settings.hrm.training_instructors = "multiple"
# Training Filters are Contains
settings.hrm.training_filter_and = True
settings.hrm.record_label = "Information"
# Pass marks are defined by Course
settings.hrm.course_pass_marks = True
# Work History & Missions
settings.hrm.staff_experience = "both"
# Uncomment to do a search for duplicates in the new AddPersonWidget2
settings.pr.lookup_duplicates = True
settings.pr.separate_name_fields = 3
#def dob_required(default):
# """ NS-specific dob_required (lazy setting) """
# if current.auth.override is True:
# default = False
# else:
# root_org = current.auth.root_org_name()
# if root_org == HNRC:
# default = False
# else:
# # Human Talent module for zone
# default = True
# return default
#settings.pr.dob_required = dob_required
def hrm_course_grades(default):
""" Course Grades """
default = {0: T("No Show"),
1: T("Left Early"),
#2: T("Attendance"),
8: T("Pass"),
9: T("Fail"),
}
return default
settings.hrm.course_grades = hrm_course_grades
# =========================================================================
    def vol_programme_active(person_id):
        """
            Whether a Volunteer counts as 'Active' based on the number of hours
            they've done (both Trainings & Programmes) per month, averaged over
            the last year.
            If nothing recorded for the last 3 months, don't penalise as assume
            that data entry hasn't yet been done.

            Returns True when the volunteer averaged >= 8 hours/month over the
            window described below, else False.

            @ToDo: This should be based on the HRM record, not Person record
                   - could be active with Org1 but not with Org2
        """

        now = current.request.utcnow

        # Time spent on Programme work
        htable = current.s3db.hrm_programme_hours
        query = (htable.deleted == False) & \
                (htable.person_id == person_id) & \
                (htable.date != None)
        programmes = current.db(query).select(htable.hours,
                                              htable.date,
                                              orderby = htable.date,
                                              )
        if programmes:
            # Ignore up to 3 months of records (92 days ~= 3 months)
            three_months_prior = (now - datetime.timedelta(days = 92))
            # Window end: the later of the last entry and 3 months ago,
            # so recent data-entry gaps don't count against the volunteer
            end = max(programmes.last().date, three_months_prior.date())
            last_year = end - datetime.timedelta(days = 365)
            # Is this the Volunteer's first year?
            if programmes.first().date > last_year:
                # Only start counting from their first month
                start = programmes.first().date
            else:
                # Start from a year before the latest record
                start = last_year
            # Total hours between start and end
            programme_hours = 0
            for programme in programmes:
                if programme.date >= start and programme.date <= end and programme.hours:
                    programme_hours += programme.hours
            # Average hours per month (30.5 days ~= 1 month; the floor of 1
            # month avoids inflating averages for brand-new volunteers)
            months = max(1, (end - start).days / 30.5)
            average = programme_hours / months
            # Active?
            if average >= 8:
                return True
        return False
def hrm_vol_active(default):
""" Whether & How to track Volunteers as Active """
#root_org = current.auth.root_org_name()
#if root_org in (ARCS, IRCS):
# # Simple checkbox
# return True
#elif root_org in (CVTL, PMI, PRC):
# # Use formula based on hrm_programme
# return vol_programme_active
#elif root_org in (CRMADA, ):
# # Use formula based on vol_activity
# return vol_activity_active
#return False
# Use formula based on hrm_programme
return vol_programme_active
settings.hrm.vol_active = hrm_vol_active
settings.hrm.vol_active_tooltip = "A volunteer is defined as active if they've participated in an average of 8 or more hours of Program work or Trainings per month in the last year"
# Roles which are permitted to export ID cards
ID_CARD_EXPORT_ROLES = ("ORG_ADMIN", "hr_manager", "hr_assistant")
# -------------------------------------------------------------------------
# RIT
settings.deploy.team_label = "RIT"
settings.customise_deploy_home = deploy_index # Imported from controllers.py
# Alerts get sent to all recipients
settings.deploy.manual_recipients = False
settings.deploy.post_to_twitter = True
# -------------------------------------------------------------------------
# Projects
settings.project.assign_staff_tab = False
# Uncomment this to use settings suitable for a global/regional organisation (e.g. DRR)
settings.project.mode_3w = True
# Uncomment this to use DRR (Disaster Risk Reduction) extensions
settings.project.mode_drr = True
# Uncomment this to use Activity Types for Activities & Projects
#settings.project.activity_types = True
# Uncomment this to use Codes for projects
settings.project.codes = True
# Uncomment this to call project locations 'Communities'
#settings.project.community = True
# Uncomment this to enable Demographics in 3W projects
#settings.project.demographics = True
# Uncomment this to enable Hazards in 3W projects
settings.project.hazards = True
# Uncomment this to enable Indicators in projects
# Just HNRC
#settings.project.indicators = True
# Uncomment this to use multiple Budgets per project
settings.project.multiple_budgets = True
# Uncomment this to use multiple Organisations per project
settings.project.multiple_organisations = True
# Ondelete behaviour for ProjectPlanningModel
settings.project.planning_ondelete = "RESTRICT"
# Uncomment this to enable Programmes in projects
settings.project.programmes = True
# Uncomment this to enable Themes in 3W projects
settings.project.themes = True
# Uncomment this to customise
# Links to Filtered Components for Donors & Partners
settings.project.organisation_roles = {
1: T("Host National Society"),
2: T("Partner"),
3: T("Donor"),
#4: T("Customer"), # T("Beneficiary")?
#5: T("Supplier"),
9: T("Partner National Society"),
}
# -------------------------------------------------------------------------
# Inventory Management
settings.customise_inv_home = inv_dashboard() # Imported from controllers.py
# Hide Staff Management Tabs for Facilities in Inventory Module
settings.inv.facility_manage_staff = False
settings.inv.document_filing = True
settings.inv.minimums = True
settings.inv.send_gift_certificate = True
settings.inv.send_packaging = True
# Uncomment if you need a simpler (but less accountable) process for managing stock levels
#settings.inv.direct_stock_edits = True
settings.inv.stock_count = False
settings.inv.item_status = {#0: current.messages["NONE"], # Not defined yet
0: T("Good"),
1: T("Damaged"),
#1: T("Dump"),
#2: T("Sale"),
#3: T("Reject"),
#4: T("Surplus")
}
settings.inv.recv_types = {#0: current.messages["NONE"], In Shipment Types
#11: T("Internal Shipment"), In Shipment Types
32: T("Donation"),
34: T("Purchase"),
36: T("Loan"), # 'Consignment'?
37: T("In Transit"), # Loaning warehouse space to another agency
}
# Calculate Warehouse Free Capacity
settings.inv.warehouse_free_capacity_calculated = True
# Use structured Bins
settings.inv.bin_site_layout = True
settings.inv.recv_ref_writable = True
settings.inv.send_ref_writable = True
# Use Stock Cards
settings.inv.stock_cards = True
# Disable Alternate Items
settings.supply.use_alt_name = False
transport_opts = {"Air": T("Air"),
"Sea": T("Sea"),
"Road": T("Road"),
"Hand": T("Hand"),
}
# -------------------------------------------------------------------------
# Requestions
# Uncomment to disable Inline Forms
settings.inv.req_inline_forms = False
# No need to use Commits (default anyway)
#settings.inv.req_use_commit = False
# Should Requests ask whether Transportation is required?
settings.inv.req_ask_transport = True
# Request Numbers are entered manually
settings.inv.generate_req_number = False
settings.inv.req_pack_values = False
# Don't Match Requests (they are assigned instead)
settings.inv.req_match_tab = False
# HNRC disable Request Matching as don't want users making requests to see what stock is available
# PIRAC want this to be done by the Logistics Approver
settings.inv.req_prompt_match = False
# Uncomment to disable Recurring Request
settings.inv.req_recurring = False
# Use Order Items
settings.inv.req_order_item = True
# Use Workflow
settings.inv.req_workflow = True
# =========================================================================
# Template Modules
#
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = "RMS",
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
#module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
#module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
#module_type = None # No Menu
)),
("errors", Storage(
name_nice = T("Ticket Viewer"),
#description = "Needed for Breadcrumbs",
restricted = False,
#module_type = None # No Menu
)),
("setup", Storage(
name_nice = T("Setup"),
#description = "WebSetup",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # No Menu
)),
("sync", Storage(
name_nice = T("Synchronization"),
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
#module_type = None # This item is handled separately for the menu
)),
("translate", Storage(
name_nice = T("Translation Functionality"),
#description = "Selective translation of strings based on module.",
#module_type = None,
)),
# Uncomment to enable internal support requests
("support", Storage(
name_nice = T("Support"),
#description = "Support Requests",
restricted = True,
#module_type = None # This item is handled separately for the menu
)),
("gis", Storage(
name_nice = T("Map"),
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
#module_type = 6, # 6th item in the menu
)),
("pr", Storage(
name_nice = T("Person Registry"),
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
#module_type = 10
)),
("org", Storage(
name_nice = T("Organizations"),
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
#module_type = 1
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = T("Staff"),
#description = "Human Resources Management",
restricted = True,
#module_type = 2,
)),
("vol", Storage(
name_nice = T("Volunteers"),
#description = "Human Resources Management",
restricted = True,
#module_type = 2,
)),
("cms", Storage(
name_nice = T("Content Management"),
#description = "Content Management System",
restricted = True,
module_type = None,
)),
("doc", Storage(
name_nice = T("Documents"),
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
#module_type = 10,
)),
("msg", Storage(
name_nice = T("Messaging"),
#description = "Sends & Receives Alerts via Email & SMS",
restricted = True,
# The user-visible functionality of this module isn't normally required. Rather it's main purpose is to be accessed from other modules.
#module_type = None,
)),
("supply", Storage(
name_nice = T("Supply Chain Management"),
#description = "Used within Inventory Management, Request Management and Asset Management",
restricted = True,
#module_type = None, # Not displayed
)),
("inv", Storage(
name_nice = T("Warehouses"),
#description = "Receiving and Sending Items",
restricted = True,
#module_type = 4
)),
#("proc", Storage(
# name_nice = T("Procurement"),
# restricted = True,
# #module_type = None, # Not displayed
# )),
#("asset", Storage(
# name_nice = T("Assets"),
# #description = "Recording and Assigning Assets",
# restricted = True,
# #module_type = 5,
# )),
#("req", Storage(
# name_nice = T("Requests"),
# #description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
# restricted = True,
# #module_type = 10,
# )),
("project", Storage(
name_nice = T("Projects"),
#description = "Tracking of Projects, Activities and Tasks",
restricted = True,
#module_type = 2
)),
("budget", Storage(
name_nice = T("Budgets"),
#description = "Tracking of Budgets",
restricted = True,
#module_type = None
)),
#("survey", Storage(
# name_nice = T("Assessments"),
# #description = "Create, enter, and manage surveys.",
# restricted = True,
# #module_type = 5,
# )),
# Used by RIT
("event", Storage(
name_nice = T("Events"),
#description = "Events",
restricted = True,
#module_type = 10
)),
("member", Storage(
name_nice = T("Partners"),
#description = "Membership Management System",
restricted = True,
#module_type = 10,
)),
("deploy", Storage(
name_nice = T("Regional Intervention Teams"),
#description = "Alerting and Deployment of Disaster Response Teams",
restricted = True,
#module_type = 10,
)),
#("po", Storage(
# name_nice = T("Recovery Outreach"),
# #description = "Population Outreach",
# restricted = True,
# #module_type = 10,
# )),
("stats", Storage(
name_nice = T("Statistics"),
#description = "Manages statistics",
restricted = True,
#module_type = None,
)),
#("vulnerability", Storage(
# name_nice = T("Vulnerability"),
# #description = "Manages vulnerability indicators",
# restricted = True,
# #module_type = 10,
# )),
])
# -------------------------------------------------------------------------
# Functions which are local to this Template
# -------------------------------------------------------------------------
    def ns_only(tablename,
                fieldname = "organisation_id",
                required = True,
                branches = True,
                updateable = True,
                limit_filter_opts = True
                ):
        """
            Function to configure an organisation_id field to be restricted to just
            NS/Branch

            @param tablename: the table whose field to restrict
            @param fieldname: the FK field (defaults to organisation_id)
            @param required: Field is mandatory
            @param branches: Include Branches
            @param updateable: Limit to Orgs which the user can update
            @param limit_filter_opts: Also limit the Filter options

            NB If limit_filter_opts=True, apply in customise_xx_controller inside prep,
               after standard_prep is run
        """

        # Lookup organisation_type_id for Red Cross
        db = current.db
        s3db = current.s3db
        ttable = s3db.org_organisation_type
        try:
            type_id = db(ttable.name == RED_CROSS).select(ttable.id,
                                                          cache = s3db.cache,
                                                          limitby = (0, 1),
                                                          ).first().id
        except AttributeError:
            # No IFRC prepop done - skip (e.g. testing impacts of CSS changes in this theme)
            return

        # Load standard model
        f = s3db[tablename][fieldname]

        if limit_filter_opts:
            # Find the relevant filter widget & limit it's options
            filter_widgets = s3db.get_config(tablename, "filter_widgets")
            filter_widget = None
            if filter_widgets:
                from s3 import FS, S3HierarchyFilter
                for w in filter_widgets:
                    if isinstance(w, S3HierarchyFilter) and \
                       w.field == "organisation_id":
                        filter_widget = w
                        break
            if filter_widget is not None:
                selector = FS("organisation_organisation_type.organisation_type_id")
                filter_widget.opts["filter"] = (selector == type_id)

        # Label
        if branches:
            f.label = T("National Society / Branch")
        else:
            f.label = T("National Society")

        # Requires
        # Filter by type
        ltable = db.org_organisation_organisation_type
        rows = db(ltable.organisation_type_id == type_id).select(ltable.organisation_id)
        filter_opts = [row.organisation_id for row in rows]

        auth = current.auth
        s3_has_role = auth.s3_has_role
        Admin = s3_has_role("ADMIN")
        if branches:
            if Admin:
                parent = True
            else:
                # @ToDo: Set the represent according to whether the user can see resources of just a single NS or multiple
                # @ToDo: Consider porting this into core
                user = auth.user
                if user:
                    realms = user.realms
                    #delegations = user.delegations
                    if realms:
                        parent = True
                    else:
                        parent = False
                else:
                    parent = True

        else:
            # Keep the represent function as simple as possible
            parent = False
            # Exclude branches
            btable = s3db.org_organisation_branch
            rows = db((btable.deleted != True) &
                      (btable.branch_id.belongs(filter_opts))).select(btable.branch_id)
            filter_opts = list(set(filter_opts) - set(row.branch_id for row in rows))

        from s3db.org import org_OrganisationRepresent
        organisation_represent = org_OrganisationRepresent
        represent = organisation_represent(parent=parent)
        f.represent = represent

        # Validator: only RC orgs, optionally only those the user can update
        from s3 import IS_ONE_OF
        requires = IS_ONE_OF(db, "org_organisation.id",
                             represent,
                             filterby = "id",
                             filter_opts = filter_opts,
                             updateable = updateable,
                             orderby = "org_organisation.name",
                             sort = True,
                             )
        if not required:
            from gluon import IS_EMPTY_OR
            requires = IS_EMPTY_OR(requires)
        f.requires = requires

        if parent:
            # Use hierarchy-widget
            from s3 import FS, S3HierarchyWidget
            # No need for parent in represent (it's a hierarchy view)
            node_represent = organisation_represent(parent = False)
            # Filter by type
            # (no need to exclude branches - we wouldn't be here if we didn't use branches)
            selector = FS("organisation_organisation_type.organisation_type_id")
            f.widget = S3HierarchyWidget(lookup = "org_organisation",
                                         filter = (selector == type_id),
                                         represent = node_represent,
                                         multiple = False,
                                         leafonly = False,
                                         )
        else:
            # Dropdown not Autocomplete
            f.widget = None

        # Comment
        if (Admin or s3_has_role("ORG_ADMIN")):
            # Need to do import after setting Theme
            from s3layouts import S3PopupLink
            from s3 import S3ScriptItem
            add_link = S3PopupLink(c = "org",
                                   f = "organisation",
                                   vars = {"organisation_type.name": RED_CROSS},
                                   label = T("Create National Society"),
                                   title = T("National Society"),
                                   )
            comment = f.comment
            if not comment or isinstance(comment, S3PopupLink):
                f.comment = add_link
            elif isinstance(comment[1], S3ScriptItem):
                # Don't overwrite scripts
                f.comment[0] = add_link
            else:
                f.comment = add_link
        else:
            # Not allowed to add NS/Branch
            f.comment = ""
# -------------------------------------------------------------------------
def user_org_default_filter(selector, tablename=None):
"""
Default filter for organisation_id:
* Use the user's organisation if logged-in and associated with an
organisation.
"""
auth = current.auth
user_org_id = auth.is_logged_in() and auth.user.organisation_id
if user_org_id:
return user_org_id
else:
# no default
return {}
# -------------------------------------------------------------------------
#def user_org_and_children_default_filter(selector, tablename=None):
# """
# Default filter for organisation_id:
# * Use the user's organisation if logged-in and associated with an
# organisation.
# """
# auth = current.auth
# user_org_id = auth.is_logged_in() and auth.user.organisation_id
# if user_org_id:
# db = current.db
# s3db = current.s3db
# otable = s3db.org_organisation
# org = db(otable.id == user_org_id).select(otable.pe_id,
# limitby = (0, 1),
# ).first()
# if org:
# from s3db.pr import pr_get_descendants
# pe_id = org.pe_id
# pe_ids = pr_get_descendants((pe_id,),
# entity_types=("org_organisation",))
# rows = db(otable.pe_id.belongs(pe_ids)).select(otable.id)
# ids = [row.id for row in rows]
# ids.append(user_org_id)
# return ids
# else:
# return user_org_id
# else:
# # no default
# return {}
# -------------------------------------------------------------------------
    def customise_auth_user_controller(**attr):
        """
            Customise admin/user() and default/user() controllers
            - restricts the Organisation selector to NS/Branch
            - relabels name fields per Americas convention
            - installs per-NS / per-language Welcome emails
        """

        # Organisation needs to be an NS/Branch
        ns_only("auth_user",
                required = True,
                branches = True,
                updateable = False, # Need to see all Orgs in Registration screens
                )

        table = current.db.auth_user
        table.first_name.label = T("Forenames")
        table.last_name.label = T("Father's Surname")

        # NB welcome_email bodies below are runtime strings sent verbatim
        #    (not passed through T()) - do not edit casually
        auth = current.auth
        messages = auth.messages
        messages.lock_keys = False
        if auth.root_org_name() == "Red Cross Society of Panama":
            messages.welcome_email = \
"""Estimado, estimada,
Le damos la más cordial bienvenida al Sistema de Gestión de Recursos (RMS).
Su nombre de usuarios es: su correo electrónico
Contraseña: cruzroja (puede ser cambiando por usted cuando lo desee)
Para acceder a la plataforma visite: https://rms.cruzroja.org
Si tiene algún problema para acceder a la plataforma póngase en contacto con:
Haylin Mosquera – Coordinadora de Voluntariado de la Cruz Roja Panameña – E: voluntariado@cruzroja.org.pa
Saludos Cordiales
EQUIPO DE SOPORTE
RMS – Sistema de Gestión de Recursos
Albrook, Calle Jorge Bolivar Alemán, Edifico 453
Ciudad de Panamá, Panamá
Tel: (507) 315-1388 / 315-1389
Email: rmssoporte@cruzroja.org.pa
www.cruzroja.org.pa / rms.cruzroja.org"""
        else:
            if current.session.s3.language == "es":
                messages.welcome_email = \
"""Estimado, estimada,
Le damos la más cordial bienvenida al Sistema de Gestión de Recursos (RMS).
Su nombre de usuarios es: su correo electrónico
Contraseña: cruzroja (puede ser cambiando por usted cuando lo desee)
Para acceder a la plataforma visite: https://rms.cruzroja.org
Saludos Cordiales,
Equipo de Soporte RMS"""
            else:
                # "en"
                messages.welcome_email = \
"""Dear,
We welcome you to the Resource Management System (RMS).
Your user name is: your e-mail address
Password: redcross (can be changed by you at any time)
To access the platform visit: https://rms.cruzroja.org
Best regards,
RMS Support Team"""
        messages.lock_keys = True

        return attr

    settings.customise_auth_user_controller = customise_auth_user_controller
# -------------------------------------------------------------------------
def customise_deploy_alert_resource(r, tablename):
s3db = current.s3db
# Only send Alerts via Email
# @ToDo: Also send via Twitter
f = s3db[tablename].contact_method
f.readable = f.writable = False
#from s3 import S3SQLCustomForm
#crud_form = S3SQLCustomForm("mission_id",
# "subject",
# "body",
# "modified_on",
# )
#s3db.configure(tablename,
# crud_form = crud_form,
# list_fields = ["mission_id",
# "subject",
# "body",
# ],
# )
settings.customise_deploy_alert_resource = customise_deploy_alert_resource
# -------------------------------------------------------------------------
    def deploy_application_onaccept(form):
        """
            RIT Members should be added to the RIT Role

            Looks up the Person behind the new deploy_application and, if they
            have a user account, assigns the RIT_MEMBER role to it.
        """

        db = current.db
        s3db = current.s3db
        htable = db.hrm_human_resource
        ptable = db.pr_person

        # Find the Person
        human_resource_id = form.vars.get("human_resource_id")
        if human_resource_id:
            query = (htable.id == human_resource_id)
        else:
            # human_resource_id not in form.vars => join via the application record
            table = db.deploy_application
            query = (table.id == form.vars.get("id")) & \
                    (table.human_resource_id == htable.id)
        hr = db(query).select(htable.person_id,
                              limitby = (0, 1),
                              ).first()
        # NOTE(review): hr is assumed to exist here - an AttributeError would be
        # raised if the lookup fails (TODO confirm this cannot happen in practice)
        person_id = hr.person_id

        # Do they have a User Account?
        ltable = s3db.pr_person_user
        query = (ptable.id == person_id) & \
                (ltable.pe_id == ptable.pe_id)
        link = db(query).select(ltable.user_id,
                                limitby = (0, 1),
                                ).first()
        if link:
            # Add them to the RIT role
            current.auth.s3_assign_role(link.user_id, "RIT_MEMBER")
# -------------------------------------------------------------------------
def customise_deploy_application_resource(r, tablename):
current.s3db.configure(tablename,
create_onaccept = deploy_application_onaccept,
)
settings.customise_deploy_application_resource = customise_deploy_application_resource
# -------------------------------------------------------------------------
def customise_deploy_mission_resource(r, tablename):
s3db = current.s3db
s3db[tablename].event_type_id.label = T("Disaster Type")
COUNTRY = current.messages.COUNTRY
from s3 import S3SQLCustomForm
crud_form = S3SQLCustomForm("name",
"date",
"location_id",
"event_type_id",
)
#from s3 import S3DateFilter, S3LocationFilter, S3OptionsFilter, S3TextFilter
#filter_widgets = [S3TextFilter(["name",
# "event_type_id$name",
# "location_id",
# ],
# label = T("Search"),
# ),
# S3LocationFilter("location_id",
# label = COUNTRY,
# widget = "multiselect",
# levels = ["L0"],
# hidden = True,
# ),
# S3OptionsFilter("event_type_id",
# widget = "multiselect",
# hidden = True,
# ),
# #S3OptionsFilter("status",
# # options = s3db.deploy_mission_status_opts,
# # hidden = True,
# # ),
# S3DateFilter("date",
# hide_time = True,
# hidden = True,
# ),
# ]
list_fields = ["name",
"date",
"event_type_id",
(COUNTRY, "location_id"),
(T("Responses"), "response_count"),
(T("Members Deployed"), "hrquantity"),
]
s3db.configure(tablename,
crud_form = crud_form,
list_fields = list_fields,
)
settings.customise_deploy_mission_resource = customise_deploy_mission_resource
# -------------------------------------------------------------------------
def customise_event_event_type_resource(r, tablename):
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Create Disaster Type"),
title_display = T("Disaster Type Details"),
title_list = T("Disaster Types"),
title_update = T("Edit Disaster Type Details"),
title_upload = T("Import Disaster Types"),
label_list_button = T("List Disaster Types"),
label_delete_button = T("Delete Disaster Type"),
msg_record_created = T("Disaster Type added"),
msg_record_modified = T("Disaster Type Details updated"),
msg_record_deleted = T("Disaster Type deleted"),
msg_list_empty = T("No Disaster Types currently defined"),
)
settings.customise_event_event_type_resource = customise_event_event_type_resource
# -------------------------------------------------------------------------
def customise_hrm_certificate_controller(**attr):
table = current.s3db.hrm_course
auth = current.auth
if auth.s3_has_role("ADMIN"):
# See all Certificates
pass
elif auth.s3_has_roles(("training_coordinator",
"training_assistant",
)):
# Only show this Center's Certificates
organisation_id = auth.user.organisation_id
current.response.s3.filter = (table.organisation_id == organisation_id) | \
(table.organisation_id == None)
# Default to this Training Center
table.organisation_id.default = organisation_id
else:
# See NS Certificates
organisation_id = auth.root_org()
current.response.s3.filter = (table.organisation_id == organisation_id) | \
(table.organisation_id == None)
# Default to this NS
table.organisation_id.default = organisation_id
return attr
settings.customise_hrm_certificate_controller = customise_hrm_certificate_controller
# -------------------------------------------------------------------------
def customise_hrm_course_controller(**attr):
table = current.s3db.hrm_course
auth = current.auth
if auth.s3_has_role("ADMIN"):
# See all Courses
pass
elif auth.s3_has_roles(("training_coordinator",
"training_assistant",
)):
# Only show this Center's courses
current.response.s3.filter = (table.organisation_id == auth.user.organisation_id) | (table.organisation_id == None)
else:
# See NS Courses
current.response.s3.filter = (table.organisation_id == auth.root_org()) | (table.organisation_id == None)
return attr
settings.customise_hrm_course_controller = customise_hrm_course_controller
# -------------------------------------------------------------------------
    def customise_hrm_course_resource(r, tablename):
        """
            Customise Courses:
            - unique Course codes
            - Training Center staff manage their own Center's Courses
            - ADMINs additionally see/set the owning NS / Training Center
        """

        from gluon import IS_EMPTY_OR, IS_NOT_IN_DB
        from s3 import S3SQLCustomForm

        db = current.db
        auth = current.auth
        s3db = current.s3db
        table = s3db[tablename]

        # Code should be Unique
        f = table.code
        f.requires = IS_EMPTY_OR(IS_NOT_IN_DB(db, "hrm_course.code"))

        if auth.s3_has_roles(("training_coordinator",
                              "training_assistant",
                              )):
            # NB 'f' is rebound to organisation_id here; the later
            # 'f.default = ...' in the training_coordinator branch below
            # relies on this (training_coordinator implies this branch ran)
            f = table.organisation_id
            f.label = T("Training Center")
            f.comment = False # Don't create here
            from s3db.org import org_OrganisationRepresent
            org_represent = org_OrganisationRepresent(parent = False)
            f.represent = org_represent

        list_fields = ["code",
                       "name",
                       ]

        has_role = auth.s3_has_role
        if has_role("ADMIN"):
            table.organisation_id.label = T("National Society / Training Center")
            list_fields.insert(0, "organisation_id")
            #f.readable = f.writable = True
            #ttable = s3db.org_organisation_type
            #try:
            #    type_id = db(ttable.name == "Training Center").select(ttable.id,
            #                                                          limitby = (0, 1),
            #                                                          ).first().id
            #except:
            #    # No/incorrect prepop done - skip (e.g. testing impacts of CSS changes in this theme)
            #    pass
            #else:
            #    ltable = s3db.org_organisation_organisation_type
            #    rows = db(ltable.organisation_type_id == type_id).select(ltable.organisation_id)
            #    filter_opts = [row.organisation_id for row in rows]

            #    f.requires = IS_ONE_OF(db, "org_organisation.id",
            #                           org_represent,
            #                           orderby = "org_organisation.name",
            #                           sort = True,
            #                           filterby = "id",
            #                           filter_opts = filter_opts,
            #                           )

        elif has_role("training_coordinator"):
            f.default = auth.user.organisation_id

        crud_form = S3SQLCustomForm("organisation_id",
                                    "code",
                                    "name",
                                    "comments",
                                    )

        s3db.configure(tablename,
                       crud_form = crud_form,
                       list_fields = list_fields,
                       orderby = "hrm_course.code",
                       )

    settings.customise_hrm_course_resource = customise_hrm_course_resource
# -------------------------------------------------------------------------
#def customise_hrm_department_controller(**attr):
# # Organisation needs to be an NS/Branch
# ns_only("hrm_department",
# required = False,
# branches = False,
# )
# return attr
#settings.customise_hrm_department_controller = customise_hrm_department_controller
# -------------------------------------------------------------------------
#def emergency_contact_represent(row):
# """
# Representation of Emergency Contacts (S3Represent label renderer)
# @param row: the row
# """
# items = [row["pr_contact_emergency.name"]]
# relationship = row["pr_contact_emergency.relationship"]
# if relationship:
# items.append(" (%s)" % relationship)
# phone_number = row["pr_contact_emergency.phone"]
# if phone_number:
# items.append(": %s" % phone_number)
# return "".join(items)
# -------------------------------------------------------------------------
def customise_hrm_home():
#from gluon import URL
from s3 import s3_redirect_default
has_role = current.auth.s3_has_role
len_roles = len(current.session.s3.roles)
if (len_roles <= 2) or \
(len_roles == 3 and has_role("RIT_MEMBER") and not has_role("ADMIN")):
# No specific Roles
# Go to Personal Profile
s3_redirect_default(URL(f="person"))
else:
# Bypass home page & go direct to searchable list of Staff
s3_redirect_default(URL(f = "human_resource",
args = "summary",
))
settings.customise_hrm_home = customise_hrm_home
# -------------------------------------------------------------------------
def customise_hrm_experience_resource(r, tablename):
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Work History"),
title_display = T("Work History Details"),
title_list = T("Work History"),
title_update = T("Edit Work History"),
label_list_button = T("List Work History"),
label_delete_button = T("Delete Work History"),
msg_record_created = T("Work History added"),
msg_record_modified = T("Work History updated"),
msg_record_deleted = T("Work History deleted"),
msg_list_empty = T("No entries currently registered"),
)
settings.customise_hrm_experience_resource = customise_hrm_experience_resource
# -------------------------------------------------------------------------
    def hrm_human_resource_create_onaccept(form):
        """
            If the Staff/Volunteer is RC then create them a user account with a random password
            - only called when created by RIT_ADMIN through the web forms (not import)
            - only happens if an email has been set

            Flow: run the standard onaccept, check the Org is Red Cross, gather
            person/email/type (re-reading the HR record only as needed, since
            form.vars may be sparse), then insert + approve an auth_user and
            transfer record ownership to it.
        """

        db = current.db
        s3db = current.s3db
        form_vars = form.vars

        # Call normal onaccept
        from s3db.hrm import hrm_human_resource_onaccept
        hrm_human_resource_onaccept(form)

        # Is the person RC?
        organisation_id = form_vars.get("organisation_id")
        if not organisation_id:
            hr_id = form_vars.get("id")
            if not hr_id:
                # Nothing we can do!
                current.log.warning("Cannot create user for HR as no id in the form")
                return
            htable = s3db.hrm_human_resource
            hr = db(htable.id == hr_id).select(htable.id,
                                               htable.person_id,
                                               htable.type,
                                               htable.organisation_id,
                                               limitby = (0, 1),
                                               ).first()
            try:
                organisation_id = hr.organisation_id
            except AttributeError:
                # Nothing we can do!
                current.log.warning("Cannot create user for HR %s as cannot find HR record" % hr_id)
                return
        else:
            hr = None

        ttable = s3db.org_organisation_type
        ltable = s3db.org_organisation_organisation_type
        query = (ttable.name == RED_CROSS) & \
                (ltable.organisation_type_id == ttable.id) & \
                (ltable.organisation_id == organisation_id)
        RC = db(query).select(ltable.id,
                              limitby = (0, 1),
                              ).first()
        if not RC:
            return

        # Collect the Details needed
        person_id = form_vars.get("person_id")
        if not person_id:
            if not hr:
                hr_id = form_vars.get("id")
                if not hr_id:
                    # Nothing we can do!
                    current.log.warning("Cannot create user for HR as no id in the form")
                    return
                htable = s3db.hrm_human_resource
                hr = db(htable.id == hr_id).select(htable.id,
                                                   htable.person_id,
                                                   htable.type,
                                                   limitby = (0, 1),
                                                   ).first()
            try:
                person_id = hr.person_id
            except AttributeError:
                current.log.warning("Cannot create user for HR %s as cannot find HR record" % hr_id)
                return

        ptable = s3db.pr_person
        person = db(ptable.id == person_id).select(ptable.id,
                                                   ptable.first_name,
                                                   ptable.middle_name, # NB We use middle_name for User in RMS Americas!
                                                   ptable.pe_id,
                                                   limitby = (0, 1),
                                                   ).first()
        try:
            pe_id = person.pe_id
        except AttributeError:
            # Nothing we can do!
            return

        ctable = s3db.pr_contact
        query = (ctable.pe_id == pe_id) & \
                (ctable.contact_method == "EMAIL")
        contact = db(query).select(ctable.value,
                                   limitby = (0, 1),
                                   ).first()
        try:
            email = contact.value
        except AttributeError:
            # Nothing we can do!
            hr_id = form_vars.get("id")
            current.log.warning("Cannot create user for HR %s as cannot find Email" % hr_id)
            return

        hr_type = form_vars.get("type")
        if not hr_type:
            if not hr:
                hr_id = form_vars.get("id")
                if not hr_id:
                    # Nothing we can do!
                    current.log.warning("Cannot create user for HR as no id in the form")
                    return
                htable = s3db.hrm_human_resource
                hr = db(htable.id == hr_id).select(htable.id,
                                                   htable.type,
                                                   limitby = (0, 1),
                                                   ).first()
            try:
                hr_type = str(hr.type)
            except AttributeError:
                # Nothing we can do!
                current.log.warning("Cannot create user for HR %s as cannot find HR record" % hr_id)
                return

        # HR type 1 = Staff, otherwise Volunteer
        if hr_type == "1":
            link_user_to = "staff"
        else:
            link_user_to = "volunteer"

        # This field has been manually added to the form
        language = current.request.post_vars.get("language")

        auth = current.auth

        # Generate a password
        password, crypted = auth.s3_password(8)

        # Create User
        user = Storage(organisation_id = organisation_id,
                       language = language,
                       first_name = person.first_name,
                       last_name = person.middle_name, # NB We use middle_name for User in RMS Americas!
                       email = email,
                       link_user_to = link_user_to,
                       password = str(crypted),
                       )

        #user = auth.get_or_create_user(user, login=False)
        user_id = db.auth_user.insert(**user)

        # Set the HR record to be owned by this user
        if hr:
            hr.update_record(owned_by_user = user_id)
        else:
            hr_id = form_vars.get("id")
            db(s3db.hrm_human_resource.id == hr_id).update(owned_by_user = user_id)

        # Set the Person record to be owned by this user
        person.update_record(owned_by_user = user_id)

        # Cascade down to components
        # pr_address
        atable = s3db.pr_address
        db(atable.pe_id == pe_id).update(owned_by_user = user_id)
        # pr_contact
        db(ctable.pe_id == pe_id).update(owned_by_user = user_id)

        # Link to Person so that we find this in the 'Link'
        ltable = s3db.pr_person_user
        ltable.insert(pe_id = pe_id,
                      user_id = user_id,
                      )

        # Approve User, link to Person & send them a Welcome email
        # (welcome_email is a runtime string with %-placeholders - keep verbatim)
        user.update(id = user_id)
        messages = auth.messages
        messages.lock_keys = False
        messages.welcome_email = \
"""Welcome to %(system_name)s
 - You can start using %(system_name)s at: %(url)s
 - Your password is: %(password)s
 - To edit your profile go to: %(url)s%(profile)s
Thank you"""
        messages.lock_keys = True
        auth.s3_approve_user(user, password=password)
# -------------------------------------------------------------------------
def hrm_human_resource_onvalidation(form):
    """
        Check that the Organization ID (code) is unique per NS

        @param form: the FORM
                     - reads form.vars.code / organisation_id / id
                     - writes form.errors.code on duplicate
    """

    # Read Code
    form_vars_get = form.vars.get
    code = form_vars_get("code")
    if code is None:
        # No code submitted: nothing to check
        return

    db = current.db
    s3db = current.s3db

    # Lookup Root Org
    organisation_id = form_vars_get("organisation_id")
    otable = s3db.org_organisation
    root_org = db(otable.id == organisation_id).select(otable.root_organisation,
                                                       limitby = (0, 1),
                                                       ).first()
    if root_org is None:
        # Fix: organisation_id absent from this form or unknown
        # => cannot determine the NS, so skip the uniqueness check
        # (previously crashed with AttributeError on None)
        return
    root_organisation = root_org.root_organisation

    # Check for another HR in the same NS with same code
    htable = s3db.hrm_human_resource
    query = (htable.code == code) & \
            (htable.organisation_id == otable.id) & \
            (otable.root_organisation == root_organisation)
    human_resource_id = form_vars_get("id")
    if human_resource_id:
        # Update Form: Skip our own record
        query &= (htable.id != human_resource_id)
    match = db(query).select(htable.id,
                             limitby = (0, 1),
                             ).first()
    if match:
        # Duplicate found: reject
        form.errors["code"] = current.T("Organization ID already in use")
# -------------------------------------------------------------------------
def customise_hrm_human_resource_resource(r, tablename):
    """
        HR resource customisation:
        the Organization ID (code) must be unique per NS, which is
        enforced by the onvalidation callback.
    """

    s3db = current.s3db
    s3db.configure(tablename,
                   onvalidation = hrm_human_resource_onvalidation,
                   )

settings.customise_hrm_human_resource_resource = customise_hrm_human_resource_resource
# -------------------------------------------------------------------------
def customise_hrm_human_resource_controller(**attr):
    """
        HR controller customisations:
        - the "trainee" function handles external (non-RC) people
        - restrict selectable Organisations (NS/Branch internally,
          non-RC externally)
        - default the per-NS Organization ID (code) for new records
        - optionally create User Accounts for HRs (RIT_ADMIN)
        - Signature List / ID Card exports
        - relabel person name fields for Americas conventions
    """

    #controller = current.request.controller
    #if controller != "deploy":
    #    # Default Filter
    #    from s3 import s3_set_default_filter
    #    s3_set_default_filter("~.organisation_id",
    #                          user_org_and_children_default_filter,
    #                          tablename = "hrm_human_resource")

    s3 = current.response.s3

    # Enable scalability-optimized strategies
    settings.base.bigtable = True

    # "trainee" works on people outside the RC
    if current.request.function == "trainee":
        EXTERNAL = True
    else:
        EXTERNAL = False

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False

        from s3 import FS

        db = current.db
        s3db = current.s3db
        auth = current.auth
        resource = r.resource
        table = r.table

        has_role = auth.s3_has_role
        has_roles = auth.s3_has_roles

        if EXTERNAL:
            f = table.organisation_id
            f.label = T("Organization")

            # Organisation cannot be an NS/Branch
            # Lookup organisation_type_id for Red Cross
            ttable = s3db.org_organisation_type
            type_ids = db(ttable.name.belongs((RED_CROSS, "Training Center"))).select(ttable.id,
                                                                                      cache = s3db.cache,
                                                                                      limitby = (0, 2),
                                                                                      )
            if type_ids:
                from s3 import IS_ONE_OF
                ltable = db.org_organisation_organisation_type
                type_ids = [t.id for t in type_ids]
                rows = db(ltable.organisation_type_id.belongs(type_ids)).select(ltable.organisation_id)
                not_filter_opts = [row.organisation_id for row in rows]
                f.requires = IS_ONE_OF(db, "org_organisation.id",
                                       f.represent,
                                       not_filterby = "id",
                                       not_filter_opts = not_filter_opts,
                                       updateable = True,
                                       orderby = "org_organisation.name",
                                       sort = True,
                                       )

                resource.add_filter(~FS("organisation_id").belongs(not_filter_opts))

                # Find the relevant filter widget & limit its options
                filter_widgets = s3db.get_config("hrm_human_resource", "filter_widgets")
                filter_widget = None
                if filter_widgets:
                    from s3 import S3HierarchyFilter
                    for w in filter_widgets:
                        if isinstance(w, S3HierarchyFilter) and \
                           w.field == "organisation_id":
                            filter_widget = w
                            break
                if filter_widget is not None:
                    filter_widget.opts["filter"] = (~FS("id").belongs(not_filter_opts))

        else:
            otable = s3db.org_organisation
            otable.root_organisation.label = T("National Society")

            # Organisation needs to be an NS/Branch
            ns_only("hrm_human_resource",
                    required = True,
                    branches = True,
                    # default
                    #limit_filter_opts = True,
                    )

            export_formats = list(settings.get_ui_export_formats())

            if r.method in ("create", "summary", None):
                # Provide a default Organization ID
                organisation_id = auth.user.organisation_id
                if organisation_id:
                    org = db(otable.id == organisation_id).select(otable.root_organisation,
                                                                  limitby = (0, 1),
                                                                  ).first()
                    root_organisation_id = org.root_organisation
                    f = table.code
                    query = (otable.root_organisation == root_organisation_id) & \
                            (otable.id == table.organisation_id)
                    last_row = db(query).select(f,
                                                limitby = (0, 1),
                                                orderby = ~f,
                                                ).first()
                    # Fix: no HR record with a code may exist yet for this NS,
                    # in which case .first() returns None (previously crashed
                    # with AttributeError)
                    last_code = last_row.code if last_row else None
                    if last_code:
                        try:
                            f.default = int(last_code) + 1
                        except ValueError:
                            # Fix: legacy non-numeric code cannot be
                            # incremented; fall back to 1
                            f.default = 1
                    else:
                        f.default = 1

            if not r.id:
                # Filter to just RC people
                resource.add_filter(FS("organisation_id$organisation_type.name") == RED_CROSS)

                if has_role("RIT_ADMIN", include_admin=False):
                    # Create a User Account for the HR to manage their own profile
                    def add_language(form):
                        # Inject a Language selector into the create-form
                        from gluon import LABEL, OPTION, SELECT
                        from s3 import s3_addrow
                        formstyle = settings.get_ui_formstyle()
                        language_opts = [OPTION(T("Spanish"),
                                                _value = "es",
                                                _selected = "selected",
                                                ),
                                         OPTION(T("French"),
                                                _value = "fr",
                                                ),
                                         OPTION(T("English"),
                                                _value = "en",
                                                ),
                                         ]
                        s3_addrow(form,
                                  LABEL("%s:" % T("Language"),
                                        _id = "auth_user_language__label",
                                        _for = "auth_user_language",
                                        ),
                                  SELECT(_id = "auth_user_language",
                                         _name = "language",
                                         *language_opts
                                         ),
                                  "",
                                  formstyle,
                                  "auth_user_language__row",
                                  position = 3,
                                  )

                    resource.configure(create_onaccept = hrm_human_resource_create_onaccept,
                                       form_postp = add_language,
                                       )

                # Custom list_fields
                list_fields = [(T("Full Name"), "person_id"),
                               "organisation_id",
                               (T("Program"), "person_id$hours.programme_id"),
                               (T("National ID"), "person_id$national_id.value"),
                               "code",
                               (T("Email"), "email.value"),
                               (settings.get_ui_label_mobile_phone(), "phone.value"),
                               ]
                r.resource.configure(list_fields = list_fields)

                # Bind method for signature list export + add export icon
                from .siglist import HRSignatureList
                s3db.set_method("hrm", "human_resource",
                                method = "siglist",
                                action = HRSignatureList,
                                )
                export_formats.append(("siglist.pdf", "fa fa-list", T("Export Signature List")))
                s3.formats["siglist.pdf"] = r.url(method = "siglist")

            if has_roles(ID_CARD_EXPORT_ROLES):
                if r.representation == "card":
                    # Configure ID card layout
                    from .idcards import IDCardLayout
                    resource.configure(pdf_card_layout = IDCardLayout)

                if not r.id and not r.component:
                    # Add export-icon for ID cards
                    export_formats.append(("card", "fa fa-id-card", T("Export ID Cards")))
                    s3.formats["card"] = r.url(method = "")

            settings.ui.export_formats = export_formats

        if not has_role("ADMIN") and \
           has_roles(("training_coordinator", "training_assistant")):
            # Filter People to just those trained by this Reference Center
            resource.add_filter(FS("training.training_event_id$organisation_id") == auth.user.organisation_id)

        # Default to Volunteers
        table.type.default = 2

        # Hide Venues from the list of Offices
        from gluon import IS_EMPTY_OR

        ttable = s3db.org_facility_type
        ltable = s3db.org_site_facility_type
        query = (ltable.facility_type_id == ttable.id) & \
                (ttable.name == "Venue")
        venues = db(query).select(ltable.site_id)
        venues = [v.site_id for v in venues]
        stable = s3db.org_site
        dbset = db(~stable.site_id.belongs(venues))

        f = table.site_id
        new_requires = f.requires.other
        new_requires.dbset = dbset
        f.requires = IS_EMPTY_OR(new_requires)

        # Americas name conventions
        table = s3db.pr_person
        table.first_name.label = T("Forenames")
        table.middle_name.label = T("Father's Surname")
        table.last_name.label = T("Mother's Surname")

        # For the filter
        s3db.hrm_competency.skill_id.label = T("Language")

        return True
    s3.prep = custom_prep

    return attr

settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller
# -------------------------------------------------------------------------
def customise_hrm_insurance_resource(r, tablename):
    """ Relabel HR insurance fields; default the type to Health """

    itable = current.s3db.hrm_insurance

    itable.type.default = "HEALTH"
    itable.insurance_number.label = T("Affiliate Number")
    itable.phone.label = T("Emergency Number")
    itable.insurer.label = "%s / %s" % (T("Insurance Company"),
                                        T("Social Work or Prepaid"),
                                        )

settings.customise_hrm_insurance_resource = customise_hrm_insurance_resource
# -------------------------------------------------------------------------
def customise_hrm_job_title_resource(r, tablename):
    """ Present Job Titles as "Positions" """

    s3db = current.s3db

    # Positions apply to both Staff & Volunteers
    type_field = s3db.hrm_job_title.type
    type_field.default = 3 # Both
    #type_field.readable = type_field.writable = False

    label = T("Position")
    create_label = T("Create Position")
    current.response.s3.crud_strings[tablename] = Storage(
        label_create = create_label,
        title_display = T("Position Details"),
        title_list = T("Position Catalog"),
        title_update = T("Edit Position"),
        title_upload = T("Import Positions"),
        label_list_button = T("List Positions"),
        label_delete_button = T("Delete Position"),
        msg_record_created = T("Position added"),
        msg_record_modified = T("Position updated"),
        msg_record_deleted = T("Position deleted"),
        msg_list_empty = T("Currently no entries in the catalog"),
        )

    # Relabel the FK & allow adding Positions from a popup
    from s3layouts import S3PopupLink
    attrs = s3db.hrm_job_title_id.attr
    attrs.label = label
    attrs.comment = S3PopupLink(c = "hrm",
                                f = "job_title",
                                label = create_label,
                                title = label,
                                )

settings.customise_hrm_job_title_resource = customise_hrm_job_title_resource
# -------------------------------------------------------------------------
def customise_hrm_job_title_controller(**attr):
    """ Job Titles are NS-specific; Admins get cross-org filters """

    s3 = current.response.s3

    # Organisation needs to be an NS
    ns_only("hrm_job_title",
            required = False,
            branches = False,
            )

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Run the default prep first, if any
        result = standard_prep(r) if callable(standard_prep) else True

        if current.auth.s3_has_role("ADMIN"):
            # Admins can search by name & filter by Organisation
            from s3 import S3OptionsFilter, S3TextFilter
            filter_widgets = [S3TextFilter(["name",
                                            ],
                                           label=T("Search")
                                           ),
                              S3OptionsFilter("organisation_id",
                                              ),
                              ]
            current.s3db.configure("hrm_job_title",
                                   filter_widgets = filter_widgets,
                                   )
        return result
    s3.prep = custom_prep

    return attr

settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller
# -------------------------------------------------------------------------
def customise_hrm_programme_controller(**attr):
    """ Programmes are NS-specific; hide the long name """

    table = current.s3db.hrm_programme

    # Organisation needs to be an NS/Branch
    ns_only("hrm_programme",
            required = False,
            branches = False,
            )

    # non-Admins should only see programmes for their NS
    auth = current.auth
    if not auth.s3_has_role("ADMIN"):
        current.response.s3.filter = (table.organisation_id == auth.root_org())

    name_long = table.name_long
    name_long.readable = name_long.writable = False

    return attr

settings.customise_hrm_programme_controller = customise_hrm_programme_controller
# -------------------------------------------------------------------------
def customise_hrm_programme_hours_controller(**attr):
    """ Default Hours to the user's own NS; custom import template """

    # Default Filter
    from s3 import s3_set_default_filter
    s3_set_default_filter("~.person_id$human_resource.organisation_id",
                          user_org_default_filter,
                          tablename = "hrm_programme_hours")

    attr["csv_template"] = ("../../themes/RMS/formats", "hrm_programme_hours")
    return attr

settings.customise_hrm_programme_hours_controller = customise_hrm_programme_hours_controller
# -------------------------------------------------------------------------
def skip_create(deduplicate):
    """ Decorator for deduplicators to prevent creation of new records """

    # Resolve callability once, at decoration time
    resolver = deduplicate if callable(deduplicate) else None

    def wrapped(item):
        # Run the original deduplicator (if any) to find matches,
        # then restrict the import strategy to updates only
        if resolver:
            resolver(item)
        item.strategy = [item.METHOD.UPDATE]

    return wrapped
def customise_hrm_programme_hours_resource(r, tablename):
    """
        "Hours of Service": custom CRUD strings & form, new custom
        fields exposed, and update-only deduplication for imports.
    """

    from s3 import S3SQLCustomForm

    s3db = current.s3db
    phtable = s3db.hrm_programme_hours

    current.response.s3.crud_strings[tablename] = Storage(
        label_create = T("Add Hours of Service"),
        title_display = T("Hours Details"),
        title_list = T("Hours of Service"),
        title_update = T("Edit Hours"),
        label_list_button = T("List Hours"),
        label_delete_button = T("Delete Hours"),
        msg_record_created = T("Hours added"),
        msg_record_modified = T("Hours updated"),
        msg_record_deleted = T("Hours deleted"),
        msg_list_empty = T("Currently no hours recorded"),
        )

    # Show new custom fields
    event = phtable.event
    event.readable = event.writable = True
    place = phtable.place
    place.readable = place.writable = True
    # Hide old fields so they don't appear in list_fields in hrm_Record
    #phtable.programme_id.readable = phtable.programme_id.writable = False
    job_title_id = phtable.job_title_id
    job_title_id.readable = job_title_id.writable = False

    crud_form = S3SQLCustomForm("date",
                                "programme_id",
                                "place",
                                "event",
                                "hours",
                                )

    # list_fields are not set here: hrm_Record controls them itself
    configure = s3db.configure
    configure("hrm_programme_hours",
              crud_form = crud_form,
              )

    # Prevent create during imports: deduplicate update-only
    get_config = s3db.get_config
    for tn in ("pr_person", "org_organisation", "hrm_programme"):
        configure(tn,
                  deduplicate = skip_create(get_config(tn, "deduplicate")),
                  )

settings.customise_hrm_programme_hours_resource = customise_hrm_programme_hours_resource
# -------------------------------------------------------------------------
def customise_hrm_skill_resource(r, tablename):
    """ Present Skills as "Languages" (CRUD strings only) """

    #label = T("Language")
    create_label = T("Create Language")
    current.response.s3.crud_strings[tablename] = Storage(
        label_create = create_label,
        title_display = T("Language Details"),
        title_list = T("Language Catalog"),
        title_update = T("Edit Language"),
        label_list_button = T("List Languages"),
        label_delete_button = T("Delete Language"),
        msg_record_created = T("Language added"),
        msg_record_modified = T("Language updated"),
        msg_record_deleted = T("Language deleted"),
        msg_list_empty = T("Currently no entries in the catalog"),
        )

    # The skill_id label/popup-link is customised in
    # customise_hrm_competency_resource instead: there is no guarantee
    # that the hrm_competency table is already loaded at this point

settings.customise_hrm_skill_resource = customise_hrm_skill_resource
# -------------------------------------------------------------------------
def customise_hrm_competency_resource(r, tablename):
    """ Present Competencies as "Languages" """

    current.response.s3.crud_strings[tablename] = Storage(
        label_create = T("Add Language"),
        title_display = T("Language Details"),
        title_list = T("Languages"),
        title_update = T("Edit Language"),
        label_list_button = T("List Languages"),
        label_delete_button = T("Delete Language"),
        msg_record_created = T("Language added"),
        msg_record_modified = T("Language updated"),
        msg_record_deleted = T("Language deleted"),
        msg_list_empty = T("No entries currently registered"),
        )

    # Relabel the skill FK & allow adding Languages from a popup
    from s3layouts import S3PopupLink
    label = T("Language")
    skill_id = current.s3db.hrm_competency.skill_id
    skill_id.label = label
    skill_id.comment = S3PopupLink(c = "hrm",
                                   f = "skill",
                                   label = T("Create Language"),
                                   title = label,
                                   )

settings.customise_hrm_competency_resource = customise_hrm_competency_resource
# -------------------------------------------------------------------------
def hrm_training_onaccept(form):
    """
        Add People to the RIT Alert List if they have passed the RIT course

        @param form: the FORM (form.vars.id = hrm_training record id)

        Side effects: may insert a deploy_application record and assign
        the RIT_MEMBER role to the person's user account.
    """

    db = current.db
    s3db = current.s3db

    form_vars = form.vars

    # Lookup full record (form.vars may not contain all fields)
    table = db.hrm_training
    record = db(table.id == form_vars.id).select(table.id,
                                                 table.person_id,
                                                 table.course_id,
                                                 table.grade,
                                                 limitby = (0, 1),
                                                 ).first()
    try:
        course_id = record.course_id
    except AttributeError:
        # record is None: nothing we can do
        current.log.error("Cannot find Training record")
        return

    # Lookup the RIT Course ID
    ctable = db.hrm_course
    row = db(ctable.name == "Regional Intervention Teams").select(ctable.id,
                                                                  cache = s3db.cache,
                                                                  limitby = (0, 1),
                                                                  ).first()
    try:
        rit_course_id = row.id
    except AttributeError:
        current.log.error("Cannot find RIT Course: Prepop not done?")
        return

    if course_id != rit_course_id:
        # Nothing to do
        return

    # NOTE(review): grade 8 presumably means "Pass" — confirm against
    # the course grade options used by this template
    if record.grade != 8:
        # Not passed: Nothing to do
        return

    # Is person already a RIT Member?
    person_id = record.person_id
    htable = s3db.hrm_human_resource
    hr = db(htable.person_id == person_id).select(htable.id,
                                                  limitby = (0, 1),
                                                  ).first()
    try:
        human_resource_id = hr.id
    except AttributeError:
        # hr is None: person has no HR record
        current.log.error("Cannot find Human Resource record")
        return

    dtable = s3db.deploy_application
    exists = db(dtable.human_resource_id == human_resource_id).select(dtable.id,
                                                                      limitby = (0, 1),
                                                                      ).first()
    if not exists:
        # Add them to the list
        dtable.insert(human_resource_id = human_resource_id)

    # Add them to the RIT role (only possible if they have a user account)
    ltable = s3db.pr_person_user
    ptable = db.pr_person
    query = (ptable.id == person_id) & \
            (ltable.pe_id == ptable.pe_id)
    link = db(query).select(ltable.user_id,
                            limitby = (0, 1),
                            ).first()
    if link:
        current.auth.s3_assign_role(link.user_id, "RIT_MEMBER")
# -------------------------------------------------------------------------
def hrm_training_postimport(import_info):
    """
        Create Users for Persons created

        @param import_info: importer result dict; "created" holds the ids
                            of the newly-created hrm_training records

        Side effects: inserts auth_user records, approves them (which
        links them to Person/HR and sends a Welcome email), and takes
        ownership of the person's HR/training/certification records.
    """

    training_ids = import_info["created"]
    if not training_ids:
        # No new people created
        return

    db = current.db
    s3db = current.s3db

    # Find all the Persons behind the new training records
    ttable = s3db.hrm_training
    ptable = s3db.pr_person
    query = (ttable.id.belongs(training_ids)) & \
            (ttable.person_id == ptable.id)
    trainings = db(query).select(ptable.pe_id)
    person_pe_ids = {p.pe_id for p in trainings}

    if not person_pe_ids:
        # No people?
        return

    # Remove those with a User Account
    ltable = s3db.pr_person_user
    users = db(ltable.pe_id.belongs(person_pe_ids)).select(ltable.pe_id)
    user_pe_ids = [u.pe_id for u in users]
    discard = person_pe_ids.discard
    for pe_id in user_pe_ids:
        discard(pe_id)

    if not person_pe_ids:
        # Nobody without a User Account already
        return

    # Read Person Details (email, language, HR type/org) via left joins
    ctable = s3db.pr_contact
    dtable = s3db.pr_person_details
    htable = s3db.hrm_human_resource
    left = [ctable.on((ctable.pe_id == ptable.pe_id) & \
                      (ctable.contact_method == "EMAIL")
                      ),
            dtable.on(dtable.person_id == ptable.id),
            htable.on(htable.person_id == ptable.id),
            ]
    persons = db(ptable.pe_id.belongs(person_pe_ids)).select(ptable.id,
                                                             ptable.first_name,
                                                             # Americas use Apellido Paterno for Last Name
                                                             ptable.middle_name,
                                                             #ptable.last_name,
                                                             ctable.value,
                                                             dtable.language,
                                                             htable.type,
                                                             htable.organisation_id,
                                                             left = left,
                                                             )
    auth = current.auth
    utable = db.auth_user
    create_user = utable.insert
    approve_user = auth.s3_approve_user
    cert_table = s3db.hrm_certification

    # For each Person
    for p in persons:
        person = p["pr_person"]
        hr = p["hrm_human_resource"]
        # HR type 1 = Staff, otherwise treat as Volunteer
        if hr.type == 1:
            link_user_to = "staff"
        else:
            link_user_to = "volunteer"

        # Set random password
        password, crypted = auth.s3_password(8)

        # Create a User Account
        user = Storage(first_name = person.first_name,
                       last_name = person.middle_name,
                       #last_name = person.last_name,
                       email = p["pr_contact.value"],
                       language = p["pr_person_details.language"],
                       password = crypted,
                       organisation_id = hr.organisation_id,
                       link_user_to = link_user_to,
                       )
        user_id = create_user(**user)

        # Standard Approval (inc Link to Person/HR and Send out Welcome Email with password)
        user["id"] = user_id
        approve_user(user, password)

        # Fixup permissions: new user owns their own records
        person_id = person.id
        db(htable.person_id == person_id).update(owned_by_user = user_id)
        db(ttable.person_id == person_id).update(owned_by_user = user_id)
        db(cert_table.person_id == person_id).update(owned_by_user = user_id)
# -------------------------------------------------------------------------
def customise_hrm_training_controller(**attr):
    """
        Trainings are filtered to the user's Reference Center;
        imports default new HRs to Volunteers and create User Accounts.
    """

    s3 = current.response.s3

    auth = current.auth
    # Training-Center staff (non-Admin) only see their own Center's data
    is_training_center = not auth.s3_has_role("ADMIN") and \
                         auth.s3_has_roles(("training_coordinator", "training_assistant"))
    if is_training_center:
        # Filter Trainings to just those done by this Reference Center
        from s3 import FS
        s3.filter = (FS("~.training_event_id$organisation_id") == auth.user.organisation_id)

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep) and not standard_prep(r):
            return False

        if r.method == "import":
            # HR records may be created via importing them as participants
            s3db = current.s3db
            # Default to Volunteers
            s3db.hrm_human_resource.type.default = 2
            if is_training_center:
                # Create User Accounts for those Persons without them
                # (a create_onaccept on hrm_human_resource doesn't work
                #  here, as the email is created after the HR record)
                s3db.configure("hrm_training",
                               postimport = hrm_training_postimport,
                               )
        return True
    s3.prep = custom_prep

    return attr

settings.customise_hrm_training_controller = customise_hrm_training_controller
# -------------------------------------------------------------------------
def customise_hrm_training_resource(r, tablename):
    """
        Trainings: expose grading & qualitative feedback, custom
        CRUD form & filters, and maintain the RIT list on accept.
    """

    s3db = current.s3db
    table = s3db.hrm_training

    grade = table.grade
    grade.readable = grade.writable = True
    feedback = table.qualitative_feedback
    feedback.readable = feedback.writable = True

    s3db.hrm_certification.number.label = T("Registration Number")

    from s3 import S3SQLCustomForm, S3TextFilter, S3OptionsFilter, S3DateFilter

    # On the person tab the trainee is implied by the master record,
    # so show the course instead
    lead_field = "course_id" if r.function == "person" else "person_id"
    crud_form = S3SQLCustomForm(lead_field,
                                "end_date",
                                "grade",
                                "grade_details",
                                "qualitative_feedback",
                                "certification_from_training.number",
                                )

    from s3db.org import org_SiteRepresent
    filter_widgets = [
        S3TextFilter(["person_id$first_name",
                      "person_id$last_name",
                      "course_id$name",
                      "comments",
                      ],
                     label = T("Search"),
                     comment = T("You can search by trainee name, course name or comments. You may use % as wildcard. Press 'Search' without input to list all trainees."),
                     _class = "filter-search",
                     ),
        S3OptionsFilter("training_event_id$site_id",
                        label = T("Country"),
                        represent = org_SiteRepresent(show_type = False),
                        ),
        S3OptionsFilter("person_id$human_resource.organisation_id",
                        label = T("Organization"),
                        ),
        S3OptionsFilter("course_id",
                        ),
        S3OptionsFilter("grade",
                        ),
        S3DateFilter("date",
                     hide_time = True,
                     ),
        ]

    # Add People to the RIT Alert List when they pass the RIT course
    s3db.add_custom_callback(tablename,
                             "onaccept",
                             hrm_training_onaccept,
                             )

    s3db.configure(tablename,
                   crud_form = crud_form,
                   filter_widgets = filter_widgets,
                   )

settings.customise_hrm_training_resource = customise_hrm_training_resource
# -------------------------------------------------------------------------
def customise_hrm_training_event_resource(r, tablename):
    """
        Training Events belong to a Training Center:
        - relabel & restrict organisation_id accordingly
        - Admins can pick any Training Center; Center staff are fixed
          to their own one
        - multiple Instructors via an inline component
    """

    from s3 import IS_ONE_OF, S3SQLCustomForm, S3SQLInlineComponent
    from s3db.org import org_OrganisationRepresent

    db = current.db
    auth = current.auth
    s3db = current.s3db
    table = s3db.hrm_training_event

    org_represent = org_OrganisationRepresent(parent = False)

    f = table.organisation_id
    f.label = T("Training Center")
    f.comment = False # Don't create here
    f.represent = org_represent

    list_fields = ["organisation_id",
                   "course_id",
                   #"site_id",
                   "location_id",
                   "start_date",
                   "training_event_instructor.person_id",
                   "comments",
                   ]

    if auth.s3_has_role("ADMIN"):
        # Admins: restrict the selector to orgs of type "Training Center"
        #f.readable = f.writable = True
        ttable = s3db.org_organisation_type
        try:
            type_id = db(ttable.name == "Training Center").select(ttable.id,
                                                                  limitby = (0, 1),
                                                                  ).first().id
        except AttributeError:
            # No/incorrect prepop done - skip (e.g. testing impacts of CSS changes in this theme)
            pass
        else:
            ltable = s3db.org_organisation_organisation_type
            rows = db(ltable.organisation_type_id == type_id).select(ltable.organisation_id)
            filter_opts = [row.organisation_id for row in rows]

            f.requires = IS_ONE_OF(db, "org_organisation.id",
                                   org_represent,
                                   orderby = "org_organisation.name",
                                   sort = True,
                                   filterby = "id",
                                   filter_opts = filter_opts,
                                   )
    elif auth.s3_has_roles(("training_coordinator", "training_assistant")):
        # Center staff: fix the org & filter courses to their own Center
        organisation_id = auth.user.organisation_id
        f.default = organisation_id
        f.writable = False
        list_fields.pop(0) # organisation_id
        table.course_id.requires.set_filter(filterby = "organisation_id",
                                            filter_opts = [organisation_id],
                                            )

    # Hours are Optional
    from gluon import IS_EMPTY_OR
    table.hours.requires = IS_EMPTY_OR(table.hours)

    #site_represent = S3Represent(lookup = "org_site")
    # Filter list of Venues
    #f = table.site_id
    #f.default = None
    #f.label = T("Country")
    #f.represent = site_represent
    #ftable = s3db.org_facility
    #ltable = s3db.org_site_facility_type
    #ttable = s3db.org_facility_type
    #query = (ftable.deleted == False) & \
    #        (ftable.site_id == ltable.site_id) & \
    #        (ltable.facility_type_id == ttable.id) & \
    #        (ttable.name == "Venue")
    #rows = db(query).select(ftable.site_id)
    #filter_opts = [row.site_id for row in rows]
    #f.requires = IS_ONE_OF(db, "org_site.site_id",
    #                       site_represent,
    #                       filterby = "site_id",
    #                       filter_opts = filter_opts,
    #                       )

    # Multiple Instructors
    crud_form = S3SQLCustomForm("organisation_id",
                                # @ToDo: Filter Courses by Training Center
                                "course_id",
                                #"site_id",
                                "location_id",
                                "start_date",
                                "end_date",
                                S3SQLInlineComponent("training_event_instructor",
                                                     label = T("Instructor"),
                                                     fields = [("", "person_id")],
                                                     # @ToDo: Filter to HRMs (this should be done through AC?)
                                                     #filterby = ({"field": "type",
                                                     #             "options": 3,
                                                     #             },),
                                                     ),
                                "comments",
                                )

    s3db.configure(tablename,
                   crud_form = crud_form,
                   list_fields = list_fields,
                   )

settings.customise_hrm_training_event_resource = customise_hrm_training_event_resource
# -------------------------------------------------------------------------
def hrm_training_event_report_pdf_export(r, **attr):
    """
        Generate a PDF Export of a training Event Report

        @param r: the S3Request (r.record = the hrm_training_event)
        @param attr: controller attributes

        @return: the PDF response, rendered via a custom layout callback
    """

    from s3 import s3_fullname, s3_str

    record = r.record

    T = current.T
    db = current.db
    s3db = current.s3db

    current_language = T.accepted_language
    if current_language == "es":
        # Reach different translation
        title = s3_str(T("Training Event Report"))
    else:
        title = s3_str(T("Training Report"))

    if record.course_id:
        # Include the course name in the title
        course_name = s3db.hrm_training_event.course_id.represent(record.course_id)
        title = "%s: %s" % (title, course_name)

    def callback(r):
        # Build the PDF body (logo + header block + numbered sections)

        from gluon.html import DIV, TABLE, TD, TH, TR
        from s3db.org import org_OrganisationRepresent, \
                             org_organisation_logo

        rtable = s3db.hrm_training_event_report
        date_represent = rtable.date.represent
        org_represent = org_OrganisationRepresent(parent = False,
                                                  acronym = False,
                                                  )

        # Logo
        otable = db.org_organisation
        org_id = record.organisation_id
        org = db(otable.id == org_id).select(otable.name,
                                             otable.acronym, # Present for consistent cache key
                                             otable.logo,
                                             limitby = (0, 1),
                                             ).first()
        #if settings.get_L10n_translate_org_organisation():
        #org_name = org_represent(org_id)
        #else:
        #    org_name = org.name

        logo = org.logo
        if logo:
            logo = org_organisation_logo(org)
        elif settings.get_org_branches():
            # Fall back to the root organisation's logo
            from s3db.org import org_root_organisation
            root_org = current.cache.ram(
                # Common key with auth.root_org
                "root_org_%s" % org_id,
                lambda: org_root_organisation(org_id),
                time_expire = 120
                )
            logo = org_organisation_logo(root_org)

        # Read the report
        # NOTE(review): assumes a report row exists for this event —
        # .first() could be None; confirm the export is only reachable
        # for saved reports
        report = db(rtable.training_event_id == r.id).select(limitby = (0, 1),
                                                             ).first()

        # Header
        header = TABLE(TR(TH("%s:" % T("Name")),
                          TD(s3_fullname(report.person_id)),
                          TH("%s:" % T("Training Date")),
                          TD(date_represent(record.start_date)),
                          ),
                       TR(TH("%s:" % T("Position")),
                          TD(rtable.job_title_id.represent(report.job_title_id)),
                          TH("%s:" % T("Finance Codes")),
                          TD(report.code),
                          ),
                       TR(TH("%s:" % T("National Society Visited")),
                          TD(org_represent(report.organisation_id)),
                          TH("%s:" % T("Report Date")),
                          TD(date_represent(report.date)),
                          ),
                       TR(TH("%s:" % T("Training Purpose")),
                          TD(report.purpose,
                             _colspan = 3,
                             ),
                          ),
                       )

        # Main (numbered report sections)
        main = TABLE(TR(TH("1. %s" % T("Objectives"))),
                     TR(TD(report.objectives)),
                     TR(TH("2. %s" % T("Methodology"))),
                     TR(TD(report.methodology)),
                     TR(TH("3. %s" % T("Implemented Actions"))),
                     TR(TD(report.actions)),
                     TR(TH("4. %s" % T("About the participants"))),
                     TR(TD(report.participants)),
                     TR(TH("5. %s" % T("Results and Lessons Learned"))),
                     TR(TD(report.results)),
                     TR(TH("6. %s" % T("Follow-up Required"))),
                     TR(TD(report.followup)),
                     TR(TH("7. %s" % T("Additional relevant information"))),
                     TR(TD(report.additional)),
                     TR(TH("8. %s" % T("General Comments"))),
                     TR(TD(report.comments)),
                     )

        output = DIV(TABLE(TR(TD(logo),
                              #TD(org_name), # This isn't rtl-proof, check vol_service_record for how to handle that if-required
                              )),
                     TABLE(TR(TD(title))),
                     TABLE(header),
                     TABLE(main),
                     )

        return output

    attr["rheader"] = None

    from s3.s3export import S3Exporter
    exporter = S3Exporter().pdf
    pdf_title = title
    return exporter(r.resource,
                    request = r,
                    method = "list",
                    pdf_title = pdf_title,
                    pdf_table_autogrow = "B",
                    pdf_callback = callback,
                    **attr
                    )
# -------------------------------------------------------------------------
def customise_hrm_training_event_controller(**attr):
    """
        Training Events: filter to the user's Reference Center,
        bind the PDF report export, custom rheader tabs.
    """

    T = current.T
    auth = current.auth
    s3db = current.s3db
    s3 = current.response.s3

    if not auth.s3_has_role("ADMIN") and \
       auth.s3_has_roles(("training_coordinator", "training_assistant")):
        # Filter Events to just those of this Reference Center
        from s3 import FS
        s3.filter = (FS("~.organisation_id") == auth.user.organisation_id)

    # Bind the printable report export
    s3db.set_method("hrm", "training_event",
                    method = "report_pdf_export",
                    action = hrm_training_event_report_pdf_export,
                    )

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        result = standard_prep(r) if callable(standard_prep) else True

        if r.component_name == "training_event_report" and r.component_id:
            # Footer link to the PDF version of the report
            from gluon.html import A, DIV#, URL
            from s3 import ICON
            s3.rfooter = DIV(A(ICON("print"),
                               " ",
                               T("PDF Report"),
                               _href = URL(args = [r.id, "report_pdf_export"]),#, extension="pdf"),
                               _class = "action-btn",
                               ),
                             )
        return result
    s3.prep = custom_prep

    from s3db.hrm import hrm_rheader
    attr["rheader"] = lambda r: \
        hrm_rheader(r, tabs=[(T("Training Event Details"), None),
                             (T("Participants"), "participant"),
                             (T("Report"), "training_event_report"),
                             ])
    return attr

settings.customise_hrm_training_event_controller = customise_hrm_training_event_controller
# -------------------------------------------------------------------------
def customise_hrm_training_event_report_resource(r, tablename):
    """
        Training Event Report: default author, relabelled fields,
        numbered custom form with inline supporting documents.
    """

    s3db = current.s3db
    rtable = s3db.hrm_training_event_report

    person_id = rtable.person_id
    person_id.default = current.auth.s3_logged_in_person()
    person_id.label = T("Name")

    # Visited org must be an NS
    ns_only("hrm_training_event_report",
            required = False,
            branches = False,
            updateable = False,
            )
    rtable.organisation_id.label = T("National Society Visited")
    rtable.code.label = T("Finance Codes")

    from s3 import S3SQLCustomForm, S3SQLInlineComponent
    crud_form = S3SQLCustomForm("person_id",
                                "job_title_id",
                                "organisation_id",
                                "purpose",
                                "code",
                                "date",
                                (("1. %s" % rtable.objectives.label), "objectives"),
                                (("2. %s" % rtable.methodology.label), "methodology"),
                                (("3. %s" % rtable.actions.label), "actions"),
                                (("4. %s" % rtable.participants.label), "participants"),
                                (("5. %s" % rtable.results.label), "results"),
                                (("6. %s" % rtable.followup.label), "followup"),
                                (("7. %s" % rtable.additional.label), "additional"),
                                (("8. %s" % rtable.comments.label), "comments"),
                                S3SQLInlineComponent("document",
                                                     label = "9. %s" % T("Supporting Documentation"),
                                                     link = False,
                                                     fields = ["file"],
                                                     ),
                                "comments",
                                )

    s3db.configure(tablename,
                   crud_form = crud_form,
                   )

settings.customise_hrm_training_event_report_resource = customise_hrm_training_event_report_resource
# -------------------------------------------------------------------------
def customise_member_membership_resource(r, tablename):
    """ Memberships are presented as "Partners" """

    from s3layouts import S3PopupLink

    s3db = current.s3db
    mtable = s3db.member_membership

    mtable.code.label = T("Partner ID")

    CREATE_TYPE = T("Create Partner Type")
    mtable.membership_type_id.comment = S3PopupLink(f = "membership_type",
                                                    label = CREATE_TYPE,
                                                    title = CREATE_TYPE,
                                                    tooltip = T("Add a new partner type to the catalog."),
                                                    )

    list_fields = [(T("Full Name"), "person_id"),
                   "organisation_id",
                   "membership_type_id",
                   "code",
                   (T("National ID"), "person_id$national_id.value"),
                   (T("Email"), "email.value"),
                   (T("Mobile Phone"), "phone.value"),
                   "membership_fee",
                   (T("Paid"), "paid"),
                   ]
    s3db.configure(tablename,
                   list_fields = list_fields,
                   )

    current.response.s3.crud_strings[tablename] = Storage(
        label_create = T("Create Partner"),
        title_display = T("Partner Details"),
        title_list = T("Partners"),
        title_update = T("Edit Partner Details"),
        title_upload = T("Import Partners"),
        label_list_button = T("List Partners"),
        label_delete_button = T("Delete Partner"),
        msg_record_created = T("Partner added"),
        msg_record_modified = T("Partner updated"),
        msg_record_deleted = T("Partner deleted"),
        msg_list_empty = T("No Partners currently defined"),
        )

settings.customise_member_membership_resource = customise_member_membership_resource
# -------------------------------------------------------------------------
def customise_member_membership_controller(**attr):
ns_only("member_membership",
required = True,
branches = True,
updateable = True,
)
return attr
settings.customise_member_membership_controller = customise_member_membership_controller
# -------------------------------------------------------------------------
def customise_member_membership_type_resource(r, tablename):
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Create Partner Type"),
title_display = T("Partner Type Details"),
title_list = T("Partner Types"),
title_update = T("Edit Partner Type Details"),
title_upload = T("Import Partner Types"),
label_list_button = T("List Partner Types"),
label_delete_button = T("Delete Partner Type"),
msg_record_created = T("Partner Type added"),
msg_record_modified = T("Partner Type updated"),
msg_record_deleted = T("Partner Type deleted"),
msg_list_empty = T("No Partner Types currently defined"),
)
settings.customise_member_membership_type_resource = customise_member_membership_type_resource
# -------------------------------------------------------------------------
def customise_member_membership_type_controller(**attr):
ns_only("member_membership_type",
required = False,
branches = False,
updateable = True,
)
return attr
settings.customise_member_membership_type_controller = customise_member_membership_type_controller
# -------------------------------------------------------------------------
#def on_inv_adj_close():
# """
# Nothing needed here currently
# """
# return
# -------------------------------------------------------------------------
#def customise_inv_adj_resource(r, tablename):
# current.s3db.configure(tablename,
# on_inv_adj_close = on_inv_adj_close,
# )
#settings.customise_inv_adj_resource = customise_inv_adj_resource
# -------------------------------------------------------------------------
def inv_pdf_header(r, title=None):
"""
PDF header for Stock Reports
@param r: the S3Request
@param title: the report title
"""
# Get organisation name and logo
from .layouts import OM
name, logo = OM().render()
from gluon.html import DIV, H2, H4, P, TABLE, TR, TD
# Report title and subtitle
title = H2(title) if title else ""
subtitle = ""
get_vars = r.get_vars
report = get_vars.get("report")
if report == "movements":
from s3 import S3TypeConverter, S3DateTime
# Get earliest/latest date from filter
convert = S3TypeConverter.convert
dtstr = get_vars.get("_transaction.date__ge")
earliest = convert(datetime.datetime, dtstr) if dtstr else ""
dtstr = get_vars.get("_transaction.date__le")
latest = convert(datetime.datetime, dtstr) if dtstr else ""
# Convert into local calendar/format
if earliest:
earliest = S3DateTime.date_represent(earliest, utc=True)
if latest:
latest = S3DateTime.date_represent(latest, utc=True)
# Add as subtitle
if earliest or latest:
subtitle = P(" - ".join((earliest, latest)))
output = TABLE(TR(TD(DIV(logo,
H4(name),
),
),
TD(DIV(title,
subtitle,
),
),
),
)
return output
# -------------------------------------------------------------------------
    def customise_inv_inv_item_resource(r, tablename):
        """
            Configure Inventory Items:
            - virtual fields for total weight & volume
            - extra date filter for the Stock Movements report
            - grouped "Stock Reports" definitions
            - custom list_fields & pivot-report options
        """

        from s3db.inv import inv_item_total_weight, \
                             inv_item_total_volume, \
                             inv_stock_movements

        s3db = current.s3db

        # Add field methods for total weight and volume
        from gluon import Field
        table = s3db.inv_inv_item
        table.total_weight = Field.Method("total_weight",
                                          inv_item_total_weight,
                                          )
        table.total_volume = Field.Method("total_volume",
                                          inv_item_total_volume,
                                          )

        resource = r.resource
        if resource.tablename == "inv_inv_item" and r.method == "grouped":
            report = r.get_vars.get("report")
            if report == "movements":
                # Inject a date filter for transactions
                filter_widgets = resource.get_config("filter_widgets")
                from s3 import S3DateFilter
                date_filter = S3DateFilter("transaction_date",
                                           label = T("Date"),
                                           fieldtype = "date",
                                           selector = "_transaction.date",
                                           )
                filter_widgets.insert(1, date_filter)

        # Stock Reports
        # - keys are selected via the "report" GET var in grouped requests
        stock_reports = {"default": {
                            "title": T("Stock Position Report"),
                            "fields": [(T("Warehouse"), "site_id$name"),
                                       "item_id$item_category_id",
                                       #"bin",
                                       "layout_id",
                                       "item_id$name",
                                       "quantity",
                                       "pack_value",
                                       "total_value",
                                       ],
                            "groupby": ["site_id",
                                        #"supply_org_id",
                                        ],
                            "orderby": ["site_id$name",
                                        "item_id$name",
                                        ],
                            "aggregate": [("sum", "quantity"),
                                          ("sum", "total_value"),
                                          ],
                            "pdf_header": inv_pdf_header,
                            },
                         "weight_and_volume": {
                            "title": T("Weight and Volume Report"),
                            "fields": [(T("Warehouse"), "site_id$name"),
                                       "item_id$item_category_id",
                                       #"bin",
                                       "layout_id",
                                       "item_id$name",
                                       "quantity",
                                       "item_id$weight",
                                       "item_id$volume",
                                       "total_weight",
                                       "total_volume",
                                       ],
                            "groupby": ["site_id",
                                        ],
                            "orderby": ["site_id$name",
                                        "item_id$name",
                                        ],
                            "aggregate": [("sum", "quantity"),
                                          ("sum", "total_weight"),
                                          ("sum", "total_volume"),
                                          ],
                            "pdf_header": inv_pdf_header,
                            },
                         "movements": {
                            "title": T("Stock Movements Report"),
                            "fields": [(T("Warehouse"), "site_id$name"),
                                       "item_id$item_category_id",
                                       #"bin",
                                       "layout_id",
                                       "item_id$name",
                                       (T("Origin/Destination"), "sites"),
                                       (T("Documents"), "documents"),
                                       (T("Initial Quantity"), "original_quantity"),
                                       (T("Incoming"), "quantity_in"),
                                       (T("Outgoing"), "quantity_out"),
                                       (T("Final Quantity"), "quantity"),
                                       ],
                            "groupby": ["site_id",
                                        ],
                            "orderby": ["site_id$name",
                                        "item_id$name",
                                        ],
                            "aggregate": [("sum", "original_quantity"),
                                          ("sum", "quantity_in"),
                                          ("sum", "quantity_out"),
                                          ("sum", "quantity"),
                                          ],
                            "extract": inv_stock_movements,
                            "pdf_header": inv_pdf_header,
                            },
                         }

        direct_stock_edits = settings.get_inv_direct_stock_edits()

        list_fields = [(T("Description"), "item_id"),
                       (T("Reference"), "item_id$code"),
                       (T("Owner"), "owner_org_id"),
                       (T("Donor"), "supply_org_id"),
                       (T("Stock Location"), "site_id"),
                       (T("Physical Balance"), "quantity"),
                       (T("Unit Weight"), "item_id$weight"),
                       (T("Total Weight"), "total_weight"),
                       (T("Unit Volume"), "item_id$volume"),
                       (T("Total Volume"), "total_volume"),
                       (T("Unit Price"), "pack_value"),
                       (T("Total Price"), "total_value"),
                       (T("Comments"), "comments"),
                       ]

        filter_widgets = resource.get_config("filter_widgets")
        if filter_widgets is not None:
            from s3 import S3OptionsFilter
            filter_widgets.insert(2, S3OptionsFilter("item_id",
                                                     #label=T("Status"),
                                                     hidden = True,
                                                     ))

        # Extend the pivot-report facts with the virtual totals
        report_options = s3db.get_config(tablename, "report_options")
        report_options.fact += [(T("Total Weight"), "total_weight"),
                                (T("Total Volume"), "total_volume"),
                                ]
        report_options.precision = {"total_value": 3,
                                    "total_weight": 3,
                                    "total_volume": 3,
                                    }

        s3db.configure("inv_inv_item",
                       create = direct_stock_edits,
                       deletable = direct_stock_edits,
                       editable = direct_stock_edits,
                       listadd = direct_stock_edits,
                       grouped = stock_reports,
                       # Needed for Field.Methods
                       extra_fields = ["quantity",
                                       "pack_value",
                                       "item_id$weight",
                                       "item_id$volume",
                                       "item_pack_id$quantity",
                                       ],
                       list_fields = list_fields,
                       )

    settings.customise_inv_inv_item_resource = customise_inv_inv_item_resource
# -------------------------------------------------------------------------
def customise_inv_inv_item_controller(**attr):
ADMIN = current.auth.s3_has_role("ADMIN")
if ADMIN:
# ADMIN is allowed to Edit Inventory bypassing the need to use Adjustments
# - seems wrong to me as Adjustments aren't that heavy, but has been requested
settings.inv.direct_stock_edits = True
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
if not ADMIN and \
r.method == "import":
# Only ADMIN is allowed to Import Inventory
return False
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
return result
s3.prep = custom_prep
return attr
settings.customise_inv_inv_item_controller = customise_inv_inv_item_controller
# -------------------------------------------------------------------------
def on_inv_recv_process(row):
"""
Update any inv_order_item records
"""
db = current.db
s3db = current.s3db
recv_id = row.id
# Lookup the PO for this receive
rtable = s3db.inv_recv
record = db(rtable.id == recv_id).select(rtable.purchase_ref,
limitby = (0, 1),
).first()
purchase_ref = record.purchase_ref
if not purchase_ref:
return
# Lookup the REQs for this receive
rrtable = s3db.inv_recv_req
reqs = db(rrtable.recv_id == recv_id).select(rrtable.req_id)
req_ids = [row.req_id for row in reqs]
# Lookup the Order Items which match these REQs and PO
otable = s3db.inv_order_item
if len(req_ids) > 1:
query = (otable.req_id.belongs(req_ids))
else:
query = (otable.req_id == req_ids[0])
query &= (otable.purchase_ref == purchase_ref)
orders = db(query).select(otable.id,
otable.item_id,
)
if not orders:
return
# Lookup the Matching Items in the Shipment
order_items = [row.item_id for row in orders]
ttable = s3db.inv_track_item
query = (ttable.recv_id == recv_id) & \
(ttable.item_id.belongs(order_items))
recv_items = db(query).select(ttable.item_id)
if not recv_items:
return
recv_items = [row.item_id for row in recv_items]
orders_to_update = []
for row in orders:
if row.item_id in recv_items:
orders_to_update.append(row.id)
if orders_to_update:
if len(orders_to_update) > 1:
query = (otable.id.belongs(orders_to_update))
else:
query = (otable.id == orders_to_update[0])
db(query).update(recv_id = recv_id)
# -------------------------------------------------------------------------
def customise_inv_recv_resource(r, tablename):
if not r.interactive and r.representation != "aadata":
return
db = current.db
s3db = current.s3db
table = s3db.inv_recv
# Use Custom Represent for Sites to send to
from .controllers import org_SiteRepresent
table.from_site_id.requires.other.label = org_SiteRepresent()
# Filter list of Orgs
# - all root NS
# - our Branches
# - our Donors/Suppliers (& their branches, if-any)
ttable = s3db.org_organisation_type
try:
type_id = db(ttable.name == RED_CROSS).select(ttable.id,
cache = s3db.cache,
limitby = (0, 1),
).first().id
except AttributeError:
# No IFRC prepop done - skip (e.g. testing impacts of CSS changes in this theme)
pass
else:
root_org_id = current.auth.root_org()
otable = s3db.org_organisation
root_org = db(otable.id == root_org_id).select(otable.pe_id,
limitby = (0, 1),
).first()
ltable = db.org_organisation_organisation_type
try:
query = ((ltable.organisation_type_id == type_id) & (ltable.organisation_id == otable.id) & (otable.id == otable.root_organisation)) | \
(otable.root_organisation == root_org_id) | \
((ltable.organisation_type_id != type_id) & (ltable.organisation_id == otable.id) & (otable.realm_entity == root_org.pe_id))
except:
# No root_org: we must be testing as Admin
query = (ltable.organisation_type_id == type_id) & \
(ltable.organisation_id == otable.id) & \
(otable.id == otable.root_organisation)
orgs = db(query).select(otable.id)
org_ids = [row.id for row in orgs]
table.organisation_id.requires.other.set_filter(filterby = "id",
filter_opts = org_ids,
)
f = table.transport_type
f.requires = IS_IN_SET(transport_opts)
f.represent = S3Represent(options = transport_opts)
from s3 import IS_ONE_OF, S3SQLCustomForm, S3SQLInlineLink
crud_form = S3SQLCustomForm(S3SQLInlineLink("req",
field = "req_id",
label = T("Request Number"),
# @ToDo: Filter appropriately
#requires = IS_ONE_OF()
),
"recv_ref",
"site_id",
"type",
"organisation_id",
"from_site_id",
"eta",
"date",
"send_ref",
"purchase_ref",
"sender_id",
"recipient_id",
"transport_type",
"status",
"grn_status",
"cert_status",
"filing_status",
"comments",
)
s3db.configure(tablename,
addbtn = True,
crud_form = crud_form,
listadd = False,
list_fields = ["recv_ref",
(T("Request Number"), "recv_req.req_id"),
"send_ref",
"purchase_ref",
"recipient_id",
"organisation_id",
"from_site_id",
"site_id",
"date",
"type",
"status",
],
on_inv_recv_process = on_inv_recv_process,
)
# Custom GRN
s3db.set_method("inv", "recv",
method = "form",
action = PrintableShipmentForm,
)
settings.customise_inv_recv_resource = customise_inv_recv_resource
# -------------------------------------------------------------------------
def customise_inv_recv_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.component_name == "document":
s3.crud_strings["doc_document"].label_create = T("File Signed Document")
field = current.s3db.doc_document.name
field.label = T("Type")
document_type_opts = {"REQ": T("Requisition"),
"GRN": T("GRN"),
"WB": T("Waybill"),
}
#from s3 import S3Represent
field.requires = IS_IN_SET(document_type_opts)
field.represent = S3Represent(options = document_type_opts)
elif r.get_vars.get("incoming"):
s3.crud_strings.inv_recv.title_list = T("Incoming Shipments")
# Filter to just Shipments able to be Received
# SHIP_STATUS_IN_PROCESS = 0
# SHIP_STATUS_SENT = 2
from s3 import s3_set_default_filter
s3_set_default_filter("~.status",
[0, 2],
tablename = "inv_recv")
return result
s3.prep = custom_prep
return attr
settings.customise_inv_recv_controller = customise_inv_recv_controller
# -------------------------------------------------------------------------
    def on_inv_send_process(record):
        """
            Remove req_fulfil Dashboard Alert if completed

            @param record: the inv_send Row (must include id & site_id)
        """

        from s3db.inv import REQ_STATUS_COMPLETE, \
                             REQ_STATUS_PARTIAL

        db = current.db
        s3db = current.s3db

        # Which Requests did we act on & what is their Status?
        srtable = s3db.inv_send_req
        rtable = s3db.inv_req
        query = (srtable.send_id == record.id) & \
                (srtable.req_id == rtable.id)
        reqs = db(query).select(rtable.id,
                                rtable.transit_status,
                                )
        req_ids_to_delete = []
        req_ids_to_check = []
        for row in reqs:
            transit_status = row.transit_status
            if transit_status == REQ_STATUS_COMPLETE:
                req_ids_to_delete.append(row.id)
            elif transit_status == REQ_STATUS_PARTIAL:
                # Partially-fulfilled: check per-item below whether our
                # site's items are all fully in transit
                req_ids_to_check.append(row.id)

        if req_ids_to_check:
            ritable = s3db.inv_req_item
            query = (ritable.req_id.belongs(req_ids_to_check)) & \
                    (ritable.site_id == record.site_id)
            req_items = db(query).select(ritable.req_id,
                                         ritable.quantity,
                                         ritable.quantity_transit,
                                         )
            # reqs: req_id -> True while every seen item is complete
            reqs = {}
            for row in req_items:
                if row.quantity_transit >= row.quantity:
                    item_complete = True
                else:
                    item_complete = False
                req_id = row.req_id
                if req_id in reqs:
                    if reqs[req_id]:
                        if not item_complete:
                            # Any single Incomplete Item makes the Request Incomplete
                            reqs[req_id] = False
                else:
                    reqs[req_id] = item_complete
            for req_id in reqs:
                if reqs[req_id]:
                    req_ids_to_delete.append(req_id)

        if req_ids_to_delete:
            # Remove the matching req_fulfil Dashboard Alerts
            ntable = s3db.auth_user_notification
            query = (ntable.type == "req_fulfil") & \
                    (ntable.record_id.belongs(req_ids_to_delete))
            resource = s3db.resource("auth_user_notification", filter = query)
            resource.delete()
# -------------------------------------------------------------------------
    def customise_inv_send_resource(r, tablename):
        """
            Sent Shipments:
            - custom Site represent for destinations
            - custom transport type options
            - custom CRUD form & list fields
            - custom Waybill method
        """

        #from gluon import IS_IN_SET
        s3db = current.s3db

        table = s3db.inv_send

        # Use Custom Represent for Sites to send to
        from .controllers import org_SiteRepresent
        table.to_site_id.requires.other.label = org_SiteRepresent()

        f = table.transport_type
        f.requires = IS_IN_SET(transport_opts)
        f.represent = S3Represent(options = transport_opts)

        from s3 import S3SQLCustomForm, S3SQLInlineLink
        crud_form = S3SQLCustomForm(S3SQLInlineLink("req",
                                                    field = "req_id",
                                                    label = T("Request Number"),
                                                    ),
                                    "send_ref",
                                    "site_id",
                                    "type",
                                    "to_site_id",
                                    "organisation_id",
                                    "sender_id",
                                    "recipient_id",
                                    "transport_type",
                                    "transported_by",
                                    "transport_ref",
                                    "driver_name",
                                    "driver_phone",
                                    "vehicle_plate_no",
                                    # Will only appear in Update forms:
                                    "date",
                                    "delivery_date",
                                    "status",
                                    "filing_status",
                                    "comments",
                                    )

        s3db.configure(tablename,
                       addbtn = True,
                       crud_form = crud_form,
                       listadd = False,
                       list_fields = ["send_ref",
                                      #"req_ref",
                                      (T("Request Number"), "send_req.req_id"),
                                      #"sender_id",
                                      "site_id",
                                      "date",
                                      "recipient_id",
                                      "delivery_date",
                                      "to_site_id",
                                      "status",
                                      #"driver_name",
                                      #"driver_phone",
                                      #"vehicle_plate_no",
                                      #"time_out",
                                      #"comments",
                                      ],
                       on_inv_send_process = on_inv_send_process,
                       )

        # Custom Waybill
        s3db.set_method("inv", "send",
                        method = "form",
                        action = PrintableShipmentForm,
                        )

    settings.customise_inv_send_resource = customise_inv_send_resource
# -------------------------------------------------------------------------
def customise_inv_send_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.component_name == "document":
s3.crud_strings["doc_document"].label_create = T("File Signed Document")
field = current.s3db.doc_document.name
field.label = T("Type")
document_type_opts = {"REQ": T("Requisition"),
"WB": T("Waybill"),
}
#from gluon import IS_IN_SET
#from s3 import S3Represent
field.requires = IS_IN_SET(document_type_opts)
field.represent = S3Represent(options = document_type_opts)
return result
s3.prep = custom_prep
return attr
settings.customise_inv_send_controller = customise_inv_send_controller
# -------------------------------------------------------------------------
def stock_limit_alerts(warehouse):
"""
Generate an Alert if Stock Level falls below Minimum
Cancel Alerts if Stock Level is above Minimum
"""
db = current.db
s3db = current.s3db
site_id = warehouse.site_id
# Read Minimums
mtable = s3db.inv_minimum
query = (mtable.site_id == site_id) &\
(mtable.deleted == False)
minimums = db(query).select(mtable.id,
mtable.item_id,
mtable.quantity,
)
item_ids = [row.item_id for row in minimums]
# Read current stock for each
itable = s3db.inv_inv_item
ptable = s3db.supply_item_pack
query = (itable.site_id == site_id) &\
(itable.item_id.belongs(item_ids)) &\
(itable.item_pack_id == ptable.id) &\
(itable.deleted == False)
inventory = db(query).select(itable.item_id,
itable.quantity,
ptable.quantity,
)
ntable = s3db.auth_user_notification
nquery = (ntable.tablename == "inv_minimum")
alerts = []
for row in minimums:
# What is the Stock for this Item?
item_id = row.item_id
minimum = row.quantity
minimum_id = row.id
stock = 0
for row in inventory:
if row["inv_inv_item.item_id"] == item_id:
stock += (row["inv_inv_item.quantity"] * row["supply_item_pack.quantity"])
query = nquery & (ntable.record_id == minimum_id)
if stock < minimum:
# Add Alert, if there is not one already present
query &= (ntable.deleted == False)
exists = current.db(query).select(ntable.id,
limitby = (0, 1),
).first()
if not exists:
alerts.append((item_id, stock, minimum_id))
else:
# Remove any Minimum Alerts for this Item/Warehouse
resource = s3db.resource("auth_user_notification",
filter = query,
)
resource.delete()
if alerts:
# Generate Alerts
warehouse_name = warehouse.name
from .controllers import inv_operators_for_sites
operators = inv_operators_for_sites([site_id])[site_id]["operators"]
languages = {}
for row in operators:
language = row["auth_user.language"]
if language not in languages:
languages[language] = []
languages[language].append((row["pr_person_user.pe_id"], row["pr_person_user.user_id"]))
# Bulk Lookup Item Represents
# - assumes that we are not using translate = True!
item_ids = [alert[0] for alert in alerts]
items = itable.item_id.represent.bulk(item_ids, show_link=False)
#from gluon import URL
from s3 import s3_str
url = "%s%s" % (settings.get_base_public_url(),
URL(c="inv", f="warehouse",
args = [warehouse.id, "inv_item"],
),
)
send_email = current.msg.send_by_pe_id
insert = ntable.insert
T = current.T
session = current.session
#session_s3 = session.s3
ui_language = session.s3.language
subject_T = T("%(item)s replenishment needed in %(site)s Warehouse. %(quantity)s remaining")
message_T = T("%(item)s replenishment needed in %(site)s Warehouse. %(quantity)s remaining. Please review at: %(url)s")
alert_T = T("%(item)s replenishment needed in %(site)s Warehouse. %(quantity)s remaining")
for alert in alerts:
item_id = alert[0]
item = items.get(item_id)
quantity = int(alert[1])
minimum_id = alert[2]
for language in languages:
T.force(language)
#session_s3.language = language # for date_represent
subject = s3_str(subject_T) % {"item": item,
"site": warehouse_name,
"quantity": quantity,
}
message = s3_str(message_T) % {"item": item,
"site": warehouse_name,
"quantity": quantity,
"url": url,
}
alert = s3_str(alert_T) % {"item": item,
"site": warehouse_name,
"quantity": quantity,
}
users = languages[language]
for user in users:
send_email(user[0],
subject = subject,
message = message,
)
# Add Alert to Dashboard
insert(user_id = user[1],
name = alert,
url = url,
tablename = "inv_minimum",
record_id = minimum_id,
)
# Restore language for UI
#session_s3.language = ui_language
T.force(ui_language)
# Interactive Notification
alert = s3_str(alert_T) % {"item": item,
"site": warehouse_name,
"quantity": quantity,
}
session.warning.append(alert)
# -------------------------------------------------------------------------
    def on_free_capacity_update(warehouse):
        """
            Generate an Alert if Free Capacity < 10% of Capacity
            Cancel Alerts if Free capacity is above this
            Trigger Stock Limit Alert creation/cancellation

            @param warehouse: the inv_warehouse Row
                              (must include id, site_id, name, capacity
                               & free_capacity)
        """

        s3db = current.s3db

        warehouse_id = warehouse.id

        ntable = s3db.auth_user_notification
        query = (ntable.tablename == "inv_warehouse") & \
                (ntable.record_id == warehouse_id) & \
                (ntable.type == "capacity")

        free_capacity = warehouse.free_capacity
        # Alert threshold: 10% of total capacity
        threshold = warehouse.capacity * 0.1
        if free_capacity < threshold:
            # Generate Capacity Alert, if there is not one already present
            query = query & (ntable.deleted == False)
            exists = current.db(query).select(ntable.id,
                                              limitby = (0, 1),
                                              ).first()
            if not exists:
                #from gluon import URL
                from s3 import s3_str
                site_id = warehouse.site_id
                warehouse_name = warehouse.name
                url = "%s%s" % (settings.get_base_public_url(),
                                URL(c="inv", f="warehouse",
                                    args = warehouse_id,
                                    ),
                                )
                send_email = current.msg.send_by_pe_id
                T = current.T
                session_s3 = current.session.s3
                ui_language = session_s3.language
                subject_T = T("Stockpile Capacity in %(site)s Warehouse is less than %(threshold)s m3")
                message_T = T("Stockpile Capacity in %(site)s Warehouse is less than %(threshold)s m3. Please review at: %(url)s")
                alert_T = T("Stockpile Capacity in %(site)s Warehouse is less than %(threshold)s m3")
                # Notify the Warehouse Operators, grouped by language
                from .controllers import inv_operators_for_sites
                operators = inv_operators_for_sites([site_id])[site_id]["operators"]
                insert = ntable.insert
                languages = {}
                for row in operators:
                    language = row["auth_user.language"]
                    if language not in languages:
                        languages[language] = []
                    languages[language].append((row["pr_person_user.pe_id"], row["pr_person_user.user_id"]))
                for language in languages:
                    # Localise the messages per recipient language
                    T.force(language)
                    #session_s3.language = language # for date_represent
                    subject = s3_str(subject_T) % {"site": warehouse_name,
                                                   "threshold": threshold,
                                                   }
                    message = s3_str(message_T) % {"site": warehouse_name,
                                                   "threshold": threshold,
                                                   "url": url,
                                                   }
                    alert = s3_str(alert_T) % {"site": warehouse_name,
                                               "threshold": threshold,
                                               }
                    users = languages[language]
                    for user in users:
                        send_email(user[0],
                                   subject = subject,
                                   message = message,
                                   )
                        # Add Alert to Dashboard
                        insert(user_id = user[1],
                               name = alert,
                               url = url,
                               type = "capacity",
                               tablename = "inv_warehouse",
                               record_id = warehouse_id,
                               )
                # Restore language for UI
                #session_s3.language = ui_language
                T.force(ui_language)
        else:
            # Remove any Capacity Alerts
            resource = s3db.resource("auth_user_notification",
                                     filter = query,
                                     )
            resource.delete()

        # Trigger Stock Limit Alert creation/cancellation
        stock_limit_alerts(warehouse)
# -------------------------------------------------------------------------
def customise_inv_warehouse_resource(r, tablename):
s3db = current.s3db
settings.inv.recv_tab_label = "Received/Incoming Shipments"
settings.inv.send_tab_label = "Sent Shipments"
s3db.configure("inv_warehouse",
on_free_capacity_update = on_free_capacity_update,
)
# Only Nepal RC use Warehouse Types
field = s3db.inv_warehouse.warehouse_type_id
field.readable = field.writable = False
list_fields = s3db.get_config("inv_warehouse", "list_fields")
try:
list_fields.remove("warehouse_type_id")
except ValueError:
# Already removed
pass
settings.customise_inv_warehouse_resource = customise_inv_warehouse_resource
# -------------------------------------------------------------------------
def customise_org_facility_resource(r, tablename):
#root_org = current.auth.root_org_name()
#if root_org != HNRC:
# return
# Simplify Form
s3db = current.s3db
table = s3db.org_facility
table.code.readable = table.code.writable = False
table.opening_times.readable = table.opening_times.writable = False
table.website.readable = table.website.writable = False
field = s3db.org_site_facility_type.facility_type_id
field.readable = field.writable = False
# Simplify Search Fields
from s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
text_fields = ["name",
#"code",
"comments",
"organisation_id$name",
"organisation_id$acronym",
]
for level in levels:
lfield = "location_id$%s" % level
text_fields.append(lfield)
s3db.configure("org_facility",
filter_widgets = [
S3TextFilter(text_fields,
label = T("Search"),
),
S3OptionsFilter("organisation_id"),
S3LocationFilter("location_id",
levels = levels,
),
]
)
settings.customise_org_facility_resource = customise_org_facility_resource
# -------------------------------------------------------------------------
def customise_org_office_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
# Organisation needs to be an NS/Branch
ns_only("org_office",
required = True,
branches = True,
# default
#limit_filter_opts = True,
)
return result
s3.prep = custom_prep
return attr
settings.customise_org_office_controller = customise_org_office_controller
# -------------------------------------------------------------------------
def org_organisation_organisation_type_onaccept(form):
"""
* Update the realm entity
* Ensure that all RC Orgs get added to RC org_group
"""
# Get the link
try:
link_id = form.vars.id
except AttributeError:
return
db = current.db
s3db = current.s3db
ttable = s3db.org_organisation_type
ltable = s3db.org_organisation_organisation_type
query = (ltable.id == link_id) & \
(ltable.organisation_type_id == ttable.id)
row = db(query).select(ltable.organisation_id,
ttable.name,
limitby = (0, 1),
).first()
if row:
organisation_id = row["org_organisation_organisation_type.organisation_id"]
# Update the realm entity
current.auth.set_realm_entity("org_organisation",
organisation_id,
force_update = True,
)
if row["org_organisation_type.name"] == RED_CROSS:
# RC Org: ensure a member of RC org_group
gtable = s3db.org_group
group = db(gtable.name == "RC").select(gtable.id,
limitby = (0, 1),
).first()
try:
group_id = group.id
except:
# IFRC prepop not done: Bail
return
mtable = s3db.org_group_membership
query = (mtable.organisation_id == organisation_id) & \
(mtable.group_id == group_id)
member = db(query).select(mtable.id,
limitby = (0, 1),
).first()
if not member:
membership_id = mtable.insert(group_id = group_id,
organisation_id = organisation_id,
)
onaccept = s3db.get_config("org_group_membership", "onaccept")
if onaccept:
mform = Storage(vars = Storage(id = membership_id))
onaccept(mform)
# -------------------------------------------------------------------------
def customise_org_organisation_resource(r, tablename):
s3db = current.s3db
# Ensure that realms get set properly
# Ensure that all RC Orgs get added to RC org_group
#from s3db.org import org_organisation_organisation_type_onaccept
from s3db.org import org_organisation_organisation_type_ondelete
s3db.configure("org_organisation_organisation_type",
onaccept = org_organisation_organisation_type_onaccept,
ondelete = org_organisation_organisation_type_ondelete,
)
if current.auth.override:
# Prepop
# - ensure that realms get set properly
from s3db.org import org_organisation_organisation_onaccept, \
org_organisation_organisation_ondelete
s3db.configure("org_organisation_organisation",
onaccept = org_organisation_organisation_onaccept,
ondelete = org_organisation_organisation_ondelete,
)
settings.customise_org_organisation_resource = customise_org_organisation_resource
# -------------------------------------------------------------------------
    def customise_org_organisation_controller(**attr):
        """
            Organisations: behaviour varies with the organisation_type
            filter in the URL (National Societies / Suppliers / Partners)
            - custom forms, list fields, CRUD strings & rheader tabs
        """

        s3 = current.response.s3

        # e.g. "Red Cross / Red Crescent", "Supplier" or a Partner type list
        type_filter = current.request.get_vars.get("organisation_type.name")

        # Custom prep
        standard_prep = s3.prep
        def custom_prep(r):
            # Call standard prep
            if callable(standard_prep):
                result = standard_prep(r)
            else:
                result = True

            if r.interactive or r.representation == "aadata":
                if not r.component or r.component_name == "branch":
                    resource = r.resource
                    table = resource.table

                    if r.function == "training_center":
                        auth = current.auth
                        if not auth.s3_has_role("ADMIN"):
                            # See NS Training Centers only
                            resource.add_filter(table.root_organisation == auth.root_org())

                            if not auth.s3_has_role("ORG_ADMIN"):
                                resource.configure(insertable = False)

                    type_label = T("Type")

                    if r.get_vars.get("caller") == "org_facility_organisation_id":
                        # Simplify
                        from s3 import S3SQLCustomForm
                        crud_form = S3SQLCustomForm("name",
                                                    "acronym",
                                                    "phone",
                                                    "comments",
                                                    )
                        resource.configure(crud_form = crud_form,
                                           )

                    else:
                        list_fields = ["name",
                                       "acronym",
                                       "organisation_organisation_type.organisation_type_id",
                                       "country",
                                       "website",
                                       ]
                        if type_filter:
                            type_names = type_filter.split(",")
                            if len(type_names) == 1:
                                # Strip Type from list_fields
                                try:
                                    list_fields.remove("organisation_organisation_type.organisation_type_id")
                                except ValueError:
                                    # Already removed
                                    pass
                                type_label = ""

                            if type_filter == RED_CROSS:
                                # Modify filter_widgets
                                filter_widgets = resource.get_config("filter_widgets")
                                # Remove type (always 'RC')
                                filter_widgets.pop(1)

                                # Modify CRUD Strings
                                s3.crud_strings.org_organisation = Storage(
                                    label_create = T("Create National Society"),
                                    title_display = T("National Society Details"),
                                    title_list = T("Red Cross & Red Crescent National Societies"),
                                    title_update = T("Edit National Society"),
                                    title_upload = T("Import Red Cross & Red Crescent National Societies"),
                                    label_list_button = T("List Red Cross & Red Crescent National Societies"),
                                    label_delete_button = T("Delete National Society"),
                                    msg_record_created = T("National Society added"),
                                    msg_record_modified = T("National Society updated"),
                                    msg_record_deleted = T("National Society deleted"),
                                    msg_list_empty = T("No Red Cross & Red Crescent National Societies currently registered"),
                                    )
                                # Add Region to list_fields
                                list_fields.insert(-1, "organisation_region.region_id")
                                # Region is required
                                f = current.s3db.org_organisation_region.region_id
                                f.requires = f.requires.other
                            else:
                                f = current.s3db.org_organisation_region.region_id
                                f.readable = f.writable = False

                            if type_filter == "Supplier":
                                # Show simple free-text contact field
                                contact_field = table.contact
                                contact_field.readable = True
                                contact_field.writable = True

                                # Include contact information in list_fields
                                list_fields = ["name",
                                               "acronym",
                                               "country",
                                               "contact",
                                               "phone",
                                               "website",
                                               ]

                        resource.configure(list_fields = list_fields)

                    if r.interactive:
                        table.country.label = T("Country")
                        from s3 import S3SQLCustomForm, S3SQLInlineLink
                        crud_form = S3SQLCustomForm(
                                        "name",
                                        "acronym",
                                        S3SQLInlineLink("organisation_type",
                                                        field = "organisation_type_id",
                                                        label = type_label,
                                                        multiple = False,
                                                        ),
                                        "organisation_region.region_id",
                                        "country",
                                        "contact",
                                        "phone",
                                        "website",
                                        "logo",
                                        "comments",
                                        )
                        resource.configure(crud_form = crud_form)

            return result
        s3.prep = custom_prep

        if type_filter == "Supplier":
            # Suppliers have simpler Tabs (hide Offices, Warehouses and Contacts)
            tabs = [(T("Basic Details"), None, {"native": 1}),
                    ]
            if settings.get_L10n_translate_org_organisation():
                tabs.append((T("Local Names"), "name"))
            from s3db.org import org_rheader
            attr["rheader"] = lambda r: org_rheader(r, tabs=tabs)

        elif type_filter == "Academic,Bilateral,Government,Intergovernmental,NGO,UN agency":
            # Partners have simpler Tabs (hide Offices, Warehouses and Contacts)
            tabs = [(T("Basic Details"), None, {"native": 1}),
                    (T("Projects"), "project"),
                    ]
            if settings.get_L10n_translate_org_organisation():
                tabs.insert(1, (T("Local Names"), "name"))
            from s3db.org import org_rheader
            attr["rheader"] = lambda r: org_rheader(r, tabs=tabs)

        else:
            # Enable tab for PDF card configurations
            settings.org.pdf_card_configs = True

        return attr

    settings.customise_org_organisation_controller = customise_org_organisation_controller
# -------------------------------------------------------------------------
def customise_org_site_layout_resource(r, tablename):
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Create Warehouse Location"),
title_display = T("Warehouse Location Details"),
title_list = T("Warehouse Locations"),
title_update = T("Edit Warehouse Location"),
label_list_button = T("List Warehouse Locations"),
label_delete_button = T("Delete Warehouse Location"),
msg_record_created = T("Warehouse Location added"),
msg_record_modified = T("Warehouse Location updated"),
msg_record_deleted = T("Warehouse Location deleted"),
msg_list_empty = T("No Warehouse Locations currently registered"),
)
settings.customise_org_site_layout_resource = customise_org_site_layout_resource
# -------------------------------------------------------------------------
def customise_pr_address_resource(r, tablename):
#if current.auth.root_org_name() in ("Honduran Red Cross",
# "Paraguayan Red Cross",
# ):
# Location Hierarchy loaded: Leave things as they are since we have the
# pass
#else:
s3db = current.s3db
s3db.gis_location.addr_street.label = T("Address")
s3db.configure("pr_address",
list_fields = ["type",
(current.messages.COUNTRY, "location_id$L0"),
(T("Address"), "location_id$addr_street"),
#(settings.get_ui_label_postcode(),
# "location_id$addr_postcode")
],
)
settings.customise_pr_address_resource = customise_pr_address_resource
# -------------------------------------------------------------------------
def customise_pr_contact_resource(r, tablename):
table = current.s3db[tablename]
table.comments.readable = table.comments.writable = False
table.contact_description.readable = table.contact_description.writable = False
table.priority.readable = table.priority.writable = False
settings.customise_pr_contact_resource = customise_pr_contact_resource
# -------------------------------------------------------------------------
def customise_pr_education_resource(r, tablename):
s3db = current.s3db
table = s3db[tablename]
table.country.readable = table.country.writable = True
table.grade.readable = table.grade.writable = False
table.major.readable = table.major.writable = False
s3db.configure(tablename,
list_fields = [# Normally accessed via component
#"person_id",
"year",
"level_id",
"award",
#"major",
#"grade",
"institute",
],
)
settings.customise_pr_education_resource = customise_pr_education_resource
# -------------------------------------------------------------------------
def customise_pr_forum_resource(r, tablename):
table = current.s3db.pr_forum
table.forum_type.readable = table.forum_type.writable = False
settings.customise_pr_forum_resource = customise_pr_forum_resource
# -------------------------------------------------------------------------
def customise_pr_forum_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.method == "assign":
auth = current.auth
has_role = auth.s3_has_role
if not has_role("ADMIN") and has_role("training_coordinator"):
# Filter people to just those Trained by this Reference Center or Staff of this Reference Center
from s3 import FS
organisation_id = auth.user.organisation_id
query = (FS("training.training_event_id$organisation_id") == organisation_id) | \
(FS("user.organisation_id") == organisation_id)
s3.filter = query
return result
s3.prep = custom_prep
return attr
settings.customise_pr_forum_controller = customise_pr_forum_controller
# -------------------------------------------------------------------------
#def customise_pr_group_controller(**attr):
# # Organisation needs to be an NS/Branch
# ns_only("org_organisation_team",
# required = False,
# branches = True,
# )
# return attr
#settings.customise_pr_group_controller = customise_pr_group_controller
# -------------------------------------------------------------------------
def customise_pr_person_resource(r, tablename):
table = current.s3db[tablename]
table.first_name.label = T("Forenames")
table.middle_name.label = T("Father's Surname")
table.last_name.label = T("Mother's Surname")
settings.customise_pr_person_resource = customise_pr_person_resource
# -------------------------------------------------------------------------
    def customise_pr_person_controller(**attr):
        """
        Customise the Person controller:
        - personal "Profile" mode for users with few roles
        - external-trainee mode when accessed via the trainee_person
          controller
        - per-component tweaks (HR record, appraisals, certifications,
          competencies, experience, identities)
        """
        s3db = current.s3db
        s3 = current.response.s3
        # Enable scalability-optimized strategies
        settings.base.bigtable = True
        # EXTERNAL: set True below when accessed via the trainee_person controller
        EXTERNAL = False
        auth = current.auth
        has_role = auth.s3_has_role
        request = current.request
        # PROFILE: render as the user's own profile (personal mode)
        if "profile" in request.get_vars:
            PROFILE = True
        else:
            len_roles = len(current.session.s3.roles)
            if (len_roles <= 2) or \
               (len_roles == 3 and has_role("RIT_MEMBER") and not has_role("ADMIN")):
                PROFILE = True
            else:
                PROFILE = False
        if request.function == "trainee_person":
            EXTERNAL = True
        # Custom prep
        standard_prep = s3.prep
        def custom_prep(r):
            # Call standard prep
            if callable(standard_prep):
                result = standard_prep(r)
                if not result:
                    return False
            if PROFILE:
                # Configure for personal mode
                s3.crud_strings["pr_person"].update(
                    title_display = T("Profile"),
                    title_update = T("Profile")
                    )
                # People can edit their own HR data
                configure = s3db.configure
                configure("hrm_human_resource",
                          deletable = False,
                          #editable = True,
                          insertable = False,
                          )
                if not has_role("RIT_MEMBER"):
                    #configure("hrm_certification",
                    #          deletable = True,
                    #          editable = True,
                    #          insertable = True,
                    #          )
                    configure("hrm_training",
                              deletable = False,
                              editable = False,
                              insertable = False,
                              )
            elif EXTERNAL:
                s3.crud_strings["pr_person"].update(
                    title_display = T("External Trainee Details"),
                    title_update = T("External Trainee Details")
                    )
            component_name = r.component_name
            method = r.method
            if method == "import":
                # HR records may be created via import
                # Default to Volunteers
                s3db.hrm_human_resource.type.default = 2
                # Doesn't work as email created after human_resource
                #s3db.configure("hrm_human_resource",
                #               create_onaccept = hrm_human_resource_create_onaccept,
                #               )
            elif method == "record" or component_name == "human_resource":
                # Staff/Volunteer record tab
                table = s3db.hrm_human_resource
                if EXTERNAL:
                    db = current.db
                    f = table.organisation_id
                    f.label = T("Organization")
                    # Organisation cannot be an NS/Branch
                    # Lookup organisation_type_id for Red Cross
                    ttable = s3db.org_organisation_type
                    type_ids = db(ttable.name.belongs((RED_CROSS, "Training Center"))).select(ttable.id,
                                                                                              cache = s3db.cache,
                                                                                              limitby = (0, 2),
                                                                                              )
                    if type_ids:
                        from s3 import IS_ONE_OF
                        ltable = db.org_organisation_organisation_type
                        rows = db(ltable.organisation_type_id.belongs(type_ids)).select(ltable.organisation_id)
                        not_filter_opts = [row.organisation_id for row in rows]
                        # Exclude RC NS/Training Centers from the selectable Organisations
                        f.requires = IS_ONE_OF(db, "org_organisation.id",
                                               f.represent,
                                               not_filterby = "id",
                                               not_filter_opts = not_filter_opts,
                                               updateable = True,
                                               orderby = "org_organisation.name",
                                               sort = True,
                                               )
                else:
                    # Organisation needs to be an NS/Branch
                    if auth.s3_has_roles(("surge_capacity_manager",
                                          "ns_training_manager",
                                          "ns_training_assistant",
                                          "training_coordinator",
                                          "training_assistant",
                                          )):
                        updateable = False
                    else:
                        updateable = True
                    ns_only("hrm_human_resource",
                            required = True,
                            branches = True,
                            updateable = updateable,
                            )
                f = table.essential
                f.readable = f.writable = False
                f = table.site_contact
                f.readable = f.writable = False
                if method == "record":
                    # Only HR staff / Org Admins may change the Organisation
                    if not auth.s3_has_roles(("ORG_ADMIN",
                                              "hr_manager",
                                              "hr_assistant",
                                              )):
                        table.organisation_id.writable = False
                        # Hide the Site field as this data isn't loaded & we want to keep things simple
                        # @ToDo: Re-enable for specific NS as-required
                        f = table.site_id
                        f.readable = f.writable = False
                    # Use default form (legacy)
                    #s3db.clear_config("hrm_human_resource", "crud_form")
            elif not component_name:
                s3db.configure("pr_person",
                               listadd = True,
                               )
                # Basic Details tab
                f = s3db.pr_person.middle_name
                f.readable = f.writable = True
                f = s3db.pr_person_details.nationality2
                f.readable = f.writable = True
                from s3 import S3SQLCustomForm
                crud_form = S3SQLCustomForm("first_name",
                                            "middle_name",
                                            "last_name",
                                            "date_of_birth",
                                            "gender",
                                            "person_details.marital_status",
                                            "person_details.nationality",
                                            "person_details.nationality2",
                                            "comments",
                                            )
                s3db.configure("pr_person",
                               crud_form = crud_form,
                               )
            elif component_name == "appraisal":
                atable = r.component.table
                atable.organisation_id.readable = atable.organisation_id.writable = False
                # Organisation needs to be an NS
                #ns_only("hrm_appraisal",
                #        required = True,
                #        branches = False,
                #        )
                field = atable.supervisor_id
                field.readable = field.writable = False
                field = atable.job_title_id
                field.comment = None
                field.label = T("Sector") # RDRT-specific
                from s3 import IS_ONE_OF
                # Restrict Job Titles to type 4 (presumably RDRT sectors — confirm)
                field.requires = IS_ONE_OF(current.db, "hrm_job_title.id",
                                           field.represent,
                                           filterby = "type",
                                           filter_opts = (4,),
                                           )
            elif component_name == "certification":
                ctable = r.component.table
                ctable.organisation_id.readable = False
            elif component_name == "competency":
                ctable = r.component.table
                ctable.skill_id.label = T("Language")
                ctable.organisation_id.readable = False
            elif component_name == "experience":
                # 2 options here: Work Experience & Missions
                # These have very different views
                # Work Experience
                etable = r.component.table
                etable.organisation_id.readable = etable.organisation_id.writable = False
                etable.job_title_id.readable = etable.job_title_id.writable = False
                etable.responsibilities.readable = etable.responsibilities.writable = False
                etable.hours.readable = etable.hours.writable = False
                etable.supervisor_id.readable = etable.supervisor_id.writable = False
                # Use the free-text organisation/job_title fields instead of FKs
                etable.organisation.readable = etable.organisation.writable = True
                etable.job_title.readable = etable.job_title.writable = True
                from s3 import S3LocationSelector
                etable.location_id.label = T("Country")
                etable.location_id.widget = S3LocationSelector(levels = ("L0",),
                                                               show_map = False,
                                                               show_postcode = False,
                                                               )
            elif component_name == "identity":
                #itable = r.component.table
                # Default
                #itable.country_code.readable = itable.country_code.writable = False
                #itable.ia_name.readable = itable.ia_name.writable = False
                f = r.component.table.ia_name
                f.readable = f.writable = False
                list_fields = ["type",
                               "value",
                               "valid_until",
                               ]
                s3db.configure("pr_identity",
                               list_fields = list_fields,
                               )
            # Moved to MedicalTab
            #elif component_name == "physical_description":
            #    from gluon import DIV
            #    dtable = r.component.table
            #    dtable.medical_conditions.comment = DIV(_class = "tooltip",
            #                                            _title = "%s|%s" % (T("Medical Conditions"),
            #                                                                T("Chronic Illness, Disabilities, Mental/Psychological Condition etc."),
            #                                                                ),
            #                                            )
            #    dtable.allergic.writable = dtable.allergic.readable = True
            #    dtable.allergies.writable = dtable.allergies.readable = True
            #    dtable.ethnicity.writable = dtable.ethnicity.readable = False
            #    dtable.other_details.writable = dtable.other_details.readable = False
            #    import json
            #    SEPARATORS = (",", ":")
            #    s3.jquery_ready.append('''S3.showHidden('%s',%s,'%s')''' % \
            #        ("allergic", json.dumps(["allergies"], separators=SEPARATORS), "pr_physical_description"))
            if not EXTERNAL and \
               auth.s3_has_roles(ID_CARD_EXPORT_ROLES):
                # Show button to export ID card
                settings.hrm.id_cards = True
            return True
        s3.prep = custom_prep
        if current.request.controller in ("default", "hrm", "vol"):
            attr["csv_template"] = ("../../themes/RMS/formats", "hrm_person")
        # Common rheader for all views
        from s3db.hrm import hrm_rheader
        attr["rheader"] = lambda r: hrm_rheader(r, profile=PROFILE)
        return attr
    settings.customise_pr_person_controller = customise_pr_person_controller
# -------------------------------------------------------------------------
def customise_pr_physical_description_resource(r, tablename):
from gluon import DIV
from s3 import S3SQLCustomForm
s3db = current.s3db
#s3db.pr_physical_description.medical_conditions.comment = DIV(_class = "tooltip",
# _title = "%s|%s" % (T("Medical Conditions"),
# T("Chronic Illness, Disabilities, Mental/Psychological Condition etc."),
# ),
# )
s3db.pr_physical_description.medical_conditions.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Medical Conditions"),
T("It is important to include, if they exist: surgical history, medical restrictions, vaccines, etc."),
),
)
s3db.configure(tablename,
crud_form = S3SQLCustomForm("blood_type",
"medical_conditions",
"medication",
"diseases",
"allergic",
"allergies",
),
)
settings.customise_pr_physical_description_resource = customise_pr_physical_description_resource
# -------------------------------------------------------------------------
def customise_project_window_resource(r, tablename):
r.resource.configure(deletable = False,
insertable = False,
)
settings.customise_project_window_resource = customise_project_window_resource
# -------------------------------------------------------------------------
    def customise_project_activity_data_resource(r, tablename):
        """
        Restrict editing of Activity Data for Project Managers:
        - once a value has been entered, the record becomes read-only
        - otherwise only the value & comments can be amended
        - outside the current Time Window, the value cannot be entered
        M&E staff and Org Admins retain full access.
        """
        if current.auth.s3_has_roles(("monitoring_evaluation", "ORG_ADMIN")):
            # Normal Access
            return
        # Project Manager
        if r.method == "update":
            table = current.s3db.project_activity_data
            if r.tablename == "project_activity_data":
                record_id = r.id
            else:
                record_id = r.component_id
            record = current.db(table.id == record_id).select(table.value,
                                                              limitby = (0, 1),
                                                              ).first()
            # NOTE(review): assumes the record exists — record.value would raise
            # AttributeError if the lookup returned None; confirm r.id/r.component_id
            # is always valid here
            if record.value:
                # Redirect to Read-only mode
                from gluon import redirect
                redirect(r.url(method="read"))
            else:
                # Cannot edit anything
                for f in table.fields:
                    table[f].writable = False
                # Except add a Real value
                table.value.writable = True
                # Or Amend the Comments
                table.comments.writable = True
        else:
            s3db = current.s3db
            table = s3db.project_window
            # Look up the (single) active Time Window
            record = current.db(table.deleted == False).select(table.start_date,
                                                               table.end_date,
                                                               limitby = (0, 1),
                                                               ).first()
            if record:
                if record.start_date <= r.utcnow.date() <= record.end_date:
                    # Inside the time window: Project Manager may update Actuals
                    return
            # Outside the time window: Project Manager cannot add the Actual value
            s3db.project_activity_data.value.writable = False
            s3db.configure("project_activity_data",
                           updateable = False,
                           )
    settings.customise_project_activity_data_resource = customise_project_activity_data_resource
# -------------------------------------------------------------------------
def customise_project_organisation_resource(r, tablename):
root_org = current.auth.root_org_name()
if root_org == HNRC:
#from gluon import IS_IN_SET
currency_opts = {"EUR" : "EUR",
"CHF" : "CHF",
"HNL" : "L",
"USD" : "USD",
}
f = current.s3db.project_organisation.currency
f.represent = currency_represent
f.requires = IS_IN_SET(currency_opts)
settings.customise_project_organisation_resource = customise_project_organisation_resource
# -------------------------------------------------------------------------
    def project_project_postprocess(form):
        """
            When using Budget Monitoring (i.e. HNRC) then create the entries:
            - name the Budget after the Project (deduplicated)
            - create monthly budget_monitoring records spanning the
              Project's start/end dates

            @param form: the FORM whose vars.id is the project_project record id
        """
        db = current.db
        s3db = current.s3db
        project_id = form.vars.id
        # Read Budget Entity ID, Start Date and End Date
        ptable = s3db.project_project
        project = db(ptable.id == project_id).select(ptable.budget_entity_id,
                                                     ptable.name,
                                                     ptable.start_date,
                                                     ptable.end_date,
                                                     limitby = (0, 1),
                                                     ).first()
        if not project:
            return
        # Copy Project Name to Budget Name
        budget_entity_id = project.budget_entity_id
        btable = s3db.budget_budget
        query = (btable.budget_entity_id == budget_entity_id)
        budget = db(query).select(btable.id, # Needed for update_record
                                  # If we want to provide smoothed default expected values
                                  #btable.total_budget,
                                  btable.currency,
                                  # Assume Monthly
                                  #btable.monitoring_frequency,
                                  limitby = (0, 1),
                                  ).first()
        if not budget:
            return
        # Build Budget Name from Project Name
        project_name = project.name
        # Check for duplicates
        query = (btable.name == project_name) & \
                (btable.id != budget.id)
        duplicate = db(query).select(btable.id,
                                     limitby = (0, 1),
                                     ).first()
        if not duplicate:
            # Budget name field is limited to 128 chars
            budget_name = project_name[:128]
        else:
            # Need another Unique name
            import uuid
            budget_name = "%s %s" % (project_name[:91], uuid.uuid4())
        budget.update_record(name = budget_name)
        mtable = s3db.budget_monitoring
        exists = db(mtable.budget_entity_id == budget_entity_id).select(mtable.id,
                                                                        limitby = (0, 1),
                                                                        )
        if not exists:
            # Create Monitoring Data entries
            start_date = project.start_date
            end_date = project.end_date
            if not start_date or not end_date:
                return
            # Assume Monthly
            #monitoring_frequency = budget.monitoring_frequency
            #if not monitoring_frequency:
            #    return
            #total_budget = budget.total_budget
            currency = budget.currency
            # Create entries for the 1st of every month between start_date and end_date
            from dateutil import rrule
            dates = list(rrule.rrule(rrule.MONTHLY, bymonthday=1, dtstart=start_date, until=end_date))
            for d in dates:
                # Each entry covers [start_date, d]
                mtable.insert(budget_entity_id = budget_entity_id,
                              # @ToDo: This needs to be modified whenever entries are manually edited
                              # Set/update this in budget_monitoring_onaccept
                              # - also check here that we don't exceed overall budget
                              start_date = start_date,
                              end_date = d,
                              currency = currency,
                              )
                # Start date relates to previous entry
                start_date = d
# -------------------------------------------------------------------------
def customise_project_programme_controller(**attr):
# Organisation needs to be an NS/Branch
ns_only("project_programme",
required = True,
branches = False,
updateable = True,
)
return attr
settings.customise_project_programme_controller = customise_project_programme_controller
# -------------------------------------------------------------------------
    def customise_project_project_controller(**attr):
        """
        Customise the Project controller:
        - simplified variant when accessed via the inv controller
        - custom CRUD form (with Budget Monitoring for HNRC)
        - custom summary tabs, grouped report & filter widgets
        - lead Organisation restricted to National Societies
        """
        tablename = "project_project"
        if current.request.controller == "inv":
            # Very simple functionality all that is required
            from gluon import IS_NOT_EMPTY
            f = current.s3db.project_project.code
            f.label = T("Code")
            f.requires = IS_NOT_EMPTY()
            # Lead Organisation needs to be an NS (not a branch)
            ns_only(tablename,
                    required = True,
                    branches = False,
                    # default
                    #limit_filter_opts = True,
                    )
            return attr
        # Default Filter
        from s3 import s3_set_default_filter
        s3_set_default_filter("~.organisation_id",
                              user_org_default_filter,
                              tablename = "project_project")
        # Load standard model
        s3db = current.s3db
        table = s3db[tablename]
        # Disable Map Tab on Summary View
        # - until we can support multiple Points per Record
        settings.ui.summary = ({"common": True,
                                "name": "add",
                                "widgets": [{"method": "create"}],
                                },
                               #{"common": True,
                               # "name": "cms",
                               # "widgets": [{"method": "cms"}]
                               # },
                               {"name": "table",
                                "label": "Table",
                                "widgets": [{"method": "datatable"}]
                                },
                               {"name": "charts",
                                "label": "Report",
                                "widgets": [{"method": "report",
                                             "ajax_init": True}]
                                },
                               #{"name": "map",
                               # "label": "Map",
                               # "widgets": [{"method": "map",
                               #              "ajax_init": True}],
                               # },
                               )
        # @ToDo: S3SQLInlineComponent for Project orgs
        # Get IDs for Partner NS/Partner Donor
        # db = current.db
        # ttable = db.org_organisation_type
        # rows = db(ttable.deleted != True).select(ttable.id,
        #                                          ttable.name,
        #                                          )
        # rc = []
        # not_rc = []
        # nappend = not_rc.append
        # for row in rows:
        #     if row.name == RED_CROSS:
        #         rc.append(row.id)
        #     elif row.name == "Supplier":
        #         pass
        #     else:
        #         nappend(row.id)
        # Custom Fields
        table.organisation_id.label = T("Host National Society")
        # Custom Crud Form
        from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineLink
        # Special cases for different NS
        root_org = current.auth.root_org_name()
        if root_org == HNRC:
            # HNRC uses structured M&E (goals/outcomes/outputs/indicators)
            # and the Budget module for monitoring
            # @ToDo: Use Inter-American Framework instead (when extending to Zone office)
            # @ToDo: Add 'Business Line' (when extending to Zone office)
            project_settings = settings.project
            project_settings.details_tab = True
            #project_settings.community_volunteers = True
            # Done in a more structured way instead
            objectives = None
            outputs = None
            project_settings.goals = True
            project_settings.outcomes = True
            project_settings.outputs = True
            project_settings.indicators = True
            project_settings.indicator_criteria = True
            project_settings.status_from_activities = True
            table.human_resource_id.label = T("Coordinator")
            # Use Budget module instead of ProjectAnnualBudget
            project_settings.multiple_budgets = False
            project_settings.budget_monitoring = True
            # Require start/end dates
            table.start_date.requires = table.start_date.requires.other
            table.end_date.requires = table.end_date.requires.other
            budget = S3SQLInlineComponent("budget",
                                          label = T("Budget"),
                                          #link = False,
                                          multiple = False,
                                          fields = ["total_budget",
                                                    "currency",
                                                    #"monitoring_frequency",
                                                    ],
                                          )
            btable = s3db.budget_budget
            # Need to provide a name
            import random, string
            btable.name.default = "".join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(16))
            btable.monitoring_frequency.default = 3 # Monthly
            btable.currency.represent = currency_represent
            currency_opts = {"EUR" : "EUR",
                             "CHF" : "CHF",
                             "HNL" : "L",
                             "USD" : "USD",
                             }
            # NOTE(review): IS_IN_SET import commented out — presumably
            # available from module scope; confirm
            #from gluon import IS_IN_SET
            btable.currency.requires = IS_IN_SET(currency_opts)
            s3db.budget_monitoring.currency.represent = currency_represent
            postprocess = project_project_postprocess
            list_fields = s3db.get_config("project_project", "list_fields")
            list_fields += [(T("Actual Progress"), "actual_progress_by_activities"),
                            (T("Planned Progress"), "planned_progress_by_activities"),
                            ]
        else:
            objectives = "objectives"
            outputs = S3SQLInlineComponent(
                "output",
                label = T("Outputs"),
                fields = ["name", "status"],
            )
            budget = None
            postprocess = None
        if settings.get_project_programmes():
            # Inject inline link for programmes including S3PopupLink
            #from s3layouts import S3PopupLink
            comment = s3db.project_programme_id.attr.comment
            comment.vars = {"caller": "link_defaultprogramme",
                            "prefix": "project",
                            "parent": "programme_project",
                            }
            programme = S3SQLInlineLink("programme",
                                        field = "programme_id",
                                        label = T("Program"),
                                        multiple = False,
                                        comment = comment,
                                        )
        else:
            programme = None
        from s3db.project import project_hazard_help_fields, \
                                 project_theme_help_fields
        crud_form = S3SQLCustomForm(
            "organisation_id",
            programme,
            "name",
            "code",
            "description",
            "status_id",
            "start_date",
            "end_date",
            budget,
            #S3SQLInlineComponent(
            #    "location",
            #    label = T("Locations"),
            #    fields = ["location_id"],
            #),
            # Outputs
            outputs,
            S3SQLInlineLink(
                "hazard",
                label = T("Hazards"),
                field = "hazard_id",
                help_field = project_hazard_help_fields,
                cols = 4,
                translate = True,
            ),
            S3SQLInlineLink(
                "sector",
                label = T("Sectors"),
                field = "sector_id",
                cols = 4,
                translate = True,
            ),
            S3SQLInlineLink(
                "theme",
                label = T("Themes"),
                field = "theme_id",
                help_field = project_theme_help_fields,
                cols = 4,
                translate = True,
                # Filter Theme by Sector
                filterby = "theme_id:project_theme_sector.sector_id",
                match = "sector_project.sector_id",
                script = '''
$.filterOptionsS3({
 'trigger':{'alias':'sector','name':'sector_id','inlineType':'link'},
 'target':{'alias':'theme','name':'theme_id','inlineType':'link'},
 'lookupPrefix':'project',
 'lookupResource':'theme',
 'lookupKey':'theme_id:project_theme_sector.sector_id',
 'showEmptyField':false,
 'tooltip':'project_theme_help_fields(id,name)'
})'''
            ),
            objectives,
            "human_resource_id",
            # Disabled since we need organisation_id filtering to either organisation_type_id == RC or NOT
            # & also hiding Branches from RCs
            # & also rewriting for organisation_type_id via link table
            # Partner NS
            # S3SQLInlineComponent(
            #     "organisation",
            #     name = "partnerns",
            #     label = T("Partner National Societies"),
            #     fields = ["organisation_id",
            #               "comments",
            #               ],
            # Filter Organisation by Type
            #     filter = ["organisation_id": {"filterby": "organisation_type_id",
            #                                   "filterfor": rc,
            #                                   }],
            #     filterby = dict(field = "role",
            #                     options = [9])
            # ),
            # Partner Orgs
            # S3SQLInlineComponent(
            #     "organisation",
            #     name = "partner",
            #     label = T("Partner Organizations"),
            #     fields = ["organisation_id",
            #               "comments",
            #               ],
            # Filter Organisation by Type
            #     filter = ["organisation_id": {"filterby": "organisation_type_id",
            #                                   "filterfor": not_rc,
            #                                   }],
            #     filterby = dict(field = "role",
            #                     options = [2])
            # ),
            # Donors
            # S3SQLInlineComponent(
            #     "organisation",
            #     name = "donor",
            #     label = T("Donor(s)"),
            #     fields = ["organisation_id",
            #               "amount",
            #               "currency"],
            # Filter Organisation by Type
            #     filter = ["organisation_id": {"filterby": "organisation_type_id",
            #                                   "filterfor": not_rc,
            #                                   }],
            #     filterby = dict(field = "role",
            #                     options = [3])
            # ),
            #"budget",
            #"currency",
            "comments",
            postprocess = postprocess,
        )
        s3db.configure(tablename,
                       crud_form = crud_form,
                       )
        s3 = current.response.s3
        # Custom prep
        standard_prep = s3.prep
        def custom_prep(r):
            # Call standard prep
            if callable(standard_prep):
                result = standard_prep(r)
            else:
                result = True
            if r.method == "grouped":
                # Grouped report of Project statuses
                grouped = {"default":
                            {"title": T("Global Report of Projects Status"),
                             "fields": [(T("Project"), "name"),
                                        (T("Program"), "programme.name"),
                                        (T("Donor"), "donor.organisation_id"),
                                        (T("Budget"), "budget.total_budget"),
                                        (T("Location"), "location.location_id"),
                                        "start_date",
                                        "end_date",
                                        ],
                             "orderby": ["name",
                                         ],
                             "aggregate": [("sum", "budget.total_budget"),
                                           ],
                             },
                           }
                from s3 import S3DateFilter, S3OptionsFilter
                filter_widgets = [S3DateFilter("date",
                                               label = T("Time Period"),
                                               hide_time = True,
                                               ),
                                  S3OptionsFilter("programme_project.programme_id",
                                                  label = T("Programs"),
                                                  ),
                                  S3OptionsFilter("theme_project.theme_id",
                                                  label = T("Themes"),
                                                  ),
                                  S3OptionsFilter("sector_project.sector_id",
                                                  label = T("Sectors"),
                                                  ),
                                  S3OptionsFilter("beneficiary.parameter_id",
                                                  label = T("Beneficiaries"),
                                                  ),
                                  S3OptionsFilter("hazard_project.hazard_id",
                                                  label = T("Hazards"),
                                                  ),
                                  ]
                s3db.configure(tablename,
                               filter_widgets = filter_widgets,
                               grouped = grouped,
                               )
            elif r.component:
                if r.component_name == "organisation":
                    component_id = r.component_id
                    if component_id:
                        # No r.component.record :/
                        ctable = s3db.project_organisation
                        crecord = current.db(ctable.id == component_id).select(ctable.role,
                                                                               limitby = (0, 1),
                                                                               ).first()
                        if crecord.role == settings.get_project_organisation_lead_role():
                            # Lead Organisation must be an NS
                            ns_only("project_organisation",
                                    required = True,
                                    branches = False,
                                    updateable = True,
                                    )
                            #from s3db.org import org_organisation_requires
                            #ctable.organisation_id.requires = \
                            #    org_organisation_requires(required = True,
                            #                              # Only allowed to add Projects for Orgs
                            #                              # that the user has write access to
                            #                              updateable = True,
                            #                              )
            else:
                # Lead Organisation needs to be an NS (not a branch)
                ns_only(tablename,
                        required = True,
                        branches = False,
                        # default
                        #limit_filter_opts = True,
                        )
                # Set the Host NS filter as Visible so that the default filter works
                filter_widgets = s3db.get_config(tablename, "filter_widgets")
                for widget in filter_widgets:
                    if widget.field == "organisation_id":
                        widget.opts.hidden = False
                        break
            return result
        s3.prep = custom_prep
        return attr
    settings.customise_project_project_controller = customise_project_project_controller
# -------------------------------------------------------------------------
#def customise_project_beneficiary_resource(r, tablename):
# """
# Link Project Beneficiaries to Activity Type
# """
# if r.interactive and r.component:
# if r.tablename == "project_project":
# # We are a component of the Project
# project_id = r.id
# elif r.tablename == "project_location":
# # We are a component of the Project Location
# project_id = r.record.project_id
# else:
# # Unknown!
# return
# db = current.db
# s3db = current.s3db
# # Filter Activity Type by Sector
# ltable = s3db.project_sector_project
# rows = db(ltable.project_id == project_id).select(ltable.sector_id)
# sectors = [row.sector_id for row in rows]
# ltable = s3db.project_activity_type_sector
# rows = db(ltable.sector_id.belongs(sectors)).select(ltable.activity_type_id)
# filteropts = [row.activity_type_id for row in rows]
# def postprocess(form):
# # Update project_location.activity_type
# beneficiary_id = form.vars.get("id", None)
# table = db.project_beneficiary
# row = db(table.id == beneficiary_id).select(table.project_location_id,
# limitby = (0, 1),
# ).first()
# if not row:
# return
# project_location_id = row.project_location_id
# if not project_location_id:
# return
# ltable = db.project_beneficiary_activity_type
# row = db(ltable.beneficiary_id == beneficiary_id).select(ltable.activity_type_id,
# limitby = (0, 1),
# ).first()
# if not row:
# return
# activity_type_id = row.activity_type_id
# ltable = s3db.project_activity_type_location
# query = (ltable.project_location_id == project_location_id) & \
# (ltable.activity_type_id == activity_type_id)
# exists = db(query).select(ltable.id,
# limitby = (0, 1),
# ).first()
# if not exists:
# ltable.insert(project_location_id = project_location_id,
# activity_type_id = activity_type_id,
# )
# from s3 import S3SQLCustomForm, S3SQLInlineLink
# crud_form = S3SQLCustomForm(#"project_id",
# "project_location_id",
# S3SQLInlineLink("activity_type",
# field = "activity_type_id",
# filterby = "id",
# options = filteropts,
# label = T("Activity Type"),
# multiple = False,
# ),
# "parameter_id",
# "value",
# "target_value",
# "date",
# "end_date",
# "comments",
# postprocess = postprocess,
# )
# s3db.configure(tablename,
# crud_form = crud_form,
# )
# elif not r.component:
# # Report
# from s3 import S3OptionsFilter
# resource = r.resource
# filter_widgets = resource.get_config("filter_widgets")
# filter_widgets.insert(1,
# S3OptionsFilter("beneficiary_activity_type.activity_type_id",
# label = T("Activity Type"),
# ))
# report_options = resource.get_config("report_options")
# report_options.rows.append("beneficiary_activity_type.activity_type_id")
# # Same object so would be added twice
# #report_options.cols.append("beneficiary_activity_type.activity_type_id")
# resource.configure(filter_widgets = filter_widgets,
# report_options = report_options,
# )
# Only used for activity_types which aren't used by HNRC
#settings.customise_project_beneficiary_resource = customise_project_beneficiary_resource
# -------------------------------------------------------------------------
#def customise_project_indicator_resource(r, tablename):
# table = current.s3db.project_indicator
# table.definition.label = T("Indicator Definition")
# table.measures.label = T("Indicator Criteria")
#settings.customise_project_indicator_resource = customise_project_indicator_resource
# -------------------------------------------------------------------------
    def customise_project_indicator_data_resource(r, tablename):
        """
        Indicator Data: expose & relabel the date fields; restrict editing
        for Project Managers — once a value has been entered the record
        becomes read-only, otherwise only value & comments may be amended.
        M&E staff and Org Admins retain full access.
        """
        table = current.s3db.project_indicator_data
        f = table.start_date
        f.readable = f.writable = True
        f.label = T("Start Date")
        table.end_date.label = T("End Date")
        if r.method == "update":
            has_role = current.auth.s3_has_role
            if has_role("monitoring_evaluation") or has_role("ORG_ADMIN"):
                # Normal Access
                return
            # Project Manager
            if r.tablename == "project_indicator_data":
                record_id = r.id
            else:
                record_id = r.component_id
            record = current.db(table.id == record_id).select(table.value,
                                                              limitby = (0, 1),
                                                              ).first()
            # NOTE(review): assumes the record exists — record.value would raise
            # AttributeError if the lookup returned None; confirm
            if record.value:
                # Redirect to Read-only mode
                # @ToDo: Remove 'Update' button from the read-only page
                from gluon import redirect
                redirect(r.url(method="read"))
            else:
                # Cannot edit anything
                for f in table.fields:
                    table[f].writable = False
                # Except add a Real value
                table.value.writable = True
                # Or Amend the Comments
                table.comments.writable = True
    settings.customise_project_indicator_data_resource = customise_project_indicator_data_resource
# -------------------------------------------------------------------------
def customise_project_location_resource(r, tablename):
s3db = current.s3db
table = s3db.project_location
table.name.readable = False
table.percentage.readable = table.percentage.writable = False
#ist_fields = s3db.get_config(tablename, "list_fields")
#try:
# list_fields.remove((T("Activity Types"), "activity_type.name"))
#except:
# # Already removed
# pass
settings.customise_project_location_resource = customise_project_location_resource
# -------------------------------------------------------------------------
    def customise_project_location_controller(**attr):
        """
        Custom postp to build the Map Popup contents for Project Locations
        from the parent Project's details
        """
        s3 = current.response.s3
        # Custom postp
        #standard_postp = s3.postp
        def custom_postp(r, output):
            # Call standard postp (just does same thing but different)
            #if callable(standard_postp):
            #    output = standard_postp(r, output)
            if r.representation == "plain":
                # Map Popup
                from gluon import A, TABLE, TR, TD, B#, URL
                s3db = current.s3db
                table = s3db.project_project
                project_id = r.record.project_id
                resource = s3db.resource("project_project",
                                         id = project_id,
                                         )
                list_fields = ("name",
                               "status_id",
                               "start_date",
                               "end_date",
                               "budget.total_budget",
                               "budget.currency",
                               "hazard_project.hazard_id",
                               "sector_project.sector_id",
                               "theme_project.theme_id",
                               # Contact
                               "human_resource_id",
                               "overall_status_by_indicators",
                               )
                data = resource.select(list_fields, represent=True)
                # Single project record expected (selected by id above)
                record = data.rows[0]
                item = TABLE(TR(TD(B("%s:" % table.name.label)),
                                TD(record["project_project.name"]),
                                ),
                             TR(TD(B("%s:" % table.status_id.label)),
                                TD(record["project_project.status_id"]),
                                ),
                             TR(TD(B("%s:" % table.start_date.label)),
                                TD(record["project_project.start_date"]),
                                ),
                             TR(TD(B("%s:" % table.end_date.label)),
                                TD(record["project_project.end_date"]),
                                ),
                             TR(TD(B("%s:" % T("Budget"))),
                                TD("%s %s" % (record["budget_budget.currency"],
                                              record["budget_budget.total_budget"])),
                                ),
                             TR(TD(B("%s:" % s3db.project_hazard_project.hazard_id.label)),
                                TD(record["project_hazard_project.hazard_id"]),
                                ),
                             TR(TD(B("%s:" % s3db.project_sector_project.sector_id.label)),
                                TD(record["project_sector_project.sector_id"]),
                                ),
                             TR(TD(B("%s:" % s3db.project_theme_project.theme_id.label)),
                                TD(record["project_theme_project.theme_id"]),
                                ),
                             TR(TD(B("%s:" % table.human_resource_id.label)),
                                TD(record["project_project.human_resource_id"]),
                                ),
                             TR(TD(B("%s:" % T("Cumulative Status"))),
                                TD(record["project_project.overall_status_by_indicators"]),
                                ),
                             )
                title = s3.crud_strings["project_project"].title_display
                # Assume authorised to see details
                # NOTE(review): URL is commented out of the gluon import above
                # but used here — presumably available from module scope; confirm
                popup_url = URL(f = "project",
                                args = [project_id],
                                )
                details_btn = A(T("Open"),
                                _href = popup_url,
                                _class = "btn",
                                _id = "details-btn",
                                _target = "_blank",
                                )
                output = {"item": item,
                          "title": title,
                          "details_btn": details_btn,
                          }
            return output
        s3.postp = custom_postp
        return attr
    settings.customise_project_location_controller = customise_project_location_controller
# -------------------------------------------------------------------------
    def inv_req_approver_update_roles(person_id):
        """
        Update the req_approver role to have the right realms:
        - delete all of the user's existing req_approver memberships
        - re-create one membership per realm (pe_id) found in their
          inv_req_approver records
        # see hrm_certification_onaccept

        @param person_id: the pr_person record ID of the Approver
        """
        db = current.db
        s3db = current.s3db
        # Lookup User Account
        ltable = s3db.pr_person_user
        ptable = s3db.pr_person
        query = (ptable.id == person_id) & \
                (ptable.pe_id == ltable.pe_id)
        user = db(query).select(ltable.user_id,
                                limitby = (0, 1),
                                )
        if not user:
            # Person has no user account => no roles to maintain
            return
        user_id = user.first().user_id
        # What realms should this user have the req_approver role for?
        table = s3db.inv_req_approver
        rows = db(table.person_id == person_id).select(table.pe_id)
        realms = [row.pe_id for row in rows]
        # Lookup the req_approver group_id
        gtable = db.auth_group
        role = db(gtable.uuid == "req_approver").select(gtable.id,
                                                        limitby = (0, 1),
                                                        ).first()
        group_id = role.id
        # Delete all req_approver roles for this user
        mtable = db.auth_membership
        query = (mtable.user_id == user_id) & \
                (mtable.group_id == group_id)
        db(query).delete()
        # Create required req_approver roles for this user
        for pe_id in realms:
            mtable.insert(user_id = user_id,
                          group_id = group_id,
                          pe_id = pe_id,
                          )
# -------------------------------------------------------------------------
def inv_req_approver_onaccept(form):
"""
Ensure that the Approver has the req_approver role for the correct realms
"""
person_id = form.vars.get("person_id")
inv_req_approver_update_roles(person_id)
if form.record:
# Update form
# - has the person changed?
if form.record.person_id != person_id:
# Also update the old person
inv_req_approver_update_roles(form.record.person_id)
# -------------------------------------------------------------------------
    def inv_req_approver_ondelete(form):
        """
        Ensure that a deleted Approver loses any req_approver roles which
        are no longer justified by remaining inv_req_approver records
        (note: form.person_id is read directly, so this receives the
        deleted row, not an HTML form)
        """
        # Update the req_approver roles for this person
        inv_req_approver_update_roles(form.person_id)
# -------------------------------------------------------------------------
    def customise_inv_req_approver_resource(r, tablename):
        """
        Restrict the selectable Approver entity (pe_id):
        - ADMINs: any root Red Cross NS or its descendant Orgs/Sites
        - other users: only entities they hold the ORG_ADMIN or logs_manager
          role for (incl. descendants)
        Also registers onaccept/ondelete to keep req_approver roles in sync.
        """
        db = current.db
        s3db = current.s3db
        auth = current.auth
        f = s3db.inv_req_approver.pe_id
        if auth.s3_has_role("ADMIN"):
            # Filter to Red Cross entities
            ttable = s3db.org_organisation_type
            try:
                type_id = db(ttable.name == RED_CROSS).select(ttable.id,
                                                              cache = s3db.cache,
                                                              limitby = (0, 1),
                                                              ).first().id
            except AttributeError:
                # No IFRC prepop done - skip (e.g. testing impacts of CSS changes in this theme)
                return
            otable = s3db.org_organisation
            btable = s3db.org_organisation_branch
            ltable = db.org_organisation_organisation_type
            rows = db(ltable.organisation_type_id == type_id).select(ltable.organisation_id)
            all_rc_organisation_ids = [row.organisation_id for row in rows]
            query = (btable.deleted != True) & \
                    (btable.branch_id.belongs(all_rc_organisation_ids))
            branches = db(query).select(btable.branch_id)
            # Root NS = RC Orgs which are not a branch of another RC Org
            root_ns_organisation_ids = list(set(all_rc_organisation_ids) - set(row.branch_id for row in branches))
            root_ns = db(otable.id.belongs(root_ns_organisation_ids)).select(otable.pe_id)
            pe_ids = [row.pe_id for row in root_ns]
            # Find all child Orgs/Sites of these
            entity_types = ["org_organisation"] + list(auth.org_site_types.keys())
            from s3db.pr import pr_get_descendants
            child_pe_ids = pr_get_descendants(pe_ids, entity_types=entity_types)
            entities = pe_ids + child_pe_ids
        else:
            # Filter to entities the user has the ORG_ADMIN or logs_manager role for
            # Lookup which realms the user has the roles for
            gtable = db.auth_group
            mtable = db.auth_membership
            query = (mtable.user_id == auth.user.id) & \
                    (mtable.group_id == gtable.id) & \
                    (gtable.uuid.belongs(("ORG_ADMIN", "logs_manager")))
            memberships = db(query).select(mtable.pe_id)
            pe_ids = [m.pe_id for m in memberships]
            if None in pe_ids:
                # Default Realm(s)
                pe_ids.remove(None)
                from s3db.pr import pr_default_realms
                realms = pr_default_realms(auth.user["pe_id"])
                if realms:
                    pe_ids += realms
            # Find all child Orgs/Sites of these
            entity_types = ["org_organisation"] + list(auth.org_site_types.keys())
            from s3db.pr import pr_get_descendants
            child_pe_ids = pr_get_descendants(pe_ids, entity_types=entity_types)
            entities = pe_ids + child_pe_ids
        if len(entities) == 1:
            # No choice => default & hide the selector
            f.default = entities[0]
            f.readable = f.writable = False
            return
        # Default to NS (most-common use-case)
        otable = s3db.org_organisation
        org = db(otable.id == auth.root_org()).select(otable.pe_id,
                                                      limitby = (0, 1),
                                                      ).first()
        org_pe_id = org.pe_id
        if org_pe_id in entities:
            f.default = org_pe_id
        from s3 import IS_ONE_OF
        from s3db.pr import pr_PersonEntityRepresent
        f.requires = IS_ONE_OF(db, "pr_pentity.pe_id",
                               pr_PersonEntityRepresent(show_type = False),
                               filterby = "pe_id",
                               filter_opts = entities,
                               sort = True
                               )
        s3db.configure(tablename,
                       onaccept = inv_req_approver_onaccept,
                       ondelete = inv_req_approver_ondelete,
                       )
    settings.customise_inv_req_approver_resource = customise_inv_req_approver_resource
# -------------------------------------------------------------------------
    def customise_inv_req_project_resource(r, tablename):
        """
        Customise response from options.s3json:
        - limit selectable Projects to those which are still active
          (no end_date, or end_date in the future)
        """
        from s3 import IS_ONE_OF#, S3Represent
        s3db = current.s3db
        ptable = s3db.project_project
        # NOTE(review): S3Represent is commented out of the s3 import above,
        # so presumably imported at module level - confirm
        project_represent = S3Represent(lookup = "project_project",
                                        fields = ["code"],
                                        )
        # Only Projects which haven't yet ended
        query = ((ptable.end_date == None) | \
                 (ptable.end_date > r.utcnow)) & \
                (ptable.deleted == False)
        the_set = current.db(query)
        s3db.inv_req_project.project_id.requires = IS_ONE_OF(the_set, "project_project.id",
                                                             project_represent,
                                                             sort = True,
                                                             )
    settings.customise_inv_req_project_resource = customise_inv_req_project_resource
# -------------------------------------------------------------------------
def inv_req_onaccept(form):
"""
Update realm if site_id changes
- Lighter then using update_realm on every update (this is much rarer edge case))
- Can hardcode the component handling
"""
if form.record:
# Update form
req_id = form.vars.id
db = current.db
table = db.inv_req
if site_id not in record:
record = db(table.id == req_id).select(table.id,
table.site_id,
limitby = (0, 1),
).first()
if record.site_id != form.record.site_id:
realm_entity = current.auth.get_realm_entity(table, record)
db(table.id == req_id).update(realm_entity = realm_entity)
# Update Request Items
db(current.s3db.inv_req_item.req_id == req_id).update(realm_entity = realm_entity)
# -------------------------------------------------------------------------
def on_req_approve(req_id):
"""
Remove Dashboard Alert
"""
s3db = current.s3db
ntable = s3db.auth_user_notification
query = (ntable.user_id == current.auth.user.id) & \
(ntable.type == "req_approve") & \
(ntable.record_id == req_id)
resource = s3db.resource("auth_user_notification", filter = query)
resource.delete()
# -------------------------------------------------------------------------
def on_req_approved(req_id, record, site_ids):
"""
Notify the Warehouse Operator(s)
- Email
- Dashboard Alert
"""
#from gluon import URL
from s3 import s3_str, S3DateTime
from .controllers import inv_operators_for_sites
T = current.T
db = current.db
s3db = current.s3db
session_s3 = current.session.s3
ui_language = session_s3.language
url = "%s%s" % (settings.get_base_public_url(),
URL(c="inv", f="req",
args = [req_id, "req_item"],
))
req_ref = record.req_ref
date_required = record.date_required
date_represent = S3DateTime.date_represent # We want Dates not datetime which table.date_required uses
send_email = current.msg.send_by_pe_id
subject_T = T("Request Approved for Items from your Warehouse")
message_T = T("A new Request, %(reference)s, has been Approved for shipment from %(site)s by %(date_required)s. Please review at: %(url)s")
alert_T = T("Request %(reference)s for items from %(site)s by %(date_required)s")
insert = s3db.auth_user_notification.insert
sites = inv_operators_for_sites(site_ids)
for site_id in sites:
site = sites[site_id]
site_name = site["name"]
# Send Localised Alerts & Mail(s)
languages = {}
operators = site["operators"]
for row in operators:
language = row["auth_user.language"]
if language not in languages:
languages[language] = []
languages[language].append((row["pr_person_user.pe_id"], row["pr_person_user.user_id"]))
for language in languages:
T.force(language)
session_s3.language = language # for date_represent
date = date_represent(date_required)
subject = "%s: %s" % (s3_str(subject_T), req_ref)
message = s3_str(message_T) % {"date_required": date,
"reference": req_ref,
"site": site_name,
"url": url,
}
alert = s3_str(alert_T) % {"date_required": date,
"reference": req_ref,
"site": site_name,
}
users = languages[language]
for user in users:
send_email(user[0],
subject = subject,
message = message,
)
insert(user_id = user[1],
name = alert,
url = url,
type = "req_fulfil",
tablename = "inv_req",
record_id = req_id,
)
# Restore language for UI
session_s3.language = ui_language
T.force(ui_language)
# -------------------------------------------------------------------------
def on_req_submit(req_id, record, site, approvers):
"""
Notify the Approvers
- Email
- Dashboard Alert
"""
#from gluon import URL
from s3 import s3_fullname, s3_str, S3DateTime
T = current.T
db = current.db
s3db = current.s3db
session_s3 = current.session.s3
ui_language = session_s3.language
url = "%s%s" % (settings.get_base_public_url(),
URL(c="inv", f="req",
args = [req_id],
))
req_ref = record.req_ref
date_required = record.date_required
date_represent = S3DateTime.date_represent # We want Dates not datetime which table.date_required uses
requester = s3_fullname(record.requester_id)
site_name = site.name
send_email = current.msg.send_by_pe_id
subject_T = T("Request submitted for Approval")
message_T = T("A new Request, %(reference)s, has been submitted for Approval by %(person)s for delivery to %(site)s by %(date_required)s. Please review at: %(url)s")
alert_T = T("A new Request, %(reference)s, has been submitted for Approval by %(person)s for delivery to %(site)s by %(date_required)s")
insert = s3db.auth_user_notification.insert
# Send Localised Alerts & Mail(s)
languages = {}
for row in approvers:
language = row["auth_user.language"]
if language not in languages:
languages[language] = []
languages[language].append((row["pr_person_user.pe_id"], row["pr_person_user.user_id"]))
for language in languages:
T.force(language)
session_s3.language = language # for date_represent
date = date_represent(date_required)
subject = "%s: %s" % (s3_str(subject_T), req_ref)
message = s3_str(message_T) % {"date_required": date_represent(date_required),
"reference": req_ref,
"person": requester,
"site": site_name,
"url": url,
}
alert = s3_str(alert_T) % {"date_required": date,
"reference": req_ref,
"person": requester,
"site": site_name,
}
users = languages[language]
for user in users:
send_email(user[0],
subject = subject,
message = message,
)
insert(user_id = user[1],
name = alert,
url = url,
type = "req_approve",
tablename = "inv_req",
record_id = req_id,
)
# Restore language for UI
session_s3.language = ui_language
T.force(ui_language)
# -------------------------------------------------------------------------
    def customise_inv_req_resource(r, tablename):
        """
        Customise Inventory Requisitions:
        - custom represent & mandatory entry for the Requisition Number
        - hide priority, relabel dates/site
        - link to an (active) Project
        - transport_type tag shown when transport is requested
        - custom CRUD form, filter widgets, list fields & notification hooks
        """
        from gluon import IS_NOT_EMPTY
        from s3db.inv import inv_ReqRefRepresent
        auth = current.auth
        s3db = current.s3db
        table = s3db.inv_req
        f = table.req_ref
        f.represent = inv_ReqRefRepresent(show_link = True,
                                          pdf = True,
                                          )
        f.requires = IS_NOT_EMPTY()
        f.widget = None
        table.priority.readable = table.priority.writable = False
        table.date.label = T("Date of Issue")
        table.date_required.label = T("Requested Delivery Date")
        table.site_id.label = T("Deliver To")
        LOGS_ADMIN = auth.s3_has_roles(("ORG_ADMIN",
                                        "wh_operator",
                                        "logs_manager",
                                        ))
        if not LOGS_ADMIN:
            # Requester is always the logged-in user
            table.requester_id.writable = False
        MINE = r.get_vars.get("mine")
        if r.tablename == tablename:
            if MINE:
                # Filter to just this user's Requests
                from s3 import FS
                r.resource.add_filter(FS("requester_id") == auth.s3_logged_in_person())
        from gluon import IS_EMPTY_OR#, IS_IN_SET
        from s3 import IS_ONE_OF, S3GroupedOptionsWidget, S3SQLCustomForm, S3SQLInlineComponent#, S3Represent
        from s3layouts import S3PopupLink
        db = current.db
        # Link to Projects
        ptable = s3db.project_project
        # NOTE(review): S3Represent & IS_IN_SET are commented out of the
        # imports above, so presumably imported at module level - confirm
        project_represent = S3Represent(lookup = "project_project",
                                        fields = ["code"],
                                        )
        # Only Projects which haven't yet ended
        query = ((ptable.end_date == None) | \
                 (ptable.end_date > r.utcnow)) & \
                (ptable.deleted == False)
        the_set = db(query)
        f = s3db.inv_req_project.project_id
        f.label = T("Project Code")
        f.requires = IS_ONE_OF(the_set, "project_project.id",
                               project_represent,
                               sort = True,
                               )
        f.comment = S3PopupLink(c = "inv",
                                f = "project",
                                label = T("Create Project"),
                                tooltip = T("If you don't see the project in the list, you can add a new one by clicking link 'Create Project'."),
                                vars = {"caller": "inv_req_sub_project_req_project_id",
                                        "parent": "req_project",
                                        },
                                )
        # CRUD form includes all readable fields, with the Project Code first
        crud_fields = [f for f in table.fields if table[f].readable]
        crud_fields.insert(0, "req_project.project_id")
        req_id = r.id
        if req_id:
            # Never opens in Component Tab, always breaks out
            atable = s3db.inv_req_approver_req
            approved = db(atable.req_id == req_id).select(atable.id,
                                                          limitby = (0, 1),
                                                          )
            if approved:
                # Show who has approved (read-only)
                crud_fields.insert(-1, S3SQLInlineComponent("approver",
                                                            name = "approver",
                                                            label = T("Approved By"),
                                                            fields = [("", "person_id"),
                                                                      ("", "title"),
                                                                      ],
                                                            readonly = True,
                                                            ))
            if r.method == "read" or \
               r.record.workflow_status in (3, 4, 5) or \
               not auth.s3_has_permission("update",
                                          r.table,
                                          record_id = req_id,
                                          ):
                # Read-only form:
                # only show the transport type if transport was requested
                if r.record.transport_req:
                    transport_type = True
                else:
                    transport_type = False
            else:
                # Update form
                transport_type = True
        else:
            # Create form
            transport_type = True
        if transport_type:
            # Filtered components
            s3db.add_components("inv_req",
                                inv_req_tag = ({"name": "transport_type",
                                                "joinby": "req_id",
                                                "filterby": {"tag": "transport_type"},
                                                "multiple": False,
                                                },
                                               ),
                                )
            # Individual settings for specific tag components
            # (transport_opts comes from the enclosing template scope)
            components_get = s3db.resource(tablename).components.get
            transport_type = components_get("transport_type")
            f = transport_type.table.value
            f.requires = IS_EMPTY_OR(IS_IN_SET(transport_opts))
            f.represent = S3Represent(options = transport_opts)
            f.widget = S3GroupedOptionsWidget(options = transport_opts,
                                              multiple = False,
                                              cols = 4,
                                              sort = False,
                                              )
            insert_index = crud_fields.index("transport_req") + 1
            crud_fields.insert(insert_index, ("", "transport_type.value"))
            # Only show the transport type when transport is requested
            import json
            SEPARATORS = (",", ":")
            s3 = current.response.s3
            s3.jquery_ready.append('''S3.showHidden('%s',%s,'%s')''' % \
                ("transport_req", json.dumps(["sub_transport_type_value"], separators=SEPARATORS), "inv_req"))
        crud_form = S3SQLCustomForm(*crud_fields)
        s3db.configure(tablename,
                       crud_form = crud_form,
                       )
        set_method = s3db.set_method
        # Custom Request Form
        set_method("inv", "req",
                   method = "form",
                   action = PrintableShipmentForm,
                   )
        from s3 import S3OptionsFilter
        filter_widgets = [S3OptionsFilter("workflow_status",
                                          cols = 3,
                                          ),
                          ]
        list_fields = ["req_ref",
                       "date",
                       "site_id",
                       (T("Details"), "details"),
                       "workflow_status",
                       #"commit_status",
                       "transit_status",
                       "fulfil_status",
                       ]
        if LOGS_ADMIN and not MINE:
            # Extra columns for Logistics staff viewing all Requests
            list_fields.insert(2, "date_required")
            list_fields.insert(4, "requester_id")
            #filter_widgets += [
            #    ]
        s3db.add_custom_callback(tablename,
                                 "onaccept",
                                 inv_req_onaccept,
                                 )
        s3db.configure(tablename,
                       filter_widgets = filter_widgets,
                       list_fields = list_fields,
                       on_req_approve = on_req_approve,
                       on_req_approved = on_req_approved,
                       on_req_submit = on_req_submit,
                       )
    settings.customise_inv_req_resource = customise_inv_req_resource
# -------------------------------------------------------------------------
    def customise_inv_req_controller(**attr):
        """
        Custom controller for Inventory Requisitions:
        - on the req_item tab, prompt Approvers to match items and show
          site/PO columns once the Request is submitted/approved
        """
        s3 = current.response.s3
        # Custom prep
        standard_prep = s3.prep
        def custom_prep(r):
            # Call standard prep
            # NOTE(review): result is only bound when standard_prep is
            # callable - assumed always true for this controller
            if callable(standard_prep):
                result = standard_prep(r)
                if not result:
                    return False
            if r.component_name == "req_item":
                s3db = current.s3db
                workflow_status = r.record.workflow_status
                if workflow_status == 2: # Submitted for Approval
                    show_site_and_po = True
                    # Are we a Logistics Approver?
                    from s3db.inv import inv_req_approvers
                    approvers = inv_req_approvers(r.record.site_id)
                    person_id = current.auth.s3_logged_in_person()
                    if person_id in approvers and approvers[person_id]["matcher"]:
                        # Have we already approved?
                        atable = s3db.inv_req_approver_req
                        query = (atable.req_id == r.id) & \
                                (atable.person_id == person_id)
                        approved = current.db(query).select(atable.id,
                                                            limitby = (0, 1),
                                                            )
                        if not approved:
                            # Allow User to Match
                            settings.req.prompt_match = True
                elif workflow_status == 3: # Approved
                    show_site_and_po = True
                else:
                    show_site_and_po = False
                if show_site_and_po:
                    # Show in read-only form
                    r.component.table.site_id.readable = True
                    # Show in list_fields
                    oitable = s3db.inv_order_item
                    def inv_order_item_represent(record_id):
                        """
                        Probably few enough Request Items not to need an S3Represent sub-class
                        """
                        if record_id == None:
                            return T("Not being Purchased")
                        else:
                            order_item = current.db(oitable.id == record_id).select(oitable.purchase_ref,
                                                                                    limitby = (0, 1),
                                                                                    ).first()
                            return order_item.purchase_ref or T("Not yet entered")
                    oitable.id.represent = inv_order_item_represent
                    order_label = T("%(PO)s Number") % \
                        {"PO": settings.get_proc_shortname()}
                    list_fields = ["item_id",
                                   "item_pack_id",
                                   "site_id",
                                   (order_label, "order_item.id"),
                                   "quantity",
                                   "quantity_transit",
                                   "quantity_fulfil",
                                   ]
                    r.component.configure(list_fields = list_fields)
            return result
        s3.prep = custom_prep
        return attr
    settings.customise_inv_req_controller = customise_inv_req_controller
# -------------------------------------------------------------------------
def inv_req_item_onaccept(form):
"""
Update realm's affiliations if site_id changes
"""
if form.record:
# Update form
form_vars_get = form.vars.get
if form_vars_get("site_id") != form.record.site_id:
# Item has been Requested from a specific site so this needs to be affiliated to the realm
db = current.db
table = db.inv_req
record = db(table.id == form_vars_get("req_id")).select(table.id,
table.site_id,
)
# Update affiliations
current.auth.get_realm_entity(table, record)
# -------------------------------------------------------------------------
def customise_inv_req_item_resource(r, tablename):
current.s3db.add_custom_callback(tablename,
"onaccept",
inv_req_item_onaccept,
)
settings.customise_inv_req_item_resource = customise_inv_req_item_resource
# -------------------------------------------------------------------------
def customise_supply_item_category_resource(r, tablename):
from s3db.supply import supply_ItemCategoryRepresent
s3db = current.s3db
table = s3db.supply_item_category
#root_org = current.auth.root_org_name()
#if root_org == HNRC:
# Not using Assets Module
table.can_be_asset.readable = table.can_be_asset.writable = False
table.parent_item_category_id.represent = supply_ItemCategoryRepresent(show_catalog = False,
use_code = False,
)
settings.customise_supply_item_category_resource = customise_supply_item_category_resource
# -------------------------------------------------------------------------
def customise_supply_item_resource(r, tablename):
from s3db.supply import supply_ItemCategoryRepresent
s3db = current.s3db
table = s3db.supply_item
table.brand.readable = table.brand.writable = False
table.model.readable = table.model.writable = False
table.year.readable = table.year.writable = False
table.length.readable = table.length.writable = False
table.width.readable = table.width.writable = False
table.height.readable = table.height.writable = False
table.item_category_id.represent = supply_ItemCategoryRepresent(show_catalog = False,
use_code = False,
)
if r.tablename == tablename:
# Brand & Year not used
r.resource.configure(filter_widgets = None)
settings.customise_supply_item_resource = customise_supply_item_resource
# =============================================================================
class PrintableShipmentForm(S3Method):
""" REST Method Handler for Printable Shipment Forms """
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Entry point for REST interface.
@param r: the S3Request instance
@param attr: controller attributes
@note: always returns PDF, disregarding the requested format
"""
output = {}
if r.http == "GET":
if r.id:
tablename = r.tablename
if tablename == "inv_req":
output = self.request_form(r, **attr)
elif tablename == "inv_send":
output = self.waybill(r, **attr)
elif tablename == "inv_recv":
output = self.goods_received_note(r, **attr)
else:
# Not supported
r.error(405, current.ERROR.BAD_METHOD)
else:
# Record ID is required
r.error(400, current.ERROR.BAD_REQUEST)
else:
r.error(405, current.ERROR.BAD_METHOD)
return output
# -------------------------------------------------------------------------
    def request_form(self, r, **attr):
        """
        Request Form

        @param r: the S3Request instance
        @param attr: controller attributes

        @return: the PDF document
        """
        T = current.T
        s3db = current.s3db
        # Master record (=inv_req)
        resource = s3db.resource(r.tablename,
                                 id = r.id,
                                 components = ["req_item"],
                                 )
        # Columns and data for the form header
        header_fields = ["req_ref",
                         "date",
                         "date_required",
                         (T("Deliver to"), "site_id"),
                         (T("Reason for Request"), "purpose"),
                         "requester_id",
                         "site_id$site_id:inv_warehouse.contact",
                         "comments",
                         ]
        header_data = resource.select(header_fields,
                                      start = 0,
                                      limit = 1,
                                      represent = True,
                                      show_links = False,
                                      raw_data = True,
                                      )
        if not header_data:
            r.error(404, current.ERROR.BAD_RECORD)
        # Generate PDF header
        pdf_header = self.request_form_header(header_data)
        # Filename from req_ref
        header_row = header_data.rows[0]
        pdf_filename = header_row["_row"]["inv_req.req_ref"]
        # Component (=req_item)
        component = resource.components["req_item"]
        body_fields = ["item_id",
                       "item_pack_id",
                       "quantity",
                       "comments",
                       ]
        # Aggregate methods and column names
        aggregate = [("sum", "inv_req_item.quantity"),
                     ]
        # Generate the JSON data dict
        json_data = self._json_data(component,
                                    body_fields,
                                    aggregate = aggregate,
                                    )
        # Generate the grouped items table
        from s3 import S3GroupedItemsTable
        output = S3GroupedItemsTable(component,
                                     data = json_data,
                                     totals_label = T("Total"),
                                     title = T("Logistics Requisition"),
                                     pdf_header = pdf_header,
                                     pdf_footer = self.request_form_footer,
                                     )
        # ...and export it as PDF
        return output.pdf(r, filename=pdf_filename)
# -------------------------------------------------------------------------
    @classmethod
    def request_form_header(cls, data):
        """
        Header for Request Forms

        @param data: the S3ResourceData for the inv_req
        """
        row = data.rows[0]
        labels = dict((rfield.colname, rfield.label) for rfield in data.rfields)
        def row_(left, right):
            return cls._header_row(left, right, row=row, labels=labels)
        from gluon import DIV, H2, H4, TABLE, TD, TH, TR, P
        T = current.T
        # Get organisation name and logo
        from .layouts import OM
        name, logo = OM().render()
        # The title
        title = H2(T("Logistics Requisition"))
        # Request details
        dtable = TABLE(
                    TR(TD(DIV(logo, H4(name)), _colspan = 2),
                       TD(DIV(title), _colspan = 2),
                       ),
                    row_("inv_req.req_ref", None),
                    row_("inv_req.date", "inv_req.date_required"),
                    row_("inv_req.site_id", "inv_req.purpose"),
                    row_("inv_req.requester_id", "inv_warehouse.contact"),
                    )
        # Request comments
        ctable = TABLE(TR(TH(T("Comments"))),
                       TR(TD(row["inv_req.comments"])),
                       )
        return DIV(dtable, P(" "), ctable)
# -------------------------------------------------------------------------
    @staticmethod
    def request_form_footer(r):
        """
        Footer for Request Forms: signature table for the Requester
        and each configured Approver of the target Site

        @param r: the S3Request
        """
        from gluon import TABLE, TD, TH, TR
        from s3db.pr import pr_PersonRepresent
        from s3db.inv import inv_req_approvers
        T = current.T
        header = TR(TH(" "),
                    TH(T("Name")),
                    TH(T("Signature")),
                    TH(T("Date")),
                    )
        record = r.record
        requester = record.requester_id
        approvers = inv_req_approvers(record.site_id)
        # Bulk-represent all the names in a single lookup
        person_ids = [requester] + list(approvers)
        names = pr_PersonRepresent().bulk(person_ids)
        signature_rows = [TR(TH(T("Requester")),
                             TD(names[requester]),
                             )]
        append = signature_rows.append
        for approver in approvers:
            append(TR(TH(approvers[approver]["title"]),
                      TD(names[approver]),
                      ))
        return TABLE(header,
                     *signature_rows
                     )
# -------------------------------------------------------------------------
    def waybill(self, r, **attr):
        """
        Waybill

        @param r: the S3Request instance
        @param attr: controller attributes

        @return: the PDF document
        """
        T = current.T
        s3db = current.s3db
        # Component declarations to distinguish between the
        # origin and destination warehouses
        s3db.add_components("inv_send",
                            inv_warehouse = ({"name": "origin",
                                              "joinby": "site_id",
                                              "pkey": "site_id",
                                              "filterby": False,
                                              "multiple": False,
                                              },
                                             {"name": "destination",
                                              "joinby": "site_id",
                                              "pkey": "to_site_id",
                                              "filterby": False,
                                              "multiple": False,
                                              },
                                             ),
                            )
        # Master record (=inv_send)
        resource = s3db.resource(r.tablename,
                                 id = r.id,
                                 components = ["origin",
                                               "destination",
                                               "track_item",
                                               ],
                                 )
        # Columns and data for the form header
        header_fields = ["send_ref",
                         # @ToDo: Will need updating to use inv_send_req
                         #"req_ref",
                         "date",
                         "delivery_date",
                         (T("Origin"), "site_id"),
                         (T("Destination"), "to_site_id"),
                         "sender_id",
                         "origin.contact",
                         "recipient_id",
                         "destination.contact",
                         "transported_by",
                         "transport_ref",
                         (T("Delivery Address"), "destination.location_id"),
                         "comments",
                         ]
        header_data = resource.select(header_fields,
                                      start = 0,
                                      limit = 1,
                                      represent = True,
                                      show_links = False,
                                      raw_data = True,
                                      )
        if not header_data:
            r.error(404, current.ERROR.BAD_RECORD)
        # Generate PDF header
        pdf_header = self.waybill_header(header_data)
        # Filename from send_ref
        header_row = header_data.rows[0]
        pdf_filename = header_row["_row"]["inv_send.send_ref"]
        # Component (=inv_track_item)
        component = resource.components["track_item"]
        body_fields = [#"bin",
                       "layout_id",
                       "item_id",
                       "item_pack_id",
                       "quantity",
                       (T("Total Volume (m3)"), "total_volume"),
                       (T("Total Weight (kg)"), "total_weight"),
                       "supply_org_id",
                       "inv_item_status",
                       ]
        # Any extra fields needed for virtual fields
        component.configure(extra_fields = ["item_id$weight",
                                            "item_id$volume",
                                            ],
                            )
        # Aggregate methods and column names
        aggregate = [("sum", "inv_track_item.quantity"),
                     ("sum", "inv_track_item.total_volume"),
                     ("sum", "inv_track_item.total_weight"),
                     ]
        # Generate the JSON data dict
        json_data = self._json_data(component,
                                    body_fields,
                                    aggregate = aggregate,
                                    )
        # Generate the grouped items table
        from s3 import S3GroupedItemsTable
        output = S3GroupedItemsTable(component,
                                     data = json_data,
                                     totals_label = T("Total"),
                                     title = T("Waybill"),
                                     pdf_header = pdf_header,
                                     pdf_footer = self.waybill_footer,
                                     )
        # ...and export it as PDF
        return output.pdf(r, filename=pdf_filename)
# -------------------------------------------------------------------------
    @classmethod
    def waybill_header(cls, data):
        """
        Header for Waybills

        @param data: the S3ResourceData for the inv_send
        """
        row = data.rows[0]
        labels = dict((rfield.colname, rfield.label) for rfield in data.rfields)
        def row_(left, right):
            return cls._header_row(left, right, row=row, labels=labels)
        from gluon import DIV, H2, H4, TABLE, TD, TH, TR, P
        T = current.T
        # Get organisation name and logo
        from .layouts import OM
        name, logo = OM().render()
        # The title
        title = H2(T("Waybill"))
        # Waybill details
        dtable = TABLE(
                    TR(TD(DIV(logo, H4(name)), _colspan = 2),
                       TD(DIV(title), _colspan = 2),
                       ),
                    # @ToDo: Will need updating to use inv_send_req
                    row_("inv_send.send_ref", None
                         #"inv_send.req_ref",
                         ),
                    row_("inv_send.date", "inv_send.delivery_date"),
                    row_("inv_send.site_id", "inv_send.to_site_id"),
                    row_("inv_send.sender_id", "inv_send.recipient_id"),
                    row_("inv_origin_warehouse.contact",
                         "inv_destination_warehouse.contact",
                         ),
                    row_("inv_send.transported_by", "inv_send.transport_ref"),
                    row_("inv_destination_warehouse.location_id", None),
                    )
        # Waybill comments
        ctable = TABLE(TR(TH(T("Comments"))),
                       TR(TD(row["inv_send.comments"])),
                       )
        return DIV(dtable, P(" "), ctable)
# -------------------------------------------------------------------------
@staticmethod
def waybill_footer(r):
"""
Footer for Waybills
@param r: the S3Request
"""
T = current.T
from gluon import TABLE, TD, TH, TR
return TABLE(TR(TH(T("Shipment")),
TH(T("Date")),
TH(T("Function")),
TH(T("Name")),
TH(T("Signature")),
TH(T("Status")),
),
TR(TD(T("Sent by"))),
TR(TD(T("Transported by"))),
TR(TH(T("Received by")),
TH(T("Date")),
TH(T("Function")),
TH(T("Name")),
TH(T("Signature")),
TH(T("Status")),
),
TR(TD(" ")),
)
# -------------------------------------------------------------------------
    def goods_received_note(self, r, **attr):
        """
        GRN (Goods Received Note)

        @param r: the S3Request instance
        @param attr: controller attributes

        @return: the PDF document
        """
        T = current.T
        s3db = current.s3db
        # Master record (=inv_recv)
        resource = s3db.resource(r.tablename,
                                 id = r.id,
                                 components = ["track_item"],
                                 )
        # Columns and data for the form header
        header_fields = ["eta",
                         "date",
                         (T("Origin"), "from_site_id"),
                         (T("Destination"), "site_id"),
                         "sender_id",
                         "recipient_id",
                         "send_ref",
                         "recv_ref",
                         "comments",
                         ]
        header_data = resource.select(header_fields,
                                      start = 0,
                                      limit = 1,
                                      represent = True,
                                      show_links = False,
                                      raw_data = True,
                                      )
        if not header_data:
            r.error(404, current.ERROR.BAD_RECORD)
        # Generate PDF header
        pdf_header = self.goods_received_note_header(header_data)
        # Filename from recv_ref
        header_row = header_data.rows[0]
        pdf_filename = header_row["_row"]["inv_recv.recv_ref"]
        # Component (=inv_track_item)
        component = resource.components["track_item"]
        body_fields = [#"recv_bin",
                       "recv_bin_id",
                       "item_id",
                       "item_pack_id",
                       "recv_quantity",
                       (T("Total Volume (m3)"), "total_recv_volume"),
                       (T("Total Weight (kg)"), "total_recv_weight"),
                       "supply_org_id",
                       "inv_item_status",
                       ]
        # Any extra fields needed for virtual fields
        component.configure(extra_fields = ["item_id$weight",
                                            "item_id$volume",
                                            ],
                            )
        # Aggregate methods and column names
        aggregate = [("sum", "inv_track_item.recv_quantity"),
                     ("sum", "inv_track_item.total_recv_volume"),
                     ("sum", "inv_track_item.total_recv_weight"),
                     ]
        # Generate the JSON data dict
        json_data = self._json_data(component,
                                    body_fields,
                                    aggregate = aggregate,
                                    )
        # Generate the grouped items table
        from s3 import S3GroupedItemsTable
        output = S3GroupedItemsTable(component,
                                     data = json_data,
                                     totals_label = T("Total"),
                                     title = T("Goods Received Note"),
                                     pdf_header = pdf_header,
                                     pdf_footer = self.goods_received_note_footer,
                                     )
        # ...and export it as PDF
        return output.pdf(r, filename=pdf_filename)
# -------------------------------------------------------------------------
    @classmethod
    def goods_received_note_header(cls, data):
        """
        Header for Goods Received Notes

        @param data: the S3ResourceData for the inv_recv
        """
        row = data.rows[0]
        labels = dict((rfield.colname, rfield.label) for rfield in data.rfields)
        def row_(left, right):
            return cls._header_row(left, right, row=row, labels=labels)
        from gluon import DIV, H2, H4, TABLE, TD, TH, TR, P
        T = current.T
        # Get organisation name and logo
        from .layouts import OM
        name, logo = OM().render()
        # The title
        title = H2(T("Goods Received Note"))
        # GRN details
        dtable = TABLE(TR(TD(DIV(logo,
                                 H4(name),
                                 ),
                             _colspan = 2,
                             ),
                          TD(DIV(title),
                             _colspan = 2,
                             ),
                          ),
                       row_("inv_recv.eta", "inv_recv.date"),
                       row_("inv_recv.from_site_id", "inv_recv.site_id"),
                       row_("inv_recv.sender_id", "inv_recv.recipient_id"),
                       row_("inv_recv.send_ref", "inv_recv.recv_ref"),
                       )
        # GRN comments
        ctable = TABLE(TR(TH(T("Comments"))),
                       TR(TD(row["inv_recv.comments"])),
                       )
        return DIV(dtable, P(" "), ctable)
# -------------------------------------------------------------------------
@staticmethod
def goods_received_note_footer(r):
    """
        Footer for Goods Received Notes: signature table for the
        deliverer and the receiver

        @param r: the S3Request
    """

    T = current.T
    from gluon import TABLE, TD, TH, TR

    def signature_row(label):
        # One header row of the signature block
        return TR(TH(T(label)),
                  TH(T("Date")),
                  TH(T("Function")),
                  TH(T("Name")),
                  TH(T("Signature")),
                  TH(T("Status")),
                  )

    # NOTE(review): the two spacer rows differ (T(" ") vs " ") in the
    # original — preserved as-is to keep output identical
    return TABLE(signature_row("Delivered by"),
                 TR(TD(T(" "))),
                 signature_row("Received by"),
                 TR(TD(" ")),
                 )
# -------------------------------------------------------------------------
@staticmethod
def _header_row(left, right, row=None, labels=None):
    """
        Helper function to generate a 2-column table row
        for the PDF header

        @param left: the column name for the left column
        @param right: the column name for the right column,
                      or None for an empty column
        @param row: the S3ResourceData row
        @param labels: dict of labels {colname: label}
    """

    from gluon import TD, TH, TR

    if not right:
        # Single label/value pair spanning the full row width
        return TR(TH(labels[left]),
                  TD(row[left],
                     _colspan = 3,
                     ),
                  )

    # Two label/value pairs side by side
    return TR(TH(labels[left]),
              TD(row[left]),
              TH(labels[right]),
              TD(row[right]),
              )
# -------------------------------------------------------------------------
@staticmethod
def _json_data(component, list_fields, aggregate=None):
    """
        Extract, group and aggregate the data for the form body

        @param component: the component (S3Resource)
        @param list_fields: the columns for the form body
                            (list of field selectors)
        @param aggregate: aggregation methods and fields,
                          a list of tuples (method, column name)
    """

    # Extract the data
    data = component.select(list_fields,
                            limit = None,
                            raw_data = True,
                            represent = True,
                            show_links = False,
                            )

    # Column names (in extraction order) and their labels
    columns = [rfield.colname for rfield in data.rfields]
    labels = {rfield.colname: rfield.label for rfield in data.rfields}

    # Group and aggregate the items
    from s3 import S3GroupedItems
    grouped = S3GroupedItems(data.rows,
                             aggregate = aggregate,
                             )

    # Convert into JSON-serializable dict for S3GroupedItemsTable
    return grouped.json(fields = columns,
                        labels = labels,
                        as_dict = True,
                        )
# END =========================================================================
|
flavour/eden
|
modules/templates/RMS/config.py
|
Python
|
mit
| 296,245
|
[
"VisIt"
] |
c6c902abd338c8431581456d0acfaa79a2ff2e229a2f32d5bb1f2d2ac4eb0bb5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.